A VGGNet study from Section 6.3 of "TensorFlow in Action"

Source: Internet
Author: User

This is my rewrite of the code. It runs, but it overfits severely, and I am not sure how best to fix that (two possible mitigations are sketched after each script below).

# -*- coding: utf-8 -*-
"""
Created on Wed Dec 14:45:35 2017

@author: Administrator
"""
# Copyright The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
import matlab.engine

# Load the Yale B 32x32 face data through a MATLAB helper; data_imread_mse
# returns the train/test matrices and their one-hot label matrices.
data_name = 'yaleb_32x32.mat'
sele_num = 10
eng = matlab.engine.start_matlab()
t = eng.data_imread_mse(data_name, sele_num)
eng.quit()
# t = np.array(t)
train_ma = np.array(t[0]).astype(np.float32)
train_lab = np.array(t[1]).astype(np.int8)
test_ma = np.array(t[2]).astype(np.float32)
test_lab = np.array(t[3]).astype(np.int8)
num_fea = train_ma.shape[1]
num_class = train_lab.shape[1]
image_row = 32
image_column = 32

# Convolution layer: Xavier-initialized kernel, bias add, ReLU; the kernel
# and biases are appended to the parameter list p.
def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[kh, kw, n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation

# Fully connected layer
def fc_op(input_op, name, n_out, p):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[n_out], dtype=tf.float32), name='b')
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        p += [kernel, biases]
        return activation

# Max-pooling layer
def mpool_op(input_op, name, kh, kw, dh, dw):
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding='SAME',
                          name=name)

# The block comments below keep the book's assumption of a 224x224x3 input;
# here the input is 32x32x1, so the actual spatial sizes are smaller.
sess = tf.InteractiveSession()

# ---------- define input and output ----------
x = tf.placeholder(tf.float32, [None, num_fea])
y_ = tf.placeholder(tf.float32, [None, num_class])
x_image = tf.reshape(x, [-1, image_row, image_column, 1])
keep_prob = tf.placeholder(tf.float32)

# block 1 -- outputs 112x112x64
p = []
conv1_1 = conv_op(x_image, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dw=2, dh=2)

# block 2 -- outputs 56x56x128
conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)

# block 3 -- outputs 28x28x256
conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)

# block 4 -- outputs 14x14x512
conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)

# block 5 -- outputs 7x7x512
conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dw=2, dh=2)

# flatten
shp = pool5.get_shape()
flattened_shape = shp[1].value * shp[2].value * shp[3].value
resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

# fully connected
fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")
fc8 = fc_op(fc7_drop, name="fc8", n_out=num_class, p=p)

# softmax cross-entropy loss, Adam optimizer, and accuracy
predictions = tf.nn.softmax(fc8)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(predictions),
                                              reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.global_variables_initializer().run()
num_steps = 100  # the step count was garbled in the original post; 100 is a placeholder
for i in range(num_steps):
    train_accuracy = accuracy.eval(feed_dict={x: train_ma, y_: train_lab, keep_prob: 1.0})
    print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: train_ma, y_: train_lab, keep_prob: 0.8})
print("test accuracy %g" % accuracy.eval(feed_dict={x: test_ma, y_: test_lab, keep_prob: 1.0}))
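One way to attack the overfitting: the script already collects every kernel and bias in the list p, so it is cheap to add L2 weight decay on the kernels to the loss. The following is only a sketch of that idea, not code from the book; the decay factor 5e-4, the choice to skip the bias terms, and the names weight_decay, kernels, l2_loss, and total_loss are my assumptions. It would replace the train_step definition above, before the initializer runs:

# Sketch: L2 weight decay over the kernels collected in p.
# conv_op/fc_op append [kernel, biases] in pairs, so even indices are kernels.
# The factor 5e-4 is an assumed starting point, not a value from the post.
weight_decay = 5e-4
kernels = p[0::2]
l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in kernels])
total_loss = cross_entropy + weight_decay * l2_loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(total_loss)

Lowering keep_prob for the two dropout layers (say from 0.8 to 0.5 during training) is another knob the script already exposes.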

A second, more modular rewrite wraps the whole network definition in an inference_op function:

# -*- coding: utf-8 -*-
"""
Created on Wed Dec 15:40:44 2017

@author: Administrator
"""
import tensorflow as tf
import numpy as np
import matlab.engine

# Same data loading as in the first script.
data_name = 'yaleb_32x32.mat'
sele_num = 10
eng = matlab.engine.start_matlab()
t = eng.data_imread_mse(data_name, sele_num)
eng.quit()
# t = np.array(t)
train_ma = np.array(t[0]).astype(np.float32)
train_lab = np.array(t[1]).astype(np.int8)
test_ma = np.array(t[2]).astype(np.float32)
test_lab = np.array(t[3]).astype(np.int8)
num_fea = train_ma.shape[1]
num_class = train_lab.shape[1]
image_row = 32
image_column = 32

# Convolution, fully connected, and max-pooling helpers, identical to the
# first script.
def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[kh, kw, n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation

def fc_op(input_op, name, n_out, p):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[n_out], dtype=tf.float32), name='b')
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        p += [kernel, biases]
        return activation

def mpool_op(input_op, name, kh, kw, dh, dw):
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding='SAME',
                          name=name)

# The whole VGG-16 forward pass, wrapped in a single function.
def inference_op(input_op, keep_prob):
    p = []
    # assume input_op shape is 224x224x3 (here it is actually 32x32x1)
    # block 1 -- outputs 112x112x64
    conv1_1 = conv_op(input_op, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dw=2, dh=2)

    # block 2 -- outputs 56x56x128
    conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)

    # block 3 -- outputs 28x28x256
    conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)

    # block 4 -- outputs 14x14x512
    conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)

    # block 5 -- outputs 7x7x512
    conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dw=2, dh=2)

    # flatten
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

    # fully connected
    fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")
    fc8 = fc_op(fc7_drop, name="fc8", n_out=num_class, p=p)
    predictions = tf.nn.softmax(fc8)
    return predictions, fc8, p

# ---------- define input and output ----------
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, [None, num_fea])
y_ = tf.placeholder(tf.float32, [None, num_class])
x_image = tf.reshape(x, [-1, image_row, image_column, 1])
keep_prob = tf.placeholder(tf.float32)

predictions, fc8, p = inference_op(x_image, keep_prob)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(predictions),
                                              reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.global_variables_initializer().run()
num_steps = 100  # the step count is missing in the original post; 100 is a placeholder
for i in range(num_steps):
    train_accuracy = accuracy.eval(feed_dict={x: train_ma, y_: train_lab, keep_prob: 1.0})
    print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: train_ma, y_: train_lab, keep_prob: 0.8})
print("test accuracy %g" % accuracy.eval(feed_dict={x: test_ma, y_: test_lab, keep_prob: 1.0}))
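Both scripts also feed the entire training set at every step. Training on shuffled mini-batches is the usual practice, and the added gradient noise can itself act as a mild regularizer. Here is a minimal sketch, again my own suggestion rather than the book's code; the batch size of 32, the 1000 steps, and the lower keep_prob of 0.5 are assumed values:

# Sketch: mini-batch training loop replacing the full-batch loop above.
batch_size = 32
num_train = train_ma.shape[0]
for i in range(1000):
    # sample a random mini-batch without replacement
    idx = np.random.choice(num_train, batch_size, replace=False)
    train_step.run(feed_dict={x: train_ma[idx], y_: train_lab[idx], keep_prob: 0.5})
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: train_ma, y_: train_lab, keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
print("test accuracy %g" % accuracy.eval(feed_dict={x: test_ma, y_: test_lab, keep_prob: 1.0}))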

  

  
