Tensorflow13 "TensorFlow Practical Google Depth Learning framework" notes -06-02mnist LENET5 convolution neural Network Code

LeNet-5 convolutional neural network: forward propagation
# TensorFlow: Practical Google Deep Learning Framework, Chapter 06:
# image recognition and convolutional neural networks
# Win10, TensorFlow 1.0.1, Python 3.5.3
# CUDA v8.0, cudnn-8.0-windows10-x64-v5.1
# filename: LeNet5_infernece.py
# LeNet-5 forward propagation
import tensorflow as tf

# 1. Set the parameters of the neural network
INPUT_NODE = 784
OUTPUT_NODE = 10

IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10

CONV1_DEEP = 32
CONV1_SIZE = 5

CONV2_DEEP = 64
CONV2_SIZE = 5

FC_SIZE = 512

# 2. Define the forward-propagation process
def inference(input_tensor, train, regularizer):
    # Layer 1: 5x5 convolution, 1 input channel -> 32 feature maps.
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable(
            "bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights,
                             strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    # Layer 2: 2x2 max pooling, 28x28 -> 14x14.
    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding="SAME")

    # Layer 3: 5x5 convolution, 32 -> 64 feature maps.
    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
            "bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights,
                             strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # Layer 4: 2x2 max pooling, 14x14 -> 7x7, then flatten for the FC layers.
    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding='SAME')
        pool_shape = pool2.get_shape().as_list()
        nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
        reshaped = tf.reshape(pool2, [pool_shape[0], nodes])

    # Layer 5: fully connected layer with L2 regularization and optional dropout.
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable(
            "weight", [nodes, FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights are regularized.
        if regularizer != None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable(
            "bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    # Layer 6: fully connected output layer producing the class logits.
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
            "weight", [FC_SIZE, NUM_LABELS],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer != None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable(
            "bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    return logit
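Two rounds of SAME-padded 2x2 max pooling shrink the 28x28 input to 7x7, so with CONV2_DEEP = 64 feature maps the flattened vector fed to layer 5 has 7 * 7 * 64 = 3136 nodes. A minimal sanity-check sketch of the forward pass follows; the dummy batch size of 4 and the session setup are illustrative, not part of the book's files:

# Hypothetical shape check for inference(); the batch size of 4 is arbitrary.
import numpy as np
import tensorflow as tf
import LeNet5_infernece

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [4, 28, 28, 1], name='x-input')
    logits = LeNet5_infernece.inference(x, train=False, regularizer=None)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch = np.zeros((4, 28, 28, 1), dtype=np.float32)
        print(sess.run(logits, feed_dict={x: batch}).shape)  # expected: (4, 10)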
LeNet-5 convolutional neural network: training
# TensorFlow: Practical Google Deep Learning Framework, Chapter 06:
# image recognition and convolutional neural networks
# Win10, TensorFlow 1.0.1, Python 3.5.3
# CUDA v8.0, cudnn-8.0-windows10-x64-v5.1
# filename: LeNet5_train.py
# LeNet-5 training
import os
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import LeNet5_infernece

# 1. Define neural-network-related parameters
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 55000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "LeNet5_model/"  # the LeNet5_model subfolder must exist in the current directory
MODEL_NAME = "LeNet5_model"

# 2. Define the training process
def train(mnist):
    # Define the input as a 4-D placeholder.
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE,
        LeNet5_infernece.IMAGE_SIZE,
        LeNet5_infernece.IMAGE_SIZE,
        LeNet5_infernece.NUM_CHANNELS],
        name='x-input')
    y_ = tf.placeholder(tf.float32, [None, LeNet5_infernece.OUTPUT_NODE],
                        name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = LeNet5_infernece.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the loss function, learning rate, moving-average op, and training op.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs, (
                BATCH_SIZE,
                LeNet5_infernece.IMAGE_SIZE,
                LeNet5_infernece.IMAGE_SIZE,
                LeNet5_infernece.NUM_CHANNELS))
            _, loss_value, step = sess.run(
                [train_op, loss, global_step],
                feed_dict={x: reshaped_xs, y_: ys})

            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g."
                      % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)

# 3. Main program entry
def main(argv=None):
    mnist = input_data.read_data_sets("../../../datasets/MNIST_data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    main()

'''
...
After 49001 training step(s), loss on training batch is 0.589334.
After 50001 training step(s), loss on training batch is 0.601423.
After 51001 training step(s), loss on training batch is 0.639142.
After 52001 training step(s), loss on training batch is 0.610477.
After 53001 training step(s), loss on training batch is 0.58531.
After 54001 training step(s), loss on training batch is 0.626083.
'''
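With staircase=True, the learning rate drops by a factor of LEARNING_RATE_DECAY once per epoch, i.e. every mnist.train.num_examples / BATCH_SIZE = 55000 / 100 = 550 steps. A small pure-Python sketch of the schedule the optimizer follows; the helper name learning_rate_at is mine, for illustration only:

# Illustrative re-derivation of the staircase exponential-decay schedule above;
# constants mirror LeNet5_train, assuming the standard 55,000 MNIST training examples.
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
DECAY_STEPS = 55000 // 100  # 550: one decay per epoch at BATCH_SIZE = 100

def learning_rate_at(global_step):
    # staircase=True makes the exponent the integer number of completed epochs.
    return LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // DECAY_STEPS)

for step in (0, 550, 5500, 55000):
    print(step, learning_rate_at(step))
# 0 -> 0.01, 550 -> 0.0099, 5500 -> ~0.00904, 55000 -> ~0.00366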
LeNet-5 convolutional neural network: testing
# TensorFlow: Practical Google Deep Learning Framework, Chapter 06:
# image recognition and convolutional neural networks
# Win10, TensorFlow 1.0.1, Python 3.5.3
# CUDA v8.0, cudnn-8.0-windows10-x64-v5.1
# filename: LeNet5_eval.py
# LeNet-5 test
import time
import math
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import LeNet5_infernece
import LeNet5_train

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input as a 4-D placeholder; the whole test set is fed
        # as a single batch here.
        x = tf.placeholder(tf.float32, [
            mnist.test.num_examples,
            # LeNet5_train.BATCH_SIZE,
            LeNet5_infernece.IMAGE_SIZE,
            LeNet5_infernece.IMAGE_SIZE,
            LeNet5_infernece.NUM_CHANNELS],
            name='x-input')
        y_ = tf.placeholder(tf.float32, [None, LeNet5_infernece.OUTPUT_NODE],
                            name='y-input')
        validate_feed = {x: mnist.test.images, y_: mnist.test.labels}
        global_step = tf.Variable(0, trainable=False)

        regularizer = tf.contrib.layers.l2_regularizer(
            LeNet5_train.REGULARIZATION_RATE)
        y = LeNet5_infernece.inference(x, False, regularizer)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Restore the shadow (moving-average) values of the trained variables.
        variable_averages = tf.train.ExponentialMovingAverage(
            LeNet5_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # n = math.ceil(mnist.test.num_examples / LeNet5_train.BATCH_SIZE)
        n = math.ceil(mnist.test.num_examples / mnist.test.num_examples)
        for i in range(n):
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(LeNet5_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    xs, ys = mnist.test.next_batch(mnist.test.num_examples)
                    # xs, ys = mnist.test.next_batch(LeNet5_train.BATCH_SIZE)
                    reshaped_xs = np.reshape(xs, (
                        mnist.test.num_examples,
                        # LeNet5_train.BATCH_SIZE,
                        LeNet5_infernece.IMAGE_SIZE,
                        LeNet5_infernece.IMAGE_SIZE,
                        LeNet5_infernece.NUM_CHANNELS))
                    accuracy_score = sess.run(accuracy,
                                              feed_dict={x: reshaped_xs, y_: ys})
                    print("After %s training step(s), test accuracy = %g"
                          % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return

# Main program entry
def main(argv=None):
    mnist = input_data.read_data_sets("../../../datasets/MNIST_data", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    main()

'''
After 54001 training step(s), test accuracy = 0.9915
'''
