Using a CNN in TensorFlow to classify the MNIST handwritten digit data set

import tensorflow as tf
import numpy as np
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, \
                     mnist.test.images, mnist.test.labels

# Reshape trX and teX above to [-1, 28, 28, 1]. -1 indicates that the number of
# input pictures is not fixed, 28x28 are the height and width of each picture in
# pixels, and 1 is the number of channels: MNIST pictures are black and white,
# so the channel is 1; an RGB color image would have 3 channels.
trX = trX.reshape(-1, 28, 28, 1)  # 28x28x1 input img
teX = teX.reshape(-1, 28, 28, 1)  # 28x28x1 input img
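To see what this reshape does, here is a minimal standalone sketch (plain NumPy, separate from the tutorial code): each flat 784-value row becomes one 28x28 single-channel image.

import numpy as np

flat = np.zeros((5, 784), dtype=np.float32)  # 5 flattened MNIST-style images
imgs = flat.reshape(-1, 28, 28, 1)           # -1 lets NumPy infer the batch size
print(imgs.shape)                            # (5, 28, 28, 1)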
Older ("float", [None, 1]) Y = Tf.placeholder ("float", [None,]) #初始化权重与定义网络结构. # Here, we're going to build a convolution neural network def init_weights (shape) with 3 convolution layers and 3 pool layers, followed by 1 full join layers and one output layer: return TF.  Variable (Tf.random_normal (Shape, stddev=0.01)) W = init_weights ([3, 3, 1, M]) # patch size is 3x3, input dimension is 1, output dimension is W2 = Init_weights ([3, 3,,]) # patch size is 3x3, input dimension is 32, output dimension is W3 = Init_weights ([3, 3,, 128]) # Patch Big Small for 3x3, input dimension of 64, output dimension is 128
w4 = init_weights([128 * 4 * 4, 625])  # fully connected layer: input dimension is 128x4x4
                                       # (the previous layer's output flattened to 1-D), output dimension is 625
w_o = init_weights([625, 10])          # output layer: input dimension is 625, output dimension is 10,
                                       # one per class (label)

# Neural network model constructor. The parameters are:
# X: input data
# w, w2, w3, w4, w_o: the weights of each layer
# p_keep_conv, p_keep_hidden: the fraction of neurons that dropout preserves
def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
    # First pair of convolution and pooling layers, then drop out some neurons
    l1a = tf.nn.relu(tf.nn.conv2d(X, w, strides=[1, 1, 1, 1], padding='SAME'))
    # l1a shape=(?, 28, 28, 32)
    l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # l1 shape=(?, 14, 14, 32)
    l1 = tf.nn.dropout(l1, p_keep_conv)

    # Second pair of convolution and pooling layers, then drop out some neurons
    l2a = tf.nn.relu(tf.nn.conv2d(l1, w2, strides=[1, 1, 1, 1], padding='SAME'))
    # l2a shape=(?, 14, 14, 64)
    l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # l2 shape=(?, 7, 7, 64)
    l2 = tf.nn.dropout(l2, p_keep_conv)

    # Third pair of convolution and pooling layers, then drop out some neurons
    l3a = tf.nn.relu(tf.nn.conv2d(l2, w3, strides=[1, 1, 1, 1], padding='SAME'))
    # l3a shape=(?, 7, 7, 128)
    l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # l3 shape=(?, 4, 4, 128)
    l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]])  # reshape to (?, 2048)
    l3 = tf.nn.dropout(l3, p_keep_conv)

    # Fully connected layer, then drop out some neurons
    l4 = tf.nn.relu(tf.matmul(l3, w4))
    l4 = tf.nn.dropout(l4, p_keep_hidden)

    # Output layer
    pyx = tf.matmul(l4, w_o)
    return pyx  # return the predictions

# We define the dropout placeholders p_keep_conv and p_keep_hidden, which give
# the fraction of neurons preserved in the convolution layers and in the hidden
# layer respectively, then build the network model to get the predictions.
p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)  # get the predictions

# Define the loss function: here we again use tf.nn.softmax_cross_entropy_with_logits
# to compare the predicted and true values, and take the mean over the batch.
# Define the training operation (train_op) with the RMSProp optimizer
# tf.train.RMSPropOptimizer, learning rate 0.001 and decay 0.9, minimizing the loss.
# Define the prediction operation (predict_op).
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)

# Define the batch size for training and the batch size for evaluation
batch_size = 128
test_size = 256
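The fully connected layer's input size of 128x4x4 comes from the pooling layers: each 2x2 max pool with stride 2 and SAME padding halves the spatial size, rounding up, so the 28x28 input shrinks to 14, then 7, then 4 across the three pooling layers. A small sketch of that arithmetic:

import math

# SAME-padded 2x2 max pooling with stride 2 halves each spatial dimension,
# rounding up: out = ceil(in / stride).
size = 28
for _ in range(3):
    size = math.ceil(size / 2)   # 28 -> 14 -> 7 -> 4
print(size, 128 * size * size)   # 4 2048, matching w4's input dimension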
# Launch the graph in a session, then begin training and evaluation
with tf.Session() as sess:
    # you need to initialize all variables
    tf.global_variables_initializer().run()

    for i in range(100):
        training_batch = zip(range(0, len(trX), batch_size),
                             range(batch_size, len(trX) + 1, batch_size))
        for start, end in training_batch:
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_conv: 0.8, p_keep_hidden: 0.5})

        test_indices = np.arange(len(teX))  # get a test batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX[test_indices],
                                                         p_keep_conv: 1.0,
                                                         p_keep_hidden: 1.0})))
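After training, predict_op can also be used on its own inside the same session, with both dropout keep probabilities set to 1.0 so that no neurons are dropped at inference time. A hypothetical example (an addition, not part of the original tutorial) that classifies the first test image:

    # Hypothetical usage inside the session above, after the training loop:
    label = sess.run(predict_op, feed_dict={X: teX[:1],
                                            p_keep_conv: 1.0,
                                            p_keep_hidden: 1.0})
    print("predicted digit:", label[0])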
 
