import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def add_layer(inputs, in_size, out_size, activation_function=None):
    # fully connected layer: softmax(Wx + b), optionally followed by an activation
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.nn.softmax(tf.matmul(inputs, Weights) + biases)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

# load the MNIST data set with one-hot labels
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
batch_size = 100
n_batch = mnist.train.num_examples // batch_size

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

L1 = add_layer(x, 784, 10, activation_function=tf.nn.relu)
prediction = add_layer(L1, 10, 10, activation_function=None)

# quadratic (mean squared error) cost
loss = tf.reduce_mean(tf.square(y - prediction))
# gradient descent
train_step = tf.train.GradientDescentOptimizer(1).minimize(loss)
# initialize the variables
init = tf.global_variables_initializer()

# store the comparison results in a list of booleans
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21000):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy " + str(acc))