1. Install Ubuntu in VMware (installing Ubuntu by following other tutorials and then installing TensorFlow kept producing errors; following this guide's steps installed TensorFlow successfully). Click "Open Link". 2. Install TensorFlow (CPU version) on Ubuntu using pip. Click "Open Link". 3. Test with the MNIST dataset. The sample code raises errors as published; after applying the fixes found online it runs correctly. Click "Open Link". a) Softmax regression — modified code (saved as test.py on the desktop):
"""MNIST softmax-regression example (TensorFlow 1.x).

Trains a single-layer softmax classifier on MNIST with gradient descent
and prints the final test-set accuracy.
"""
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

# Import data (downloads MNIST into ./MNIST_data on first run).
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Create the model: y = x*W + b over flattened 28x28 images (784 features).
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, W) + b

# Define loss and optimizer. softmax_cross_entropy_with_logits expects raw
# logits, so no softmax is applied to y above.
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.InteractiveSession()
# global_variables_initializer replaces the deprecated
# initialize_all_variables used in older tutorials.
tf.global_variables_initializer().run()

# Train: 1000 steps of mini-batch SGD with batch size 100.
for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# Test trained model: fraction of samples whose argmax prediction matches.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy,
               feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
Run result. b) Convolutional network
Modified code (saved as testcon.py):
"""MNIST convolutional-network example (TensorFlow 1.x).

Two conv+max-pool layers, a 1024-unit fully connected layer with dropout,
and a softmax readout, trained with Adam. Prints training accuracy every
100 steps and the final test accuracy.
"""
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

# Import data (downloads MNIST into ./MNIST_data on first run).
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])

sess = tf.InteractiveSession()


def weight_variable(shape):
    """Weight tensor initialized with small truncated-normal noise."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    """Bias tensor initialized slightly positive to avoid dead ReLUs."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):
    """2-D convolution, stride 1, zero-padded to keep spatial size."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    """2x2 max pooling that halves each spatial dimension."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')


# First conv layer: 5x5 kernels, 1 input channel -> 32 feature maps.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])  # flat 784 -> NHWC image batch
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)  # 28x28 -> 14x14

# Second conv layer: 32 -> 64 feature maps.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)  # 14x14 -> 7x7

# Fully connected layer on the flattened 7*7*64 activations.
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout: keep_prob is fed at runtime (0.5 train, 1.0 eval).
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Readout layer producing raw logits for the 10 digit classes.
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# global_variables_initializer replaces the deprecated
# initialize_all_variables used in older tutorials.
sess.run(tf.global_variables_initializer())
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        # Evaluate on the current batch with dropout disabled.
        train_accuracy = accuracy.eval(
            feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

print("test accuracy %g" % accuracy.eval(
    feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
Run results: