import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/niu/mnist_data/", one_hot=False)

# Parameters
learning_rate = 0.01
training_epochs = 20   # value lost in the source text; 20 is the usual choice for this tutorial
batch_size = 256
display_step = 1
examples_to_show = 10

# Network parameters
n_input = 784  # MNIST data input (img shape: 28*28)

# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])

# Hidden layer settings
n_hidden_1 = 256  # value lost in the source text; 256 is the usual choice for this tutorial
n_hidden_2 = 128
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}

# Define the encoder
def encoder(x):
    # Encoder hidden layer with sigmoid activation #1
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
                                   biases['encoder_b1']))
    # Encoder hidden layer with sigmoid activation #2
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
                                   biases['encoder_b2']))
    return layer_2

# Define the decoder
def decoder(x):
    # Decoder hidden layer with sigmoid activation #1
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
                                   biases['decoder_b1']))
    # Decoder hidden layer with sigmoid activation #2
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
                                   biases['decoder_b2']))
    return layer_2

# Construct the model
encoder_op = encoder(X)           # compressed code (n_hidden_2 features)
decoder_op = decoder(encoder_op)  # reconstruction (784 features)

# Prediction
y_pred = decoder_op
# Targets (labels) are the input data
y_true = X

# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated
    total_batch = int(mnist.train.num_examples / batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)  # max(x) = 1, min(x) = 0
            # Run optimization op (backprop) and cost op (to get the loss value)
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c))
    print("Optimization finished!")

    # Apply encode and decode over the test set
    encode_decode = sess.run(
        y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
    # Compare original images with their reconstructions
    f, a = plt.subplots(2, examples_to_show, figsize=(10, 2))
    plt.title('Matplotlib, AE--Jason Niu')
    for i in range(examples_to_show):
        a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
    plt.show()
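Note that the script above targets the TensorFlow 1.x graph API, and the tensorflow.examples.tutorials.mnist reader it imports was removed in TensorFlow 2.x. As a minimal sketch of an alternative (assuming tf.keras.datasets is available in your install; the next_batch helper below is hypothetical and not part of the original script), the same flattened, [0, 1]-scaled MNIST arrays can be produced like this:

    import numpy as np
    import tensorflow as tf

    # Load MNIST through tf.keras and match the shape/scale fed to X above:
    # flat 784-element vectors with pixel values in [0, 1].
    (x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
    x_train = x_train.reshape(-1, 784).astype(np.float32) / 255.0
    x_test = x_test.reshape(-1, 784).astype(np.float32) / 255.0

    # Hypothetical stand-in for mnist.train.next_batch(batch_size):
    def next_batch(data, batch_size, rng=np.random.default_rng(0)):
        idx = rng.integers(0, len(data), size=batch_size)  # sample with replacement
        return data[idx]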
TF AE: an autoencoder on TensorFlow's built-in MNIST dataset, comparing the true digit images against the digits predicted by first running the encoder and then the decoder - Jason Niu
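To complement the visual side-by-side comparison described above with a numeric score, a small hypothetical helper (not part of the original script) can measure reconstruction quality; here originals would be mnist.test.images[:examples_to_show] and reconstructions the encode_decode array computed in the session:

    import numpy as np

    def reconstruction_mse(originals, reconstructions):
        """Average per-image mean squared error between inputs and reconstructions."""
        originals = np.asarray(originals, dtype=np.float32)
        reconstructions = np.asarray(reconstructions, dtype=np.float32)
        per_image = np.mean((originals - reconstructions) ** 2, axis=1)  # one MSE per image
        return float(np.mean(per_image))

A lower value means the decoder's predictions track the true digit images more closely, matching the cost the script minimizes during training.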