The full code is attached directly below:
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


def xavier_init(fan_in, fan_out, constant=1):
    """Xavier/Glorot uniform initializer.

    Draws from U(-limit, limit) with limit = constant * sqrt(6 / (fan_in + fan_out)),
    which keeps activation variance roughly constant across layers.

    Args:
        fan_in: number of input units of the layer.
        fan_out: number of output units of the layer.
        constant: optional scaling factor for the limit (default 1).

    Returns:
        A (fan_in, fan_out) float32 tensor of uniformly distributed values.
    """
    limit = constant * np.sqrt(6.0 / (fan_in + fan_out))
    # shape must be passed as one tuple argument, not two positional ints
    return tf.random_uniform((fan_in, fan_out),
                             minval=-limit, maxval=limit, dtype=tf.float32)


class AdditiveGaussianNoiseAutoencoder(object):
    """Single-hidden-layer denoising autoencoder.

    Gaussian noise (scaled by a feed-able `scale` placeholder) is added to the
    input before encoding; the model is trained to reconstruct the clean input,
    minimizing 0.5 * sum((reconstruction - x)^2).
    """

    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(), scale=0.1):
        """Build the graph and start a session.

        Args:
            n_input: dimensionality of the input vectors.
            n_hidden: number of hidden units.
            transfer_function: activation applied to the hidden layer.
            optimizer: a tf.train optimizer instance used to minimize the cost.
            scale: standard-deviation multiplier of the additive input noise
                used during training.
        """
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        # Noise level is a placeholder so evaluation can feed a different value
        # than the one used for training.
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        self.weights = self._initialize_weights()

        # Encoder: corrupt the input with additive Gaussian noise, then project.
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(
            tf.matmul(self.x + self.scale * tf.random_normal((n_input,)),
                      self.weights['w1']),
            self.weights['b1']))
        # Decoder: linear reconstruction back to input space.
        self.reconstruction = tf.add(
            tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # Squared-error reconstruction cost (tf.subtract replaces deprecated tf.sub).
        self.cost = 0.5 * tf.reduce_sum(
            tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        # tf.global_variables_initializer replaces deprecated initialize_all_variables.
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        """Create the model parameters.

        Only the encoder weight matrix uses Xavier initialization; the decoder
        weights and both bias vectors start at zero, as in the original model.
        """
        all_weights = dict()
        all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(
            tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(
            tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(
            tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        """Run one optimization step on a batch X and return its cost."""
        cost, _ = self.sess.run(
            (self.cost, self.optimizer),
            feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    def calc_total_cost(self, X):
        """Return the reconstruction cost of X without training."""
        return self.sess.run(
            self.cost,
            feed_dict={self.x: X, self.scale: self.training_scale})

    def transform(self, X):
        """Return the hidden-layer representation (encoding) of X."""
        return self.sess.run(
            self.hidden,
            feed_dict={self.x: X, self.scale: self.training_scale})

    def generate(self, hidden=None):
        """Decode a hidden representation back to input space.

        If `hidden` is None, a single random code is sampled from a standard
        normal distribution.
        """
        if hidden is None:
            # BUG FIX: the original passed the b1 tf.Variable itself as `size=`,
            # which np.random.normal cannot interpret; sample a (1, n_hidden)
            # code vector instead.
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction,
                             feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        """Run the full encode/decode pass on X and return the reconstruction."""
        return self.sess.run(
            self.reconstruction,
            feed_dict={self.x: X, self.scale: self.training_scale})

    def getweights(self):
        """Return the current encoder weight matrix w1 as a numpy array."""
        return self.sess.run(self.weights['w1'])

    def getbiases(self):
        """Return the current encoder bias vector b1 as a numpy array."""
        return self.sess.run(self.weights['b1'])


def standard_scale(X_train, X_test):
    """Standardize both sets with statistics fitted on the training set only."""
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test


def get_random_block_from_data(data, batch_size):
    """Sample a random contiguous batch of `batch_size` rows from `data`."""
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]


if __name__ == '__main__':
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
    n_samples = int(mnist.train.num_examples)
    training_epochs = 20
    batch_size = 128
    display_step = 1  # typo fix: was "diaplay_step"

    autoencoder = AdditiveGaussianNoiseAutoencoder(
        n_input=784, n_hidden=200, transfer_function=tf.nn.softplus,
        optimizer=tf.train.AdamOptimizer(learning_rate=0.001), scale=0.01)

    for epoch in range(training_epochs):
        avg_cost = 0.0
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            batch_xs = get_random_block_from_data(X_train, batch_size)
            cost = autoencoder.partial_fit(batch_xs)
            # Weighted running average of the per-sample cost over the epoch.
            avg_cost += cost / n_samples * batch_size

        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1),
                  "cost=", "{:.9f}".format(avg_cost))

    print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
View Code
TensorFlow — Autoencoder