TensorFlow WGAN-GP MNIST Image Generation


Generative adversarial networks (GANs) currently have very good applications in image generation and adversarial training. This article aims to be a simple TensorFlow WGAN-GP MNIST generation tutorial; the code used is very simple, and I hope we can learn from it together.
Environment used:
TensorFlow 1.2.0
GPU acceleration; a CPU also works, just slowly. You can shrink the batch size to train more comfortably on a good CPU, and change the image-saving code accordingly: my batch_size is 64 and the save_images grid parameter is [8, 8]; if batch_size = 16, change it to [4, 4].
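As a side note, the save grid only needs to tile batch_size images into a square. A minimal helper for picking it automatically (my own addition, not part of the original code; the name square_grid is hypothetical) could look like this:

import math

def square_grid(batch_size):
    # Side length of a square grid that holds exactly batch_size images.
    side = int(math.sqrt(batch_size))
    assert side * side == batch_size, "batch_size should be a perfect square"
    return [side, side]

# square_grid(64) -> [8, 8]; square_grid(16) -> [4, 4]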

The full code is as follows:

# coding: utf-8
import os
import numpy as np
import scipy.misc
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data  # as mnist_data


def conv2d(name, tensor, ksize, out_dim, stddev=0.01, stride=2, padding='SAME'):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [ksize, ksize, tensor.get_shape()[-1], out_dim], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv2d(tensor, w, [1, stride, stride, 1], padding=padding)
        b = tf.get_variable('b', [out_dim], 'float32', initializer=tf.constant_initializer(0.01))
        return tf.nn.bias_add(var, b)


def deconv2d(name, tensor, ksize, outshape, stddev=0.01, stride=2, padding='SAME'):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [ksize, ksize, outshape[-1], tensor.get_shape()[-1]], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv2d_transpose(tensor, w, outshape, strides=[1, stride, stride, 1], padding=padding)
        b = tf.get_variable('b', [outshape[-1]], 'float32', initializer=tf.constant_initializer(0.01))
        return tf.nn.bias_add(var, b)


def fully_connected(name, value, output_shape):
    with tf.variable_scope(name, reuse=None) as scope:
        shape = value.get_shape().as_list()
        w = tf.get_variable('w', [shape[1], output_shape], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=0.01))
        b = tf.get_variable('b', [output_shape], dtype=tf.float32,
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(value, w) + b


def relu(name, tensor):
    return tf.nn.relu(tensor, name)


def lrelu(name, x, leak=0.2):
    return tf.maximum(x, leak * x, name=name)


DEPTH = 28        # base channel count (value garbled in the source text; 28 matches the layer shapes below)
OUTPUT_SIZE = 28
batch_size = 64


def discriminator(name, inputs, reuse):
    with tf.variable_scope(name, reuse=reuse):
        output = tf.reshape(inputs, [-1, 28, 28, 1])
        output1 = conv2d('d_conv_1', output, ksize=5, out_dim=DEPTH)
        output2 = lrelu('d_lrelu_1', output1)

        output3 = conv2d('d_conv_2', output2, ksize=5, out_dim=2 * DEPTH)
        output4 = lrelu('d_lrelu_2', output3)

        output5 = conv2d('d_conv_3', output4, ksize=5, out_dim=4 * DEPTH)
        output6 = lrelu('d_lrelu_3', output5)

        # output7 = conv2d('d_conv_4', output6, ksize=5, out_dim=8*DEPTH)
        # output8 = lrelu('d_lrelu_4', output7)
        chanel = output6.get_shape().as_list()
        output9 = tf.reshape(output6, [batch_size, chanel[1] * chanel[2] * chanel[3]])
        output0 = fully_connected('d_fc', output9, 1)
        return output0


def generator(name, reuse=False):
    with tf.variable_scope(name, reuse=reuse):
        noise = tf.random_normal([batch_size, 128])  # .astype('float32')
        noise = tf.reshape(noise, [batch_size, 128], 'noise')
        output = fully_connected('g_fc_1', noise, 2 * 2 * 8 * DEPTH)
        output = tf.reshape(output, [batch_size, 2, 2, 8 * DEPTH], 'g_conv')

        output = deconv2d('g_deconv_1', output, ksize=5, outshape=[batch_size, 4, 4, 4 * DEPTH])
        output = tf.nn.relu(output)
        output = tf.reshape(output, [batch_size, 4, 4, 4 * DEPTH])

        output = deconv2d('g_deconv_2', output, ksize=5, outshape=[batch_size, 7, 7, 2 * DEPTH])
        output = tf.nn.relu(output)

        output = deconv2d('g_deconv_3', output, ksize=5, outshape=[batch_size, 14, 14, DEPTH])
        output = tf.nn.relu(output)

        output = deconv2d('g_deconv_4', output, ksize=5, outshape=[batch_size, OUTPUT_SIZE, OUTPUT_SIZE, 1])
        # output = tf.nn.relu(output)
        output = tf.nn.sigmoid(output)
        return tf.reshape(output, [-1, 784])


def save_images(images, size, path):
    # normalize the images
    img = (images + 1.0) / 2.0
    h, w = img.shape[1], img.shape[2]
    merge_img = np.zeros((h * size[0], w * size[1], 3))
    for idx, image in enumerate(images):
        i = idx % size[1]
        j = idx // size[1]
        merge_img[j * h:j * h + h, i * w:i * w + w, :] = image
    return scipy.misc.imsave(path, merge_img)


LAMBDA = 10   # gradient-penalty weight (value garbled in the source text; 10 is the standard WGAN-GP setting)
EPOCH = 40    # total epochs (value garbled in the source text; inferred from the "epoch >= 39" checkpoint condition)


def train():
    # print os.getcwd()
    with tf.variable_scope(tf.get_variable_scope()):
        # real_data = tf.placeholder(dtype=tf.float32, shape=[-1, OUTPUT_SIZE*OUTPUT_SIZE*3])
        path = os.getcwd()
        data_dir = path + "/train.tfrecords"  # in preparation for using your own dataset
        # print data_dir
        '''get data'''
        z = tf.placeholder(dtype=tf.float32, shape=[batch_size])  # build placeholder (unused; noise is sampled inside generator())
        real_data = tf.placeholder(tf.float32, shape=[batch_size, 784])

        with tf.variable_scope(tf.get_variable_scope()):
            fake_data = generator('gen', reuse=False)
            disc_real = discriminator('dis_r', real_data, reuse=False)
            disc_fake = discriminator('dis_r', fake_data, reuse=True)

        t_vars = tf.trainable_variables()
        d_vars = [var for var in t_vars if 'd_' in var.name]
        g_vars = [var for var in t_vars if 'g_' in var.name]

        '''calculate loss'''
        gen_cost = tf.reduce_mean(disc_fake)
        disc_cost = -tf.reduce_mean(disc_fake) + tf.reduce_mean(disc_real)

        # WGAN-GP gradient penalty on points interpolated between real and fake samples
        alpha = tf.random_uniform(shape=[batch_size, 1], minval=0., maxval=1.)
        differences = fake_data - real_data
        interpolates = real_data + (alpha * differences)
        gradients = tf.gradients(discriminator('dis_r', interpolates, reuse=True), [interpolates])[0]
        slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
        disc_cost += LAMBDA * gradient_penalty

        with tf.variable_scope(tf.get_variable_scope(), reuse=None):
            gen_train_op = tf.train.AdamOptimizer(
                learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=g_vars)
            disc_train_op = tf.train.AdamOptimizer(
                learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=d_vars)

        saver = tf.train.Saver()

        # os.environ['CUDA_VISIBLE_DEVICES'] = str(0)  # GPU environment
        # config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5  # use 50% of the GPU memory
        # sess = tf.InteractiveSession(config=config)
        sess = tf.InteractiveSession()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        if not os.path.exists('img'):
            os.mkdir('img')

        init = tf.global_variables_initializer()
        # init = tf.initialize_all_variables()
        sess.run(init)
        mnist = input_data.read_data_sets("data", one_hot=True)
        # mnist = mnist_data.read_data_sets("data", one_hot=True, reshape=False, validation_size=0)

        for epoch in range(1, EPOCH):
            idxs = 1000
            for iters in range(1, idxs):
                img, _ = mnist.train.next_batch(batch_size)
                # img2 = tf.reshape(img, [batch_size, 784])
                # five critic (discriminator) updates per generator update
                for x in range(0, 5):
                    _, d_loss = sess.run([disc_train_op, disc_cost], feed_dict={real_data: img})
                _, g_loss = sess.run([gen_train_op, gen_cost])
                # print "fake_data: %5f disc_real: %5f disc_fake: %5f" % (tf.reduce_mean(fake_data),
                #     tf.reduce_mean(disc_real), tf.reduce_mean(disc_fake))
                print("[%4d:%4d/%4d] d_loss: %.8f, g_loss: %.8f" % (epoch, iters, idxs, d_loss, g_loss))

            # save a grid of generated samples at the end of every epoch
            with tf.variable_scope(tf.get_variable_scope()):
                samples = generator('gen', reuse=True)
                samples = tf.reshape(samples, shape=[batch_size, 28, 28, 1])
                samples = sess.run(samples)
                save_images(samples, [8, 8], os.getcwd() + '/img/' + 'sample_%d_epoch.png' % (epoch))

            if epoch >= 39:
                checkpoint_path = os.path.join(os.getcwd(), 'my_wgan-gp.ckpt')
                saver.save(sess, checkpoint_path, global_step=epoch)
                print('********* model saved *********')

        coord.request_stop()
        coord.join(threads)
        sess.close()


if __name__ == '__main__':
    train()
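After training, the saved checkpoint can be reloaded to generate new samples without retraining. Below is a minimal sketch of how one might do this, not part of the original article: it assumes the generator, save_images, and batch_size definitions above are in scope (for example, appended to the same script and run in a fresh graph), and that training has already written my_wgan-gp.ckpt-* files into the working directory; the function name generate_from_checkpoint is hypothetical.

import os
import tensorflow as tf

def generate_from_checkpoint(checkpoint_dir='.'):
    # Rebuild only the generator graph; its variable names ('gen/...') match the checkpoint.
    with tf.variable_scope(tf.get_variable_scope()):
        samples = generator('gen', reuse=False)
        samples = tf.reshape(samples, [batch_size, 28, 28, 1])
    saver = tf.train.Saver()  # only generator variables exist in this graph, so only they are restored
    with tf.Session() as sess:
        ckpt = tf.train.latest_checkpoint(checkpoint_dir)  # e.g. ./my_wgan-gp.ckpt-39
        saver.restore(sess, ckpt)
        imgs = sess.run(samples)
        save_images(imgs, [8, 8], os.path.join(checkpoint_dir, 'restored_samples.png'))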

Generated results:

Samples generated after the first epoch.

Samples generated after the 39th epoch.

Experimental summary: I first tried DCGAN for this experiment, but no matter how I tuned it, it would not converge. DCGAN requires carefully balancing how much the generator and the discriminator are trained; I adjusted the learning rates several times and the results were still not ideal. After switching to WGAN-GP, training became much easier, and there is no need to worry about the training getting out of balance. It is very handy to use.
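For reference, the stability comes from the WGAN-GP critic objective of Gulrajani et al. (2017), which the script above implements (with an equivalent flipped sign convention for the critic):

L_D = E[D(fake)] - E[D(real)] + LAMBDA * E[(||grad_xhat D(xhat)||_2 - 1)^2],  where xhat = real + alpha * (fake - real), alpha ~ U[0, 1]

Because the gradient penalty keeps the critic approximately 1-Lipschitz, the critic can safely be trained harder than the generator (the code runs five critic updates per generator update), so there is no delicate generator/discriminator balancing act as with DCGAN.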
