Tensorflow32 "TensorFlow Combat" note -05 TensorFlow realize convolutional neural Network code

01 Simple Convolutional Network
# "TensorFlow Combat" TensorFlow realize convolution neural network # WIN10 Tensorflow1.0.1 python3.5.3 # CUDA v8.0 cudnn-8.0-windows10-x64-v5.1 # Filen ame:sz05.01.py # Simple convolution network from tensorflow.examples.tutorials.mnist import input_data import tensorflow as tf mnist = Input_ Data.read_data_sets ("mnist_data/", one_hot=true) Sess = tf. InteractiveSession () def weight_variable (shape): initial = Tf.truncated_normal (shape, stddev=0.1) return TF. Variable (initial) def bias_variable (shape): initial = Tf.constant (0.1, Shape=shape) return TF. Variable (initial) def conv2d (X, W): Return tf.nn.conv2d (x, W, strides = [1, 1, 1, 1], padding = "SAME") def Max_pool _2X2 (x): Return Tf.nn.max_pool (x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME") x = Tf.placeholder ( Tf.float32, [None, 784]) Y_ = Tf.placeholder (Tf.float32, [None,]) X_image = Tf.reshape (x, [-1, 1]) W_conv1 = W Eight_variable ([5, 5, 1,,]) B_CONV1 = Bias_variable ([o]) h_conv1 = Tf.nn.relu (conv2d (X_image, W_CONV1) + B_cONV1) H_pool1 = max_pool_2x2 (h_conv1) w_conv2 = Weight_variable ([5, 5, ()]) B_conv2 = bias_variable ([+]) H_conv2 = t 
F.nn.relu (conv2d (h_pool1, w_conv2) + b_conv2) H_pool2 = max_pool_2x2 (h_conv2) w_fc1 = weight_variable ([7 * 7 * 64, 1024]) B_FC1 = Bias_variable ([1024]) H_pool2_flat = Tf.reshape (H_pool2, [-1, 7*7*64]) H_fc1 = Tf.nn.relu (Tf.matmul (H_pool2_flat , W_FC1) + b_fc1) Keep_prob = Tf.placeholder (tf.float32) H_fc1_drop = Tf.nn.dropout (H_FC1, keep_prob) W_FC2 = Weight_var Iable ([1024]) B_FC2 = Bias_variable ([ten]) Y_conv = Tf.nn.softmax (Tf.matmul (H_fc1_drop, W_FC2) + b_fc2) cross_entropy = Tf.reduce_mean (-tf.reduce_sum (Y_ * Tf.log (Y_CONV), reduction_indices=[1])) Train_step = Tf.train.AdamOptimizer (1e-4 ). Minimize (cross_entropy) correct_prediction = Tf.equal (Tf.argmax (Y_conv, 1), Tf.argmax (Y_, 1)) accuracy = Tf.reduce_ Mean (Tf.cast (correct_prediction, Tf.float32)) Tf.global_variables_initializer (). Run () to I in range (20000): Batch = Mnist.train.next_batch (x) ifI% 1000 = = 0:train_accuracy = Accuracy.eval (feed_dict = {X:batch[0], Y_: batch[1], keep_prob:1.0}) PRI NT ("Step%d, training accuracy%g"% (I, train_accuracy)) Train_step.run (feed_dict = {X:batch[0], Y_: batch[1], keep_p rob:0.5}) Print ("Test accuracy%g"%accuracy.eval (Feed_dict={x:mnist.test.images, Y_: Mnist.test.labels, keep_prob:1.) 0}) "Step 0, training accuracy 0.04 Step 1000, training accuracy 0.96 step, training accuracy 0.92 ... step 16000 , training accuracy 0.98 step 17000, training accuracy 1 step 18000, training accuracy 1 step 19000, training accuracy 1 t EST accuracy 0.9918 ' "
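The 7 * 7 * 64 size of W_fc1 comes from the pooling: each "SAME"-padded 2x2 max pool halves the spatial resolution, so a 28x28 MNIST image shrinks to 14x14 and then to 7x7, while the second convolution emits 64 channels. A minimal sketch (not from the book's code) that checks the flattened size:

side = 28                             # MNIST images are 28x28
for _ in range(2):                    # two max_pool_2x2 layers
    side = (side + 1) // 2            # SAME padding, stride 2: halve, rounding up
channels = 64                         # output channels of the second conv layer
print(side, side * side * channels)   # -> 7 3136, i.e. 7 * 7 * 64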
02 CIFAR-10 Convolutional Network
# "TensorFlow Combat" TensorFlow realize convolution neural network # WIN10 Tensorflow1.0.1 python3.5.3 # CUDA v8.0 cudnn-8.0-windows10-x64-v5.1 # Filen ame:sz05.02.py # Cifar10 Convolution network # CIFAR10 Cifar10_input can get import tensorflow_models\tutorials\image\cifar10 from Cifar10, Cifar10_input Import TensorFlow as TF import numpy as NP import time Max_steps = 3000 batch_size = 128 # Http://www.cs.to Ronto.edu/~kriz/cifar-10-binary.tar.gz # to extract cifar-10-binary.tar.gz to cifar10_data/cifar10-10-batches-bin data_dir = " Cifar10_data/cifar10-10-batches-bin "def Variable_with_weight_loss" (Shape, StdDev, wl): var = tf. Variable (Tf.truncated_normal (shape, StdDev = StdDev)) If WL is not None:weight_loss = tf.multiply (Tf.nn.l2_lo SS (Var), WL, name= "Weight_loss") tf.add_to_collection (' losses ', Weight_loss) return var Cifar10.maybe_downloa D_and_extract () images_train, Labels_train = Cifar10_input.distorted_inputs (Data_dir=data_dir, Batch_size=batch_ Size) images_test, labels_test = cifar10_input.inputs (Eval_data = True, Data_dir=data_dir, batch_size=batch_size) Image_holder = Tf.placeholder (Tf.float32, [Batch_size, 3]) label _holder = Tf.placeholder (Tf.int32, [batch_size]) weight1 = Variable_with_weight_loss (Shape=[5, 5, 3,), StdDev = 5e-2, WL = 0.0) Kernel1 = tf.nn.conv2d (Image_holder, weight1, [1, 1, 1, 1], padding = ' SAME ') BIAS1 = tf. Variable (tf.constant (0.0, shape = [)) Conv1 = Tf.nn.relu (Tf.nn.bias_add (Kernel1, bias1)) Pool1 = Tf.nn.max_pool (conv1 , ksize = [1, 3, 3, 1], strides = [1, 2, 2, 1], padding = ' SAME ') Norm1 = TF.NN.LRN (pool1, 4, bias = 1.0, alpha = 0.001/ 9.0, beta = 0.75) weight2 = variable_with_weight_loss (Shape = [5, 5,, 5e-2, StdDev = wl) 0.0 = Kernel2 Nv2d (Norm1, Weight2, [1, 1, 1, 1], padding = ' SAME ') BIAS2 = tf. Variable (Tf.constant (0.1, shape = [)) Conv2 = Tf.nn.relu (Tf.nn.bias_add (Kernel2, bias2)) Norm2 = Tf.nn.lrn (conv2, 4, Bi As = 1.0, alpha = 0.001/9.0, beta = 0.75) pool2 = Tf.nn.max_pool (norm2, ksize = [1, 3, 3, 1], strides = [1, 2, 2, 1], padding = ' SAME ') reshape = Tf.reshape (pool2, [Batch_size,-1]) Dim = Reshape.get_shape () [1].value WEIGHT3 = Variable_with_weight_loss (Shape = [Dim, 384], StdDev = 0.04, wl = 0.004) BIAS3 = tf. Variable (Tf.constant (0.1, shape = [384])) Local3 = Tf.nn.relu (Tf.matmul (reshape, WEIGHT3) + bias3) Weight4 = Variable_wit H_weight_loss (shape = [384,), StdDev = 0.04, wl = 0.004) Bias4 = tf. Variable (0.1, shape = [Tf.constant]) Local4 = Tf.nn.relu (Tf.matmul (Local3, WEIGHT4) + bias4) Weight5 = Variable_with _weight_loss (shape = [StdDev,], 1/192.0 = wl = 0.0) BIAS5 = tf.
    Variable (tf.constant (0.0, shape = [ten])) Logits = Tf.add (Tf.matmul (Local4, WEIGHT5), BIAS5) def loss (logits, labels): Labels = tf.cast (labels, tf.int64) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits (logits = l ogits, labels = labels, name = ' Cross_entropy_per_example ') Cross_entropy_mean = Tf.reduce_mean (cross_entropy, name = ' Cross_entropy ') tf.add_to_collectiOn (' losses ', Cross_entropy_mean) return Tf.add_n (Tf.get_collection (' losses '), name = ' Total_loss ') loss = loss (logit S, label_holder) Train_op = Tf.train.AdamOptimizer (1e-3). Minimize (loss) Top_k_op = Tf.nn.in_top_k (Logits, Label_holder , 1) sess = tf. InteractiveSession () Tf.global_variables_initializer (). Run () tf.train.start_queue_runners () for step in range (max_ Steps): Start_time = Time.time () image_batch, Label_batch = Sess.run ([Images_train, Labels_train]) _, Loss_val UE = Sess.run ([Train_op, loss], feed_dict = {image_holder:image_batch, label_holder:label_batch}) Duration = Time.ti Me ()-start_time if step% = = 0:examples_per_sec = batch_size/duration Sec_per_batch = Float (du  Ration) format_str= (' step%d, loss =%.2f (%.1f examples/sec;%.3f sec/batch) ") Print (format_str% (step, Loss_value, Examples_per_sec, sec_per_batch)) Num_examples = 10000 import Math num_iter = Int (Math.ceil (num_examples/b atch_size)) True_couNT = 0 Total_sample_count = num_iter * Batch_size step = 0 While step < Num_iter:image_batch, Label_batch = SESS.R Un ([images_test, labels_test]) predictions = Sess.run ([top_k_op], feed_dict = {Image_holder:image_batch, label_holder : Label_batch}) True_count + + np.sum (predictions) Step = 1 predictions = true_count/total_sample_count print (' P Recision @ 1 =%.3f '% predictions) ' step 0, loss = 4.67 (6.0 examples/sec; 21.268 sec/batch) step, loss = 3.65 (773 .7 Examples/sec; 0.165 sec/batch) ... step 2970, loss = 0.95 (877.4 examples/sec; 0.146 sec/batch) step 2980, loss = 1.12 (862.6 examples/s Ec 0.148 Sec/batch) Step 2990, loss = 1.06 (967.1 examples/sec; 0.132 sec/batch) precision @ 1 = 0.705 ""
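The variable_with_weight_loss helper applies L2 weight decay indirectly: each penalty is stashed in a "losses" graph collection, and loss() later sums the collection together with the cross-entropy via tf.add_n. A minimal standalone sketch of that pattern under the same TensorFlow 1.x API (the tensors here are illustrative stand-ins, not from the book):

import tensorflow as tf

# Hypothetical weight matrix; its scaled L2 penalty goes into the collection.
w = tf.Variable(tf.truncated_normal([4, 2], stddev=0.1))
weight_loss = tf.multiply(tf.nn.l2_loss(w), 0.004, name="weight_loss")
tf.add_to_collection("losses", weight_loss)

# Stand-in constant for the cross-entropy term added inside loss().
data_loss = tf.constant(1.5, name="cross_entropy")
tf.add_to_collection("losses", data_loss)

# tf.add_n sums every entry in the collection into the total training loss.
total_loss = tf.add_n(tf.get_collection("losses"), name="total_loss")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(total_loss))   # data loss + 0.004 * L2(w)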
