Deeplearning.ai Course 4, Week 1: Implementing a Convolutional Neural Network in TensorFlow


1. Load the required modules and functions:

import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *

%matplotlib inline
np.random.seed(1)

2. Load and preprocess the data:

# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

X_train = X_train_orig / 255.
X_test = X_test_orig / 255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print("number of training examples = " + str(X_train.shape[0]))
print("number of test examples = " + str(X_test.shape[0]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))
conv_layers = {}
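
To sanity-check the loaded data, you can display one training example and its label (a minimal sketch; the index 6 is an arbitrary choice, and it assumes Y_train_orig has shape (1, m) as returned by the course's load_dataset in cnn_utils):

# Example of a picture (index chosen arbitrarily for illustration)
index = 6
plt.imshow(X_train_orig[index])
print("y = " + str(np.squeeze(Y_train_orig[:, index])))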

Second, define the model:
1. Define the placeholder creation function:

# GRADED FUNCTION: create_placeholders

def create_placeholders(n_H0, n_W0, n_C0, n_y):
    """
    Creates the placeholders for the TensorFlow session.

    Arguments:
    n_H0 -- scalar, height of an input image
    n_W0 -- scalar, width of an input image
    n_C0 -- scalar, number of channels of the input
    n_y -- scalar, number of classes

    Returns:
    X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype "float"
    Y -- placeholder for the input labels, of shape [None, n_y] and dtype "float"
    """

    ### START CODE HERE ### (≈2 lines)
    X = tf.placeholder(tf.float32, [None, n_H0, n_W0, n_C0])
    Y = tf.placeholder(tf.float32, [None, n_y])
    ### END CODE HERE ###

    return X, Y
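
A quick check of the placeholder shapes (a minimal sketch; the 64x64x3 input size and 6 classes match the SIGNS data loaded above):

X, Y = create_placeholders(64, 64, 3, 6)
print("X = " + str(X))   # Tensor with shape (?, 64, 64, 3)
print("Y = " + str(Y))   # Tensor with shape (?, 6)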

2. Define the initialization function:

# GRADED FUNCTION: initialize_parameters

def initialize_parameters():
    """
    Initializes weight parameters to build a neural network with TensorFlow. The shapes are:
                        W1: [4, 4, 3, 8]
                        W2: [2, 2, 8, 16]
    Returns:
    parameters -- a dictionary of tensors containing W1, W2
    """

    tf.set_random_seed(1)  # so that your "random" numbers match ours

    ### START CODE HERE ### (approx. 2 lines of code)
    W1 = tf.get_variable('W1', [4, 4, 3, 8], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    W2 = tf.get_variable('W2', [2, 2, 8, 16], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    ### END CODE HERE ###

    parameters = {"W1": W1,
                  "W2": W2}

    return parameters
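
To verify the initialization, you can evaluate the variables in a fresh session (a minimal sketch; sess_test is just a local name for the test session):

tf.reset_default_graph()
with tf.Session() as sess_test:
    parameters = initialize_parameters()
    init = tf.global_variables_initializer()
    sess_test.run(init)
    # Print a slice of each weight tensor to confirm the Xavier initialization ran
    print("W1[1,1,1] = " + str(parameters["W1"].eval()[1, 1, 1]))
    print("W2[1,1,1] = " + str(parameters["W2"].eval()[1, 1, 1]))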

3. Define the forward propagation function (up to the fully connected layer, with no activation function):

# GRADED FUNCTION: forward_propagation

def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model:
    CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "W2"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit
    """

    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    W2 = parameters['W2']

    ### START CODE HERE ###
    # CONV2D: stride of 1, padding 'SAME'
    Z1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='SAME')
    # RELU
    A1 = tf.nn.relu(Z1)
    # MAXPOOL: window 8x8, stride 8, padding 'SAME'
    P1 = tf.nn.max_pool(A1, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
    # CONV2D: filters W2, stride 1, padding 'SAME'
    Z2 = tf.nn.conv2d(P1, W2, strides=[1, 1, 1, 1], padding='SAME')
    # RELU
    A2 = tf.nn.relu(Z2)
    # MAXPOOL: window 4x4, stride 4, padding 'SAME'
    P2 = tf.nn.max_pool(A2, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
    # FLATTEN
    P2 = tf.contrib.layers.flatten(P2)
    # FULLY-CONNECTED without non-linear activation function (do not call softmax).
    # 6 neurons in output layer. Hint: one of the arguments should be "activation_fn=None"
    Z3 = tf.contrib.layers.fully_connected(P2, 6, activation_fn=None)
    ### END CODE HERE ###

    return Z3
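
The forward pass can be smoke-tested on random inputs (a minimal sketch; the batch of 2 random 64x64x3 "images" is made up purely for the test, so only the output shape is meaningful):

tf.reset_default_graph()
with tf.Session() as sess:
    np.random.seed(1)
    X, Y = create_placeholders(64, 64, 3, 6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    init = tf.global_variables_initializer()
    sess.run(init)
    # Feed two random images; Z3 should come back with shape (2, 6)
    a = sess.run(Z3, {X: np.random.randn(2, 64, 64, 3), Y: np.random.randn(2, 6)})
    print("Z3 = " + str(a))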

4. Define the cost computation:

# GRADED FUNCTION: compute_cost

def compute_cost(Z3, Y):
    """
    Computes the cost

    Arguments:
    Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
    Y -- "true" labels vector placeholder, same shape as Z3

    Returns:
    cost -- tensor of the cost function
    """

    ### START CODE HERE ### (1 line of code)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z3, labels=Y))
    ### END CODE HERE ###

    return cost
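
The cost can be checked end-to-end the same way (a minimal sketch on 4 random examples; the random labels are not one-hot, so the printed value is only a smoke test, not a meaningful loss):

tf.reset_default_graph()
with tf.Session() as sess:
    np.random.seed(1)
    X, Y = create_placeholders(64, 64, 3, 6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    cost = compute_cost(Z3, Y)
    init = tf.global_variables_initializer()
    sess.run(init)
    # A scalar cost should come back for the random batch
    a = sess.run(cost, {X: np.random.randn(4, 64, 64, 3), Y: np.random.randn(4, 6)})
    print("cost = " + str(a))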

5. Define the model. Creating the model should follow these steps:
Create placeholders
Initialize parameters
Forward Propagate
Compute the cost
Create an Optimizer

# GRADED FUNCTION: model

def model(X_train, Y_train, X_test, Y_test, learning_rate=0.009,
          num_epochs=100, minibatch_size=64, print_cost=True):
    """
    Implements a three-layer ConvNet in TensorFlow:
    CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED

    Arguments:
    X_train -- training set, of shape (None, 64, 64, 3)
    Y_train -- training labels, of shape (None, n_y = 6)
    X_test -- test set, of shape (None, 64, 64, 3)
    Y_test -- test labels, of shape (None, n_y = 6)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 5 epochs

    Returns:
    train_accuracy -- real number, accuracy on the train set (X_train)
    test_accuracy -- real number, testing accuracy on the test set (X_test)
    parameters -- parameters learnt by the model. They can then be used to predict.
    """

    ops.reset_default_graph()  # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)      # to keep results consistent (tensorflow seed)
    seed = 3                   # to keep results consistent (numpy seed)
    (m, n_H0, n_W0, n_C0) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []                 # to keep track of the cost

    # Create placeholders of the correct shape
    ### START CODE HERE ### (1 line)
    X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
    ### END CODE HERE ###

    # Initialize parameters
    ### START CODE HERE ### (1 line)
    parameters = initialize_parameters()
    ### END CODE HERE ###

    # Forward propagation: build the forward propagation in the tensorflow graph
    ### START CODE HERE ### (1 line)
    Z3 = forward_propagation(X, parameters)
    ### END CODE HERE ###

    # Cost function: add cost function to tensorflow graph
    ### START CODE HERE ### (1 line)
    cost = compute_cost(Z3, Y)
    ### END CODE HERE ###

    # Backpropagation: define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
    ### START CODE HERE ### (1 line)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    ### END CODE HERE ###

    # Initialize all the variables globally
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:

        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):

            minibatch_cost = 0.
            num_minibatches = int(m / minibatch_size)  # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # IMPORTANT: the line that runs the graph on a minibatch.
                # Run the session to execute the optimizer and the cost; the feed_dict should contain a minibatch for (X, Y).
                ### START CODE HERE ### (1 line)
                _, temp_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
                ### END CODE HERE ###

                minibatch_cost += temp_cost / num_minibatches

            # Print the cost every 5 epochs
            if print_cost == True and epoch % 5 == 0:
                print("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost == True and epoch % 1 == 0:
                costs.append(minibatch_cost)

        # Plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate = " + str(learning_rate))
        plt.show()

        # Calculate the correct predictions
        predict_op = tf.argmax(Z3, 1)
        correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(accuracy)
        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)

        return train_accuracy, test_accuracy, parameters

6. Train the model:

_, _, parameters = model(X_train, Y_train, X_test, Y_test)

7. Results: the cost is printed every 5 epochs, the cost curve is plotted, and the train and test accuracy are reported at the end of training.
