Example of a Python neural network

Source: Internet
Author: User

The first listing is a minimal two-layer network (input and output layer only), adapted from http://python.jobbole.com/82758/; the second adds a hidden layer and learns XOR.

# http://python.jobbole.com/82758/
import numpy as np

# sigmoid function
def nonlin(x, deriv=False):
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))

# input dataset
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])

# output dataset
y = np.array([[0, 0, 1, 1]]).T

# seed random numbers to make the calculation
# deterministic (just a good practice)
np.random.seed(1)

# initialize weights randomly with mean 0
syn0 = 2 * np.random.random((3, 1)) - 1

for iter in range(10000):
    # forward propagation
    l0 = X
    l1 = nonlin(np.dot(l0, syn0))

    # how much did we miss?
    l1_error = y - l1

    # multiply how much we missed by the
    # slope of the sigmoid at the values in l1
    l1_delta = l1_error * nonlin(l1, True)

    # update weights (backpropagation: w = w + l0.T dot l1_delta)
    syn0 += np.dot(l0.T, l1_delta)

print("Output after Training:")
print(l1)

The three-layer version:

import numpy as np

def nonlin(x, deriv=False):
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))

X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])

y = np.array([[0],
              [1],
              [1],
              [0]])

np.random.seed(1)

# randomly initialize our weights with mean 0
syn0 = 2 * np.random.random((3, 4)) - 1
syn1 = 2 * np.random.random((4, 1)) - 1

for j in range(60000):
    # feed forward through layers 0, 1, and 2
    l0 = X
    l1 = nonlin(np.dot(l0, syn0))
    l2 = nonlin(np.dot(l1, syn1))

    # how much did we miss the target value?
    l2_error = y - l2

    if (j % 10000) == 0:
        print("Error:" + str(np.mean(np.abs(l2_error))))

    # in what direction is the target value?
    # were we really sure? if so, don't change too much.
    l2_delta = l2_error * nonlin(l2, deriv=True)

    # how much did each l1 value contribute to the l2 error
    # (according to the weights)?
    l1_error = l2_delta.dot(syn1.T)

    # in what direction is the target l1?
    # were we really sure? if so, don't change too much.
    l1_delta = l1_error * nonlin(l1, deriv=True)

    syn1 += l1.T.dot(l2_delta)
    syn0 += l0.T.dot(l1_delta)

print("Output after Training:")
print(l2)
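One step in nonlin is easy to miss: when deriv=True it is called on values that have already passed through the sigmoid, so x * (1 - x) is the slope via the identity sigma'(z) = sigma(z) * (1 - sigma(z)). A quick numerical check of that identity (a standalone sketch; the sigmoid helper below is illustrative, not part of the listings above):

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

z = np.linspace(-3, 3, 7)
eps = 1e-6
analytic = sigmoid(z) * (1 - sigmoid(z))                      # the identity nonlin relies on
numeric = (sigmoid(z + eps) - sigmoid(z - eps)) / (2 * eps)   # central finite difference
print(np.max(np.abs(analytic - numeric)))                     # ~1e-11, so the identity holds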

1. On the nonlinear transformation (activation) function: a sigmoid function (S curve) is used as the activation function, either 1.1 the hyperbolic tangent (tanh) or 1.2 the logistic function.

2. Implementation of a simple neural network algorithm:

import numpy as np

def tanh(x):
    return np.tanh(x)

def tanh_deriv(a):
    # derivative of tanh, expressed in terms of the already-activated value a = tanh(z)
    return 1.0 - a * a

def logistic(x):
    return 1 / (1 + np.exp(-x))

def logistic_derivative(a):
    # derivative of the logistic function in terms of its output a = logistic(z)
    return a * (1 - a)

class NeuralNetwork:
    def __init__(self, layers, activation='tanh'):
        """
        :param layers: a list containing the number of units in each layer;
                       should contain at least two values
        :param activation: the activation function to be used;
                           can be "logistic" or "tanh"
        """
        if activation == 'logistic':
            self.activation = logistic
            self.activation_deriv = logistic_derivative
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_deriv = tanh_deriv

        self.weights = []
        for i in range(1, len(layers) - 1):
            # layers[i-1] + 1: nodes of the previous (input) layer plus one bias node
            # layers[i] + 1: nodes of the current layer plus one bias node
            self.weights.append((2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
        self.weights.append((2 * np.random.random((layers[i] + 1, layers[i + 1])) - 1) * 0.25)

    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        X = np.atleast_2d(X)  # make sure the training set is two-dimensional
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        temp[:, 0:-1] = X  # add the bias unit to the input layer
        X = temp
        y = np.array(y)

        for k in range(epochs):
            i = np.random.randint(X.shape[0])  # pick one training sample at random
            a = [X[i]]

            # len(self.weights) is the number of weight matrices;
            # self.weights[l] connects layer l to layer l+1
            for l in range(len(self.weights)):  # go forward through the network, layer by layer
                # compute the node values of each layer (o_i) with the activation
                # function; a[l] holds the input values of that layer
                # print(a[l]); print(self.weights[l])  # uncomment to trace the forward pass
                a.append(self.activation(np.dot(a[l], self.weights[l])))

            error = y[i] - a[-1]  # compute the error at the top layer
            deltas = [error * self.activation_deriv(a[-1])]  # delta of the output layer

            # start backpropagation
            for l in range(len(a) - 2, 0, -1):  # begin at the second-to-last layer
                # compute the updated error (i.e. deltas) for each node,
                # going from the top layer back to the input layer
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(a[l]))
            deltas.reverse()

            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)

    def predict(self, x):
        x = np.array(x)
        temp = np.ones(x.shape[0] + 1)
        temp[0:-1] = x  # add the bias unit
        a = temp
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a

Testing it on a simple nonlinear data set:

# 1. Simple nonlinear relational data set test (XOR):
#
#    x:    y:
#    0 0   0
#    0 1   1
#    1 0   1
#    1 1   0
# from neuralnetwork import NeuralNetwork
import numpy as np

nn = NeuralNetwork([2, 2, 1], 'tanh')
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
nn.fit(X, y)
print("Simple nonlinear relational data set test (XOR)")
for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(i, nn.predict(i))

print("\nHandwritten digit recognition")
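For reference, the update that fit performs on each randomly chosen sample is ordinary stochastic backpropagation. Written out (the notation here is mine, not the article's: a_l for the activations stored in the list a, W_l for self.weights[l], f for the activation function, eta for learning_rate):

\delta_{\mathrm{out}} = (y - a_{\mathrm{out}})\, f'(a_{\mathrm{out}})
\delta_l = \left(\delta_{l+1} W_l^{\top}\right) \odot f'(a_l)
W_l \leftarrow W_l + \eta\, a_l^{\top} \delta_l

Note that f' is evaluated on the stored activations, which is why tanh_deriv and logistic_derivative above are written in terms of the already-activated values.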
# 2. Handwritten digit recognition:
#    each picture is 8x8 pixels
#    digits to identify: 0,1,2,3,4,5,6,7,8,9
import numpy as np
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split  # sklearn.cross_validation in old scikit-learn releases
# from neuralnetwork import NeuralNetwork

digits = load_digits()
X = digits.data
y = digits.target
X -= X.min()  # normalize the values to bring them into the range 0-1
X /= X.max()

nn = NeuralNetwork([64, 100, 10], 'logistic')
X_train, X_test, y_train, y_test = train_test_split(X, y)
labels_train = LabelBinarizer().fit_transform(y_train)
labels_test = LabelBinarizer().fit_transform(y_test)
print("start fitting")
nn.fit(X_train, labels_train, epochs=3000)

predictions = []
for i in range(X_test.shape[0]):
    o = nn.predict(X_test[i])
    predictions.append(np.argmax(o))
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
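To condense the confusion matrix into a single summary number, an accuracy score can be appended; a minimal sketch, reusing y_test and predictions from the run above:

from sklearn.metrics import accuracy_score

# fraction of test digits classified correctly
print("Accuracy:", accuracy_score(y_test, predictions))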
