Handwritten digit recognition
Dataset: MNIST (50000 training images + 10000 test images)
Training method: stochastic (mini-batch) gradient descent
Code (Python 3):
"mnist_loader.py"
import pickle

import numpy as np


def load_data():
    """Load the pickled MNIST tuple (training, validation, test) from disk.

    Returns (training_data, validation_data, test_data) exactly as stored
    in the pickle; downstream code treats each as (images, labels) arrays.
    Security note: pickle must only be loaded from trusted files.
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open('e:/dl_python3/data/mnist.pkl', 'rb') as ff:
        # iso-8859-1 decoding lets a Python-2 pickle load under Python 3.
        training_data, validation_data, test_data = pickle.load(
            ff, encoding='iso-8859-1')
    return (training_data, validation_data, test_data)
def load_data_wrapper():
    """Reshape raw MNIST data into (input, target) pairs for the network.

    Returns (training_data, validation_data, test_data), where every input
    is a (784, 1) column vector.  Training targets are one-hot (10, 1)
    vectors (via vectorized_result); validation and test targets remain
    plain integer labels, as evaluate() expects.
    """
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = list(zip(training_inputs, training_results))
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = list(zip(validation_inputs, va_d[1]))
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = list(zip(test_inputs, te_d[1]))
    return (training_data, validation_data, test_data)
def vectorized_result(j):
    """Return a (10, 1) one-hot column vector with 1.0 at index ``j``.

    Used to turn an integer digit label 0-9 into the network's target
    output vector.
    """
    e = np.zeros((10, 1))  # original text garbled this as "(ten, 1)"
    e[j] = 1.0
    return e
"leoly_network.py"
import random

import numpy as np


def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1.0 / (1.0 + np.exp(-x))


def d_sigmoid(x):
    """Derivative of the sigmoid, expressed through sigmoid itself."""
    s = sigmoid(x)
    return s * (1.0 - s)


def d_cost(ans, y):
    """Derivative of the quadratic cost (ans - y)^2 w.r.t. ``ans``."""
    return 2.0 * (ans - y)


class Network(object):
    """Fully-connected feed-forward network trained by mini-batch SGD."""

    def __init__(self, size):
        """size: layer widths, e.g. [784, 30, 10] (input, hidden..., output)."""
        self.num = len(size)
        self.size = size
        # One bias column vector per non-input layer.
        self.bias = [np.random.randn(x, 1) for x in size[1:]]
        # weight[l] has shape (size[l+1], size[l]).
        self.weight = [np.random.randn(x, y)
                       for x, y in zip(size[1:], size[:-1])]

    def feed_forward(self, a):
        """Return the network output for input column vector ``a``."""
        for w, b in zip(self.weight, self.bias):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def backprop(self, x, y):
        """Return (nabla_w, nabla_b): cost gradients for one sample (x, y)."""
        n_w = [np.zeros(w.shape) for w in self.weight]
        n_b = [np.zeros(b.shape) for b in self.bias]
        # Forward pass, recording every pre-activation z and activation.
        ans = x
        all_ans = [x]
        all_z = []
        for w, b in zip(self.weight, self.bias):
            z = np.dot(w, ans) + b
            all_z.append(z)
            ans = sigmoid(z)
            all_ans.append(ans)
        # Output-layer error.
        delta = d_cost(all_ans[-1], y) * d_sigmoid(all_z[-1])
        n_w[-1] = np.dot(delta, all_ans[-2].transpose())
        n_b[-1] = delta
        # Propagate the error backwards through the hidden layers.
        # NOTE(review): the original text multiplied delta by a spurious
        # 2.0 here; the backprop recurrence has no such factor (the cost
        # derivative's factor 2 already enters via d_cost). Removed.
        for i in range(2, self.num):
            delta = (np.dot(self.weight[-i + 1].transpose(), delta)
                     * d_sigmoid(all_z[-i]))
            n_w[-i] = np.dot(delta, all_ans[-i - 1].transpose())
            n_b[-i] = delta
        return (n_w, n_b)

    def update(self, mini_batch, eta):
        """Apply one SGD step using the summed gradients of ``mini_batch``.

        NOTE(review): the gradient is summed, not averaged, over the batch,
        so the effective step scales with the batch size — confirm this is
        intended before changing eta.
        """
        n_w = [np.zeros(w.shape) for w in self.weight]
        n_b = [np.zeros(b.shape) for b in self.bias]
        for x, y in mini_batch:
            delta_n_w, delta_n_b = self.backprop(x, y)
            n_w = [nw + dnw for nw, dnw in zip(n_w, delta_n_w)]
            n_b = [nb + dnb for nb, dnb in zip(n_b, delta_n_b)]
        self.weight = [w - eta * nw for w, nw in zip(self.weight, n_w)]
        self.bias = [b - eta * nb for b, nb in zip(self.bias, n_b)]

    def evaluate(self, test_data):
        """Count test samples whose argmax output equals the integer label."""
        result = [(np.argmax(self.feed_forward(x)), y)
                  for (x, y) in test_data]
        return sum(int(pred == y) for (pred, y) in result)

    def solve(self, training_data, epoch, mini_batch_size, eta,
              test_data=None):
        """Train with mini-batch SGD for ``epoch`` full passes.

        training_data: list of (input, target) pairs; shuffled in place.
        test_data: optional list of (input, label) pairs; when given,
        accuracy is printed after every epoch.
        """
        n = len(training_data)
        if test_data:
            nn = len(test_data)
        for now_epoch in range(epoch):
            random.shuffle(training_data)
            # Slicing past the end is safe in Python, so no min() is needed.
            mini_batches = [training_data[i:i + mini_batch_size]
                            for i in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update(mini_batch, eta)
            if test_data:
                print('Epoch{0}:{1}/{2}'.format(
                    now_epoch, self.evaluate(test_data), nn))
            else:
                print('epoch{0}:complete!'.format(now_epoch))
"do.py"
import mnist_loader as ml
import leoly_network as ln

# Load MNIST and train a small fully-connected network on it.
training_data, validation_data, test_data = ml.load_data_wrapper()
# NOTE(review): the layer sizes and hyper-parameters were garbled in the
# source text ("[784, ten,]" and "solve(training_data, +, 0.3, ...)").
# [784, 30, 10] with 30 epochs and mini-batch size 10 matches the standard
# tutorial this code follows — confirm against the original article.
net = ln.Network([784, 30, 10])
net.solve(training_data, 30, 10, 0.3, test_data=test_data)