[UFLDL] Python implementation of multilayer neural networks


I finished writing the code last week, but I had not noticed a problem in the softmax-related part of the implementation, so the results were wrong. After the fix the results are correct: iterating 200 times over 200 images reaches an accuracy above 90%. The parameter settings have not yet been tuned. I am also considering multi-threaded acceleration, but there is still a problem there (it needs to be modified; use with caution).

Please refer to the previous article, http://blog.csdn.net/xuanyuansen/article/details/41214115, for the derivation.
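
For convenience, the core backpropagation equations from the UFLDL tutorial, which the linked article walks through, are (for output layer $n_l$ and hidden layers $l$):

$$\delta^{(n_l)} = -(y - a^{(n_l)}) \odot f'(z^{(n_l)}), \qquad \delta^{(l)} = \left((W^{(l)})^{T}\,\delta^{(l+1)}\right) \odot f'(z^{(l)}),$$

$$\nabla_{W^{(l)}} J = \delta^{(l+1)} \left(a^{(l)}\right)^{T}, \qquad \nabla_{b^{(l)}} J = \delta^{(l+1)}.$$

The code below stores activations as row vectors rather than column vectors, so these products appear transposed there, and the derivative factor at the output layer is dropped in calThetaNl because the output goes through a softmax.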

#coding=utf-8
'''
Created on 2014-11-15
@author: wangshuai13
'''
import numpy
#import matplotlib.pyplot as plt
import struct
import math
import random
import time
import threading

class MyThread(threading.Thread):
    def __init__(self, threadName, tAnn, idx_start, idx_end):
        threading.Thread.__init__(self, name=threadName)
        self.Ann = tAnn
        self.idx_start = idx_start
        self.idx_end = idx_end

    def run(self):
        # accumulate gradients and error over this thread's slice of the data;
        # the original passed 0 as the index here, which would pair each
        # segment's first image with label 0, so idx_start is used instead
        cDetaW, cDetaB, cError = self.Ann.backwardPropogation(self.Ann.trainData[self.idx_start], self.idx_start)
        for idx in range(self.idx_start+1, self.idx_end):
            DetaWtemp, DetaBtemp, errorTemp = self.Ann.backwardPropogation(self.Ann.trainData[idx], idx)
            cError += errorTemp
            #cDetaW += DetaWtemp
            #cDetaB += DetaBtemp
            for idx_w in range(0, len(cDetaW)):
                cDetaW[idx_w] += DetaWtemp[idx_w]
            for idx_b in range(0, len(cDetaB)):
                cDetaB[idx_b] += DetaBtemp[idx_b]
        return cDetaW, cDetaB, cError

def sigmoid(inX):
    return 1.0/(1.0+math.exp(-inX))

def softmax(inMatrix):
    m, n = numpy.shape(inMatrix)
    outMatrix = numpy.mat(numpy.zeros((m, n)))
    soft_sum = 0
    for idx in range(0, n):
        outMatrix[0, idx] = math.exp(inMatrix[0, idx])
        soft_sum += outMatrix[0, idx]
    for idx in range(0, n):
        outMatrix[0, idx] /= soft_sum
    return outMatrix

def tangenth(inX):
    return (1.0*math.exp(inX)-1.0*math.exp(-inX))/(1.0*math.exp(inX)+1.0*math.exp(-inX))

def difsigmoid(inX):
    # derivative of the sigmoid, expressed through the sigmoid itself
    return sigmoid(inX)*(1.0-sigmoid(inX))

def sigmoidMatrix(inputMatrix):
    m, n = numpy.shape(inputMatrix)
    outMatrix = numpy.mat(numpy.zeros((m, n)))
    for idx_m in range(0, m):
        for idx_n in range(0, n):
            outMatrix[idx_m, idx_n] = sigmoid(inputMatrix[idx_m, idx_n])
    return outMatrix

def loadMnistImage(absFilePathandName, dataNum=60000):
    # MNIST image file format: a 16-byte big-endian header
    # (magic, count, rows, columns) followed by one unsigned byte per pixel
    images = open(absFilePathandName, 'rb')
    buf = images.read()
    index = 0
    magic, numImages, numRows, numColumns = struct.unpack_from('>IIII', buf, index)
    print magic, numImages, numRows, numColumns
    index += struct.calcsize('>IIII')
    if magic != 2051:
        raise Exception
    dataSize = int(784*dataNum)
    dataBlock = ">" + str(dataSize) + "B"
    #nextmatrix = struct.unpack_from('>47040000B', buf, index)
    nextmatrix = struct.unpack_from(dataBlock, buf, index)
    nextmatrix = numpy.array(nextmatrix)/255.0
    #nextmatrix = nextmatrix.reshape(numImages, numRows, numColumns)
    nextmatrix = nextmatrix.reshape(dataNum, 1, numRows*numColumns)
    #for idx in range(0, numImages):
    #    test = nextmatrix[idx,:,:]
    #    print idx, numpy.shape(test)
    #im = struct.unpack_from('>784B', buf, index)
    #move = struct.calcsize('>784B')
    #print move
    #index += struct.calcsize('>784B')
    #im = numpy.array(im)
    #im = im.reshape(14,56)
    #row, col = numpy.shape(im)
    #print row, col
    #fig = plt.figure()
    #plotwindow = fig.add_subplot(111)
    #plt.imshow(im, cmap='gray')
    #plt.show()
    #nextsum = 59999*28*28
    #print nextsum
    #nextmatrix = struct.unpack_from('>47039216B', buf, index)
    #nextmatrix = numpy.array(nextmatrix)
    #nextmatrix = nextmatrix.reshape(59999,28,28)
    #for idx in range(1,59999):
    #    temp = nextmatrix[idx,:,:]
    #    plt.imshow(temp, cmap='gray')
    #    plt.show()
    #    print temp
    #for lines in images.readlines():
    #    print type(lines), lines
    return nextmatrix, numImages

def loadMnistLabels(absFilePathandName, dataNum=60000):
    # MNIST label file format: an 8-byte big-endian header
    # (magic, count) followed by one unsigned byte per label
    labels = open(absFilePathandName, 'rb')
    buf = labels.read()
    index = 0
    magic, numLabels = struct.unpack_from('>II', buf, index)
    print magic, numLabels
    index += struct.calcsize('>II')
    if magic != 2049:
        raise Exception
    dataBlock = ">" + str(dataNum) + "B"
    #nextmatrix = struct.unpack_from('>60000B', buf, index)
    nextmatrix = struct.unpack_from(dataBlock, buf, index)
    nextmatrix = numpy.array(nextmatrix)
    #for idx in range(0, numLabels):
    #    test = nextmatrix[idx]
    #    print idx, type(test), test
    return nextmatrix, numLabels
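
# Aside (an added reference sketch, not part of the original code): the
# softmax() above exponentiates its inputs directly, which can overflow
# math.exp() for large activations. In this network the softmax inputs are
# sigmoid outputs in (0, 1), so it is safe here, but the numerically stable
# form matters once softmax is applied to unbounded activations: subtract
# the row maximum first; softmax is shift-invariant, so the result is
# identical.
def softmaxStable(inMatrix):
    z = numpy.asarray(inMatrix, dtype=float).ravel()
    z = z - z.max()                 # guard against exp() overflow
    e = numpy.exp(z)
    return numpy.mat(e/e.sum())     # same 1-by-n matrix shape as softmax()
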
class MuiltiLayerAnn(object):
    #numOfNodesinHiddenLayers should be a list of int
    def __init__(self, numOfHiddenLayers, numOfNodesinHiddenLayers, inputDimension, outputDimension=1, maxIter=100):
        # note: the default for maxIter was lost in the source; 100 is a placeholder
        self.trainDataNum = 200
        self.decayRate = 0.2        # learning rate
        self.punishFactor = 0.05    # weight-decay (L2) coefficient
        self.eps = 0.00001          # convergence threshold on the error decrease
        self.numOfHL = numOfHiddenLayers
        self.Nl = int(numOfHiddenLayers+2)   # total layers, input and output included
        self.NodesinHidden = []
        for element in numOfNodesinHiddenLayers:
            self.NodesinHidden.append(int(element))
        #self.B = []
        self.inputDi = int(inputDimension)
        self.outputDi = int(outputDimension)
        self.maxIteration = int(maxIter)

    def setTrainDataNum(self, dataNum):
        self.trainDataNum = dataNum
        return

    def loadTrainData(self, absFilePathandName):
        self.trainData, self.TotalNumofTrainData = loadMnistImage(absFilePathandName, self.trainDataNum)
        #print self.trainData[1]
        return

    def loadTrainLabel(self, absFilePathandName):
        self.trainLabel, self.TotalNumofTrainLabels = loadMnistLabels(absFilePathandName, self.trainDataNum)
        if self.TotalNumofTrainLabels != self.TotalNumofTrainData:
            raise Exception
        return

    def initialWeights(self):
        #initial matrix
        #nodesinLayers is a list
        self.nodesinLayers = []
        self.nodesinLayers.append(int(self.inputDi))
        self.nodesinLayers += self.NodesinHidden
        self.nodesinLayers.append(int(self.outputDi))
        #weight matrix: a list in which each element is a numpy matrix
        #the weights are stored as Wij; in BP we can transpose them into Wji
        self.weightMatrix = []
        self.B = []
        for idx in range(0, self.Nl-1):
            #Xavier's scaling factor
            #X. Glorot, Y. Bengio. Understanding the difficulty of training
            #deep feedforward neural networks. AISTATS 2010.
            s = math.sqrt(6)/math.sqrt(self.nodesinLayers[idx]+self.nodesinLayers[idx+1])
            #s = random.uniform(self.nodesinLayers[idx], self.nodesinLayers[idx+1])*2.0*s-s
            tempMatrix = numpy.zeros((self.nodesinLayers[idx], self.nodesinLayers[idx+1]))
            for row_m in range(0, self.nodesinLayers[idx]):
                for col_m in range(0, self.nodesinLayers[idx+1]):
                    tempMatrix[row_m, col_m] = random.random()*2.0*s-s
            self.weightMatrix.append(numpy.mat(tempMatrix))
            self.B.append(numpy.mat(numpy.zeros((1, self.nodesinLayers[idx+1]))))
        return 0

    def printWeightMatrix(self):
        for idx in range(0, int(self.Nl)-1):
            print self.weightMatrix[idx]
            print self.B[idx]
        return 0

    def forwardPropogation(self, singleDataInput, currentDataIdx):
        #self.tempUseData = inputData
        Ztemp = []
        #Ztemp.append(numpy.mat(inputData)*self.weightMatrix[0]+self.B[0])
        Ztemp.append(numpy.mat(singleDataInput)*self.weightMatrix[0]+self.B[0])
        Atemp = []
        #print Ztemp
        for idx in range(1, self.Nl-1):
            Atemp.append(sigmoidMatrix(Ztemp[idx-1]))
            Ztemp.append(Atemp[idx-1]*self.weightMatrix[idx]+self.B[idx])
        #print Ztemp
        Atemp.append(sigmoidMatrix(Ztemp[self.Nl-2]))
        #store the temp error computed by FP
        outLabels = numpy.mat(numpy.zeros((1, self.outputDi)))
        outLabels[0, int(self.trainLabel[currentDataIdx])] = 1.0
        ##########for test#####################
        #print Atemp[self.Nl-2]
        #errorMat = Atemp[self.Nl-2]-outLabels
        #softmax
        errorMat = softmax(Atemp[self.Nl-2])-outLabels
        errorSum = 0.0
        for idx in range(0, self.outputDi):
            errorSum += 0.5*((errorMat[0, idx])*(errorMat[0, idx]))
        return Atemp, Ztemp, errorSum
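
    # Shape walk-through for the forward pass above, assuming a hypothetical
    # 784-64-10 configuration (one hidden layer of 64 units; illustration only):
    #   singleDataInput: (1, 784) row vector
    #   weightMatrix[0]: (784, 64), B[0]: (1, 64)  ->  Ztemp[0], Atemp[0]: (1, 64)
    #   weightMatrix[1]: (64, 10),  B[1]: (1, 10)  ->  Ztemp[1], Atemp[1]: (1, 10)
    # so Atemp[self.Nl-2] holds one score per digit class before the softmax.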
    def calThetaNl(self, aNl, y, zNl):
        # residual of the output layer; with the softmax output the
        # sigmoid-derivative factor below stays commented out
        thetaNl = aNl-y
        #print "error", thetaNl
        #################
        #for idx in range(0, self.outputDi):
        #    thetaNl[0, idx] = thetaNl[0, idx]*difsigmoid(zNl[0, idx])
        return thetaNl

    def backwardPropogation(self, singleDataInput, currentDataIdx):
        Atemp, Ztemp, tempError = self.forwardPropogation(numpy.mat(singleDataInput), currentDataIdx)
        #print 'single error', tempError
        #Theta is stored in reverse layer order
        Theta = []
        outLabels = numpy.mat(numpy.zeros((1, self.outputDi)))
        outLabels[0, int(self.trainLabel[currentDataIdx])] = 1.0
        #print outLabels
        thetaNl = self.calThetaNl(Atemp[self.Nl-2], outLabels, Ztemp[self.Nl-2])
        #print thetaNl
        Theta.append(thetaNl)
        for idx in range(1, self.Nl-1):
            inverseIdx = self.Nl-1-idx
            #print inverseIdx
            thetaLplus1 = Theta[idx-1]
            weightL = self.weightMatrix[inverseIdx]
            zL = Ztemp[inverseIdx-1]
            thetaL = thetaLplus1*weightL.transpose()
            #print "thetaL temp", thetaL
            row_theta, col_theta = numpy.shape(thetaL)
            if row_theta != 1:
                raise Exception
            #print col_theta
            for idx_col in range(0, col_theta):
                #print idx_col
                #print "dif", difsigmoid(zL[0, idx_col])
                thetaL[0, idx_col] = thetaL[0, idx_col]*difsigmoid(zL[0, idx_col])
            #print thetaL
            Theta.append(thetaL)
        #print Theta
        #DetaW and DetaB are also stored in reverse layer order
        DetaW = []
        DetaB = []
        for idx in range(0, self.Nl-2):
            inverse_idx = self.Nl-2-1-idx
            #######################################################
            #pay great attention to the dimensions of the matrices#
            #######################################################
            #dW = Theta[idx]*Atemp[inverse_idx].transpose()
            dW = Atemp[inverse_idx].transpose()*Theta[idx]
            #print dW
            dB = Theta[idx]
            DetaW.append(dW)
            DetaB.append(dB)
        DetaW.append(singleDataInput.transpose()*Theta[self.Nl-2])
        DetaB.append(Theta[self.Nl-2])
        #print "DetaW", DetaW
        #print "DetaB", DetaB
        return DetaW, DetaB, tempError

    def updatePara(self, DetaW, DetaB):
        #update parameters; DetaW/DetaB arrive in reverse layer order
        for idx in range(0, self.Nl-1):
            #print DetaW[idx]
            #print DetaB[idx]
            inverse_idx = self.Nl-1-1-idx
            self.weightMatrix[inverse_idx] -= self.decayRate*((1.0/self.trainDataNum)*DetaW[idx]+self.punishFactor*self.weightMatrix[inverse_idx])
            #self.weightMatrix[inverse_idx] -= self.decayRate*(DetaW[idx]+self.punishFactor*self.weightMatrix[inverse_idx])
            self.B[inverse_idx] -= self.decayRate*(1.0/self.trainDataNum)*DetaB[idx]
            #self.B[inverse_idx] -= self.decayRate*DetaB[idx]
        #print self.weightMatrix
        #print self.B

    def calPunish(self):
        # 0.5 * lambda * sum of squared weights: the L2 penalty added to the cost
        punishMent = 0.0
        for idx in range(0, self.Nl-1):
            temp = self.weightMatrix[idx]
            idx_m, idx_n = numpy.shape(temp)
            for i_m in range(0, idx_m):
                for i_n in range(0, idx_n):
                    punishMent += temp[i_m, i_n]*temp[i_m, i_n]
        return 0.5*self.punishFactor*punishMent
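
    # The update applied by updatePara() above, written out as formulas:
    #   W(l) := W(l) - alpha * ( (1/m) * DeltaW(l) + lambda * W(l) )
    #   b(l) := b(l) - alpha * (1/m) * DeltaB(l)
    # with alpha = decayRate, lambda = punishFactor, m = trainDataNum, and
    # calPunish() contributing the matching 0.5*lambda*||W||^2 term to the cost.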
    def trainAnn(self):
        error_old = 10000000000.0
        iter_idx = 0
        while iter_idx < self.maxIteration:
            print "iter num: ", iter_idx, "==============================="
            iter_idx += 1
            # full-batch gradient: accumulate DetaW/DetaB over every training sample
            cDetaW, cDetaB, cError = self.backwardPropogation(self.trainData[0], 0)
            for idx in range(1, self.trainDataNum):
                DetaWtemp, DetaBtemp, errorTemp = self.backwardPropogation(self.trainData[idx], idx)
                cError += errorTemp
                #cDetaW += DetaWtemp
                #cDetaB += DetaBtemp
                for idx_w in range(0, len(cDetaW)):
                    cDetaW[idx_w] += DetaWtemp[idx_w]
                for idx_b in range(0, len(cDetaB)):
                    cDetaB[idx_b] += DetaBtemp[idx_b]
            #print "error", cError
            cError /= self.trainDataNum
            cError += self.calPunish()
            print "old error", error_old
            print "new error", cError
            error_new = cError
            if error_old-error_new < self.eps:
                break
            error_old = error_new
            self.updatePara(cDetaW, cDetaB)
        return

    def trainAnnWithMultiThread(self):
        # WARNING: this is the problematic part mentioned at the top of the post.
        # Calling run() executes each worker synchronously in the current thread;
        # real threads would need start()/join() plus shared storage for results.
        # Also, cDetaW+cDetaW1+... concatenates the gradient lists rather than
        # summing them element-wise, and CPython's GIL limits any speed-up for
        # this pure-Python loop. Use with caution.
        error_old = 10000000000.0
        iter_idx = 0
        while iter_idx < self.maxIteration:
            print "iter num: ", iter_idx, "==============================="
            iter_idx += 1
            cDetaW, cDetaB, cError = self.backwardPropogation(self.trainData[0], 0)
            segNum = int(self.trainDataNum/3)
            work1 = MyThread('work1', self, 1, segNum)
            cDetaW1, cDetaB1, cError1 = work1.run()
            work2 = MyThread('work2', self, segNum, int(2*segNum))
            cDetaW2, cDetaB2, cError2 = work2.run()
            work3 = MyThread('work3', self, int(2*segNum), self.trainDataNum)
            cDetaW3, cDetaB3, cError3 = work3.run()
            while work1.isAlive() or work2.isAlive() or work3.isAlive():
                time.sleep(0.005)
                continue
            cDetaW = cDetaW+cDetaW1+cDetaW2+cDetaW3
            cDetaB = cDetaB+cDetaB1+cDetaB2+cDetaB3
            cError = cError+cError1+cError2+cError3
            cError /= self.trainDataNum
            cError += self.calPunish()
            print "old error", error_old
            print "new error", cError
            error_new = cError
            if error_old-error_new < self.eps:
                break
            error_old = error_new
            self.updatePara(cDetaW, cDetaB)
        return

    def getTrainAccuracy(self):
        accuracyCount = 0
        for idx in range(0, self.trainDataNum):
            Atemp, Ztemp, errorSum = self.forwardPropogation(self.trainData[idx], idx)
            trainPredict = Atemp[self.Nl-2]
            print trainPredict
            pList = trainPredict.tolist()
            # predicted class = index of the largest output activation
            labelPredict = pList[0].index(max(pList[0]))
            print "labelPredict", labelPredict
            print "trainLabel", self.trainLabel[idx]
            if int(labelPredict) == int(self.trainLabel[idx]):
                accuracyCount += 1
        print "accuracy: ", float(accuracyCount)/float(self.trainDataNum)
        return
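
The post stops at the class definition, so the driver below is a guess at the intended wiring rather than the author's own script: the MNIST file paths are placeholders, and the single hidden layer of 64 units is only an example configuration (the constructor takes the number of hidden layers, their sizes, and the input/output dimensions).

if __name__ == '__main__':
    # Hypothetical driver; paths and layer sizes are illustrative assumptions.
    ann = MuiltiLayerAnn(1, [64], 784, outputDimension=10, maxIter=200)
    ann.setTrainDataNum(200)                       # 200 images, as in the post
    ann.loadTrainData('train-images.idx3-ubyte')   # placeholder path
    ann.loadTrainLabel('train-labels.idx1-ubyte')  # placeholder path
    ann.initialWeights()
    ann.trainAnn()           # single-threaded training; the multi-threaded
                             # variant is flagged as problematic above
    ann.getTrainAccuracy()   # the post reports above 90% with these settings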

