Regression: given a set of data points, we fit them with a straight line (called the best-fit line); this fitting process is called regression.
The purpose of logistic regression is to find the best-fit parameters of a nonlinear function, the sigmoid, sigmoid(z) = 1 / (1 + e^(-z)), and the fitting can be carried out with an optimization algorithm such as gradient ascent.
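For intuition, a minimal sketch of the decision rule (the weights here are made up for illustration, not learned): the classifier computes sigmoid(w . x) and predicts class 1 whenever the result exceeds 0.5, i.e. whenever w . x > 0.

from numpy import array, exp

def sigmoid(inX):
    return 1.0 / (1 + exp(-inX))

w = array([0.0, 1.0, -1.0])   # hypothetical weight vector (bias, w1, w2)
x = array([1.0, 2.0, 0.5])    # input with x0 = 1.0 standing in for the bias term
print(sigmoid(sum(w * x)))    # sigmoid(1.5) = 0.817... > 0.5, so predict class 1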
The full implementation is as follows:
"' Logistic regression Working Module '" From numpy import * def loaddataset (): Datamat = [];
Labelmat = [] fr = open (' testSet.txt ') for line in Fr.readlines (): Linearr = Line.strip (). Split () Datamat.append ([1.0, Float (linearr[0]), float (linearr[1)]) labelmat.append (int (linearr[2))) return Datamat,la Belmat def sigmoid (InX): Return 1.0/(1+exp (-inx)) def gradascent (Datamatin, classlabels): Datamatrix = Mat (Datam Atin) #convert to numpy matrix Labelmat = Mat (Classlabels). Transpose () #convert to numpy matrix M,n = Shape (datamatrix) Alpha = 0.001 maxcycles = weights = Ones ((n,1)) for K in range (Maxcycles): #heavy on matrix Operations h = sigmoid (datamatrix*weights) #matrix mult error = (labelmat-h) #vector Subtraction weights = weights + Alpha * datamatrix.transpose () * ERROR #matrix mult return W Eights def plotbestfit (weights): Import Matplotlib.pyplot as Plt datamat,labelmat=loaddataset () Dataarr = Array (datamat) n = shape (Dataarr) [0] X Cord1 = []; Ycord1 = [] Xcord2 = []; Ycord2 = [] for i in range (n): If int (labelmat[i]) = = = 1:xcord1.append (dataarr[i,1]); Ycord1.append (dataarr[i,2]) else:xcord2.append (dataarr[i,1]); Ycord2.append (dataarr[i,2]) FIG = plt.figure () ax = Fig.add_subplot (a) ax.scatter (Xcord1, Ycord1, s=30, c= ' r Ed ', marker= ' s ') Ax.scatter (Xcord2, Ycord2, s=30, c= ' green ') x = Arange ( -3.0, 3.0, 0.1) y = (-weights[0]-weigh ts[1]*x)/weights[2] Ax.plot (x, y) plt.xlabel (' X1 ');
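To see the batch version in action, train on the sample data and plot the resulting boundary. A minimal sketch, assuming testSet.txt sits in the working directory; since gradAscent returns an (n, 1) matrix, it is flattened with .getA() before plotting:

dataMat, labelMat = loadDataSet()
weights = gradAscent(dataMat, labelMat)
plotBestFit(weights.getA())   # .getA() converts the matrix to a plain array for indexing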
def stocGradAscent0(dataMatrix, classLabels):
    m, n = shape(dataMatrix)
    alpha = 0.01
    weights = ones(n)    # initialize to all ones
    for i in range(m):   # one pass over the data, one sample at a time
        h = sigmoid(sum(dataMatrix[i] * weights))  # h is a scalar here, not a vector
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights
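Unlike gradAscent, which multiplies the whole data matrix on every cycle, stocGradAscent0 updates the weights once per sample, so each update is cheap and the method scales to data that arrives incrementally. A quick usage sketch (the stochastic versions expect a NumPy array and return a flat weight vector, so no .getA() is needed):

dataMat, labelMat = loadDataSet()
w0 = stocGradAscent0(array(dataMat), labelMat)
plotBestFit(w0)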
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = shape(dataMatrix)
    weights = ones(n)    # initialize to all ones
    for j in range(numIter):
        dataIndex = list(range(m))
        for i in range(m):
            # alpha decreases with iteration but never reaches 0, thanks to the constant
            alpha = 4 / (1.0 + j + i) + 0.0001
            # pick a remaining sample at random so the updates are not periodic
            randIndex = int(random.uniform(0, len(dataIndex)))
            sample = dataIndex[randIndex]
            h = sigmoid(sum(dataMatrix[sample] * weights))
            error = classLabels[sample] - h
            weights = weights + alpha * error * dataMatrix[sample]
            del(dataIndex[randIndex])   # sample without replacement within each pass
    return weights

def classifyVector(inX, weights):
    prob = sigmoid(sum(inX * weights))
    if prob > 0.5:
        return 1.0
    else:
        return 0.0
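classifyVector simply thresholds the sigmoid output at 0.5, which is equivalent to checking the sign of w . x. A tiny illustration with made-up numbers (the weight vector here is hypothetical, not learned from any data):

w = array([4.0, 0.5, -0.6])                        # hypothetical weights
print(classifyVector(array([1.0, 1.2, 9.8]), w))   # w.x = -1.28, sigmoid < 0.5 -> 0.0
print(classifyVector(array([1.0, 2.0, 3.0]), w))   # w.x = 3.2,  sigmoid > 0.5 -> 1.0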
def colicTest():
    frTrain = open('horseColicTraining.txt'); frTest = open('horseColicTest.txt')
    trainingSet = []; trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):                          # 21 features per record
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))   # last column is the label
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 1000)
    errorCount = 0; numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):
            errorCount += 1
    errorRate = float(errorCount) / numTestVec
    print("the error rate of this test is: %f" % errorRate)
    return errorRate

def multiTest():
    numTests = 10; errorSum = 0.0
    for k in range(numTests):
        errorSum += colicTest()
    print("after %d iterations the average error rate is: %f" % (numTests, errorSum / float(numTests)))
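To run the full evaluation, assuming horseColicTraining.txt and horseColicTest.txt (tab-separated, 21 features plus a label per line) are in the working directory; because stocGradAscent1 visits samples in random order, the reported error rate varies from run to run, which is why multiTest averages over ten runs:

if __name__ == '__main__':
    multiTest()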