This is code from a personal learning exercise. The results are not posted here, so you will need to run it yourself; it is provided for reference only. If you have questions, feel free to contact me privately — though as a beginner myself, I can only offer limited advice.
First page (run as a standalone script):
# -*- coding: utf-8 -*-
# Page 1: train a linear one-vs-rest classifier (SGD) on the first two iris
# features (sepal length / width) and plot the per-class decision lines.
from sklearn import datasets
# sklearn.cross_validation was removed in scikit-learn 0.20; use model_selection.
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn import metrics

iris = datasets.load_iris()
X_iris, y_iris = iris.data, iris.target
# print(X_iris.shape, y_iris.shape)
# print(X_iris[0], y_iris[0])

# Keep only the first two features so the problem is 2-D and plottable.
X, y = X_iris[:, :2], y_iris
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=33)
# print(X_train.shape, y_train.shape)

# Standardize to zero mean / unit variance; fit the scaler on the training
# set only so no test-set statistics leak into preprocessing.
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

# Scatter plot of the training set, one color per class.
colors = ['red', 'greenyellow', 'blue']
for i in range(len(colors)):
    xs = X_train[:, 0][y_train == i]
    ys = X_train[:, 1][y_train == i]
    plt.scatter(xs, ys, c=colors[i])
plt.legend(iris.target_names)
plt.xlabel('sepal length')
plt.ylabel('sepal width')
# plt.show()

clf = SGDClassifier()
clf.fit(X_train, y_train)
# print(clf.coef_)
# print(clf.intercept_)

# Each one-vs-rest boundary is the line
#   coef_[i, 0] * x + coef_[i, 1] * y + intercept_[i] = 0
# solved for y below.
x_min, x_max = X_train[:, 0].min() - .5, X_train[:, 0].max() + .5
y_min, y_max = X_train[:, 1].min() - .5, X_train[:, 1].max() + .5
xs = np.arange(x_min, x_max, 0.5)
fig, axes = plt.subplots(1, 3)
fig.set_size_inches(10, 6)
for i in [0, 1, 2]:
    axes[i].set_aspect('equal')
    axes[i].set_title('Class ' + str(i) + ' versus the rest')
    axes[i].set_xlabel('sepal length')
    axes[i].set_ylabel('sepal width')
    axes[i].set_xlim(x_min, x_max)
    axes[i].set_ylim(y_min, y_max)
    plt.sca(axes[i])  # make axes[i] the current axes for the pyplot calls below
    plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.prism)
    ys = (-clf.intercept_[i] - xs * clf.coef_[i, 0]) / clf.coef_[i, 1]
    plt.plot(xs, ys)  # the `hold=` keyword was removed in matplotlib 3.0
plt.show()

# print(clf.predict(scaler.transform([[4.7, 3.1]])))
# print(clf.decision_function(scaler.transform([[4.7, 3.1]])))
y_train_pred = clf.predict(X_train)
# print(metrics.accuracy_score(y_train, y_train_pred))
y_pred = clf.predict(X_test)
# print(metrics.accuracy_score(y_test, y_pred))
# print(metrics.classification_report(y_test, y_pred, target_names=iris.target_names))
print(metrics.confusion_matrix(y_test, y_pred))
The second page runs separately from the first; it repeats the data-loading setup from the first page and evaluates the same model with cross-validation.
# Page 2: evaluate the scaler + SGD classifier with 5-fold cross-validation.
# sklearn.cross_validation was removed in scikit-learn 0.20; use model_selection.
from sklearn.model_selection import cross_val_score, KFold
from sklearn.pipeline import Pipeline
from sklearn import preprocessing
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
from scipy.stats import sem
import numpy as np

iris = datasets.load_iris()
X_iris, y_iris = iris.data, iris.target
# Keep only sepal length / width, matching page 1.
X, y = X_iris[:, :2], y_iris

# Chain scaling and the classifier so each CV fold is standardized using
# only its own training split (no leakage across folds).
clf = Pipeline([('scaler', preprocessing.StandardScaler()),
                ('linear_model', SGDClassifier())])
# Old API was KFold(n_samples, n_folds, ...); the modern one takes n_splits only.
cv = KFold(n_splits=5, shuffle=True, random_state=33)
scores = cross_val_score(clf, X, y, cv=cv)
# print(scores)
def mean_score(scores):
    """Format cross-validation scores as 'Mean score: m (+/-s)'.

    m is the arithmetic mean of *scores* and s the standard error of the
    mean (scipy.stats.sem), both rounded to three decimal places.
    """
    return "Mean score: {0:.3f} (+/-{1:.3f})".format(np.mean(scores), sem(scores))
# Report the cross-validated mean accuracy with its standard error.
print(mean_score(scores))
Using a linear classifier to predict the species of iris (Python)