Python calculation of KL divergence
import numpy as np
import scipy.stats

x = [np.random.randint(1, 11) for i in range(10)]
print(x)
print(np.sum(x))
px = x / np.sum(x)  # normalization
print(px)

y = [np.random.randint(1, 11) for i in range(10)]
print(y)
print(np.sum(y))
py = y / np.sum(y)  # normalization
print(py)

# scipy's entropy function handles the non-normalized case by itself, so
# scipy.stats.entropy(x, y) and scipy.stats.entropy(px, py) give the same result
KL = scipy.stats.entropy(x, y)
print(KL)

# hand-written implementation
KL = 0.0
for i in range(10):
    KL += px[i] * np.log(px[i] / py[i])
print(KL)
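Both calls compute KL(P||Q) = Σ p_i * log(p_i / q_i). As a minimal self-contained check of the normalization point made in the comment above, here is a sketch with made-up counts (the arrays are mine, for illustration only):

import numpy as np
import scipy.stats

p_counts = np.array([2, 5, 3])  # unnormalized counts, invented for this check
q_counts = np.array([4, 4, 2])

# entropy() normalizes internally, so raw counts and probabilities agree
kl_raw = scipy.stats.entropy(p_counts, q_counts)
kl_norm = scipy.stats.entropy(p_counts / p_counts.sum(), q_counts / q_counts.sum())
print(kl_raw, kl_norm)
assert np.isclose(kl_raw, kl_norm)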
# TensorFlow neural network
# TensorFlow 1.x API: fit y = 0.1 * x + 0.3 with gradient descent
import sys; sys.path.append("/home/hxj/anaconda3/lib/python3.6/site-packages")
import tensorflow as tf
import numpy as np

x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3
print(x_data)
print(y_data)

Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))
y = Weights * x_data + biases
print(y)

loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(Weights), sess.run(biases))
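The snippet above targets the TensorFlow 1.x session API (tf.Session, tf.train.GradientDescentOptimizer). If you are on TensorFlow 2.x instead (an assumption about your setup, not something this post used), the same fit can be sketched with tf.GradientTape:

import numpy as np
import tensorflow as tf

x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

W = tf.Variable(tf.random.uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
opt = tf.keras.optimizers.SGD(learning_rate=0.5)

for step in range(201):
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(W * x_data + b - y_data))
    # compute gradients of the loss w.r.t. the two variables and apply them
    grads = tape.gradient(loss, [W, b])
    opt.apply_gradients(zip(grads, [W, b]))
    if step % 20 == 0:
        print(step, W.numpy(), b.numpy())  # W converges to 0.1, b to 0.3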
# Drawing 2D plots in Python
from functools import partial
import numpy
from matplotlib import pyplot

# Define a PDF
x_samples = numpy.arange(-3, 3.01, 0.01)
pdf = numpy.empty(x_samples.shape)
pdf[x_samples < 0] = numpy.round(x_samples[x_samples < 0] + 3.5) / 3
pdf[x_samples >= 0] = 0.5 * numpy.cos(numpy.pi * x_samples[x_samples >= 0]) + 0.5
pdf /= numpy.sum(pdf)

# Calculate approximated CDF
cdf = numpy.empty(pdf.shape)
cumulated = 0
for i in range(cdf.shape[0]):
    cumulated += pdf[i]
    cdf[i] = cumulated

# Generate samples by inverting the CDF with interpolation
generate = partial(numpy.interp, xp=cdf, fp=x_samples)
u_rv = numpy.random.random(10000)
x = generate(u_rv)

# Visualization: PDF on the left, histogram of the samples on the right
fig, (ax0, ax1) = pyplot.subplots(ncols=2, figsize=(9, 4))
ax0.plot(x_samples, pdf)
ax0.axis([-3.5, 3.5, 0, numpy.max(pdf) * 1.1])
ax1.hist(x, 100)
pyplot.show()
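Two details are worth spelling out: the accumulation loop is just a cumulative sum, and numpy.interp(u, xp=cdf, fp=x_samples) maps a uniform draw u in [0, 1] through the approximate inverse CDF, which is exactly inverse transform sampling. A self-contained toy sketch (the 4-point PDF here is made up for illustration):

import numpy

pdf = numpy.array([0.1, 0.2, 0.3, 0.4])  # toy normalized PDF over 4 grid points
grid = numpy.arange(4.0)

# the accumulation loop above is just a cumulative sum
cdf = numpy.cumsum(pdf)  # [0.1, 0.3, 0.6, 1.0]

# inverse transform sampling: push uniform draws through the inverse CDF
u = numpy.random.random(100000)
x = numpy.interp(u, cdf, grid)

# sanity check: the fraction of samples at or below each grid point
# reproduces the CDF value there
for g, c in zip(grid, cdf):
    print(g, c, numpy.mean(x <= g))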
# Drawing 3D plots in Python
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D

np.random.seed(42)

# Number of samples
n_samples = 500
dim = 3

# Draw a set of 3-D standard normal samples; their directions are completely random
samples = np.random.multivariate_normal(np.zeros(dim), np.eye(dim), n_samples)

# Rescale each sample's distance to the origin so the points end up
# uniformly distributed inside the unit ball
for i in range(samples.shape[0]):
    r = np.power(np.random.random(), 1.0 / 3.0)
    samples[i] *= r / np.linalg.norm(samples[i])

upper_samples = []
lower_samples = []
for x, y, z in samples:
    # use 3x + 2y - z = 1 as the discriminant plane
    if z > 3 * x + 2 * y - 1:
        upper_samples.append((x, y, z))
    else:
        lower_samples.append((x, y, z))

fig = plt.figure('3D scatter plot')
ax = fig.add_subplot(111, projection='3d')
uppers = np.array(upper_samples)
lowers = np.array(lower_samples)

# Mark samples on either side of the plane with different colors and shapes:
# red dots above the discriminant plane, green triangles below it
ax.scatter(uppers[:, 0], uppers[:, 1], uppers[:, 2], c='r', marker='o')
ax.scatter(lowers[:, 0], lowers[:, 1], lowers[:, 2], c='g', marker='^')
plt.show()
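The r = U^(1/3) step is what makes the points uniform inside the ball: the volume within radius r grows like r^3, so the radius must satisfy P(R <= r) = r^3, and inverting that CDF gives R = U^(1/3). A vectorized sketch of the same sampler (the function name and the newer numpy Generator API are my choices, not from the original code):

import numpy as np

def sample_unit_ball(n, dim=3, seed=42):
    """Uniform samples inside the unit ball: Gaussian directions, U**(1/dim) radii."""
    rng = np.random.default_rng(seed)
    g = rng.normal(size=(n, dim))
    g /= np.linalg.norm(g, axis=1, keepdims=True)  # uniform directions on the sphere
    r = rng.random(n) ** (1.0 / dim)               # P(R <= r) = r**dim, the volume fraction
    return g * r[:, None]

samples = sample_unit_ball(500)
print(np.max(np.linalg.norm(samples, axis=1)))     # always < 1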
First use of TensorFlow, plus drawing 3D plots and calculating KL divergence in Python