Neural network models are generally used for classification; using them as regression predictors is less common. This article takes a classification-oriented BP neural network and modifies it into a regression model for indoor positioning. The main change is to remove the nonlinear transformation in the third (output) layer, i.e. to replace the sigmoid activation function there with the identity function f(x) = x. The reason is that the sigmoid output range, (0, 1), is too narrow, while the output range of a regression model is large (here, indoor coordinates in meters).
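To see the range problem concretely, here is a minimal sketch (the pre-activation values are made up for illustration):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

z = np.array([[-5.0], [12.3]])   # a made-up pre-activation of the output layer
print(sigmoid(z))    # squashed into (0, 1): unusable for coordinates in meters
print(z)             # identity activation f(x) = x keeps the full range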
The modified model's code is as follows:
#coding: UTF8
'''
Author: huangyuliang
'''
import json
import random
import sys
import numpy as np

### The cross-entropy cost function
class CrossEntropyCost(object):

    @staticmethod
    def fn(a, y):
        return np.sum(np.nan_to_num(-y*np.log(a) - (1-y)*np.log(1-a)))

    @staticmethod
    def delta(z, a, y):
        return (a - y)

### Main Network class
class Network(object):

    def __init__(self, sizes, cost=CrossEntropyCost):
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.default_weight_initializer()
        self.cost = cost

    def default_weight_initializer(self):
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x)/np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def large_weight_initializer(self):
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases[:-1], self.weights[:-1]):  # first n-1 layers
            a = sigmoid(np.dot(w, a) + b)
        b = self.biases[-1]   # last layer: linear output, no sigmoid
        w = self.weights[-1]
        a = np.dot(w, a) + b
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            lmbda=0.0,
            evaluation_data=None,
            monitor_evaluation_accuracy=False):
        """Train the network using mini-batch stochastic gradient descent."""
        n = len(training_data)
        for j in xrange(epochs):
            random.shuffle(training_data)
            mini_batches = [training_data[k:k+mini_batch_size]
                            for k in xrange(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta, lmbda, len(training_data))
            print ("Epoch %s training complete" % j)
            if monitor_evaluation_accuracy:
                print ("Mean error on evaluation data: {} (epoch {})".format(
                    self.accuracy(evaluation_data), j))

    def update_mini_batch(self, mini_batch, eta, lmbda, n):
        """Update the network's weights and biases by applying gradient
        descent using backpropagation to a single mini batch.  The
        ``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the
        learning rate, ``lmbda`` is the regularization parameter, and
        ``n`` is the total size of the training data set."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [(1-eta*(lmbda/n))*w - (eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []            # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases[:-1], self.weights[:-1]):  # first n-1 layers
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # last layer: no nonlinearity
        b = self.biases[-1]
        w = self.weights[-1]
        z = np.dot(w, activation) + b
        zs.append(z)
        activation = z
        activations.append(activation)
        # backward pass
        delta = (self.cost).delta(zs[-1], activations[-1], y)  # output error (tj - oj)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())  # (tj - oj) * o(j-1)
        for l in xrange(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)  # z * (1 - z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp  # hidden-layer error
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def accuracy(self, data):
        """Return the mean Euclidean distance between the predicted
        and true coordinates."""
        results = [(self.feedforward(x), y) for (x, y) in data]
        alist = [np.sqrt((x[0][0]-y[0])**2 + (x[1][0]-y[1])**2)
                 for (x, y) in results]
        return np.mean(alist)

    def save(self, filename):
        """Save the neural network to the file ``filename``."""
        data = {"sizes": self.sizes,
                "weights": [w.tolist() for w in self.weights],
                "biases": [b.tolist() for b in self.biases],
                "cost": str(self.cost.__name__)}
        f = open(filename, "w")
        json.dump(data, f)
        f.close()

### Loading a network
def load(filename):
    """Load a neural network from the file ``filename``.  Returns an
    instance of Network."""
    f = open(filename, "r")
    data = json.load(f)
    f.close()
    cost = getattr(sys.modules[__name__], data["cost"])
    net = Network(data["sizes"], cost=cost)
    net.weights = [np.array(w) for w in data["weights"]]
    net.biases = [np.array(b) for b in data["biases"]]
    return net

def sigmoid(z):
    """The sigmoid function."""
    return 1.0/(1.0 + np.exp(-z))

def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z)*(1 - sigmoid(z))
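As a quick sanity check, a minimal sketch (assuming the module above is saved as network_0.py, as the training script below imports it, and using a randomly generated dummy input) confirms that the linear output layer is no longer confined to (0, 1):

import numpy as np
import network_0    # the module above, assumed saved as network_0.py

net = network_0.Network([14, 100, 2])   # 14 inputs, 100 hidden units, 2 outputs (x, y)
dummy = np.random.randn(14, 1)          # hypothetical 14-dimensional fingerprint
print(net.feedforward(dummy))           # a 2x1 array, not restricted to (0, 1)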
Instantiate the network, train it, and save the parameters:
#coding: UTF8
import my_datas_loader_1
import network_0

training_data, test_data = my_datas_loader_1.load_data_wrapper()
## Train the network and save the trained parameters
net = network_0.Network([14, 100, 2], cost=network_0.CrossEntropyCost)
net.large_weight_initializer()
net.SGD(training_data, 1000, 316, 0.005, lmbda=0.1,
        evaluation_data=test_data, monitor_evaluation_accuracy=True)
filename = r'C:\Users\hyl\Desktop\Second_158\Regression_Model\parameters.txt'
net.save(filename)
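The data loader itself is not shown in the original post. A hypothetical sketch of what my_datas_loader_1 is assumed to provide: each sample is a tuple (x, y), where x is a 14x1 numpy array of signal fingerprints and y is a 2x1 array of map coordinates (the file name and 80/20 split below are illustrative assumptions):

import numpy as np

def load_data_wrapper():
    # 'fingerprints.txt' is a hypothetical file with 16 columns per row:
    # 14 signal values followed by the 2 map coordinates.
    raw = np.loadtxt('fingerprints.txt')
    data = [(row[:14].reshape(14, 1), row[14:].reshape(2, 1)) for row in raw]
    split = int(0.8 * len(data))
    return data[:split], data[split:]   # training_data, test_data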
The results of training rounds 190-199 are as follows:
Load the saved parameters and run the positioning prediction:
#coding: UTF8
import my_datas_loader_1
import network_0
import matplotlib.pyplot as plt

test_data = my_datas_loader_1.load_test_data()
## Call the trained network to make predictions
filename = r'D:\Workspase\Nerual_networks\Parameters.txt'   ## file holding the trained parameters
net = network_0.load(filename)                              ## rebuild the network from the saved parameters
fig = plt.figure(1)
ax = fig.add_subplot(1, 1, 1)
ax.axis("equal")
# plt.grid(color='b', linewidth='0.5', linestyle='-')       # add grid
x = [-0.3, -0.3, -17.1, -17.1, -0.3]                        ## outline of the ninth-floor map
y = [-0.3, 26.4, 26.4, -0.3, -0.3]
m = [1.5, 1.5, -18.9, -18.9, 1.5]
n = [-2.1, 28.2, 28.2, -2.1, -2.1]
ax.plot(x, y, m, n, c='k')
for i in range(len(test_data)):
    pre = net.feedforward(test_data[i][0])   # pre is the predicted coordinate
    bx = pre[0]
    by = pre[1]
    ax.scatter(bx, by, s=4, lw=2, marker='.', alpha=1)   # scatter plot
    plt.pause(0.001)
plt.show()   # keep the window open after the last point
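If each test_data[i][1] holds the true coordinate (as the accuracy method above assumes), the mean positioning error can be computed with the same Euclidean-distance measure; a short sketch under that assumption:

import numpy as np
errors = [np.linalg.norm(np.asarray(net.feedforward(x)).flatten()
                         - np.asarray(y).flatten())
          for (x, y) in test_data]
print("mean positioning error: %.2f m" % np.mean(errors))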
The positioning accuracy reaches about 1.5 meters. The positioning result is shown in the following figure:
The true path is a pedestrian walking loops around the ring-shaped corridor, starting from the origin.