Input data for the price forecast (in each row, the first three columns are the input features and the last column is the target price):
105.0,2,0.89,510.0
105.0,2,0.89,510.0
138.0,3,0.27,595.0
135.0,3,0.27,596.0
106.0,2,0.83,486.0
105.0,2,0.89,510.0
105.0,2,0.89,510.0
143.0,3,0.83,560.0
108.0,2,0.91,450.0
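For reference, here is a minimal sketch of how these rows could be loaded and scaled to match the hard-coded cases/labels in the test() method further below (first column divided by 10, target divided by 1000). The file name data.csv is an assumption, not something from the original post.

def load_rows(path):
    # Assumed layout: one comma-separated row per sample, last column is the
    # target. Scaling mirrors the hard-coded values in test():
    # first column / 10, target / 1000.
    cases, labels = [], []
    with open(path) as f:
        for line in f:
            a, b, c, y = (float(v) for v in line.strip().split(','))
            cases.append([a / 10.0, b, c])
            labels.append([y / 1000.0])
    return cases, labels

# cases, labels = load_rows('data.csv')  # hypothetical file holding the rows above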
Recently I read a paper that uses an optimal-combination forecasting method based on a neural network. The main idea is as follows: take a regression model, a grey prediction model, and a BP neural network prediction model as the base of single forecasting models, then combine these three single models into a BP neural network combination forecasting model. (I am referring to this article: Lu Yulong, Han Jing, Yu Si, Zhang Hongyan. Application of BP neural network combination forecast in the prediction of municipal solid waste production.) A small sketch of this combination idea, using the network class defined later in this post, follows.
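A minimal sketch, with hypothetical numbers: the forecasts of the three single models become the three inputs of the BPNeuralNetwork class defined below, and the observed value is the training target.

# single_forecasts[i] = [regression model, grey model, single BP model] for period i
single_forecasts = [[0.52, 0.50, 0.51], [0.60, 0.58, 0.59]]  # hypothetical values
observed = [[0.51], [0.595]]

combiner = BPNeuralNetwork()
combiner.setup(3, 5, 1)  # 3 single-model forecasts in -> 1 combined forecast out
combiner.train(single_forecasts, observed, 10000, 0.05, 0.1)
print(combiner.predict([0.55, 0.54, 0.56]))  # combined forecast for a new period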
My goal:
I need a BP neural network that makes continuous-valued (regression) predictions. There are many Python implementations of BP neural networks, but most of them are written for classification decisions, so the code has to be modified.
Here is the BP neural network classification implementation I used as a reference (my continuous-prediction code is modified from the code at the following link, with thanks to the author):
https://www.cnblogs.com/Finley/p/5946000.html
Modification idea:
(1) The last layer is not activated; its value is output directly. Equivalently, treat the output activation function as f(x) = x.
(2) The loss function is changed to MSE.
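Combining (1) and (2) explains the one-line change to the output delta in back_propagate below: for the MSE loss $L = \tfrac{1}{2}(y - \hat{y})^2$ with identity output activation, the chain rule gives

\delta_o = (y_o - \hat{y}_o)\, f'(\mathrm{net}_o) = y_o - \hat{y}_o \quad \text{since } f(x) = x \Rightarrow f'(x) = 1,

whereas the classification version had $f'(\mathrm{net}_o) = \hat{y}_o (1 - \hat{y}_o)$, i.e. the sigmoid_derivative factor that gets dropped.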
Code
The parts between the pairs of #----- marker lines are what I changed.
import math
import random

random.seed(0)


def rand(a, b):
    return (b - a) * random.random() + a


def make_matrix(m, n, fill=0.0):
    mat = []
    for i in range(m):
        mat.append([fill] * n)
    return mat


def sigmoid(x):
    return 1.0 / (1.0 + math.exp(-x))


def sigmoid_derivative(x):
    # derivative of sigmoid, expressed in terms of its output value
    return x * (1 - x)
class BPNeuralNetwork:
    def __init__(self):
        self.input_n = 0
        self.hidden_n = 0
        self.output_n = 0
        self.input_cells = []
        self.hidden_cells = []
        self.output_cells = []
        self.input_weights = []
        self.output_weights = []
        self.input_correction = []
        self.output_correction = []

    def setup(self, ni, nh, no):
        self.input_n = ni + 1  # one extra input cell serves as the bias
        self.hidden_n = nh
        self.output_n = no
        # init cells
        self.input_cells = [1.0] * self.input_n
        self.hidden_cells = [1.0] * self.hidden_n
        self.output_cells = [1.0] * self.output_n
        # init weights
        self.input_weights = make_matrix(self.input_n, self.hidden_n)
        self.output_weights = make_matrix(self.hidden_n, self.output_n)
        # random activate
        for i in range(self.input_n):
            for h in range(self.hidden_n):
                self.input_weights[i][h] = rand(-0.2, 0.2)
        for h in range(self.hidden_n):
            for o in range(self.output_n):
                self.output_weights[h][o] = rand(-2.0, 2.0)
        # init correction matrix
        self.input_correction = make_matrix(self.input_n, self.hidden_n)
        self.output_correction = make_matrix(self.hidden_n, self.output_n)
    def predict(self, inputs):
        # activate input layer
        for i in range(self.input_n - 1):
            self.input_cells[i] = inputs[i]  # output values of the input layer
        # activate hidden layer
        for j in range(self.hidden_n):
            total = 0.0
            for i in range(self.input_n):
                total += self.input_cells[i] * self.input_weights[i][j]  # input value of the hidden layer
            self.hidden_cells[j] = sigmoid(total)  # output value of the hidden layer
        # activate output layer
        for k in range(self.output_n):
            total = 0.0
            for j in range(self.hidden_n):
                total += self.hidden_cells[j] * self.output_weights[j][k]
            # -----------------------------------------------
            # self.output_cells[k] = sigmoid(total)
            self.output_cells[k] = total  # the output layer activation is f(x) = x
            # -----------------------------------------------
        return self.output_cells[:]
    def back_propagate(self, case, label, learn, correct):
        # case: x, label: y, learn: learning rate, correct: correction rate
        # feed forward
        self.predict(case)
        # get output layer error
        output_deltas = [0.0] * self.output_n
        for o in range(self.output_n):
            error = label[o] - self.output_cells[o]
            # -----------------------------------------------
            # output_deltas[o] = sigmoid_derivative(self.output_cells[o]) * error
            output_deltas[o] = error  # with f(x) = x, f'(x) = 1, so the delta is just the error
            # -----------------------------------------------
        # get hidden layer error
        hidden_deltas = [0.0] * self.hidden_n
        for h in range(self.hidden_n):
            error = 0.0
            for o in range(self.output_n):
                error += output_deltas[o] * self.output_weights[h][o]
            hidden_deltas[h] = sigmoid_derivative(self.hidden_cells[h]) * error
        # update output weights
        for h in range(self.hidden_n):
            for o in range(self.output_n):
                change = output_deltas[o] * self.hidden_cells[h]
                # gradient step plus a momentum term scaled by the correction rate
                self.output_weights[h][o] += learn * change + correct * self.output_correction[h][o]
                self.output_correction[h][o] = change
        # update input weights
        for i in range(self.input_n):
            for h in range(self.hidden_n):
                change = hidden_deltas[h] * self.input_cells[i]
                self.input_weights[i][h] += learn * change + correct * self.input_correction[i][h]
                self.input_correction[i][h] = change
        # get global error
        error = 0.0
        for o in range(len(label)):
            error += 0.5 * (label[o] - self.output_cells[o]) ** 2
        return error
    def train(self, cases, labels, limit=10000, learn=0.05, correct=0.1):
        # limit: maximum number of iterations, learn: learning rate λ, correct: correction rate μ
        for j in range(limit):
            error = 0.0
            for i in range(len(cases)):
                label = labels[i]
                case = cases[i]
                error += self.back_propagate(case, label, learn, correct)
    def test(self):
        # the input rows from above, scaled (first column / 10, target / 1000)
        cases = [
            [10.5, 2, 0.89],
            [10.5, 2, 0.89],
            [13.8, 3, 0.27],
            [13.5, 3, 0.27],
        ]
        labels = [[0.51], [0.51], [0.595], [0.596]]
        self.setup(3, 5, 1)
        self.train(cases, labels, 10000, 0.05, 0.1)
        for case in cases:
            print(self.predict(case))
if __name__ == '__main__':
    nn = BPNeuralNetwork()
    nn.test()
Experimental results:
[0.5095123779256603]
[0.5095123779256603]
[0.5952606219141522]
[0.5939697670509705]
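The predictions track the scaled labels (0.51, 0.51, 0.595, 0.596) closely. To read a prediction in the original units, the scaling has to be undone. A minimal usage sketch, assuming the code above is in scope; the new input row and the / 10 and / 1000 factors simply mirror the hard-coded scaling in test():

nn = BPNeuralNetwork()
nn.setup(3, 5, 1)
cases = [[10.5, 2, 0.89], [10.5, 2, 0.89], [13.8, 3, 0.27], [13.5, 3, 0.27]]
labels = [[0.51], [0.51], [0.595], [0.596]]
nn.train(cases, labels, 10000, 0.05, 0.1)
scaled = nn.predict([10.6, 2, 0.83])  # the data row 106.0, 2, 0.83 scaled by / 10
print(scaled[0] * 1000.0)             # undo the / 1000 to get the original price units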