BP algorithm, written in C

Source: Internet
Author: User
Tags: rewind

// BP neural network algorithm implementation

#include <stdio.h>
#include <math.h>
#include <conio.h>
#include <stdlib.h>

#define NH   3       /* input layer size */
#define NI   4       /* hidden layer size */
#define NJ   1       /* output layer size */
#define NK   100     /* training sample count */
#define TEST 70      /* test set capacity */
#define NR   0.7     /* learning rate */
#define EPS  0.00001 /* target global error */

float x[NK][NH], d[NK][NJ], whi[NH][NI], wij[NI][NJ], thi[NI], thj[NJ];
int h, i, j, k, ff;
float xmin[NH], xmax[NH], dmin[NJ], dmax[NJ];
FILE *fp1, *fp2, *fp3, *fp4;

void init(void);
void startlearning(void);
void testsample(void);
void readw(void);
void writew(void);
float sigmoid(float a);
double ranu(void);

void init(void)
{
    int min, max;
    if (fp1 == NULL) {
        system("cls");
        printf("can not find the learning sample file!\n");
        exit(0);
    }
    for (k = 0; k < NK; k++) {
        for (h = 0; h < NH; h++)
            fscanf(fp1, "%f,", &x[k][h]);   /* network inputs */
        for (j = 0; j < NJ; j++)
            fscanf(fp1, "%f,", &d[k][j]);   /* desired outputs */
    }
    for (h = 0; h < NH; h++) {
        min = 0;
        max = 0;
        for (k = 0; k < NK; k++) {
            if (x[k][h] < x[min][h]) min = k;
            if (x[k][h] > x[max][h]) max = k;
        }
        xmin[h] = x[min][h];
        xmax[h] = x[max][h];
        for (k = 0; k < NK; k++)            /* normalize the inputs */
            x[k][h] = (x[k][h] - xmin[h]) / (xmax[h] - xmin[h]);
        /* x[k][h] = x[k][h] / xmax[h]; */
    }
    for (j = 0; j < NJ; j++) {
        min = 0;
        max = 0;
        for (k = 0; k < NK; k++) {
            if (d[k][j] < d[min][j]) min = k;
            if (d[k][j] > d[max][j]) max = k;
        }
        dmin[j] = d[min][j];
        dmax[j] = d[max][j];
        for (k = 0; k < NK; k++)            /* normalize the outputs */
            d[k][j] = (d[k][j] - dmin[j]) / (dmax[j] - dmin[j]);
        /* d[k][j] = d[k][j] / dmax[j]; */
    }
}

/* ---------------------------------------------------- */
void startlearning(void)
{
    long int nt, n;
    float t, error[NK], gerror, xj[NJ], xi[NI], yj[NJ], yi[NI], pxi[NI], pxj[NJ];
    float u0 = 0, u1 = 0, u2 = 0, u3 = 0;   /* previous deltas (momentum terms) */
    float v0, v1, v2, v3;

    /* initialize weights and thresholds with small random values */
    for (i = 0; i < NI; i++) {
        for (h = 0; h < NH; h++)
            whi[h][i] = -0.8 + 1.6 * ranu();
        for (j = 0; j < NJ; j++)
            wij[i][j] = -0.8 + 1.6 * ranu();
        thi[i] = -0.5 + ranu();
    }
    for (j = 0; j < NJ; j++)
        thj[j] = -0.5 + ranu();
    fp2 = fopen("cmdtxt", "w+");            /* weight/threshold file */

    /* start of learning */
    printf("\nplease enter the learning times:\n");
    scanf("%ld", &nt);
    for (n = 0; n < nt; n++) {              /* learning (epoch) loop */
        gerror = 0;
        for (k = 0; k < NK; k++) {          /* single-sample loop */
            for (i = 0; i < NI; i++) {
                t = 0;
                for (h = 0; h < NH; h++)
                    t += whi[h][i] * x[k][h];
                xi[i] = t + thi[i];
                yi[i] = sigmoid(xi[i]);     /* hidden-layer output */
            }
            for (j = 0; j < NJ; j++) {
                t = 0;
                for (i = 0; i < NI; i++)
                    t += wij[i][j] * yi[i];
                xj[j] = t + thj[j];
                yj[j] = sigmoid(xj[j]);     /* output-layer output */
            }
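            /*
             * Backpropagation step (editor's note, derived from the code
             * below): with the squared error E = sum_j (yj - d)^2 / 2 and
             * the sigmoid derivative y' = y * (1 - y), the output-layer
             * delta is pxj[j] = yj*(1-yj)*(yj - d), and each hidden-layer
             * delta pxi[i] is yi*(1-yi) times the weighted sum of the
             * output deltas fed back through wij.
             */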
            for (j = 0; j < NJ; j++)        /* output-layer delta for this sample */
                pxj[j] = yj[j] * (1 - yj[j]) * (yj[j] - d[k][j]);
            for (i = 0; i < NI; i++) {      /* hidden-layer delta for this sample */
                t = 0;
                for (j = 0; j < NJ; j++)
                    t += pxj[j] * wij[i][j];
                pxi[i] = yi[i] * (1 - yi[i]) * t;
            }
            for (j = 0; j < NJ; j++) {
                v0 = thj[j];
                thj[j] = thj[j] - 0.7 * NR * pxj[j] + 0.3 * u0;
                u0 = thj[j] - v0;
                for (i = 0; i < NI; i++) {
                    v1 = wij[i][j];
                    /* hidden-to-output weight update; NR is the step size */
                    wij[i][j] = wij[i][j] - 0.7 * NR * pxj[j] * yi[i] + 0.3 * u1;
                    u1 = wij[i][j] - v1;
                }
            }
            for (i = 0; i < NI; i++) {
                v2 = thi[i];
                thi[i] = thi[i] - 0.7 * NR * pxi[i] + 0.3 * u2;
                u2 = thi[i] - v2;
                for (h = 0; h < NH; h++) {
                    v3 = whi[h][i];
                    /* input-to-hidden weight update; NR is the step size */
                    whi[h][i] = whi[h][i] - 0.7 * NR * pxi[i] * x[k][h] + 0.3 * u3;
                    u3 = whi[h][i] - v3;
                }
            }
            t = 0;
            for (j = 0; j < NJ; j++)
                t += (yj[j] - d[k][j]) * (yj[j] - d[k][j]) / 2.0;
            error[k] = t;
            gerror += error[k];             /* g(lobal) error */
        }                                   /* end of single-sample loop */
        if (gerror < EPS)
            break;
    }                                       /* end of learning loop */
    printf("%f, %f, %f, %f, %f, %f\n",
           thi[0], thj[0], wij[0][0], wij[1][0], whi[1][0], whi[0][1]);
    writew();
    printf("\nglobal error = %f\n", gerror);
    printf("\nare you satisfied with the global error?\n");
    printf("press any key to choose a next task!\n");
    getch();
}

/* ----------------------------------------------- */
void testsample(void)
{
    float tx[NH], t, xj[NJ], xi[NI], yj[NJ], yi[NI];
    if (fp2 == NULL) {
        /* clrscr(); */
        printf("\ncan not find the weight file: cmdtxt\n");
        exit(0);
    }
    readw();

    for (ff = 0; ff < TEST; ff++) {         /* predict each test sample and save the result */
        for (h = 0; h < NH; h++)            /* test sample input */
            fscanf(fp3, "%f,", &tx[h]);
        for (h = 0; h < NH; h++)            /* normalize with the training min/max */
            tx[h] = (tx[h] - xmin[h]) / (xmax[h] - xmin[h]);
        for (i = 0; i < NI; i++) {
            t = 0;
            for (h = 0; h < NH; h++)
                t += whi[h][i] * tx[h];
            xi[i] = t + thi[i];
            yi[i] = sigmoid(xi[i]);
        }
        for (j = 0; j < NJ; j++) {          /* forward prediction */
            t = 0;
            for (i = 0; i < NI; i++)
                t += wij[i][j] * yi[i];
            xj[j] = t + thj[j];
            yj[j] = sigmoid(xj[j]);
        }
        for (j = 0; j < NJ; j++) {          /* denormalize and save the result */
            yj[j] = yj[j] * (dmax[j] - dmin[j]) + dmin[j];
            fprintf(fp4, "%f\n", yj[j]);
        }
        /* tx[NH-1] = yj[0]; */
        /* rewind(fp4); */
    }

    printf("\nare you satisfied with the output?\n");
    printf("press any key to choose a next task!\n");
    getch();
}
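/*
 * File layout note (editor's note, derived from the code): writew() below
 * stores the trained parameters as plain text, and readw() reads them back
 * in the same order: NH rows of whi, a blank line, the thi row, then NJ
 * rows of wij, a blank line, and the thj row.
 */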
/N "); <br/> getch (); <br/>}< br/>/* ---------------------------------------------- */<br/> void writew (void) <br/>{< br/> rewind (fp2); <br/> for (H = 0; H <nH; H ++) <br/> {<br/> for (I = 0; I <Ni; I ++) <br/> fprintf (fp2, "% 8.3f ", whi [H] [I]); <br/> fprintf (fp2, "/N"); <br/>}< br/> fprintf (fp2, "/N"); </P> <p> for (I = 0; I <Ni; I ++) <br/> fprintf (fp2, "% 8.3f ", thi [I]); <br/> fprintf (fp2, "/n"); </P> <p> for (j = 0; j <NJ; j ++) <br/>{< br/> for (I = 0; I <Ni; I ++) <br/> fprintf (fp2, "% 8.3f ", wij [I] [J]); <br/> fprintf (fp2, "/N"); <br/>}< br/> fprintf (fp2, "/N"); <br/> for (j = 0; j <NJ; j ++) <br/> fprintf (fp2, "% 8.3f ", THJ [J]); <br/>}< br/>/* ------------------------------------------------ */<br/> void readw (void) <br/> {<br/> for (H = 0; H <nH; H ++) <br/> for (I = 0; I <Ni; I ++) <br/> fscanf (fp2, "% F", & whi [H] [I]); <br/> for (I = 0; I <Ni; I ++) <br/> fscanf (fp2, "% F", & Thi [I]); <br/> for (j = 0; j <NJ; j ++) <br/> for (I = 0; I <Ni; I ++) <br/> fscanf (fp2, "% F ", & wij [I] [J]); <br/> for (j = 0; j <NJ; j ++) <br/> fscanf (fp2, "% F", & THJ [J]); <br/>}< br/>/* -------------------------------- */<br/> float sigmoid (float) <br/>{< br/> return (1.0/(1 + exp (-))); <br/>}< br/>/* ------------------------------ */<br/> double ranu (void) <br/>{< br/> static double xrand = 3.0; <br/> double M = 8589934592.0, A = 30517578125.0; <br/> LP: xrand = fmod (xrand * a, m ); /* remainder Division */<br/> If (xrand> 1.0) <br/> return (xrand/M ); <br/> else <br/> {<br/> xrand = 1.0; <br/> goto LP; <br/>}< br/>/* ---------------------------------- */</P> <p> void main () <br/>{< br/> FP1 = fopen ("sample.txt", "R"); <br/> fp2 = fopen ("cmdtxt", "R + "); <br/> fp3 = fopen ("test.txt", "R +"); <br/> fp4 = fopen ("testre.txt", "R + "); <br/> Init (); <br/> while (1) <br/> {<br/> system ("CLS "); <br/> printf ("/T/N please choose a next task... /n "); <br/> printf ("/T/N (s) to start learning. /n "); <br/> printf ("/T/N (t) to test samples. /n "); <br/> printf ("/T/N (r) to resume learning. /n "); <br/> printf ("/T/N (q) quit. /n "); <br/> switch (getchar () <br/>{< br/> case 's': startlearning (); break; <br/> case 'T': testsample (); break; <br/> case 'r': startlearning (); break; <br/> case 'q ': exit (0); break; <br/>}< br/> fclose (FP1); <br/> fclose (fp2 ); <br/> fclose (fp3); <br/> fclose (fp4); <br/> getch (); <br/>}< br/>/* ---------------- by ahzhming@163.com sea Wide Sky ------------------------- */<br/>
