Neural Networks

I needed a neural network for my experiments, so I wrote one myself from the algorithm in the book. It can solve the XOR problem, but it cannot cope with large data sets, and sometimes it converges extremely slowly; I went through it twice without finding the cause. I then tried a network someone else had posted online, and it also failed on large data (again, very slow convergence); after that I tried JOONE, which had convergence problems too. In the end I had to fall back on weka. Below I post all four networks as a summary; the fourth will be added later.
If anyone spots a problem in my own network, please point it out. Thanks in advance.
(javaeye will not let me add search keywords for this post, which is a pity.)

1. My own network. The algorithm follows 《神经网络原理》 (Neural Networks: A Comprehensive Foundation) by Simon Haykin.
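For reference, the update rules the three classes below implement (standard batch back-propagation with a logistic activation) are:

\[
f(x) = \frac{1}{1+e^{-x}}
\]
\[
\delta_k = o_k(1-o_k)(t_k - o_k) \quad \text{(output layer, OutputLayer.backward)}
\]
\[
\delta_j = o_j(1-o_j)\sum_k \delta_k w_{jk} \quad \text{(hidden layer, HiddenLayer.backward)}
\]
\[
w_{ij} \leftarrow w_{ij} + \frac{\eta}{N}\sum_{n=1}^{N} \delta_j^{(n)} o_i^{(n)} \quad \text{(setWeight: update averaged over the batch of } N \text{ samples)}
\]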

/**
 * InputLayer.java
 * @author KingSlayer
 */
public class InputLayer {

public InputLayer(int aInputD) {
System.out.println("input layer: " + aInputD);
inputD = aInputD;
input = new double[inputD];
output = new double[inputD];
}

public void setInput(double[] aInput) {
for (int i = 0; i < aInput.length; i++) {
input[i] = output[i] = aInput[i];
/*
output[i] = f_x(input[i]);
//*/
}
}
public double[] getOutput() {
return output;
}

private double f_x(double x) {
/* logistic sigmoid f(x) = 1 / (1 + e^(-x)) */
return 1 / (1 + Math.exp(-x));
}

private int inputD;
private double[] input;
private double[] output;
}



/**
 * HiddenLayer.java
 * @author KingSlayer
 */
public class HiddenLayer {
public HiddenLayer(int aInputD, int aOutputD, double aEta){
System.out.println("hidden layer: " +aInputD+" "+aOutputD);
inputD = aInputD;
outputD = aOutputD;
weight = new double[inputD][outputD];
delta_weight = new double[inputD][outputD];
for(int i=0;i<inputD;i++){
for(int j=0;j<outputD;j++){
weight[i][j] = Math.random()-0.5;
}
}

weight_input = new double[outputD];
output = new double[outputD];
delta = new double[outputD];

eta = aEta;
}
public void forward(double aInput[]){
for(int i=0;i < outputD; i++){
//System.out.println(weight_input[0]);
weight_input[i] = 0;
for(int j=0; j<inputD; j++){
weight_input[i] += weight[j][i]*aInput[j];
}
output[i] = f_x(weight_input[i]);
}
}
public void backward(double[] delta_next, double[][] weight_next, double[] output_pre){
//output_pre holds the outputs of the previous layer, i.e. this layer's inputs
for(int i = 0; i < outputD; i++){
double sum = 0;
for(int j=0; j< delta_next.length; j ++){
sum += delta_next[j]*weight_next[i][j];
}
delta[i] = output[i] * (1 - output[i]) * sum;
}
for(int i=0;i<inputD;i++){
for(int j=0;j<outputD;j++){
delta_weight[i][j] += eta * delta[j] * output_pre[i];
}
}
}
public void init_d_weight(){
for(int i=0;i<inputD;i++){
for(int j=0;j<outputD;j++){
delta_weight[i][j] = 0;
}
}
}
public void setWeight(int aTimes){
for(int i=0; i< inputD; i++){
for(int j=0; j< outputD; j++){
weight[i][j] += delta_weight[i][j]/aTimes;
//System.out.print("delta_weight = " +delta_weight[i][j]);
}
}
}
public double[] getDelta(){
return delta;
}
public double[] getOutput(){
return output;
}
private double f_x(double x) {
/* logistic sigmoid f(x) = 1 / (1 + e^(-x)) */
return 1 / (1 + Math.exp(-x));
}
private int inputD;
private int outputD; //inputD = size of the previous layer, outputD = size of this layer
private double[][] weight; //weights
private double[][] delta_weight;//accumulated weight updates for the current batch
private double[] weight_input; //weighted input (pre-activation)
private double[] output; //output after applying f(x)
private double[] delta; //delta: local gradient

private final double eta;
}


/**
 * OutputLayer.java
 * @author KingSlayer
 */
public class OutputLayer {
public OutputLayer(int aInputD, int aOutputD, double aEta){
System.out.println("output layer: "+aInputD+" "+aOutputD);
inputD = aInputD;
outputD = aOutputD;

weight = new double[inputD][outputD];
delta_weight = new double[inputD][outputD];
for(int i=0;i<inputD;i++){
for(int j=0;j<outputD;j++){
weight[i][j] = Math.random()-0.5;
delta_weight[i][j] = 0;
}
}
weight_input = new double[outputD];
output = new double[outputD];
delta = new double[outputD];
expectation = new double[outputD];
error = new double[outputD];
eta = aEta;
}
public void setExpectation(double[] aExpectation){
if(aExpectation.length != outputD){
System.out.println("error: expectation dimension does not match the output layer");
}
for(int i = 0; i < outputD; i++){
expectation[i] = aExpectation[i];
}
}
public void forward(double[] aInput){
for(int i=0;i < outputD; i++){
weight_input[i] = 0;
for(int j=0; j<inputD; j++){
weight_input[i] += weight[j][i]*aInput[j];
}
output[i] = f_x(weight_input[i]);
}
}
public void backward(double[] output_pre){
//output_pre holds the outputs of the previous layer, i.e. this layer's inputs
for(int i=0; i<outputD; i++){
error[i] = expectation[i] - output[i];
delta[i] = output[i]*(1-output[i])*error[i];
}
for(int i=0;i<inputD;i++){
for(int j=0;j<outputD;j++){
delta_weight[i][j] += eta * delta[j] * output_pre[i];
}
}
}
public void init_d_weight(){
for(int i=0;i<inputD;i++){
for(int j=0;j<outputD;j++){
delta_weight[i][j] = 0;
}
}
}
public double[] getDelta(){
return delta;
}
public double[] getOutput(){
return output;
}
public double getError(){
double sum = 0;
for(int i= 0 ;i < error.length;i ++){
sum += error[i]*error[i];
}
return sum*0.5;
}
public double[][] getWeight(){
return weight;
}
public void setWeight(int aTimes){
for(int i=0; i< inputD; i++){
for(int j=0; j< outputD; j++){
weight[i][j] += delta_weight[i][j]/aTimes;
}
}
}
private double f_x(double x) {
/* logistic sigmoid f(x) = 1 / (1 + e^(-x)) */
return 1 / (1 + Math.exp(-x));
}
private int inputD;
private int outputD; //inputD = size of the previous layer, outputD = size of this layer
private double[][] weight; //weights
private double[][] delta_weight; //accumulated weight updates for the current batch
private double[] weight_input; //weighted input (pre-activation)
private double[] output; //output after applying f(x)
private double[] delta; //delta: local gradient
private double[] expectation; //desired output
private double[] error; //per-sample error

private final double eta;
}



/**
 * MultilayerPerceptron.java
 * @author KingSlayer
 */
public class MultilayerPerceptron {

/**
 * @param dimension node counts per layer: index 0 is the input layer,
 * index length-1 is the output layer, so there are dimension.length - 2 hidden layers
 * @param aEta learning rate
 */
public MultilayerPerceptron(int[] dimension, double aEta) {
inputL = new InputLayer(dimension[0]);

hiddenL = new HiddenLayer[dimension.length - 2];
for (int i = 0; i < hiddenL.length; i++) {
hiddenL[i] = new HiddenLayer(dimension[i], dimension[i + 1], aEta);
}

outputL = new OutputLayer(dimension[dimension.length - 2], dimension[dimension.length - 1], aEta);
}

public void test(double[][] input, double[][] result) {
double sum;
for (int i = 0; i < input.length; i++) {
inputL.setInput(input[i]);
//forward
for (int j = 0; j < hiddenL.length; j++) {
//double[] outputBuf ;
if (j == 0) {
hiddenL[j].forward(inputL.getOutput());
} else {
hiddenL[j].forward(hiddenL[j - 1].getOutput());
}
}
outputL.forward(hiddenL[hiddenL.length - 1].getOutput());

System.out.println("第"+i+"个测试样本的结果:");
for(int j = 0 ; j< result[0].length; j++){
System.out.print(result[i][j]+" ");
}
System.out.println();
for(int j = 0 ; j< outputL.getOutput().length; j++){
System.out.print(outputL.getOutput()[j]+" ");
}
System.out.println();
}
}

public static void main(String[] args) {
int[] dimension = {2, 2, 1};
MultilayerPerceptron mlp = new MultilayerPerceptron(dimension, eta);

double[][] input = {{0, 0}, {1, 1}, {1, 0}, {0, 1}};
double[][] expectation = {{0}, {0}, {1}, {1}};
// double[][] input={{0,0,0},{5,1,4},{5,3,3},
// {5,5,2},{5,3,3},{5,3,2},
// {5,3,2},{5,5,1},{5,1,2},
// {5,3,3},{5,5,4},{5,5,2},
// {5,1,3},{5,3,4},{5,5,5},
// {5,1,4},{5,1,4},{5,3,5},
// {5,5,4},{5,1,3},{5,3,2},
// {1,3,1},{1,5,2},{1,1,3},
// {1,3,4},{1,5,5},{1,5,3},
// {1,1,4},{1,3,5},{1,5,4},
// {1,1,3},{1,1,5},{1,3,4},
// {1,5,3},{1,1,2},{1,3,1},
// {1,3,3},{1,5,2},{1,1,1},
// {1,3,2},{1,5,3}
// };
// double[][] expectation = {{0},{19.02},{14.150},
// {14.360},{14.150},{15.390},
// {15.390},{19.680},{21.060},
// {14.150},{12.680},{14.360},
// {19.610},{13.650},{12.430},
// {19.020},{19.020},{13.390},
// {12.680},{19.610},{15.390},
// {11.110},{6.521},{10.190},
// {6.043},{5.242},{5.724},
// {9.766},{5.870},{5.406},
// {10.190},{9.545},{6.043},
// {5.724},{11.250},{11.110},
// {6.380},{6.521},{16.000},
// {7.219},{5.724}};
int times = input.length;
System.out.println("一共" + times + "个样本");
do {
mlp.error = 0;
for(int i = 0 ; i< mlp.hiddenL.length; i++){
mlp.hiddenL[i].init_d_weight();
}
mlp.outputL.init_d_weight();
for (int i = 0; i < times; i++) {
mlp.inputL.setInput(input[i]);
mlp.outputL.setExpectation(expectation[i]);
/* forward and backward passes */
//forward
for (int j = 0; j < mlp.hiddenL.length; j++) {
if (j == 0) {
mlp.hiddenL[j].forward(mlp.inputL.getOutput());
} else {
mlp.hiddenL[j].forward(mlp.hiddenL[j - 1].getOutput());
}
}
mlp.outputL.forward(mlp.hiddenL[mlp.hiddenL.length - 1].getOutput());

//backward
mlp.outputL.backward(mlp.hiddenL[mlp.hiddenL.length - 1].getOutput());
for (int j = mlp.hiddenL.length - 1; j >= 0; j--) {
if (mlp.hiddenL.length == 1) {
mlp.hiddenL[j].backward(mlp.outputL.getDelta(), mlp.outputL.getWeight(), mlp.inputL.getOutput());
} else {
System.out.println("隐层结点数目为:" + mlp.hiddenL.length + "目前算法只针对三层");
}
}
//accumulate the error over the batch
mlp.error += mlp.outputL.getError();
}//one pass over the whole batch done
//weight update
mlp.outputL.setWeight(times);
for (int i = 0; i < mlp.hiddenL.length; i++) {
mlp.hiddenL[i].setWeight(times);
}
System.out.println("现在误差为: "+Math.abs(mlp.error / times));
} while (Math.abs(mlp.error / times) > 0.01); //&&Math.abs(mlp.error / times)!=0.25
mlp.test(input, expectation);
}
private InputLayer inputL;
private HiddenLayer[] hiddenL;
private OutputLayer outputL;
private double error;
private static final double eta = 1;
}


2. This next example is someone else's:


import java.util.HashMap;
import java.util.Random;
import java.util.Set;
import java.util.Map.Entry;

/**
 * A back-propagation neural network in Java
 * @author kj021320, 2008.12.10
 */
public class JavaBackPropagationNeuralNetwork {
/**
 * Neuron
 */
public class Neuron {
HashMap<Integer, Link> target = new HashMap<Integer, Link>();// 连接其他神经元的
HashMap<Integer, Link> source = new HashMap<Integer, Link>();// 被其他神经元连接的
double data = 0.0;
public Link sourceGet(int index) {
return source.get(index);
}
public Link targetGet(int index) {
return target.get(index);
}
public boolean targetContains(Link l) {
return target.containsValue(l);
}
public boolean sourceContains(Link l) {
return source.containsValue(l);
}
public Link sourceLink(int index, Link l) {
if (l.linker != this) {
l.setLinker(this);
}
return source.put(index, l);
}
public Link targetLink(int index, Link l) {
if (l.owner != this) {
l.setOwner(this);
}
return target.put(index, l);
}
}

/**
 * Link: a weighted connection between two neurons
 */
public class Link {
Neuron owner;
public void setOwner(Neuron o) {
owner = o;
if (!o.targetContains(this)) {
o.targetLink(o.target.size(), this);
}
}
public Link() {
weight = rand(-1, 1);
}
public void setLinker(Neuron o) {
linker = o;
if (!o.sourceContains(this)) {
o.sourceLink(o.source.size(), this);
}
}
@Override
public String toString(){
return super.toString()+" weight:"+weight;
}
Neuron linker;
double weight;
}

Random random = new Random();
{
random.setSeed(System.nanoTime());
}
Neuron[] inputnode; //input-layer neurons
Neuron[] hiddennode; //hidden-layer neurons
Neuron[] outputnode; //output-layer neurons
double learnrate;// learning rate
double threshold;// error tolerance

private final int inputCount;
private final int hiddenCount;
private final int outputCount;
/**
 *
 * @param input number of input-layer neurons
 * @param hidden number of hidden-layer neurons
 * @param output number of output-layer neurons
 */
public JavaBackPropagationNeuralNetwork(int input, int hidden, int output) {
inputCount = input;
hiddenCount = hidden;
outputCount = output;
build();
}
public void reBuildNeuralNetwork(){
build();
}
private void build(){
inputnode = new Neuron[inputCount+1];
hiddennode = new Neuron[hiddenCount];
outputnode = new Neuron[outputCount];
initNeurons(inputnode);
initNeurons(hiddennode);
initNeurons(outputnode);
makeLink(inputnode, hiddennode);
makeLink(hiddennode, outputnode);
}
/**
 * Feed-forward ("thinking")
 * @param inputs one value per input neuron, each between -1 and 1
 * @return the network's outputs, one per output neuron, each between -1 and 1
 */
public double[] thinking(double[] inputs) {
/** map the data onto the input-layer neurons */
makeNeuron(inputnode, inputs);
/** propagate through the link weights, from the hidden layer to the output layer */
thinking();
/** copy the output-layer values into the returned double array */
return makeMatrix();
}
public double[][] batchThinking(double[][] inputs){
double[][] ret = new double[inputs.length][];
for(int i = 0; i< inputs.length ; i++){
makeNeuron(inputnode, inputs[i]);
thinking();
ret[i]=makeMatrix();
}
return ret;
}
/**
 * Batch training
 * @param inputs
 * @param outputs
 * @param learnrate learning rate
 * @param error allowed total error
 * @param maxlearn maximum number of training epochs
 * @return whether training converged
 */
public boolean train(double[][] inputs, double[][] outputs, double learnrate,
double error,int maxlearn) {
this.learnrate = learnrate;
this.threshold = error;
boolean complete = false;
int count = 0;
double e =0;
while (!complete) {
count++;
e = 0;
complete = true;
for (int size = 0; size < inputs.length; size++) {
e += learn(inputs[size], outputs[size]);
if (e > threshold) {
complete = false;
}
}
if(count>=maxlearn){
System.err.println("convergence fail error:"+e);
return false;
}
}
System.out.println("convergence success error:"+e);
return true;
}

/**
 * A single learning step
 *
 * @param input
 * @param output
 * @return the error
 */
private double learn(double[] input, double[] output) {
/** map the data onto the input-layer neurons */
makeNeuron(inputnode, input);
/** propagate through the link weights, from the hidden layer to the output layer */
thinking();
/** compute the error */
return evolutionComputing(output);
}

private void thinking() {
transmitComputing(hiddennode);
transmitComputing(outputnode);
}

/**
 * Propagate values into a layer of neurons
 *
 * @param ns
 */
private void transmitComputing(Neuron[] ns) {
for (Neuron ne : ns) {
double sum = 0.0;
Set<Entry<Integer, Link>> linkset = ne.source.entrySet();
for (Entry<Integer, Link> ent : linkset) {
Link l = ent.getValue();
Neuron n = l.owner;
// the key step: accumulate source-neuron output * link weight
sum += n.data * l.weight;
}
// pass the weighted sum through the activation function and store it on this node
ne.data = sigmoid(sum);
}
}
/**
 * Gradient descent via the delta rule
 * @param datas
 * @return
 */
private double evolutionComputing(double[] datas) {
double[] output_deltaDatas = new double[outputnode.length];
double totalError = 0.0;
for (int i = 0; i < outputnode.length; i++) {
/**
* Erri = Ti – Oi O is the predicted output T is the correct output
* Δi = Erri * g’(ini) g’ is the derivative of the activation
* function g
*/
output_deltaDatas[i] = (datas[i] - outputnode[i].data)
* sigmoidDerivative(datas[i]);
}

double[] hidden_deltaDatas = new double[hiddennode.length];
for (int i = 0; i < hiddennode.length; i++) {
/**
 * Delta_j = g'(in_j) * sum_i(W_j,i * Delta_i)
 */
double error = 0.0;
Set<Entry<Integer, Link>> linkSet = hiddennode[i].target.entrySet();
for (Entry<Integer, Link> ent : linkSet) {
error += output_deltaDatas[ent.getKey()]
* ent.getValue().weight;
}
hidden_deltaDatas[i] = sigmoidDerivative(hiddennode[i].data)
* error;
}
/**
 * W_j,i = W_j,i + alpha * H_j * Delta_i, where H_j is the activation of hidden unit j
 */
for (int i = 0; i < hiddennode.length; i++) {
Set<Entry<Integer, Link>> linkSet = hiddennode[i].target.entrySet();
for (Entry<Integer, Link> ent : linkSet) {
Link hidden2output = ent.getValue();
hidden2output.weight += output_deltaDatas[ent.getKey()]
* hiddennode[i].data * learnrate; // bug fix: H_j is the activation of *this* hidden unit (index i), not of the output key
//System.out.println("hidden2output:"+hidden2output);
}
}
//System.out.println();
/**
 * W_k,j = W_k,j + alpha * I_k * Delta_j, where I_k is the activation of input unit k
 */
for (int i = 0; i < inputnode.length; i++) {
Set<Entry<Integer, Link>> linkSet = inputnode[i].target.entrySet();
for (Entry<Integer, Link> ent : linkSet) {
Link input2hidden = ent.getValue();
input2hidden.weight += hidden_deltaDatas[ent.getKey()]
* inputnode[i].data * learnrate;
//System.out.println("inputnode[i].data:"+inputnode[i].data+"input2hidden:"+input2hidden);
}
}
//System.out.println();
/**
 * E = 1/2 * sum_i (T_i - O_i)^2
 */
for (int i = 0; i < outputnode.length; i++) {
double temp = outputnode[i].data - datas[i];
totalError += temp * temp;
}
return totalError * 0.5;
}

/**
 * Map the data onto a layer of neurons; any extra neurons
 * (e.g. the bias node in the input layer) are set to 1.0
 *
 * @param neurons
 * @param datas
 */
private void makeNeuron(Neuron[] neurons, double[] datas) {
for (int len = 0; len < neurons.length; len++) {
if(len >= datas.length){
neurons[len].data = 1.0;
}else{
neurons[len].data = datas[len];
}
}
}

/**
 * Copy the output-layer neuron values into an array
 *
 * @return
 */
private double[] makeMatrix() {
double[] temp = new double[outputnode.length];
for (int i = 0; i < outputnode.length; i++) {
temp[i] = outputnode[i].data;
}
return temp;
}

private void initNeurons(Neuron[] startns) {
for (int lenN = 0; lenN < startns.length; lenN++) {
if (startns[lenN] == null) {
startns[lenN] = new Neuron();
}
}
}

/**
 * Fully connect the two layers
 *
 * @param startns
 * @param endns
 */
private void makeLink(Neuron[] startns, Neuron[] endns) {
for (int lenN = 0; lenN < startns.length; lenN++) {
for (int len = 0; len < endns.length; len++) {
Link target = startns[lenN].targetGet(len);
if (target == null) {
target = new Link();
startns[lenN].targetLink(len, target);
}
target.setLinker(endns[len]);
}
}
}

/**
 * The activation function. Despite the name it is tanh,
 * which squashes values into the range -1..1
 *
 * @param x
 * @return
 */
private double sigmoid(double x) {
return Math.tanh(x);
}

/* random number in the range [min, max) */
private double rand(double min, double max) {
return (max - min) * random.nextDouble() + min;
}

// derivative of the activation: for y = tanh(x), dy/dx = 1 - y^2
// (the original line here was "return 1.0 - sigmoid(y) * y;", which is not
// the derivative of tanh and is a plausible cause of the slow convergence)
private double sigmoidDerivative(double y) {
return 1.0 - y * y;
}

/**
* @param args
* @throws Throwable
*/
public static void main(String[] args) throws Throwable {
//build a back-propagation network: 2 input, 4 hidden, 1 output neurons
JavaBackPropagationNeuralNetwork jbpn = new JavaBackPropagationNeuralNetwork(2, 4, 1);

//train on XOR
while(!jbpn.train(
new double[][] { new double[] { -1, -1 },new double[] { 1, 1 }, new double[] { -1, 1 },new double[] { 1, -1 } },//inputs
new double[][] { new double[] {-1},new double[] {-1},new double[] {1},new double[] {1} },//supervised target outputs
0.3, 0.05,1000)){
jbpn.reBuildNeuralNetwork();
}
//feed one pattern forward
double[] res = jbpn.thinking(new double[] { -1, -1 });
for(double s:res){
System.out.println("thinking:"+s);
}
//feed a batch forward
double[][] ress = jbpn.batchThinking(new double[][] { new double[] { -0.8, -0.9 },new double[] { 0.7, 0.3 }, new double[] { -.6, .85 },new double[] { 1, -1 } });
for(double[] s:ress){
for(double d:s){
System.out.print("batchThinking:"+d+" ");
}
System.out.println();
}

}

}


3. Following the XOR example that ships with joone, I added code for saving the trained model and loading it back.
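Condensed from the full class below, the save/load round trip is just Java object serialization plus JOONE's NeuralNetLoader. The helper class name here is mine and "model.snet" is a placeholder path; the calls themselves are the same ones the full class uses:

import java.io.*;
import org.joone.net.NeuralNet;
import org.joone.net.NeuralNetLoader;

public class SnetRoundTrip {
    // save: a trained JOONE NeuralNet is Serializable
    static void save(NeuralNet nnet, String path) throws IOException {
        ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(path));
        out.writeObject(nnet);
        out.close();
    }
    // load, option 1: plain deserialization
    static NeuralNet load(String path) throws IOException, ClassNotFoundException {
        ObjectInputStream in = new ObjectInputStream(new FileInputStream(path));
        NeuralNet net = (NeuralNet) in.readObject();
        in.close();
        return net;
    }
    // load, option 2: JOONE's own loader does the same job
    static NeuralNet loadWithLoader(String path) {
        return new NeuralNetLoader(path).getNeuralNet();
    }
}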

/**
 * NN_JOONE.java
 * @author KingSlayer
 */
/*
* JOONE - Java Object Oriented Neural Engine
* http://joone.sourceforge.net
*
* XOR_using_NeuralNet.java
*
*/
//package org.joone.samples.engine.xor;
//nnet = JooneTools.load(nnOutput)
// JOONE can also read its input from text files on disk, via a special synapse called FileInputSynapse.
// Loading a saved network back from a file looks like this:
//
// ObjectInputStream ois = new ObjectInputStream(new FileInputStream("D:/work/homework/ANN/final/3.snet"));
// Object o = ois.readObject();
// System.out.println("o is " + o);
// ois.close();
// NeuralNet net = (NeuralNet) o;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import org.joone.engine.*;
import org.joone.engine.learning.*;
import org.joone.io.*;
import org.joone.net.*;
import java.util.Vector;

/**
 * Sample class to demonstrate the use of the MemoryInputSynapse
 *
 * @author José Rodriguez
 */
public class NN_JOONE implements NeuralNetListener, Serializable {

private NeuralNet nnet = null;
private MemoryInputSynapse inputSynapse, desiredOutputSynapse;
LinearLayer input;
SigmoidLayer hidden, output;
boolean singleThreadMode = true;
// XOR input
// private double[][] inputArray = new double[][]{
// {0.0, 0.0},
// {0.0, 1.0},
// {1.0, 0.0},
// {1.0, 1.0}
// };
// // XOR desired output
// private double[][] desiredOutputArray = new double[][]{
// {0.0},
// {1.0},
// {1.0},
// {0.0}
// };
double[][] inputA={{0,0,0},{5,1,4},{5,3,3},
{5,5,2},{5,3,3},{5,3,2},
{5,3,2},{5,5,1},{5,1,2},
{5,3,3},{5,5,4},{5,5,2},
{5,1,3},{5,3,4},{5,5,5},
{5,1,4},{5,1,4},{5,3,5},
{5,5,4},{5,1,3},{5,3,2},
{1,3,1},{1,5,2},{1,1,3},
{1,3,4},{1,5,5},{1,5,3},
{1,1,4},{1,3,5},{1,5,4},
{1,1,3},{1,1,5},{1,3,4},
{1,5,3},{1,1,2},{1,3,1},
{1,3,3},{1,5,2},{1,1,1},
{1,3,2},{1,5,3}
};
double[][] expectation = {{0},{19.02},{14.150},
{14.360},{14.150},{15.390},
{15.390},{19.680},{21.060},
{14.150},{12.680},{14.360},
{19.610},{13.650},{12.430},
{19.020},{19.020},{13.390},
{12.680},{19.610},{15.390},
{11.110},{6.521},{10.190},
{6.043},{5.242},{5.724},
{9.766},{5.870},{5.406},
{10.190},{9.545},{6.043},
{5.724},{11.250},{11.110},
{6.380},{6.521},{16.000},
{7.219},{5.724}};
/**
* @param args the command line arguments
*/
public static void main(String args[]) {
NN_JOONE xor = new NN_JOONE();
xor.initNeuralNet();
xor.train();
// xor.initNeuralNet("model.snet");
// System.out.print("here");

xor.interrogate();//test

}

/**
 * Train the network and save the model
 */
public void train() {

// set the inputs
inputSynapse.setInputArray(inputA);
inputSynapse.setAdvancedColumnSelector("1,2,3");
// set the desired outputs
desiredOutputSynapse.setInputArray(expectation);
desiredOutputSynapse.setAdvancedColumnSelector("1");

// get the monitor object to train or feed forward
Monitor monitor = nnet.getMonitor();

// set the monitor parameters
monitor.setLearningRate(0.8);
monitor.setMomentum(0.3);
monitor.setTrainingPatterns(inputA.length);
monitor.setTotCicles(500000);
monitor.setLearning(true);

long initms = System.currentTimeMillis();
// Run the network in single-thread, synchronized mode
nnet.getMonitor().setSingleThreadMode(singleThreadMode);
nnet.go(true);
System.out.println("Total time= " + (System.currentTimeMillis() - initms) + " ms");
saveNeuralNet("model.snet");
}

private void interrogate() {
// set the inputs
inputSynapse.setInputArray(inputA);
inputSynapse.setAdvancedColumnSelector("1,2,3");
Monitor monitor = nnet.getMonitor();
monitor.setTrainingPatterns(inputA.length);
monitor.setTotCicles(1);
monitor.setLearning(false);
FileOutputSynapse foutput = new FileOutputSynapse();
// set the output synapse to write the output of the net

foutput.setFileName("tmp/xorOut.txt");
if (nnet != null) {
nnet.addOutputSynapse(foutput);
System.out.println(nnet.check());
nnet.getMonitor().setSingleThreadMode(singleThreadMode);
nnet.go();
}
}

/**
 * Load a previously saved network and re-attach a fresh input synapse
 */
public void initNeuralNet(String name){
NeuralNetLoader netLoader = new NeuralNetLoader(name);
nnet = netLoader.getNeuralNet();
Layer input = nnet.getInputLayer();
input.removeAllInputs();
inputSynapse = new MemoryInputSynapse();
input.addInputSynapse(inputSynapse);

// The Trainer and its desired output
// Layer output = nnet.getOutputLayer();
// desiredOutputSynapse = new MemoryInputSynapse();
// output.addOutputSynapse(desiredOutputSynapse);

}
protected void initNeuralNet() {

// First create the three layers
input = new LinearLayer();
hidden = new SigmoidLayer();
output = new SigmoidLayer();

// set the dimensions of the layers
input.setRows(3); // three input columns in inputA (the original XOR sample used 2)
hidden.setRows(3);
output.setRows(1);

input.setLayerName("L.input");
hidden.setLayerName("L.hidden");
output.setLayerName("L.output");

// Now create the two Synapses
FullSynapse synapse_IH = new FullSynapse(); /* input -> hidden conn. */

FullSynapse synapse_HO = new FullSynapse(); /* hidden -> output conn. */

// Connect the input layer with the hidden layer
input.addOutputSynapse(synapse_IH);
hidden.addInputSynapse(synapse_IH);

// Connect the hidden layer with the output layer
hidden.addOutputSynapse(synapse_HO);
output.addInputSynapse(synapse_HO);

// the input to the neural net
inputSynapse = new MemoryInputSynapse();

input.addInputSynapse(inputSynapse);

// The Trainer and its desired output
desiredOutputSynapse = new MemoryInputSynapse();

TeachingSynapse trainer = new TeachingSynapse();

trainer.setDesired(desiredOutputSynapse);

// Now we add this structure to a NeuralNet object
nnet = new NeuralNet();

nnet.addLayer(input, NeuralNet.INPUT_LAYER);
nnet.addLayer(hidden, NeuralNet.HIDDEN_LAYER);
nnet.addLayer(output, NeuralNet.OUTPUT_LAYER);
nnet.setTeacher(trainer);
output.addOutputSynapse(trainer);
nnet.addNeuralNetListener(this);
}

public void saveNeuralNet(String fileName) {
try {
FileOutputStream stream = new FileOutputStream(fileName);
ObjectOutputStream out = new ObjectOutputStream(stream);
out.writeObject(nnet);
out.close();
} catch (Exception excp) {
excp.printStackTrace();
}
}
public void load(String filename){
nnet = restoreNeuralNet(filename); // this was left empty; delegate to restoreNeuralNet below
}
public NeuralNet restoreNeuralNet(String filename) {
try {
FileInputStream stream = new FileInputStream(filename);
ObjectInputStream inp = new ObjectInputStream(stream);
return (NeuralNet) inp.readObject();
} catch (Exception excp) {
excp.printStackTrace();
return null;
}
}

public void cicleTerminated(NeuralNetEvent e) {
}

public void errorChanged(NeuralNetEvent e) {
Monitor mon = (Monitor) e.getSource();
if (mon.getCurrentCicle() % 100 == 0) {
System.out.println("Epoch: " + (mon.getTotCicles() - mon.getCurrentCicle()) + " RMSE:" + mon.getGlobalError());
}
}

public void netStarted(NeuralNetEvent e) {
Monitor mon = (Monitor) e.getSource();
System.out.print("Network started for ");
if (mon.isLearning()) {
System.out.println("training.");
} else {
System.out.println("interrogation.");
}
}

public void netStopped(NeuralNetEvent e) {
Monitor mon = (Monitor) e.getSource();
System.out.println("Network stopped. Last RMSE=" + mon.getGlobalError());
}

public void netStoppedError(NeuralNetEvent e, String error) {
System.out.println("Network stopped due the following error: " + error);
}
}
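

4. The weka version I will still write up properly later. Until then, here is a minimal sketch of what calling weka's built-in weka.classifiers.functions.MultilayerPerceptron on the XOR data might look like. This assumes weka.jar (3.7 or newer) on the classpath; the class name WekaXor and all parameter values are illustrative, not the settings I actually used:

import java.util.ArrayList;
import weka.classifiers.functions.MultilayerPerceptron;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instances;

public class WekaXor {
    public static void main(String[] args) throws Exception {
        // build the XOR data set in memory: two numeric inputs and a numeric class
        ArrayList<Attribute> attrs = new ArrayList<Attribute>();
        attrs.add(new Attribute("x1"));
        attrs.add(new Attribute("x2"));
        attrs.add(new Attribute("y"));
        Instances data = new Instances("xor", attrs, 4);
        data.setClassIndex(2);
        double[][] rows = {{0, 0, 0}, {1, 1, 0}, {1, 0, 1}, {0, 1, 1}};
        for (double[] row : rows) {
            data.add(new DenseInstance(1.0, row));
        }

        // train weka's multilayer perceptron (illustrative parameters)
        MultilayerPerceptron mlp = new MultilayerPerceptron();
        mlp.setHiddenLayers("2");   // one hidden layer with two nodes
        mlp.setLearningRate(0.3);
        mlp.setMomentum(0.2);
        mlp.setTrainingTime(5000);  // number of training epochs
        mlp.buildClassifier(data);

        // query the trained network on the training patterns
        for (int i = 0; i < data.numInstances(); i++) {
            System.out.println(data.instance(i) + " -> " + mlp.classifyInstance(data.instance(i)));
        }
    }
}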