A Small Neural Algorithm Example

"Neural algorithm" usually refers to a neural network algorithm, a family of machine learning algorithms inspired by the structure of neurons in the human brain.

A neural network mimics the way the brain works: it learns a mapping from inputs to outputs in order to solve complex problems.

A neural network consists of an input layer, one or more hidden layers, and an output layer. Each layer contains multiple neurons, and the network learns by adjusting the weights of the connections between them.
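
To make this concrete, here is a minimal sketch of what a single neuron computes; the class name, weights, bias, and inputs are made up purely for illustration. The neuron forms a weighted sum of its inputs, adds a bias, and passes the result through an activation function such as the sigmoid; training means adjusting those weights and the bias.

public class SingleNeuron {
    public static void main(String[] args) {
        // Made-up weights, bias, and inputs, chosen only for illustration
        double w1 = 0.4, w2 = -0.7, bias = 0.1;
        double x1 = 1.0, x2 = 0.0;

        // Weighted sum of the inputs plus the bias, then the sigmoid activation
        double z = w1 * x1 + w2 * x2 + bias;
        double output = 1.0 / (1.0 + Math.exp(-z));

        // Prints a value between 0 and 1
        System.out.println("Neuron output: " + output);
    }
}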

Below is a simple neural network example implemented in Java. It uses a small feedforward neural network to solve the XOR problem, with one input layer, one hidden layer, and one output layer.
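
The example relies on the Apache Commons Math 3 library (the org.apache.commons:commons-math3 artifact, e.g. version 3.6.1) for its matrix and sigmoid classes, so that dependency needs to be on the classpath when compiling and running the code.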

import org.apache.commons.math3.analysis.function.Sigmoid;
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.RealMatrix;

public class NeuralNetwork {

    // Learning rate for gradient descent
    private static final double LEARNING_RATE = 0.5;

    // Activations cached during the forward pass (reused by backpropagation)
    private RealMatrix inputLayer;         // 2 x 1
    private RealMatrix hiddenLayerOutput;  // 2 x 1

    private RealMatrix hiddenLayerWeights; // 2 x 2
    private RealMatrix hiddenLayerBiases;  // 2 x 1
    private RealMatrix outputLayerWeights; // 1 x 2
    private RealMatrix outputLayerBiases;  // 1 x 1

    public NeuralNetwork() {
        initializeWeightsAndBiases();
    }

    private void initializeWeightsAndBiases() {
        // Small fixed values so the example is reproducible; in practice the
        // weights would normally be initialized randomly
        hiddenLayerWeights = new Array2DRowRealMatrix(new double[][]{{0.5, 0.2}, {-0.5, 0.3}});
        hiddenLayerBiases = new Array2DRowRealMatrix(new double[]{0.1, -0.1});

        // One output neuron with two incoming weights, stored as a 1 x 2 row matrix
        outputLayerWeights = new Array2DRowRealMatrix(new double[][]{{0.3, -0.2}});
        outputLayerBiases = new Array2DRowRealMatrix(new double[]{0.2});
    }

    public double predict(double input1, double input2) {
        // Forward pass: the inputs form a 2 x 1 column vector
        inputLayer = new Array2DRowRealMatrix(new double[]{input1, input2});

        RealMatrix hiddenLayerInput = hiddenLayerWeights.multiply(inputLayer).add(hiddenLayerBiases);
        hiddenLayerOutput = applyActivationFunction(hiddenLayerInput);

        RealMatrix outputLayerInput = outputLayerWeights.multiply(hiddenLayerOutput).add(outputLayerBiases);
        RealMatrix outputLayerOutput = applyActivationFunction(outputLayerInput);

        return outputLayerOutput.getEntry(0, 0);
    }

    // Apply the sigmoid activation element-wise to a matrix
    private RealMatrix applyActivationFunction(RealMatrix matrix) {
        Sigmoid sigmoid = new Sigmoid();
        RealMatrix result = matrix.copy();
        for (int row = 0; row < result.getRowDimension(); row++) {
            for (int col = 0; col < result.getColumnDimension(); col++) {
                result.setEntry(row, col, sigmoid.value(result.getEntry(row, col)));
            }
        }
        return result;
    }

    public static void main(String[] args) {
        NeuralNetwork neuralNetwork = new NeuralNetwork();

        // Training data for the XOR function
        double[][] trainingData = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
        double[] labels = {0, 1, 1, 0};

        for (int epoch = 0; epoch < 10000; epoch++) {
            for (int i = 0; i < trainingData.length; i++) {
                double input1 = trainingData[i][0];
                double input2 = trainingData[i][1];
                double label = labels[i];

                // Forward pass (also caches inputLayer and hiddenLayerOutput)
                double prediction = neuralNetwork.predict(input1, input2);

                // Backpropagation with a squared-error loss.
                // Output delta = error * sigmoid derivative, written via the activation: s * (1 - s)
                double error = label - prediction;
                double outputDelta = error * prediction * (1 - prediction);

                // Hidden deltas: push the output delta back through the output weights,
                // then scale each entry by the sigmoid derivative of that hidden unit
                RealMatrix hiddenDeltas = neuralNetwork.outputLayerWeights.transpose().scalarMultiply(outputDelta);
                for (int j = 0; j < hiddenDeltas.getRowDimension(); j++) {
                    double h = neuralNetwork.hiddenLayerOutput.getEntry(j, 0);
                    hiddenDeltas.setEntry(j, 0, hiddenDeltas.getEntry(j, 0) * h * (1 - h));
                }

                // Gradient descent updates for weights and biases
                neuralNetwork.outputLayerWeights = neuralNetwork.outputLayerWeights.add(
                        neuralNetwork.hiddenLayerOutput.transpose().scalarMultiply(LEARNING_RATE * outputDelta));
                neuralNetwork.outputLayerBiases = neuralNetwork.outputLayerBiases.scalarAdd(LEARNING_RATE * outputDelta);

                neuralNetwork.hiddenLayerWeights = neuralNetwork.hiddenLayerWeights.add(
                        hiddenDeltas.multiply(neuralNetwork.inputLayer.transpose()).scalarMultiply(LEARNING_RATE));
                neuralNetwork.hiddenLayerBiases = neuralNetwork.hiddenLayerBiases.add(
                        hiddenDeltas.scalarMultiply(LEARNING_RATE));
            }
        }

        // Test the trained network
        System.out.println("Prediction for (0, 0): " + neuralNetwork.predict(0, 0));
        System.out.println("Prediction for (0, 1): " + neuralNetwork.predict(0, 1));
        System.out.println("Prediction for (1, 0): " + neuralNetwork.predict(1, 0));
        System.out.println("Prediction for (1, 1): " + neuralNetwork.predict(1, 1));
    }
}