Hand-Rolling a Minimal Neural Network in Modern C++ (simplest version, 230901)

This article shows how to build a simple neural network from scratch in C++, with forward propagation and gradient-descent-based backpropagation. The code constructs a network with 2 input nodes, 4 hidden nodes, and 1 output node, then trains and tests it. The main defects in the original code were a potential out-of-bounds vector access and an incorrect sigmoid_derivative computation; both are fixed below.

Task: in C++, take training data vector<vector<float>> inputs = { {1, 1}, {1, 0} }; with targets = {0, 1}; and test data vector<vector<float>> inputs22 = { {1, 0}, {1, 1} };. Build a neural network, e.g. NeuralNetwork nn({ 2, 4, 1 });, giving a three-layer network with 2 input nodes, 4 hidden nodes, and 1 output node, trained with gradient descent, backpropagation, and so on.

Below is a basic neural network built in C++, with forward propagation and gradient-descent-based backpropagation.

In this example:

  1. We create a neural network with 2 input nodes, 4 hidden nodes, and 1 output node.
  2. We train on two input vectors ({1, 1} and {1, 0}); the target outputs are 0 and 1, respectively.
  3. We run 10000 training iterations.
  4. After training, we test the network on two more input vectors ({1, 0} and {1, 1}).

You can tune the network's hyperparameters, such as the learning rate and the number of training iterations, to get better results.
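For instance, a longer and slower run might look like this (a sketch with hypothetical, untuned values; nn, inputs, and targets are the objects defined in the code below):

// Hypothetical re-tuning: more epochs and a smaller learning rate.
for (int epoch = 0; epoch < 100000; ++epoch) {
    for (size_t j = 0; j < inputs.size(); ++j) {
        nn.train(inputs[j], { targets[j] }, 0.01f, false);  // 0.01f is an untested guess
    }
}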

Second (improved) version:

#include <cmath>    // exp, fabs
#include <cstdlib>  // rand, srand
#include <ctime>    // time
#include <iostream>
#include <vector>

using namespace std;

float sigmoid(float x) {
    return 1 / (1 + exp(-x));
}

// Derivative of the sigmoid written in terms of its output a = sigmoid(z):
// d(sigmoid)/dz = a * (1 - a). Callers pass the stored activation, not z.
float sigmoid_derivative(float a) {
    return a * (1 - a);
}

class NeuralNetwork {
private:
    std::vector<std::vector<std::vector<float>>> weights;
    std::vector<std::vector<float>> layer_outputs;

public:
    // topology[i] is the node count of layer i; allocate one weight matrix
    // per adjacent layer pair, initialized uniformly in [-1, 1).
    NeuralNetwork(std::vector<int> topology) {
        std::srand(std::time(0));
        for (int i = 0; i < topology.size() - 1; i++) {
            weights.push_back(std::vector<std::vector<float>>(topology[i], std::vector<float>(topology[i + 1])));
            for (int j = 0; j < topology[i]; j++) {
                for (int k = 0; k < topology[i + 1]; k++) {
                    weights[i][j][k] = (std::rand() % 2000 - 1000) / 1000.0f;
                }
            }
        }
    }

    // Forward pass: returns the output layer's activations and caches every
    // layer's activations in layer_outputs for use during backpropagation.
    std::vector<float> feedforward(std::vector<float> input) {
        layer_outputs.clear();
        layer_outputs.push_back(input);
        for (int i = 0; i < weights.size(); i++) {
            std::vector<float> output(weights[i][0].size(), 0);
            for (int j = 0; j < weights[i][0].size(); j++) {
                for (int k = 0; k < input.size(); k++) {
                    output[j] += input[k] * weights[i][k][j];
                }
                output[j] = sigmoid(output[j]);
            }
            layer_outputs.push_back(output);
            input = output;
        }
        return input;
    }//feedforward


    void train(std::vector<float> input, std::vector<float> targets, float learning_rate, bool whetherOutputFlag) {

        //--------------------------------------------- Forward pass and output-layer error
        std::vector<float> outputs = feedforward(input);
        std::vector<float> errors(outputs.size(), 0);
        float sumA = 0.0f;
        for (size_t i = 0; i < targets.size(); i++) {
            errors[i] = targets[i] - outputs[i];
            if (whetherOutputFlag) {
                sumA += fabs(errors[i]);
            }
        }
        // Print the L1 loss only when the caller asks for it.
        if (whetherOutputFlag) std::cout << "loss = " << sumA;
        //=============================================

        // Backpropagation
        for (int i = weights.size() - 1; i >= 0; i--) {
            // Propagate the error to the previous layer first, while this
            // layer's weights are still the ones used in the forward pass.
            std::vector<float> next_errors(weights[i].size(), 0);
            for (size_t j = 0; j < weights[i].size(); j++) {
                for (size_t k = 0; k < weights[i][j].size(); k++) {
                    next_errors[j] += errors[k] * weights[i][j][k];
                }
            }

            // Gradient-descent update; layer_outputs[i + 1][k] is the (already
            // activated) output of the node this weight feeds into.
            for (size_t j = 0; j < weights[i].size(); j++) {
                for (size_t k = 0; k < weights[i][j].size(); k++) {
                    float derivative = sigmoid_derivative(layer_outputs[i + 1][k]);
                    weights[i][j][k] += learning_rate * errors[k] * derivative * layer_outputs[i][j];
                }
            }

            errors = next_errors;
        }
    }//train

};// NeuralNetwork

int main() {
    NeuralNetwork nn({ 2, 8, 6, 1 });
    // Training set: the target is XOR of the two inputs.
    std::vector<std::vector<float>> inputs = { {1, 0}, {1, 1}, {0, 1}, {0, 0} };
    std::vector<float> targets = { 1, 0, 1, 0 };

    for (int i = 0; i < 50000; i++) {
        for (size_t j = 0; j < inputs.size(); j++) {
            nn.train(inputs[j], { targets[j] }, 0.05, false);  // false: train silently
            if (0 == (i % 10000)) {
                // Extra pass with a tiny learning rate, mainly to print the loss.
                nn.train(inputs[j], { targets[j] }, 0.001, true);
                std::cout << std::endl;
            }
        }
        if (0 == (i % 10000)) std::cout << std::endl;
    }//for110i

    std::vector<std::vector<float>> inputs22 = { {0,1},{0,0}, {1, 0}, {1, 1} };
    for (size_t j = 0; j < inputs22.size(); j++) {
        std::vector<float> output = nn.feedforward(inputs22[j]);
        std::cout << "Output for [" << inputs22[j][0] << ", " << inputs22[j][1] << "]: " << output[0] << std::endl;
    }

    return 0;
}//main
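One detail worth stressing in this version: layer_outputs stores post-activation values, so sigmoid_derivative must compute a * (1 - a) directly from the stored activation instead of applying sigmoid a second time. A minimal sanity check (values are approximate):

float z = 0.5f;
float a = sigmoid(z);                          // a ≈ 0.6225
float correct = a * (1 - a);                   // ≈ 0.2350, the true d(sigmoid)/dz at z = 0.5
float wrong = sigmoid(a) * (1 - sigmoid(a));   // ≈ 0.2273, what double-applying sigmoid yields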

First version:


#include <cmath>    // exp, fabs
#include <cstdlib>  // rand, srand
#include <ctime>    // time
#include <iostream>
#include <vector>

using namespace std;

float sigmoid(float x) {
    return 1 / (1 + exp(-x));
}

// Derivative of the sigmoid written in terms of its output a = sigmoid(z):
// d(sigmoid)/dz = a * (1 - a). Callers pass the stored activation, not z.
float sigmoid_derivative(float a) {
    return a * (1 - a);
}

class NeuralNetwork {
private:
    std::vector<std::vector<std::vector<float>>> weights;
    std::vector<std::vector<float>> layer_outputs;

public:
    NeuralNetwork(std::vector<int> topology) {
        std::srand(std::time(0));
        for (int i = 0; i < topology.size() - 1; i++) {
            weights.push_back(std::vector<std::vector<float>>(topology[i], std::vector<float>(topology[i + 1])));
            for (int j = 0; j < topology[i]; j++) {
                for (int k = 0; k < topology[i + 1]; k++) {
                    weights[i][j][k] = (std::rand() % 2000 - 1000) / 1000.0f;
                }
            }
        }
    }

    std::vector<float> feedforward(std::vector<float> input) {
        layer_outputs.clear();
        layer_outputs.push_back(input);
        for (int i = 0; i < weights.size(); i++) {
            std::vector<float> output(weights[i][0].size(), 0);
            for (int j = 0; j < weights[i][0].size(); j++) {
                for (int k = 0; k < input.size(); k++) {
                    output[j] += input[k] * weights[i][k][j];
                }
                output[j] = sigmoid(output[j]);
            }
            layer_outputs.push_back(output);
            input = output;
        }
        return input;
    }//feedforward


    void train(std::vector<float> input, std::vector<float> targets, float learning_rate, bool whetherOutputFlag) {

        //--------------------------------------------- Forward pass and output-layer error
        std::vector<float> outputs = feedforward(input);
        std::vector<float> errors(outputs.size(), 0);
        float sumA = 0.0f;
        for (size_t i = 0; i < targets.size(); i++) {
            errors[i] = targets[i] - outputs[i];
            if (whetherOutputFlag) {
                sumA += fabs(errors[i]);
            }
        }
        // Print the L1 loss only when the caller asks for it.
        if (whetherOutputFlag) std::cout << "loss = " << sumA;
        //=============================================

        // Backpropagation
        for (int i = weights.size() - 1; i >= 0; i--) {
            // Propagate the error to the previous layer first, while this
            // layer's weights are still the ones used in the forward pass.
            std::vector<float> next_errors(weights[i].size(), 0);
            for (size_t j = 0; j < weights[i].size(); j++) {
                for (size_t k = 0; k < weights[i][j].size(); k++) {
                    next_errors[j] += errors[k] * weights[i][j][k];
                }
            }

            // Gradient-descent update on this layer's weights.
            for (size_t j = 0; j < weights[i].size(); j++) {
                for (size_t k = 0; k < weights[i][j].size(); k++) {
                    float derivative = sigmoid_derivative(layer_outputs[i + 1][k]);
                    weights[i][j][k] += learning_rate * errors[k] * derivative * layer_outputs[i][j];
                }
            }

            errors = next_errors;
        }
    }//train

};// NeuralNetwork

int main() {
    NeuralNetwork nn({ 2, 4, 1 });
    // Training set: the target is XOR of the two inputs.
    std::vector<std::vector<float>> inputs = { {1, 0}, {1, 1}, {0, 1}, {0, 0} };
    std::vector<float> targets = { 1, 0, 1, 0 };

    for (int i = 0; i < 50000; i++) {
        for (size_t j = 0; j < inputs.size(); j++) {
            nn.train(inputs[j], { targets[j] }, 0.05, false);  // false: train silently
            if (0 == (i % 10000)) {
                // Extra pass with a tiny learning rate, mainly to print the loss.
                nn.train(inputs[j], { targets[j] }, 0.001, true);
                std::cout << std::endl;
            }
        }
        if (0 == (i % 10000)) std::cout << std::endl;
    }//for110i

    std::vector<std::vector<float>> inputs22 = { {0,1},{0,0}, {1, 0}, {1, 1} };
    for (size_t j = 0; j < inputs22.size(); j++) {
        std::vector<float> output = nn.feedforward(inputs22[j]);
        std::cout << "Output for [" << inputs22[j][0] << ", " << inputs22[j][1] << "]: " << output[0] << std::endl;
    }

    return 0;
}//main
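For reference, the topology vector passed to the constructor maps one-to-one onto the weight matrices it allocates; a sketch:

// { 2, 4, 1 }    ->  weights[0]: 2 x 4 (input -> hidden)
//                    weights[1]: 4 x 1 (hidden -> output)
// { 2, 8, 6, 1 } (the improved version) adds a second hidden layer:
//                    weights[0]: 2 x 8, weights[1]: 8 x 6, weights[2]: 6 x 1
NeuralNetwork nn({ 2, 4, 1 });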

The following code throws "vector subscript out of range":

float sigmoid(float x) {
    return 1 / (1 + exp(-x));
}

float sigmoid_derivative(float x) {
    float s = sigmoid(x);
    return s * (1 - s);
}

class NeuralNetwork {
private:
    std::vector<std::vector<std::vector<float>>> weights;
    std::vector<std::vector<float>> layer_outputs;

public:
    NeuralNetwork(std::vector<int> topology) {
        std::srand(std::time(0));
        for (int i = 0; i < topology.size() - 1; i++) {
            weights.push_back(std::vector<std::vector<float>>(topology[i], std::vector<float>(topology[i + 1])));
            for (int j = 0; j < topology[i]; j++) {
                for (int k = 0; k < topology[i + 1]; k++) {
                    weights[i][j][k] = (std::rand() % 2000 - 1000) / 1000.0f;
                }
            }
        }
    }

    std::vector<float> feedforward(std::vector<float> input) {
        layer_outputs.clear();
        layer_outputs.push_back(input);
        for (int i = 0; i < weights.size(); i++) {
            std::vector<float> output(weights[i][0].size(), 0);
            for (int j = 0; j < weights[i][0].size(); j++) {
                for (int k = 0; k < input.size(); k++) {
                    output[j] += input[k] * weights[i][k][j];
                }
                output[j] = sigmoid(output[j]);
            }
            layer_outputs.push_back(output);
            input = output;
        }
        return input;
    }

    void train(std::vector<float> input, std::vector<float> targets, float learning_rate) {
        std::vector<float> outputs = feedforward(input);
        // Backpropagation
        for (int i = weights.size() - 1; i >= 0; i--) {
            std::vector<float> errors(targets.size(), 0);
            for (int j = 0; j < targets.size(); j++) {
                errors[j] = targets[j] - outputs[j];
            }
            if (i != weights.size() - 1) {
                std::vector<float> prev_errors = errors;
                errors.clear();
                errors.resize(weights[i + 1].size(), 0);
                for (int j = 0; j < weights[i + 1].size(); j++) {
                    for (int k = 0; k < weights[i + 1][j].size(); k++) {
                        errors[j] += prev_errors[k] * weights[i + 1][j][k];
                    }
                }
            }
            for (int j = 0; j < weights[i].size(); j++) {
                for (int k = 0; k < weights[i][j].size(); k++) {
                    float derivative = sigmoid_derivative(layer_outputs[i + 1][k]);
                    weights[i][j][k] += learning_rate * errors[k] * derivative * layer_outputs[i][j];
                }
            }
            targets = layer_outputs[i];
        }
    }
};

int main() {
    NeuralNetwork nn({ 2, 4, 1 });
    std::vector<std::vector<float>> inputs = { {1, 1}, {1, 0} };
    std::vector<float> targets = { 0, 1 };
    for (int i = 0; i < 10000; i++) {
        for (size_t j = 0; j < inputs.size(); j++) {
            nn.train(inputs[j], { targets[j] }, 0.1);
        }
    }
    std::vector<std::vector<float>> inputs22 = { {1, 0}, {1, 1} };
    for (size_t j = 0; j < inputs22.size(); j++) {
        std::vector<float> output = nn.feedforward(inputs22[j]);
        std::cout << "Output for [" << inputs22[j][0] << ", " << inputs22[j][1] << "]: " << output[0] << std::endl;
    }
    return 0;
}

Please fix the error!
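Reading the broken train() as posted, the subscript error has a plausible trigger in the line targets = layer_outputs[i]; at the end of the layer loop. A sketch of the failure path for the { 2, 4, 1 } network:

// i = 1 (output layer): errors has targets.size() == 1 element; OK.
// Then: targets = layer_outputs[1];          // targets now holds the 4 hidden activations
// i = 0 (hidden layer): errors is sized to targets.size() == 4, and
//     errors[j] = targets[j] - outputs[j];   // but outputs.size() is still 1
// Reading outputs[1] raises "vector subscript out of range".
// The corrected versions above avoid this by propagating the error through
// the weights rather than reusing targets.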
