Modern C++: Hand-Rolling an Arbitrary-Layer Neural Network (22): Forward Propagation, Backpropagation, Gradient Descent, and a Code-Completion Example (230901b)

This post shows how to implement a neural network by hand in C++, including the forward-propagation pass, and asks for the backpropagation and gradient-descent code to be filled in. The network architecture {2, 8, 4, 1} is given, along with the input samples {{1, 0}, {1, 1}} and their corresponding outputs {1, 0}; the goal is to complete the training portion of the network from these pieces.

For the neural network code below, please add the inputs {{1,0},{1,1}} with outputs {1,0}, and add backpropagation, gradient descent, and the rest of the training loop!
#include <iostream>
#include <vector>
#include <Eigen/Dense>
#include <random>
#include <fstream>
#include <string>

using namespace std;

// Forward declaration: readLayers is defined in the complete program further below.
std::vector<int> readLayers(const std::string& fileName);

Eigen::MatrixXd forwardPropagation(const std::vector<Eigen::MatrixXd>& weights, const Eigen::VectorXd& input) {
    Eigen::MatrixXd output = input;
    for (const auto& w : weights) {
        output.conservativeResize(output.rows(), 1);  // Make sure it's a column vector
        output = w * output;
        output = output.unaryExpr([](double x) { return 1.0 / (1.0 + std::exp(-x)); }); // Activation function (sigmoid)
    }
    return output;
}//Eigen::MatrixXd forwardPropagation(

int main()
{

    // Read the network architecture from a file at the drive root
    std::vector<int> layers = readLayers("\\s1.txt");

    // Initialize weights randomly
    std::default_random_engine generator;
    std::normal_distribution<double> distribution(0.0, 1.0);
    std::vector<Eigen::MatrixXd> weights;

    for (size_t i = 0; i < layers.size() - 1; ++i) {
        // Weight matrix i maps layer i (columns) to layer i+1 (rows),
        // filled with samples from N(0, 1)
        Eigen::MatrixXd w(layers[i + 1], layers[i]);
        w = w.unaryExpr([&](double x) { return distribution(generator); });
        weights.push_back(w);
    }// for i

    // Initialize input (example)
    Eigen::VectorXd input(layers[0]);
    input << 0.5, 0.6;

    // Perform forward propagation
    Eigen::MatrixXd output = forwardPropagation(weights, input);

    std::cout << "Output:\n" << output << endl;

    std::cout << "Hello World!\n";
}//main(

At the root directory of the drive:

Contents of the text file s1.txt:

{2,8,4,1}
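
For orientation: with the layer sizes 2, 8, 4, 1 from s1.txt, the weight-initialization loop in the program creates three matrices, W0 of shape 8x2, W1 of shape 4x8, and W2 of shape 1x4, so each Wi maps the activations of layer i to the pre-activations of layer i+1. A standalone sketch that just prints these shapes (no Eigen needed):

#include <iostream>
#include <vector>

int main() {
    std::vector<int> layers = {2, 8, 4, 1};
    for (size_t i = 0; i + 1 < layers.size(); ++i) {
        // Weight matrix i has layers[i+1] rows and layers[i] columns,
        // so W * a maps a layers[i]-vector to a layers[i+1]-vector.
        std::cout << "W" << i << ": " << layers[i + 1] << " x " << layers[i] << "\n";
    }
}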

// modernC++手撸任意层神经网络22前向传播&反向传播等神经网络230901b.cpp : This file contains the "main" function. Program execution begins and ends there.
#include <iostream>
#include <vector>
#include <Eigen/Dense>
#include <random>
#include <fstream>      // ifstream file1(fileName); getline(file1, line01);
#include <sstream>      // istringstream, needed to split the {2,8,4,1} line
#include <string>       // std::stoi
#include <algorithm>    // std::reverse, used in backwardPropagation

using namespace std;

vector<int> readLayers(const string& fileName) {
    ifstream file1(fileName);
    string line01;
    vector<int> layers1;

    getline(file1, line01);
    //---------------------------------
    // Locate the braces
    size_t start_pos = line01.find('{');
    size_t end_pos = line01.find('}');

    // Make sure both braces were found; otherwise the substr below would be invalid
    if (string::npos == start_pos || string::npos == end_pos || start_pos >= end_pos) {
        cout << "No braces found!" << endl;
        return layers1;   // return an empty vector on malformed input instead of crashing
    }//if110

    // Extract the content between the braces
    string content = line01.substr(start_pos + 1, end_pos - start_pos - 1);

    // Split on commas with istringstream and getline
    std::istringstream ss(content);
    std::string token;
    while (std::getline(ss, token, ',')) {
        // Convert each token to an integer with stoi and append it to the result
        layers1.push_back(std::stoi(token));
        cout << layers1.back() << std::endl;
    }

    return layers1;

}//vector<int> readLayers(
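
As a quick sanity check of the parser, one can write the sample line to a scratch file and read it back (a sketch; the file name layers_test.txt is made up for this example, and readLayers from above is assumed to be visible in the same translation unit):

// Hypothetical smoke test for readLayers
#include <fstream>
#include <iostream>
#include <vector>

void testReadLayers() {
    std::ofstream("layers_test.txt") << "{2,8,4,1}";       // scratch file for the test
    std::vector<int> layers = readLayers("layers_test.txt");
    for (int n : layers) std::cout << n << ' ';            // expected: 2 8 4 1
    std::cout << '\n';
}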

Eigen::MatrixXd forwardPropagation(const std::vector<Eigen::MatrixXd>& weights, const Eigen::VectorXd& input) {
    Eigen::MatrixXd output = input;
    for (const auto& w : weights) {
        output.conservativeResize(output.rows(), 1);  // Make sure it's a column vector
        output = w * output;
        output = output.unaryExpr([](double x) { return 1.0 / (1.0 + std::exp(-x)); }); // Activation function (sigmoid)
    }
    return output;
}//Eigen::MatrixXd forwardPropagation(

// Overload: same forward pass, but it also records every layer's activations,
// which backpropagation needs
Eigen::MatrixXd forwardPropagation(const std::vector<Eigen::MatrixXd>& weights, const Eigen::VectorXd& input, std::vector<Eigen::MatrixXd>& activations) {
    Eigen::MatrixXd output = input;
    activations.push_back(output);
    for (const auto& w : weights) {
        output.conservativeResize(output.rows(), 1);  // Make sure it's a column vector
        output = w * output;
        output = output.unaryExpr([](double x) { return 1.0 / (1.0 + std::exp(-x)); }); // Activation function (sigmoid)
        activations.push_back(output);
    }
    return output;
}

// Derivative of the sigmoid, expressed in terms of the activation itself:
// if y = sigmoid(x), then dy/dx = y * (1 - y). The argument here is the
// stored activation y, not the pre-activation x.
Eigen::MatrixXd sigmoidDerivative(const Eigen::MatrixXd& x) {
    return x.unaryExpr([](double y) { return y * (1.0 - y); });
}
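
A quick way to convince yourself of the y * (1 - y) identity is a finite-difference check (a standalone sketch, separate from the post's program):

#include <cmath>
#include <iostream>

int main() {
    auto sigmoid = [](double x) { return 1.0 / (1.0 + std::exp(-x)); };
    double x = 0.7, eps = 1e-6;
    double y = sigmoid(x);
    double analytic = y * (1.0 - y);                              // the identity used above
    double numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2.0 * eps);
    std::cout << "analytic: " << analytic << "  numeric: " << numeric << "\n";
}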

// Backpropagation for the sum-of-squares loss with sigmoid layers.
// activations[0] is the input; activations[k+1] is the output of layer k.
void backwardPropagation(const Eigen::MatrixXd& predicted, const Eigen::MatrixXd& target, const std::vector<Eigen::MatrixXd>& activations, const std::vector<Eigen::MatrixXd>& weights, std::vector<Eigen::MatrixXd>& gradients) {
    // Output-layer error: (prediction - target) scaled by the sigmoid derivative
    Eigen::MatrixXd delta = (predicted - target).cwiseProduct(sigmoidDerivative(activations.back()));
    gradients.push_back(delta * activations[activations.size() - 2].transpose());

    // Propagate the error backwards through the hidden layers
    for (int i = weights.size() - 2; i >= 0; --i) {
        delta = (weights[i + 1].transpose() * delta).cwiseProduct(sigmoidDerivative(activations[i + 1]));
        gradients.push_back(delta * activations[i].transpose());
    }
    // Gradients were collected last-layer-first; reorder to match the weights vector
    std::reverse(gradients.begin(), gradients.end());
}//void backwardPropagation(
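
In equation form, the recursion this function implements can be written as follows (using ⊙ for the elementwise product, a^(k) for the stored activations with a^(0) the input, t for the target, W^(k) for the matrix mapping layer k to layer k+1, and N for the number of weight matrices):

\delta^{(N)} = (a^{(N)} - t) \odot a^{(N)} \odot (1 - a^{(N)})
\delta^{(k)} = \big(W^{(k)\top} \delta^{(k+1)}\big) \odot a^{(k)} \odot (1 - a^{(k)}), \quad k = N-1, \dots, 1
\frac{\partial E}{\partial W^{(k)}} = \delta^{(k+1)} \, a^{(k)\top}, \quad k = N-1, \dots, 0

The deltas are computed from the output layer inward, which is why the std::reverse at the end restores the same order as the weights vector.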


// Despite its name, this helper only computes and prints the sum-of-squares
// loss; the activations, weights, and gradients parameters are unused.
void Loss_backwardPropagation(const Eigen::MatrixXd& predicted, const Eigen::MatrixXd& target, const std::vector<Eigen::MatrixXd>& activations, const std::vector<Eigen::MatrixXd>& weights, std::vector<Eigen::MatrixXd>& gradients) {
    Eigen::MatrixXd delta = (predicted - target);
    double sum_of_squares = delta.array().square().sum();
    std::cout << "Sum of squares of all elements: " << sum_of_squares << std::endl;

}//void Loss_backwardPropagation(
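
A leaner variant that returns the loss instead of printing it could look like this (a sketch; the name computeLoss and the dropped unused parameters are choices made here, not part of the original program):

// Hypothetical helper: return the sum-of-squares loss directly.
double computeLoss(const Eigen::MatrixXd& predicted, const Eigen::MatrixXd& target) {
    return (predicted - target).array().square().sum();
}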


int main()
{

    // Read network architecture from file
    std::vector<int> layers = readLayers("\\s1.txt");

    // Initialize weights randomly
    std::default_random_engine generator;
    std::normal_distribution<double> distribution(0.0, 1.0);
    std::vector<Eigen::MatrixXd> weights;

    for (size_t i = 0; i < layers.size() - 1; ++i) {
        // Weight matrix i maps layer i (columns) to layer i+1 (rows),
        // filled with samples from N(0, 1)
        Eigen::MatrixXd w(layers[i + 1], layers[i]);
        w = w.unaryExpr([&](double x) { return distribution(generator); });
        weights.push_back(w);
    }// for i

    // Initialize input (example)
    Eigen::VectorXd input(layers[0]);
    input << 1, 0; // example input (previously 0.5, 0.6)

    // Perform forward propagation
    Eigen::MatrixXd output = forwardPropagation(weights, input);

    std::cout << "Output:\n" << output << endl;

    std::cout << "Hello World!\n";

    //----------------------------------------------------
    // Training data: the requested pairs {1,0}->1 and {1,1}->0,
    // extended to the full XOR truth table
    vector<Eigen::MatrixXd> inputs = {
        (Eigen::VectorXd(2) << 1, 0).finished(),
        (Eigen::VectorXd(2) << 0, 0).finished(),
        (Eigen::VectorXd(2) << 0, 1).finished(),
        (Eigen::VectorXd(2) << 1, 1).finished()
    };

    vector<Eigen::MatrixXd> targets = {
        (Eigen::VectorXd(1) << 1).finished(),
        (Eigen::VectorXd(1) << 0).finished(),
        (Eigen::VectorXd(1) << 1).finished(),
        (Eigen::VectorXd(1) << 0).finished()
    };

    double learningRate = 0.01;
    int epochs = 50000;

    for (int epoch = 0; epoch < epochs; ++epoch) {
        for (size_t i = 0; i < inputs.size(); ++i) {
            // Forward pass, recording every layer's activations
            std::vector<Eigen::MatrixXd> activations;
            Eigen::MatrixXd output = forwardPropagation(weights, inputs[i], activations);

            // Backward pass: one gradient matrix per weight matrix
            std::vector<Eigen::MatrixXd> gradients;
            backwardPropagation(output, targets[i], activations, weights, gradients);

            // Gradient-descent update
            for (size_t j = 0; j < weights.size(); ++j) {
                weights[j] -= learningRate * gradients[j];
            }// for j

            // Report the per-sample loss every 1000 epochs
            if (0 == epoch % 1000) {
                std::vector<Eigen::MatrixXd> lossActivations;
                std::vector<Eigen::MatrixXd> lossGradients;
                Loss_backwardPropagation(output, targets[i], lossActivations, weights, lossGradients);
            }// if

        }// for i

    }// for epoch

    // Testing trained model
    for (size_t i = 0; i < inputs.size(); ++i) {
        std::vector<Eigen::MatrixXd> activations;
        Eigen::MatrixXd output = forwardPropagation(weights, inputs[i], activations);
        std::cout << "Input:\n" << inputs[i] << "\nOutput:\n" << output << "\n";
    }

    return 0;

    //=======================================================
}//main(
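
To build the complete program: Eigen is header-only, so pointing the compiler at the Eigen headers is enough, e.g. g++ -std=c++17 -O2 -I /path/to/eigen main.cpp (substitute your own Eigen path and source file name). Note that the program opens \\s1.txt, i.e. s1.txt at the root of the current drive on Windows, so the architecture file shown above must be placed there. If training converges, the four outputs printed by the test loop should move toward the XOR targets 1, 0, 1, 0; since the network has no bias terms, convergence from a given random initialization is not guaranteed.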
