C语言手工搭建神经网络

不使用任何关于AI的库,深入底层原理搭建的三层简易神经网络,有助于入门理解原理

#include <cmath>
#include <cstdlib>
#include <iostream>
using namespace std;

double* getSequence(int n) {// Allocate an array of n doubles, each randomly initialized in [0, 0.09].
    double* p = (double*)malloc(sizeof(double) * n);
    if (p == NULL) return NULL; // allocation failure
    for (int i = 0; i < n; i++) {
        // BUG FIX: rand() was drawn once BEFORE the loop, so every element got
        // the same value. Draw a fresh value per element, as the "random
        // initialization" comment intends. Each value is (0..9)/100.
        p[i] = (rand() % 10) / 100.0;
    }
    return p;
}

double** getTable(int m, int n) {// Allocate an m-by-n matrix whose rows are randomly initialized.
    double** table = (double**)malloc(sizeof(double*) * m);
    if (table == NULL) return NULL; // allocation failure
    for (int i = 0; i < m; i++) {
        table[i] = getSequence(n);
        if (table[i] == NULL) { // roll back rows already allocated so nothing leaks
            for (int k = 0; k < i; k++) free(table[k]);
            free(table);
            return NULL;
        }
    }
    return table;
}

// Logistic activation function: maps any real x into the open interval (0, 1).
double sigmoid(double x) {
    const double e = exp(-x);
    return 1.0 / (1.0 + e);
}

// Derivative of the logistic function, expressed through its own output:
// sigma'(x) = sigma(x) * (1 - sigma(x)).
double sigmoid_grad(double x) {
    const double s = 1.0 / (1.0 + exp(-x)); // sigmoid(x), inlined
    return s * (1.0 - s);
}

typedef struct Neural_NetWork {// Three-layer fully connected network (input -> hidden -> output).
    double* x; double* delta0;                         // input vector; input-layer error term (computed in backward, never read)
    double* hide; double* active1; double* delta1;     // hidden pre-activations; sigmoid(hide); hidden-layer error term
    double* y; double* active2; double* delta2;        // output pre-activations; sigmoid(y); output-layer error term
    double** W1;                                       // input->hidden weights, shape input_dim x hidden_dim
    double** W2;                                       // hidden->output weights, shape hidden_dim x output_dim
    int input_dim;
    int hidden_dim;
    int output_dim;
}Net;

void NetInit(Net* N, int in, int h, int out) {// Initialize the network: store dimensions, allocate all buffers and weight matrices.
    N->input_dim = in;
    N->hidden_dim = h;
    N->output_dim = out;
    // Per-layer vectors. getSequence also seeds them with small values, though
    // hide/active*/delta* are overwritten before use in forward/backward.
    N->x = getSequence(in); N->delta0 = getSequence(in);
    N->hide = getSequence(h); N->active1 = getSequence(h); N->delta1 = getSequence(h);
    N->y = getSequence(out); N->active2 = getSequence(out); N->delta2 = getSequence(out);
    // Weight matrices: W1 is in x h, W2 is h x out. NOTE(review): nothing in this
    // file ever frees these allocations; the Net has no matching teardown function.
    N->W1 = getTable(in, h); 
    N->W2 = getTable(h, out); 
}

/*
 * Multiply the TRANSPOSE of matrix M by vector V: out[j] = sum_i M[i][j] * V[i].
 * M is dimv x dimc, V has dimv entries, result has dimc entries.
 * Caller owns (and must free) the returned buffer; returns NULL on allocation failure.
 */
double* MTV(double** M, double* V, int dimv, int dimc) {
    // FIX: the output was allocated with getSequence, which burns a rand() draw
    // and initializes values that are immediately overwritten; allocate plainly.
    double* out = (double*)malloc(sizeof(double) * dimc);
    if (out == NULL) return NULL;
    for (int j = 0; j < dimc; j++) {
        double temp = 0;
        for (int i = 0; i < dimv; i++) {
            temp += M[i][j] * V[i];
        }
        out[j] = temp;
    }
    return out;
}

/*
 * Multiply matrix M by vector V: out[i] = sum_j M[i][j] * V[j].
 * M is dimc x dimv, V has dimv entries, result has dimc entries.
 * Caller owns (and must free) the returned buffer; returns NULL on allocation failure.
 */
double* MV(double** M, double* V, int dimv, int dimc) {
    // FIX: same as MTV — use plain malloc instead of getSequence, which wasted
    // a rand() draw on values that are overwritten immediately below.
    double* out = (double*)malloc(sizeof(double) * dimc);
    if (out == NULL) return NULL;
    for (int i = 0; i < dimc; i++) {
        double temp = 0;
        for (int j = 0; j < dimv; j++) {
            temp += M[i][j] * V[j];
        }
        out[i] = temp;
    }
    return out;
}

/*
 * Forward pass: compute hidden and output activations for `input`.
 * N->x aliases the caller's buffer (the network does not own or copy it; the
 * vector allocated for x in NetInit is abandoned on the first call).
 */
void forward(Net* N, double* input) {
    N->x = input;
    // LEAK FIX: MTV allocates a fresh buffer on every call; release the one
    // from the previous pass (or from NetInit) before overwriting the pointer.
    free(N->hide);
    N->hide = MTV(N->W1, N->x, N->input_dim, N->hidden_dim); // hide = W1^T * x
    for (int i = 0; i < N->hidden_dim; i++) {
        N->active1[i] = sigmoid(N->hide[i]); // hidden-layer activation
    }
    free(N->y);
    N->y = MTV(N->W2, N->active1, N->hidden_dim, N->output_dim); // y = W2^T * active1
    for (int i = 0; i < N->output_dim; i++) {
        N->active2[i] = sigmoid(N->y[i]); // output-layer activation
    }
}

/*
 * One step of online (single-sample) training: forward pass, error
 * back-propagation, then a gradient update of both weight matrices.
 * `label` must have output_dim entries.
 */
void backward(Net* N, double* input, double* label, double learning_rate) {
    forward(N, input);
    // Output-layer error: difference between target and produced activation.
    for (int i = 0; i < N->output_dim; i++) {
        N->delta2[i] = label[i] - N->active2[i];
    }
    // Propagate the error backwards through the weights.
    // LEAK FIX: MV allocates a new buffer each call; free the previous one first.
    free(N->delta1);
    N->delta1 = MV(N->W2, N->delta2, N->output_dim, N->hidden_dim); // delta1 = W2 * delta2
    free(N->delta0);
    N->delta0 = MV(N->W1, N->delta1, N->hidden_dim, N->input_dim);  // delta0 = W1 * delta1 (never read afterwards)
    // Update the weights: W += lr * delta_out * activation_in * f'(pre-activation).
    for (int i = 0; i < N->input_dim; i++) {
        for (int j = 0; j < N->hidden_dim; j++) {
            N->W1[i][j] = N->W1[i][j] + learning_rate * N->delta1[j] * N->x[i] * sigmoid_grad(N->hide[j]);
        }
    }
    for (int i = 0; i < N->hidden_dim; i++) {
        for (int j = 0; j < N->output_dim; j++) {
            N->W2[i][j] = N->W2[i][j] + learning_rate * N->delta2[j] * N->active1[i] * sigmoid_grad(N->y[j]);
        }
    }
}

int main()
{
    Net NeuralNet; Net* nn = &NeuralNet;
    NetInit(nn, 5, 3, 1); // 5 inputs, 3 hidden units, 1 output
    // Show the initial weights.
    // FIX: print a separator after each value — previously the numbers were
    // concatenated with no delimiter, making the output unreadable.
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 3; j++) {
            cout << nn->W1[i][j] << ' ';
        }
    }
    cout << '\n';
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 1; j++) {
            cout << nn->W2[i][j] << ' ';
        }
    }
    cout << '\n';
    // Classification result under the initial weights.
    double x[5] = { 1,2,3,4,5 };
    double label[1] = { 0 };
    forward(nn, x);
    cout << nn->active2[0] << '\n';
    for (int i = 0; i < 30; i++) backward(nn, x, label, 1); // train repeatedly on the single sample
    // Inspect the weights again: they have been changed by training.
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 3; j++) {
            cout << nn->W1[i][j] << ' ';
        }
    }
    cout << '\n';
    // The trained output should now be closer to the label, showing training works.
    forward(nn, x);
    cout << nn->active2[0] << '\n';
    return 0;
}

  • 7
    点赞
  • 10
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值