// 传统BP算法类 C/C++ 实现 (classic back-propagation network, C/C++ implementation)

#include "stdio.h"
#include "stdlib.h"

#include "math.h"
#include "time.h"
#include "memory.h"

#define BP_H
// A classic three-layer (input / hidden / output) back-propagation neural
// network. All weight matrices, bias vectors and activation buffers are
// heap-allocated in Init() and released in the destructor.
// NOTE(review): no copy constructor / copy assignment is declared, so copying
// a BpNet would double-free the buffers — avoid copies, or delete them.
class BpNet
{
private:
    int _nInput;            // number of input-layer nodes
    int _nHide;             // number of hidden-layer nodes
    int _nOutput;           // number of output-layer nodes

    double **_pplfWeight1;  // input->hidden weights, _nInput x _nHide
    double **_pplfWeight2;  // hidden->output weights, _nHide x _nOutput

    double *_plfb1;            // hidden-layer biases (thresholds), length _nHide
    double *_plfb2;            // output-layer biases (thresholds), length _nOutput

    double *_plfHideIn, *_plfHideOut;       // hidden-layer net input and activation
    double *_plfOutputIn, *_plfOutputOut;   // output-layer net input and activation

    double _a;  // learning rate
    double _e;  // target (maximum allowed) training error

private:

    // Logistic function 1 / (1 + e^-x).
    static double sigmoid(double x);

    double(*f)(double);    // activation function (set to sigmoid in the ctor)


    // Allocates every weight matrix, bias vector and activation buffer.
    void Init();

public:

    // Builds a network with the given layer sizes and random small weights.
    BpNet(int nInput, int nHide, int nOutput);

    virtual ~BpNet();

    // Overrides the default learning rate (a) and target error (e).
    void Set(double a, double e);

    // Copies the network's configuration into the output parameters.
    void GetBasicInformation(int &nInput, int &nHide, int &nOutput, double &lfA, double &lfE);

    // Trains on n samples until the summed error drops below the target.
    bool Train(int n, double **pplfInput, double **pplfDesire);

    // Forward pass: plfInput (length _nInput) -> plfOutput (length _nOutput).
    void Classify(double plfInput[], double plfOutput[]);
};

// Prints the network configuration and the whole training set (inputs and
// desired outputs) to stdout. Purely informational; no state is touched.
void showInfo(int n, int nInput, int nHide, int nOutput, double lfA, double lfE, double **ppInput, double **ppOutput)
{
    printf("输入层节点数:%d; 隐层节点数:%d; 输出层节点数:%d\n", nInput, nHide, nOutput);
    printf("学习因子:%lf; 最大允许误差:%lf\n", lfA, lfE);
    printf("学习样本为:\n");

    // One row per sample, one column per input component.
    printf("输入:\n");
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < nInput; col++)
        {
            printf("%8.5lf", ppInput[row][col]);
        }
        printf("\n");
    }

    // One row per sample, one column per desired-output component.
    printf("期望输出:\n");
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < nOutput; col++)
        {
            printf("%8.5lf", ppOutput[row][col]);
        }
        printf("\n");
    }
}
// Constructs a network with the given layer sizes. Every weight and bias is
// drawn uniformly from [-0.1, 0.1); the activation function is the sigmoid.
BpNet::BpNet(int nInput, int nHide, int nOutput)
{
    // Defaults; callers may override via Set().
    _a = 0.2;   // learning rate
    _e = 0.01;  // target error

    _nInput = nInput;
    _nHide = nHide;
    _nOutput = nOutput;

    // Allocate all matrices/vectors before filling them.
    Init();

    // Seed the PRNG with the current time and discard the first draw.
    srand((unsigned)time(NULL));
    rand();

    // Input->hidden weights: small random values in [-0.1, 0.1).
    for (int in = 0; in < _nInput; in++)
    {
        for (int hid = 0; hid < _nHide; hid++)
            _pplfWeight1[in][hid] = (double)(rand() % 20000 - 10000) / 100000;
    }

    // Hidden->output weights, same range.
    for (int hid = 0; hid < _nHide; hid++)
    {
        for (int out = 0; out < _nOutput; out++)
            _pplfWeight2[hid][out] = (double)(rand() % 20000 - 10000) / 100000;
    }

    // Hidden-layer biases, same range.
    for (int hid = 0; hid < _nHide; hid++)
        _plfb1[hid] = (double)(rand() % 20000 - 10000) / 100000;

    // Output-layer biases, same range.
    for (int out = 0; out < _nOutput; out++)
        _plfb2[out] = (double)(rand() % 20000 - 10000) / 100000;

    // Use the sigmoid as the activation function.
    f = sigmoid;
}

// Releases every buffer allocated by Init().
BpNet::~BpNet()
{
    // Weight matrices: free each row, then the row-pointer arrays.
    for (int row = 0; row < _nInput; row++) delete[] _pplfWeight1[row];
    delete[] _pplfWeight1;
    for (int row = 0; row < _nHide; row++) delete[] _pplfWeight2[row];
    delete[] _pplfWeight2;

    // Bias vectors.
    delete[] _plfb1;
    delete[] _plfb2;

    // Activation buffers.
    delete[] _plfHideIn;
    delete[] _plfHideOut;
    delete[] _plfOutputIn;
    delete[] _plfOutputOut;
}

// Allocates the weight matrices, bias vectors and per-layer activation
// buffers sized from _nInput/_nHide/_nOutput. Values are left uninitialized;
// the constructor fills them afterwards.
void BpNet::Init()
{
    // Input->hidden weight matrix: _nInput rows of _nHide columns.
    _pplfWeight1 = new double *[_nInput];
    for (int row = 0; row < _nInput; row++)
        _pplfWeight1[row] = new double[_nHide];

    // Hidden->output weight matrix: _nHide rows of _nOutput columns.
    _pplfWeight2 = new double *[_nHide];
    for (int row = 0; row < _nHide; row++)
        _pplfWeight2[row] = new double[_nOutput];

    // Bias vectors for the hidden and output layers.
    _plfb1 = new double[_nHide];
    _plfb2 = new double[_nOutput];

    // Net-input and activation buffers reused by Classify()/Train().
    _plfHideIn = new double[_nHide];
    _plfHideOut = new double[_nHide];
    _plfOutputIn = new double[_nOutput];
    _plfOutputOut = new double[_nOutput];
}

// Overrides the training hyper-parameters: learning rate `a` and the
// target (maximum allowed) summed error `e` used as Train()'s stop condition.
void BpNet::Set(double a, double e)
{
    _e = e;
    _a = a;
}

// Reports the network's configuration through the reference parameters:
// layer sizes, learning rate and target error.
void BpNet::GetBasicInformation(int &nInput, int &nHide, int &nOutput, double &lfA, double &lfE)
{
    lfA = _a;
    lfE = _e;
    nOutput = _nOutput;
    nHide = _nHide;
    nInput = _nInput;
}

// Logistic activation: maps any real x into the open interval (0, 1).
double BpNet::sigmoid(double x)
{
    return 1.0 / (1.0 + exp(-x));
}

// Forward pass. Propagates plfInput through hidden and output layers; the
// per-layer activations stay in the member buffers so Train() can reuse them
// for back-propagation. If plfOutput is non-NULL the final activations are
// copied into it (length _nOutput).
void BpNet::Classify(double plfInput[], double plfOutput[])
{
    memset(_plfHideIn, 0, sizeof(double) * _nHide);
    memset(_plfHideOut, 0, sizeof(double) * _nHide);
    memset(_plfOutputIn, 0, sizeof(double) * _nOutput);
    memset(_plfOutputOut, 0, sizeof(double) * _nOutput);

    // Hidden layer: weighted sum of the inputs, then activation with bias.
    for (int hid = 0; hid < _nHide; hid++)
    {
        for (int in = 0; in < _nInput; in++)
            _plfHideIn[hid] += plfInput[in] * _pplfWeight1[in][hid];
        _plfHideOut[hid] = (*f)(_plfHideIn[hid] + _plfb1[hid]);
    }

    // Output layer: weighted sum of the hidden activations, then activation.
    for (int out = 0; out < _nOutput; out++)
    {
        for (int hid = 0; hid < _nHide; hid++)
            _plfOutputIn[out] += _plfHideOut[hid] * _pplfWeight2[hid][out];
        _plfOutputOut[out] = (*f)(_plfOutputIn[out] + _plfb2[out]);
    }

    if (plfOutput != NULL)
        memcpy(plfOutput, _plfOutputOut, sizeof(double) * _nOutput);
}

// Online (per-sample) back-propagation training.
//   n          - number of training samples
//   pplfInput  - n x _nInput sample inputs
//   pplfDesire - n x _nOutput desired outputs
// Runs epochs over all samples until the summed squared error drops to the
// target _e, or gives up after 1,000,000 epochs.
// Returns true when the error target was reached, false when the iteration
// cap was hit without converging (the original version always returned true).
bool BpNet::Train(int n, double **pplfInput, double **pplfDesire)
{
    double lfE = _e + 1;    // force at least one epoch

    // Scratch buffers, allocated once and reused for every sample and epoch.

    // input->hidden weight increments
    double **pplfDeltaWeight1 = new double *[_nInput];
    for (int i = 0; i < _nInput; i++)
    {
        pplfDeltaWeight1[i] = new double[_nHide];
        memset(pplfDeltaWeight1[i], 0, sizeof(double) * _nHide);
    }

    // hidden->output weight increments
    double **pplfDeltaWeight2 = new double *[_nHide];
    for (int i = 0; i < _nHide; i++)
    {
        pplfDeltaWeight2[i] = new double[_nOutput];
        memset(pplfDeltaWeight2[i], 0, sizeof(double) * _nOutput);
    }

    // bias increments for the hidden and output layers
    double *plfDeltaBias1 = new double[_nHide];
    memset(plfDeltaBias1, 0, sizeof(double) * _nHide);
    double *plfDeltaBias2 = new double[_nOutput];
    memset(plfDeltaBias2, 0, sizeof(double) * _nOutput);

    // Error terms (deltas) for each layer. Hoisted out of the loops: the
    // original code new[]'d and delete[]'d these once per sample per epoch.
    double *plfChange1 = new double[_nHide];
    double *plfChange2 = new double[_nOutput];

    long nCount = 0;    // epoch counter
    while (lfE > _e)
    {
        lfE = 0;
        // Process every training sample once per epoch.
        for (int iSample = 0; iSample < n; iSample++)
        {
            double *plfInput = pplfInput[iSample];      // sample input
            double *plfDesire = pplfDesire[iSample];    // desired output

            // Forward pass fills _plfHideOut / _plfOutputOut.
            Classify(plfInput, NULL);

            // Accumulate the half-squared-error measure for this sample.
            double lfEp = 0;
            for (int j = 0; j < _nOutput; j++)
                lfEp += (plfDesire[j] - _plfOutputOut[j]) * (plfDesire[j] - _plfOutputOut[j]) / 2;
            lfE += lfEp;

            // Output-layer delta: out * (1 - out) * (desire - out).
            for (int j = 0; j < _nOutput; j++)
                plfChange2[j] = _plfOutputOut[j] * (1 - _plfOutputOut[j]) * (plfDesire[j] - _plfOutputOut[j]);
            // Hidden->output weight and bias increments.
            for (int j = 0; j < _nHide; j++)
            for (int k = 0; k < _nOutput; k++)
                pplfDeltaWeight2[j][k] = _a * _plfHideOut[j] * plfChange2[k];
            for (int k = 0; k < _nOutput; k++)
                plfDeltaBias2[k] = _a * plfChange2[k];

            // Hidden-layer delta: back-propagate through W2, scale by the
            // sigmoid derivative out * (1 - out).
            memset(plfChange1, 0, sizeof(double) * _nHide);
            for (int j = 0; j < _nHide; j++)
            {
                for (int k = 0; k < _nOutput; k++)
                    plfChange1[j] += _pplfWeight2[j][k] * plfChange2[k];
                plfChange1[j] *= _plfHideOut[j] * (1 - _plfHideOut[j]);
            }
            // Input->hidden weight and bias increments.
            for (int j = 0; j < _nInput; j++)
            for (int k = 0; k < _nHide; k++)
                pplfDeltaWeight1[j][k] = _a * plfInput[j] * plfChange1[k];
            for (int k = 0; k < _nHide; k++)
                plfDeltaBias1[k] = _a * plfChange1[k];

            // Apply the weight updates. (Indices renamed: the original
            // shadowed the outer sample loop variable here.)
            for (int p = 0; p < _nInput; p++)
            for (int q = 0; q < _nHide; q++)
                _pplfWeight1[p][q] += pplfDeltaWeight1[p][q];

            for (int p = 0; p < _nHide; p++)
            for (int q = 0; q < _nOutput; q++)
                _pplfWeight2[p][q] += pplfDeltaWeight2[p][q];

            // Apply the bias updates.
            for (int p = 0; p < _nOutput; p++)
                _plfb2[p] += plfDeltaBias2[p];
            for (int p = 0; p < _nHide; p++)
                _plfb1[p] += plfDeltaBias1[p];
        }
        nCount++;
        if (nCount % 1000 == 0) printf("%lf\n", lfE);
        if (nCount >= 1000000) break;   // safety cap: stop if not converging
    }

    const bool bConverged = (lfE <= _e);

    // Release all scratch buffers.
    delete[] plfChange1;
    delete[] plfChange2;
    for (int i = 0; i < _nInput; i++) delete[] pplfDeltaWeight1[i];
    for (int i = 0; i < _nHide; i++) delete[] pplfDeltaWeight2[i];
    delete[] pplfDeltaWeight1;
    delete[] pplfDeltaWeight2;
    delete[] plfDeltaBias1;
    delete[] plfDeltaBias2;

    printf("迭代在 %ld 步后收敛\n", nCount);

    return bConverged;
}


// (CSDN blog-page UI residue removed here — it was not part of the source code.)