cpp-BP与特征脸实现人脸表情识别(一):BP神经网络

这是一份充满野指针的玩具代码,只是用来理解原理的,请勿直接用于实际项目。


最近用耶鲁人脸库简单实现了人脸表情识别,写几篇博客留个纪念。

分为 BP神经网络 与 实现人脸识别 两篇博客,对于遇到的问题与代码进行总结分析。

第一篇需要理解 BP神经网络的原理,先给出代码再总结。

直接复制粘贴 代码很丑。。。

//DateSet.h
//created by WK

#ifndef DATASET_H
#define DATASET_H

#include <vector>
#include <cfloat>

using namespace std;

//数据集
//Data set: each row is InputNum input values followed by OutputNum
//expected output values. Rows are stored as heap arrays owned by DataSet.
class DataSet
{
private:
	int					InputNum;	//number of input columns per row
	int					OutputNum;	//number of output (expected) columns per row
	std::vector<double*>	DataMap;	//owned rows, each of length InputNum + OutputNum
	double				*Max;		//per-column maxima recorded by Normaliz() (owned, delete[])
	double				*Mini;		//per-column minima recorded by Normaliz() (owned, delete[])

public:
	DataSet(int inputnum, int outputnum)
	{
		this->InputNum	=	inputnum;
		this->OutputNum =	outputnum;
		this->Max		=	NULL;
		this->Mini		=	NULL;
	}
	//Free every stored row and the min/max arrays (the original leaked all of
	//them). NOTE(review): the class is still not copy-safe -- copying would
	//double-free the rows; it is only ever used through a single owner here.
	~DataSet()
	{
		for (size_t i = 0; i < this->DataMap.size(); ++i)
			delete[] this->DataMap[i];
		delete[] this->Max;
		delete[] this->Mini;
	}
	//Append one row. Both arrays are copied into a single owned buffer, so the
	//caller keeps ownership of (and must free) its own arguments.
	void AddRow(double *inputArray, double *outputArray)
	{
		double *data = new double[this->InputNum + this->OutputNum];
		for (int i = 0; i < this->InputNum; ++i)
		{
			data[i] = inputArray[i];
		}
		for (int i = 0; i < this->OutputNum; ++i)
		{
			data[InputNum + i] = outputArray[i];
		}
		this->DataMap.push_back(data);
	}
	//Min-max normalization: rescale every column to [0, 1] and record the
	//observed per-column extrema in Max/Mini. Constant columns are left
	//unscaled (to avoid dividing by zero) but their extrema are still recorded.
	void Normaliz()
	{
		//fix: arrays allocated with new[] must be released with delete[]
		//(delete[] on NULL is a no-op, so no guard is needed)
		delete[] this->Max;
		this->Max = NULL;
		delete[] this->Mini;
		this->Mini = NULL;

		this->Max = new double[this->InputNum + this->OutputNum];
		this->Mini = new double[this->InputNum + this->OutputNum];
		for (int i = 0; i < this->InputNum + this->OutputNum; ++i)
		{
			//fix: the original seeded with INT_MIN, which is declared in
			//<climits> (not included) and is the wrong sentinel for doubles
			double max = -DBL_MAX;
			double mini = DBL_MAX;
			for (size_t j = 0; j < DataMap.size(); ++j)
			{
				max = (max > DataMap[j][i] ? max : DataMap[j][i]);
				mini = (mini < DataMap[j][i] ? mini : DataMap[j][i]);
			}

			//fix: record the extrema BEFORE the constant-column skip so that
			//Max[i]/Mini[i] are never left uninitialized
			Max[i] = max;
			Mini[i] = mini;

			if (max == mini)
				continue;

			for (size_t j = 0; j < DataMap.size(); ++j)
			{
				DataMap[j][i] = (DataMap[j][i] - mini) / (max - mini);
			}
		}
	}
	//Per-column maxima; NULL until Normaliz() has been called.
	double *GetMax()
	{
		return this->Max;
	}
	//Per-column minima; NULL until Normaliz() has been called.
	double *GetMini()
	{
		return this->Mini;
	}
	int GetInputNum()
	{
		return this->InputNum;
	}
	int GetOutputNum()
	{
		return this->OutputNum;
	}
	int GetRows()
	{
		return (int)DataMap.size();
	}
	//Returns a copy of the row-pointer vector; the pointed-to rows are still
	//owned by this DataSet.
	vector<double*> GetDataMap()
	{
		return DataMap;
	}

};


#endif // !DATASET_H



这是对数据集的简单封装

private:
	int					InputNum;
	int					OutputNum;
	vector<double*>		DataMap;
	double				*Max;
	double				*Mini;


1.保留输入输出的变量的个数,每一行数据,前InputNum 个为输入 后OutputNum 个为输出

2.采用double* 的vector 便于动态添加数据,使用AddRow添加一行数据

3.Max Mini 分别保存了每一列数据的最大最小值,注意是每一列即不同组别的同类数据,用于归一化

4.Normaliz 采用常见的归一化,分别将每一列进行归一化,即不同组别的同类数据进行归一化

/********************************************************************************************************************************************************************/

//TransferFunc.h
//created by WK

#ifndef TRANSFERFUNC_H
#define TRANSFERFUNC_H

#include <cmath>

//Identifiers for the transfer (activation) functions dispatched by
//Function::GetResult.
enum Functypes
{
	FUNCTYPE_TANH,		//hyperbolic tangent, range (-1, 1)
	FUNCTYPE_STEP,		//Heaviside step: 0 for input <= 0, else 1
	FUNCTYPE_LINEAR,	//identity: returns the input unchanged
	FUNCTYPE_SIGMOID,	//logistic sigmoid, range (0, 1)
	FUNCTYPE_SGN,		//sign: -1 for input < 0, else +1
	FUNVTYPE_RAMP		//ramp: clamps the input into [0, 1]
						//NOTE(review): "FUNV" is a typo for "FUNC"; renaming
						//would break the switch in Function::GetResult
};

//Stateless dispatcher for the transfer (activation) functions listed in
//Functypes. All functions map a double to a double.
class Function
{
private:
	//Heaviside step: 0 when x <= 0, otherwise 1.
	double Step(double x)
	{
		return (x <= 0) ? 0 : 1;
	}
	//Identity.
	double Linear(double x)
	{
		return x;
	}
	//Logistic sigmoid, range (0, 1).
	double Sigmoid(double x)
	{
		return 1.0 / (1.0 + exp(-x));
	}
	//Sign: -1 when x < 0, otherwise +1 (note: +1 at exactly 0).
	double Sgn(double x)
	{
		return (x < 0) ? -1 : 1;
	}
	//Ramp: clamps x into [0, 1].
	double Ramp(double x)
	{
		if (x < 0)
			return 0;
		return (x > 1) ? 1 : x;
	}
public:

	//Evaluate the transfer function selected by funcType (a Functypes value)
	//at x. Unknown selectors fall back to the identity.
	double GetResult(int funcType, double x)
	{
		switch (funcType)
		{
		case FUNCTYPE_TANH:    return tanh(x);
		case FUNCTYPE_STEP:    return Step(x);
		case FUNCTYPE_LINEAR:  return Linear(x);
		case FUNCTYPE_SIGMOID: return Sigmoid(x);
		case FUNCTYPE_SGN:     return Sgn(x);
		case FUNVTYPE_RAMP:    return Ramp(x);
		default:               return x;
		}
	}
};

#endif // !TRANSFERFUNC_H




这是对传递函数的简单封装

1.传入函数类型以及输入数据,即可获取相应函数计算所得结果

/****************************************************************************************************************************************************************/

//net.h
//created by WK

#ifndef NET_H
#define NET_H


#include <vector>
#include <iostream>
#include "TransferFunc.h"
#include "DataSet.h"
#include <cstdlib>

using namespace std;

#define WINITVALUE 0.001
#define TINITVALUE 0

//神经元
//Neuron: one network unit. Stores its raw input, activated output, a
//threshold (bias), and the weights from every neuron of the previous layer.
class Neuron
{
private:
	double				Input;					//weighted sum fed in by the previous layer
	double				Output;					//cached result of the transfer function
	double				Threshold;				//bias subtracted from Input before activation
	double				*Last_weight;			//weights from the previous layer to this neuron (owned, size LastLayerNeuNum)
	int					LastLayerNeuNum;		//fan-in: neuron count of the previous layer
	int					TransferFunctionType;	//Functypes selector
	Function			Transferfunction;		//dispatcher evaluating the transfer function

public:
	//threshold: initial bias; lastlayerneunum: fan-in (0 for input-layer
	//neurons); funcType: Functypes selector for the activation.
	Neuron(double threshold, int lastlayerneunum, int funcType)
	{
		this->Input = 0;
		this->Output = 0;
		this->Threshold = threshold;
		this->LastLayerNeuNum = lastlayerneunum;
		this->TransferFunctionType = funcType;
		this->Last_weight = new double[lastlayerneunum];
		//crucial weight initialization: uniform random values in (-1, 1)
		//to break symmetry between neurons before training
		for (int i = 0; i < lastlayerneunum; ++i)
			this->Last_weight[i] = (2.0*(double)rand() / RAND_MAX) - 1;

	}
	//Release the weight array (fix: leaked in the original version).
	//NOTE(review): the class is not copy-safe; neurons are only ever handled
	//through pointers in this code base.
	~Neuron()
	{
		delete[] this->Last_weight;
		this->Last_weight = NULL;
	}
	//Store the weighted sum computed from the previous layer.
	void SetInput(double input)
	{
		this->Input = input;
	}
	//Apply the transfer function to (Input - Threshold), cache and return it.
	double GetOutput()
	{
		this->Output = Transferfunction.GetResult(this->TransferFunctionType, this->Input - this->Threshold);

		return this->Output;
	}
	//Expose the threshold by pointer so the trainer can update it in place.
	double* GetThreshold()
	{
		return &this->Threshold;
	}
	//Expose the weight array so the trainer can update it in place.
	double *GetWeight()
	{
		return this->Last_weight;
	}
	void SetFuncType(int functype)
	{
		this->TransferFunctionType = functype;
	}
};

//多层感知机
class MultiLayerPerceptron
{
private:
	int			OutTransfetFunctionType;
	int			HideTransfetFunctionType;
	int			InTransfetFunctionType;
	int			InLayerNeuNum;
	int			HideLayerNeuNum;
	int			OutLayerNeuNum;
	double		Speed;
	Neuron		**InputNeurons;
	Neuron		**OutputNeurons;
	Neuron		**HidenNeurons;
public:

	MultiLayerPerceptron(int intransferfunctiontype, int inLayerNeuNum, int hidetransferfunctiontype, int hideLayerNeuNum, int outtransferfunctiontype, int outLayerNeuNum, double speed)
	{
		this->InTransfetFunctionType = intransferfunctiontype;
		this->HideTransfetFunctionType = hidetransferfunctiontype;
		this->OutTransfetFunctionType = outtransferfunctiontype;
		this->InLayerNeuNum = inLayerNeuNum;
		this->HideLayerNeuNum = hideLayerNeuNum;
		this->OutLayerNeuNum = outLayerNeuNum;
		this->Speed = speed;

		this->InputNeurons = (Neuron**)new void*[inLayerNeuNum];
		for (int i = 0; i < inLayerNeuNum; ++i)
			this->InputNeurons[i] = new Neuron(TINITVALUE, 0, intransferfunctiontype);
		this->HidenNeurons = (Neuron**)new void*[hideLayerNeuNum];
		for (int i = 0; i < hideLayerNeuNum; ++i)
			this->HidenNeurons[i] = new Neuron(TINITVALUE, inLayerNeuNum, hidetransferfunctiontype);
		this->OutputNeurons = (Neuron**)new void*[outLayerNeuNum];
		for (int i = 0; i < outLayerNeuNum; ++i)
			this->OutputNeurons[i] = new Neuron(TINITVALUE, hideLayerNeuNum, outtransferfunctiontype);
	}
	//获取正向的输出
	void GetOutput(double *output)
	{
		double sum;

		for (int i = 0; i < this->HideLayerNeuNum; ++i)
		{
			sum = 0;
			for (int j = 0; j < this->InLayerNeuNum; ++j)
				sum += this->HidenNeurons[i]->GetWeight()[j] * this->InputNeurons[j]->GetOutput();
			this->HidenNeurons[i]->SetInput(sum);
		}
		for (int i = 0; i < this->OutLayerNeuNum; ++i)
		{
			sum = 0;
			for (int j = 0; j < this->HideLayerNeuNum; ++j)
				sum += this->OutputNeurons[i]->GetWeight()[j] * this->HidenNeurons[j]->GetOutput();
			this->OutputNeurons[i]->SetInput(sum);
			output[i] = this->OutputNeurons[i]->GetOutput();
		}
	}

	//学习所有数据一次
	void Learn(DataSet *trainingSet)
	{
		double *expect;
		double *data;
		double *output = new double[this->OutLayerNeuNum];
		for (int i = 0; i < trainingSet->GetRows(); ++i)
		{
			data = trainingSet->GetDataMap()[i];
			expect = data + trainingSet->GetInputNum();
			for (int j = 0; j < trainingSet->GetInputNum(); ++j)
				this->InputNeurons[j]->SetInput(data[j]);
			this->GetOutput(output);
			//更改隐藏层到输出层权重以及阈值
			//更新公式详见机器学习
			for (int j = 0; j < this->OutLayerNeuNum; ++j)
			{
				double delta = this->Speed * output[j] * (1 - output[j]) * (expect[j] - output[j]);

				for (int k = 0; k < this->HideLayerNeuNum; ++k)
					this->OutputNeurons[j]->GetWeight()[k] += (delta * this->HidenNeurons[k]->GetOutput());
				*this->OutputNeurons[j]->GetThreshold() -= delta;
			}
			//更改输入层到隐藏层的权重以及阈值
			//更新公式详见机器学习
			for (int j = 0; j < this->HideLayerNeuNum; ++j)
			{
				double t = 0;
				for (int k = 0; k < this->OutLayerNeuNum; ++k)
					t += (this->OutputNeurons[k]->GetWeight()[j] * output[k] * (1 - output[k])*(expect[k] - output[k]));
				double delta = this->HidenNeurons[j]->GetOutput() * (1 - this->HidenNeurons[j]->GetOutput()) * t;
				for (int k = 0; k < this->InLayerNeuNum; ++k)
					this->HidenNeurons[j]->GetWeight()[k] += (this->Speed * this->InputNeurons[k]->GetOutput() * delta);
				*this->HidenNeurons[j]->GetThreshold() -= (this->Speed * delta);
			}
		}
	}

	void Test(DataSet *Set)
	{
		double *output = new double[this->OutLayerNeuNum];
		double *expect = new double[this->OutLayerNeuNum];

		for (int i = 0; i < Set->GetRows(); ++i)
		{
			for (int j = 0; j < Set->GetInputNum(); ++j)
				this->InputNeurons[j]->SetInput(Set->GetDataMap()[i][j]);
			this->GetOutput(output);
			for (int j = 0; j < Set->GetOutputNum(); ++j)
			{
				cout << "output: ";
				cout << output[j] << "  ";
				cout << "expect: ";
				cout << Set->GetDataMap()[i][Set->GetInputNum() + j] << "  ";
			}
			cout << endl;
		}
		cout << endl;
		cout << "in to hide Weight:" << endl;
		for (int i = 0; i < this->HideLayerNeuNum; ++i)
		{
			for (int j = 0; j < this->InLayerNeuNum; ++j)
			{
				cout << this->HidenNeurons[i]->GetWeight()[j] << "  ";
			}
			cout << endl;
		}
		cout << endl;
		cout << "hide to out Weight:" << endl;
		for (int i = 0; i < this->OutLayerNeuNum; ++i)
		{
			for (int j = 0; j < this->HideLayerNeuNum; ++j)
			{
				cout << this->OutputNeurons[i]->GetWeight()[j] << "  ";
			}
			cout << endl;
		}
	}

};

#endif // !NET_H


以上为 神经元 以及 BP神经网络的代码 以下分开说

/****************************************************************************************************************************************/

//神经元
//Neuron: one network unit. Stores its raw input, activated output, a
//threshold (bias), and the weights from every neuron of the previous layer.
class Neuron
{
private:
	double				Input;
	double				Output;
	double				Threshold;
	double				*Last_weight;			//weights from the previous layer to this neuron ("backward" weights, owned array)
	int					LastLayerNeuNum;
	int					TransferFunctionType;
	Function			Transferfunction;

public:
	//threshold: initial bias; lastlayerneunum: fan-in (0 for input-layer
	//neurons); funcType: Functypes selector for the activation.
	Neuron(double threshold, int lastlayerneunum, int funcType)
	{
		this->Input = 0;
		this->Output = 0;
		this->Threshold = threshold;
		this->LastLayerNeuNum = lastlayerneunum;
		this->TransferFunctionType = funcType;
		this->Last_weight = new double[lastlayerneunum];
		//crucial weight initialization: uniform random values in (-1, 1)
		//NOTE(review): Last_weight is never freed -- a known leak in this toy code
		for (int i = 0; i < lastlayerneunum; ++i)
			this->Last_weight[i] = (2.0*(double)rand() / RAND_MAX) - 1;

	}
	//Store the weighted sum computed from the previous layer.
	void SetInput(double input)
	{
		this->Input = input;
	}
	//Apply the transfer function to (Input - Threshold), cache and return it.
	double GetOutput()
	{
		this->Output = Transferfunction.GetResult(this->TransferFunctionType, this->Input - this->Threshold);

		return this->Output;
	}
	//Expose the threshold by pointer so the trainer can update it in place.
	double* GetThreshold()
	{
		return &this->Threshold;
	}
	//Expose the weight array so the trainer can update it in place.
	double *GetWeight()
	{
		return this->Last_weight;
	}
	void SetFuncType(int functype)
	{
		this->TransferFunctionType = functype;
	}
};



以上为神经元

1.输入值,输出值,阈值,以及传输函数类型无需多言,构造必须指定 阈值,上一层神经元个数用于分配内存给权重数组,以及传输函数类型

2.每一个神经元维护后向的权值Last_weight   举例

    s前1 代表 前一层的1号神经元输出                    s后1 代表后一层1号神经元的输入  w表示权重  有:

    s后1 = w1 * s前1 + w2 * s前2.......                  暂时忽略阈值与传输函数

    即由 s后1 这个神经元维护一个数组,这个数组的元素是 w1 w2 w3
    (权重的维护有多种实现方式 随意)

/***********************************************************************************************************************************************************************************/

//多层感知机
//Multi-layer perceptron with a single hidden layer, trained by stochastic
//backpropagation (update formulas per Zhou Zhihua, "Machine Learning", ch. 5).
class MultiLayerPerceptron
{
private:
	int			OutTransfetFunctionType;	//Functypes selector of the output layer
	int			HideTransfetFunctionType;	//Functypes selector of the hidden layer
	int			InTransfetFunctionType;		//Functypes selector of the input layer
	int			InLayerNeuNum;				//neurons in the input layer
	int			HideLayerNeuNum;			//neurons in the (single) hidden layer
	int			OutLayerNeuNum;				//neurons in the output layer
	double		Speed;						//learning rate
	Neuron		**InputNeurons;				//per-layer arrays of Neuron pointers
	Neuron		**OutputNeurons;
	Neuron		**HidenNeurons;
public:

	MultiLayerPerceptron(int intransferfunctiontype, int inLayerNeuNum, int hidetransferfunctiontype, int hideLayerNeuNum, int outtransferfunctiontype, int outLayerNeuNum, double speed)
	{
		this->InTransfetFunctionType = intransferfunctiontype;
		this->HideTransfetFunctionType = hidetransferfunctiontype;
		this->OutTransfetFunctionType = outtransferfunctiontype;
		this->InLayerNeuNum = inLayerNeuNum;
		this->HideLayerNeuNum = hideLayerNeuNum;
		this->OutLayerNeuNum = outLayerNeuNum;
		this->Speed = speed;

		//NOTE(review): "(Neuron**)new void*[n]" is a type-punning cast and the
		//neurons/arrays are never freed -- known warts of this toy code
		this->InputNeurons = (Neuron**)new void*[inLayerNeuNum];
		for (int i = 0; i < inLayerNeuNum; ++i)
			this->InputNeurons[i] = new Neuron(TINITVALUE, 0, intransferfunctiontype);
		this->HidenNeurons = (Neuron**)new void*[hideLayerNeuNum];
		for (int i = 0; i < hideLayerNeuNum; ++i)
			this->HidenNeurons[i] = new Neuron(TINITVALUE, inLayerNeuNum, hidetransferfunctiontype);
		this->OutputNeurons = (Neuron**)new void*[outLayerNeuNum];
		for (int i = 0; i < outLayerNeuNum; ++i)
			this->OutputNeurons[i] = new Neuron(TINITVALUE, hideLayerNeuNum, outtransferfunctiontype);
	}
	//Forward pass: propagate the already-set input-layer values through the
	//hidden layer and write the output-layer activations into output
	//(caller-provided, length OutLayerNeuNum).
	void GetOutput(double *output)
	{
		double sum;

		for (int i = 0; i < this->HideLayerNeuNum; ++i)
		{
			sum = 0;
			for (int j = 0; j < this->InLayerNeuNum; ++j)
				sum += this->HidenNeurons[i]->GetWeight()[j] * this->InputNeurons[j]->GetOutput();
			this->HidenNeurons[i]->SetInput(sum);
		}
		for (int i = 0; i < this->OutLayerNeuNum; ++i)
		{
			sum = 0;
			for (int j = 0; j < this->HideLayerNeuNum; ++j)
				sum += this->OutputNeurons[i]->GetWeight()[j] * this->HidenNeurons[j]->GetOutput();
			this->OutputNeurons[i]->SetInput(sum);
			output[i] = this->OutputNeurons[i]->GetOutput();
		}
	}

	//One learning epoch: run forward + backward over every row of the
	//training set once, updating all weights and thresholds in place.
	void Learn(DataSet *trainingSet)
	{
		double *expect;
		double *data;
		double *output = new double[this->OutLayerNeuNum];	//NOTE(review): leaked each call
		for (int i = 0; i < trainingSet->GetRows(); ++i)
		{
			data = trainingSet->GetDataMap()[i];
			expect = data + trainingSet->GetInputNum();
			for (int j = 0; j < trainingSet->GetInputNum(); ++j)
				this->InputNeurons[j]->SetInput(data[j]);
			this->GetOutput(output);
			//update hidden->output weights and output thresholds
			//(delta already contains the learning rate Speed)
			for (int j = 0; j < this->OutLayerNeuNum; ++j)
			{
				double delta = this->Speed * output[j] * (1 - output[j]) * (expect[j] - output[j]);

				for (int k = 0; k < this->HideLayerNeuNum; ++k)
					this->OutputNeurons[j]->GetWeight()[k] += (delta * this->HidenNeurons[k]->GetOutput());
				*this->OutputNeurons[j]->GetThreshold() -= delta;
			}
			//update input->hidden weights and hidden thresholds
			//(here delta excludes Speed, so Speed is applied in the updates)
			for (int j = 0; j < this->HideLayerNeuNum; ++j)
			{
				double t = 0;
				for (int k = 0; k < this->OutLayerNeuNum; ++k)
					t += (this->OutputNeurons[k]->GetWeight()[j] * output[k] * (1 - output[k])*(expect[k] - output[k]));
				double delta = this->HidenNeurons[j]->GetOutput() * (1 - this->HidenNeurons[j]->GetOutput()) * t;
				for (int k = 0; k < this->InLayerNeuNum; ++k)
					this->HidenNeurons[j]->GetWeight()[k] += (this->Speed * this->InputNeurons[k]->GetOutput() * delta);
				*this->HidenNeurons[j]->GetThreshold() -= (this->Speed * delta);
			}
		}
	}

	//Print the network's output vs. the expected values for every row of the
	//set, followed by both weight matrices.
	void Test(DataSet *Set)
	{
		double *output = new double[this->OutLayerNeuNum];	//NOTE(review): leaked each call
		double *expect = new double[this->OutLayerNeuNum];	//NOTE(review): unused and leaked

		for (int i = 0; i < Set->GetRows(); ++i)
		{
			for (int j = 0; j < Set->GetInputNum(); ++j)
				this->InputNeurons[j]->SetInput(Set->GetDataMap()[i][j]);
			this->GetOutput(output);
			for (int j = 0; j < Set->GetOutputNum(); ++j)
			{
				cout << "output: ";
				cout << output[j] << "  ";
				cout << "expect: ";
				cout << Set->GetDataMap()[i][Set->GetInputNum() + j] << "  ";
			}
			cout << endl;
		}
		cout << endl;
		cout << "in to hide Weight:" << endl;
		for (int i = 0; i < this->HideLayerNeuNum; ++i)
		{
			for (int j = 0; j < this->InLayerNeuNum; ++j)
			{
				cout << this->HidenNeurons[i]->GetWeight()[j] << "  ";
			}
			cout << endl;
		}
		cout << endl;
		cout << "hide to out Weight:" << endl;
		for (int i = 0; i < this->OutLayerNeuNum; ++i)
		{
			for (int j = 0; j < this->HideLayerNeuNum; ++j)
			{
				cout << this->OutputNeurons[i]->GetWeight()[j] << "  ";
			}
			cout << endl;
		}
	}

};


以上为bp神经网络

1.构造需指定 输入层神经元个数,输入层神经元传输函数类型,隐藏层(单隐)神经元个数,隐藏层神经元传输函数类型,输出层神经元个数,输出层神经元传输函数类型,学习速率

2.Neuron** 的成员维护一层的神经元,保存指针数组

3.Getoutput 设置输入层各神经元的input 调用Getoutput  获取输出神经元的输出值

4.Learn 根据 周志华所著的机器学习中的权重更新公式,更新权重,这里的代码略为臃肿

5.Test 用于测试

/********************************************************************************************************************************************************************************************/


以下为神经网络测试 解决异或问题

#include "net.h"

int main()
{
	DataSet *trainingSet = new DataSet(2, 1);
	trainingSet->AddRow(new double[2]{ 1,1 }, new double[1]{ 0 });
	trainingSet->AddRow(new double[2]{ 1,0 }, new double[1]{ 1 });
	trainingSet->AddRow(new double[2]{ 0,1 }, new double[1]{ 1 });
	trainingSet->AddRow(new double[2]{ 0,0 }, new double[1]{ 0 });

	//层激励函数类型 神经元个数... 学习速率  
	MultiLayerPerceptron *m = new MultiLayerPerceptron(FUNCTYPE_LINEAR, 2, FUNCTYPE_SIGMOID, 5, FUNCTYPE_SIGMOID, 1, 0.9);

	//学习1000次  
	for (int i = 0; i < 1000; ++i)
		m->Learn(trainingSet);

	m->Test(trainingSet);



	system("pause");

}




  • 4
    点赞
  • 6
    收藏
    觉得还不错? 一键收藏
  • 3
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 3
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值