Deep Learning: Programming a Convolutional Neural Network (Part 2)

       There is a huge amount of deep learning material online these days, but most of it is reposted, and even the original articles are often all talk and no practice: piles of theory and formulas, barely a line of reliable code, and even less that actually runs. People who can both read papers fluently and write code fluently are rare; I would put myself in the category of someone who stumbles through papers and writes passable code.
       The previous article covered the theory behind convolutional neural networks and the derivation of the formulas; this article implements the classic LeNet-5 network (http://yann.lecun.com/exdb/lenet/ ) in C. The code borrows heavily from tiny-cnn (https://github.com/nyanp/tiny-cnn ). tiny-cnn is a high-quality open-source project, but because it is built around object-oriented design plus C++ templates, digesting the whole codebase takes both time and a solid technical background. Some of the code in this LeNet-5 implementation is taken from that project; by comparison, this project is smaller, the code is much easier to read and to port, and it has no third-party dependencies at all.
       The full LeNet-5 network is shown in the figure below; note that the F6 layer is not used in this implementation. The network consists of 2 convolutional layers, 2 pooling layers, and 1 fully connected layer, plus the input and output, for 7 layers in total. Training uses max pooling and the hyperbolic tangent activation; after 8 training epochs the handwritten-digit recognition accuracy reaches 99%.
       [Figure: the LeNet-5 network architecture]
       Because the core forward- and backward-propagation computations unfold layer by layer and involve a fair amount of data, the first task is designing the data structures; well-chosen structures make the code much easier both to write and to read. Four structures are defined here, holding the training samples, the convolution kernels, the feature maps, and the network layers. I will not repeat the algorithm theory here (see the previous article for that); the comments and the variable and function names in the code are fairly detailed, so it should be easy to follow, provided you already understand how convolutional neural networks work.

       Data structures:
typedef struct _Sample
{
	double *data;
	double *label;

	int sample_w;
	int sample_h;
	int sample_count;
} Sample;

typedef struct _Kernel
{
	double *W;
	double *dW;
} Kernel;

typedef struct _Map
{
	double *data;
	double *error;
	double  b;
	double  db;
} Map;

typedef struct _Layer
{
	int map_w;
	int map_h;
	int map_count;
	Map *map;

	int kernel_w;
	int kernel_h;
	int kernel_count;
	Kernel *kernel;

	double *map_common;
} Layer;
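       One convention worth making explicit before going further: Layer holds its kernels in a flat array of prevlayer_map_count * map_count entries, and the kernel connecting previous-layer map i to current-layer map j lives at index i*map_count + j. A tiny accessor (hypothetical, for illustration only) spells this out:

static Kernel *kernel_at(Layer *layer, int prev_map_i, int map_j)
{
	// See the allocation loop in init_layer below: kernels are laid out
	// row-major, one row per previous-layer map.
	return &layer->kernel[prev_map_i * layer->map_count + map_j];
}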

       Initialization:
void init_kernel(double *kernel, int size, double weight_base)
{
	for (int i = 0; i < size; i++)
	{
		kernel[i] = (genrand_real1() - 0.5) * 2 * weight_base;
	}
}
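       Two things are worth noting here. First, init_kernel draws each weight uniformly from [-weight_base, weight_base], and init_layer below sets weight_base = sqrt(6 / (fan_in + fan_out)), which is exactly Xavier/Glorot initialization. Second, genrand_real1 and init_genrand are the function names from Matsumoto and Nishimura's MT19937 (Mersenne Twister) reference implementation, which I assume the project includes but which is not reproduced here; for a quick experiment a rand()-based stand-in like the following would also work (an assumption, with poorer statistical quality):

#include <stdlib.h>

// Hypothetical drop-in replacements for the MT19937 helpers used above.
void init_genrand(unsigned long s)
{
	srand((unsigned int)s);
}

double genrand_real1(void)
{
	// Uniform on [0, 1], matching the range of the MT19937 original.
	return (double)rand() / (double)RAND_MAX;
}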

void init_layer(Layer *layer, int prevlayer_map_count, int map_count, int kernel_w, int kernel_h, int map_w, int map_h, bool is_pooling)
{
	int mem_size = 0;

	const double scale = 6.0;
	int fan_in = 0;
	int fan_out = 0;
	if (is_pooling)
	{
		fan_in  = 4;
		fan_out = 1;
	}
	else
	{
		fan_in = prevlayer_map_count * kernel_w * kernel_h;
		fan_out = map_count * kernel_w * kernel_h;
	}
	int denominator = fan_in + fan_out;
	double weight_base = (denominator != 0) ? sqrt(scale / (double)denominator) : 0.5;

	layer->kernel_count = prevlayer_map_count * map_count;
	layer->kernel_w = kernel_w;
	layer->kernel_h = kernel_h;
	layer->kernel = (Kernel *)malloc(layer->kernel_count * sizeof(Kernel));
	mem_size = layer->kernel_w * layer->kernel_h * sizeof(double);
	for (int i = 0; i < prevlayer_map_count; i++)
	{
		for (int j = 0; j < map_count; j++)
		{
			layer->kernel[i*map_count + j].W = (double *)malloc(mem_size);
			init_kernel(layer->kernel[i*map_count + j].W, layer->kernel_w*layer->kernel_h, weight_base);
			layer->kernel[i*map_count + j].dW = (double *)malloc(mem_size);
			memset(layer->kernel[i*map_count + j].dW, 0, mem_size);
		}
	}

	layer->map_count = map_count;
	layer->map_w = map_w;
	layer->map_h = map_h;
	layer->map = (Map *)malloc(layer->map_count * sizeof(Map));
	mem_size = layer->map_w * layer->map_h * sizeof(double);
	for (int i = 0; i < layer->map_count; i++)
	{
		layer->map[i].b = 0.0;
		layer->map[i].db = 0.0;
		layer->map[i].data = (double *)malloc(mem_size);
		layer->map[i].error = (double *)malloc(mem_size);
		memset(layer->map[i].data, 0, mem_size);
		memset(layer->map[i].error, 0, mem_size);
	}
	layer->map_common = (double *)malloc(mem_size);
	memset(layer->map_common, 0, mem_size);	
}
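       main() below also calls a release_layer that is not shown in this excerpt; a minimal sketch that simply mirrors the allocations made in init_layer:

void release_layer(Layer *layer)
{
	// Free the kernels (kernel_count is 0 for the input layer,
	// so this loop is a no-op there).
	for (int i = 0; i < layer->kernel_count; i++)
	{
		free(layer->kernel[i].W);
		free(layer->kernel[i].dW);
	}
	free(layer->kernel);
	layer->kernel = NULL;

	// Free the feature maps and the shared scratch buffer.
	for (int i = 0; i < layer->map_count; i++)
	{
		free(layer->map[i].data);
		free(layer->map[i].error);
	}
	free(layer->map);
	layer->map = NULL;

	free(layer->map_common);
	layer->map_common = NULL;
}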

       Forward propagation (the 6×16 connection_table below encodes which S2 maps feed each C3 map, following the sparse connection scheme from the original LeNet-5 paper):
#define O true
#define X false
bool connection_table[6*16] = 
{
	O, X, X, X, O, O, O, X, X, O, O, O, O, X, O, O,
	O, O, X, X, X, O, O, O, X, X, O, O, O, O, X, O,
	O, O, O, X, X, X, O, O, O, X, X, O, X, O, O, O,
	X, O, O, O, X, X, O, O, O, O, X, X, O, X, O, O,
	X, X, O, O, O, X, X, O, O, O, O, X, O, O, X, O,
	X, X, X, O, O, O, X, X, O, O, O, O, X, O, O, O
};
#undef O
#undef X

void conv_fprop(Layer *prev_layer, Layer *layer, bool *pconnection)
{
	int index = 0;
	int size = layer->map_w * layer->map_h;
	for (int i = 0; i < layer->map_count; i++)
	{
		memset(layer->map_common, 0, size*sizeof(double));
		for (int j = 0; j < prev_layer->map_count; j++)
		{
			index = j*layer->map_count + i;
			if (pconnection != NULL && !pconnection[index])
			{
				continue;
			}
		
			convn_valid(
				prev_layer->map[j].data, prev_layer->map_w, prev_layer->map_h, 
				layer->kernel[index].W, layer->kernel_w, layer->kernel_h, 
				layer->map_common, layer->map_w, layer->map_h);
		}

		for (int k = 0; k < size; k++)
		{
			layer->map[i].data[k] = activation_func::tan_h(layer->map_common[k] + layer->map[i].b);
		}
	}
}
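       conv_fprop relies on a convn_valid helper that is not reproduced in this excerpt. Judging from how it is called (output size = input size - kernel size + 1, with results accumulated across input maps), it is a "valid" 2-D correlation that adds into its output buffer; a minimal sketch consistent with those call sites:

void convn_valid(
	const double *in, int in_w, int in_h,
	const double *kernel, int kernel_w, int kernel_h,
	double *out, int out_w, int out_h)
{
	for (int y = 0; y < out_h; y++)
	{
		for (int x = 0; x < out_w; x++)
		{
			double sum = 0.0;
			for (int ky = 0; ky < kernel_h; ky++)
			{
				for (int kx = 0; kx < kernel_w; kx++)
				{
					sum += in[(y + ky)*in_w + (x + kx)] * kernel[ky*kernel_w + kx];
				}
			}
			// Accumulate rather than assign: conv_fprop sums the
			// contributions of several input maps into map_common.
			out[y*out_w + x] += sum;
		}
	}
}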

void avg_pooling_fprop(Layer *prev_layer, Layer *layer)
{
	int map_w = layer->map_w;
	int map_h = layer->map_h;
	int upmap_w = prev_layer->map_w;
	const double scale_factor = 0.25;

	for (int k = 0; k < layer->map_count; k++)
	{
		for (int i = 0; i < map_h; i++)
		{
			for (int j = 0; j < map_w; j++)
			{
				double sum = 0.0;
				for (int n = 2*i; n < 2*(i + 1); n++)
				{
					for (int m = 2*j; m < 2*(j + 1); m++)
					{
						sum += prev_layer->map[k].data[n*upmap_w + m] * layer->kernel[k].W[0];
					}
				}

				sum *= scale_factor;
				sum += layer->map[k].b;
				layer->map[k].data[i*map_w + j] = activation_func::tan_h(sum);
			}
		}
	}
}

void max_pooling_fprop(Layer *prev_layer, Layer *layer)
{
	int map_w = layer->map_w;
	int map_h = layer->map_h;
	int upmap_w = prev_layer->map_w;

	for (int k = 0; k < layer->map_count; k++)
	{
		for (int i = 0; i < map_h; i++)
		{
			for (int j = 0; j < map_w; j++)
			{
				double max_value = prev_layer->map[k].data[2*i*upmap_w + 2*j];
				for (int n = 2*i; n < 2*(i + 1); n++)
				{
					for (int m = 2*j; m < 2*(j + 1); m++)
					{
						max_value = MAX(max_value, prev_layer->map[k].data[n*upmap_w + m]);
					}
				}

				layer->map[k].data[i*map_w + j] = activation_func::tan_h(max_value);
			}
		}
	}
}

void fully_connected_fprop(Layer *prev_layer, Layer *layer)
{
	for (int i = 0; i < layer->map_count; i++) 
	{
		double sum = 0.0;
		for (int j = 0; j < prev_layer->map_count; j++)
		{
			sum += prev_layer->map[j].data[0] * layer->kernel[j*layer->map_count + i].W[0];
		}

		sum += layer->map[i].b;
		layer->map[i].data[0] = activation_func::tan_h(sum);
	}
}

void forward_propagation()
{
	// In-->C1
	conv_fprop(&input_layer, &c1_conv_layer, NULL);

	// C1-->S2
	max_pooling_fprop(&c1_conv_layer, &s2_pooling_layer);/*avg_pooling_fprop*/

	// S2-->C3
	conv_fprop(&s2_pooling_layer, &c3_conv_layer, connection_table);

	// C3-->S4
	max_pooling_fprop(&c3_conv_layer, &s4_pooling_layer);/*avg_pooling_fprop*/

	// S4-->C5
	conv_fprop(&s4_pooling_layer, &c5_conv_layer, NULL);

	// C5-->Out
	fully_connected_fprop(&c5_conv_layer, &output_layer);
}
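       All of the forward passes above end with activation_func::tan_h, and the backward passes below use activation_func::dtan_h. Neither is shown in this excerpt (the :: scope syntax means they live in a small C++ struct or namespace, even though the rest of the code is C-style); their assumed shape is:

#include <math.h>

struct activation_func
{
	static double tan_h(double x)
	{
		return tanh(x);
	}

	// Note: dtan_h is called on the *activated output* y = tanh(x),
	// so the derivative is computed as 1 - y*y rather than 1 - tanh(x)^2.
	static double dtan_h(double y)
	{
		return 1.0 - y*y;
	}
};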

       Backward propagation:
void conv_bprop(Layer *layer, Layer *prev_layer, bool *pconnection)
{
	int index = 0;
	int size = prev_layer->map_w * prev_layer->map_h;

	// delta
	for (int i = 0; i < prev_layer->map_count; i++)
	{
		memset(prev_layer->map_common, 0, size*sizeof(double));
		for (int j = 0; j < layer->map_count; j++)
		{
			index = i*layer->map_count + j;
			if (pconnection != NULL && !pconnection[index])
			{
				continue;
			}

			for (int n = 0; n < layer->map_h; n++)
			{
				for (int m = 0; m < layer->map_w; m++)
				{
					double error = layer->map[j].error[n*layer->map_w + m];
					for (int ky = 0; ky < layer->kernel_h; ky++)
					{
						for (int kx = 0; kx < layer->kernel_w; kx++)
						{
							prev_layer->map_common[(n + ky)*prev_layer->map_w + m + kx] += error * layer->kernel[index].W[ky*layer->kernel_w + kx];
						}
					}
				}
			}
		}

		for (int k = 0; k < size; k++)
		{
			prev_layer->map[i].error[k] = prev_layer->map_common[k] * activation_func::dtan_h(prev_layer->map[i].data[k]);
		}
	}

	// dW
	for (int i = 0; i < prev_layer->map_count; i++)
	{
		for (int j = 0; j < layer->map_count; j++)
		{
			index = i*layer->map_count + j;
			if (pconnection != NULL && !pconnection[index])
			{
				continue;
			}

			convn_valid(
				prev_layer->map[i].data, prev_layer->map_w, prev_layer->map_h,
				layer->map[j].error, layer->map_w, layer->map_h,
				layer->kernel[index].dW, layer->kernel_w, layer->kernel_h);
		}
	}

	// db
	size = layer->map_w * layer->map_h;
	for (int i = 0; i < layer->map_count; i++)
	{
		double sum = 0.0;
		for (int k = 0; k < size; k++)
		{
			sum += layer->map[i].error[k];
		}
		layer->map[i].db += sum;
	}
}

void avg_pooling_bprop(Layer *layer, Layer *prev_layer)
{
	const double scale_factor = 0.25;
	int size = prev_layer->map_w * prev_layer->map_h;

	for (int i = 0; i < layer->map_count; i++)
	{
		kronecker(layer->map[i].error, layer->map_w, layer->map_h, prev_layer->map_common, prev_layer->map_w);

		// delta
		for (int k = 0; k < size; k++)
		{
			double delta = layer->kernel[i].W[0] * prev_layer->map_common[k];
			prev_layer->map[i].error[k] = delta * scale_factor * activation_func::dtan_h(prev_layer->map[i].data[k]);
		}

		// dW
		double sum = 0.0;
		for (int k = 0; k < size; k++)
		{
			sum += prev_layer->map[i].data[k] * prev_layer->map_common[k];
		}
		layer->kernel[i].dW[0] += sum * scale_factor;

		// db
		sum = 0.0;
		for (int k = 0; k < layer->map_w * layer->map_h; k++)
		{
			sum += layer->map[i].error[k];
		}
		layer->map[i].db += sum;
	}
}
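       avg_pooling_bprop uses a kronecker helper (not shown) to upsample the 2×-smaller error map back to the previous layer's resolution. The name suggests a Kronecker product with a 2×2 all-ones block; a sketch consistent with the call site:

void kronecker(const double *in, int in_w, int in_h, double *out, int out_w)
{
	// Expand each input value into a 2x2 block of the output.
	for (int i = 0; i < in_h; i++)
	{
		for (int j = 0; j < in_w; j++)
		{
			double v = in[i*in_w + j];
			out[(2*i    )*out_w + 2*j    ] = v;
			out[(2*i    )*out_w + 2*j + 1] = v;
			out[(2*i + 1)*out_w + 2*j    ] = v;
			out[(2*i + 1)*out_w + 2*j + 1] = v;
		}
	}
}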

void max_pooling_bprop(Layer *layer, Layer *prev_layer)
{
	int map_w = layer->map_w;
	int map_h = layer->map_h;
	int upmap_w = prev_layer->map_w;

	for (int k = 0; k < layer->map_count; k++)
	{
		// delta
		for (int i = 0; i < map_h; i++)
		{
			for (int j = 0; j < map_w; j++)
			{
				int row = 2*i, col = 2*j;
				double max_value = prev_layer->map[k].data[row*upmap_w + col];
				for (int n = 2*i; n < 2*(i + 1); n++)
				{
					for (int m = 2*j; m < 2*(j + 1); m++)
					{
						if (prev_layer->map[k].data[n*upmap_w + m] > max_value)
						{
							row = n;
							col = m;
							max_value = prev_layer->map[k].data[n*upmap_w + m];
						}
						// Clear every position first; the winner is written below.
						// (Zeroing only in an else-branch would leave a stale error
						// at any position that was briefly the running maximum.)
						prev_layer->map[k].error[n*upmap_w + m] = 0.0;
					}
				}

				prev_layer->map[k].error[row*upmap_w + col] = layer->map[k].error[i*map_w + j] * activation_func::dtan_h(max_value);
			}
		}

		// dW, db: the max-pooling layers used here carry no trainable
		// weights or biases, so there are no gradients to accumulate.
	}
}

void fully_connected_bprop(Layer *layer, Layer *prev_layer)
{
	// delta
	for (int i = 0; i < prev_layer->map_count; i++)
	{
		prev_layer->map[i].error[0] = 0.0;
		for (int j = 0; j < layer->map_count; j++)
		{
			prev_layer->map[i].error[0] += layer->map[j].error[0] * layer->kernel[i*layer->map_count + j].W[0];
		}
		prev_layer->map[i].error[0] *= activation_func::dtan_h(prev_layer->map[i].data[0]);
	}

	// dW
	for (int i = 0; i < prev_layer->map_count; i++)
	{
		for (int j = 0; j < layer->map_count; j++)
		{
			layer->kernel[i*layer->map_count + j].dW[0] += layer->map[j].error[0] * prev_layer->map[i].data[0];
		}
	}

	// db
	for (int i = 0; i < layer->map_count; i++)
	{
		layer->map[i].db += layer->map[i].error[0];
	}
}

void backward_propagation(double *label)
{
	for (int i = 0; i < output_layer.map_count; i++)
	{
		output_layer.map[i].error[0] = loss_func::dmse(output_layer.map[i].data[0], label[i]) * activation_func::dtan_h(output_layer.map[i].data[0]);
	}

	// Out-->C5
	fully_connected_bprop(&output_layer, &c5_conv_layer);

	// C5-->S4
	conv_bprop(&c5_conv_layer, &s4_pooling_layer, NULL);

	// S4-->C3
	max_pooling_bprop(&s4_pooling_layer, &c3_conv_layer);/*avg_pooling_bprop*/

	// C3-->S2
	conv_bprop(&c3_conv_layer, &s2_pooling_layer, connection_table);

	// S2-->C1
	max_pooling_bprop(&s2_pooling_layer, &c1_conv_layer);/*avg_pooling_bprop*/

	// C1-->In
	conv_bprop(&c1_conv_layer, &input_layer, NULL);
}
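       loss_func::dmse is also not reproduced here. For a per-unit squared-error loss E = 0.5*(y - t)^2, the derivative with respect to the output y is just y - t, so its assumed shape is:

struct loss_func
{
	// Derivative of 0.5*(y - t)^2 with respect to the prediction y.
	static double dmse(double y, double t)
	{
		return y - t;
	}
};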

       Main function:
int main()
{
	int kernel_w = 0, kernel_h = 0;
	double learning_rate =  0.01 * sqrt((double)batch_size);

	// Training data
	Sample *train_sample = (Sample *)malloc(train_sample_count*sizeof(Sample));
	memset(train_sample, 0, train_sample_count*sizeof(Sample));
	train_sample->sample_w = width;
	train_sample->sample_h = height;
	train_sample->sample_count = train_sample_count;
	read_mnist_data(train_sample, "../TestLeNet/mnist/train-images.idx3-ubyte");
	read_mnist_label(train_sample, "../TestLeNet/mnist/train-labels.idx1-ubyte");

	// Test data
	Sample *test_sample = (Sample *)malloc(test_sample_count*sizeof(Sample));
	memset(test_sample, 0, test_sample_count*sizeof(Sample));
	test_sample->sample_w = width;
	test_sample->sample_h = height;
	test_sample->sample_count = test_sample_count;
	read_mnist_data(test_sample, "../TestLeNet/mnist/t10k-images.idx3-ubyte");
	read_mnist_label(test_sample, "../TestLeNet/mnist/t10k-labels.idx1-ubyte");

	// Seed the random number generator
	init_genrand((unsigned long)time(NULL));

	// Input layer In
	kernel_w = 0;
	kernel_h = 0;
	init_layer(&input_layer, 0, 1, kernel_w, kernel_h, width, height, false);

	// Convolutional layer C1
	kernel_w = 5;
	kernel_h = 5;
	init_layer(&c1_conv_layer, 1, 6, kernel_w, kernel_h, input_layer.map_w - kernel_w + 1, input_layer.map_h - kernel_h + 1, false);

	// Pooling (subsampling) layer S2
	kernel_w = 1;
	kernel_h = 1;
	init_layer(&s2_pooling_layer, 1, 6, kernel_w, kernel_h, c1_conv_layer.map_w / 2, c1_conv_layer.map_h / 2, true);

	// Convolutional layer C3
	kernel_w = 5;
	kernel_h = 5;
	init_layer(&c3_conv_layer, 6, 16, kernel_w, kernel_h, s2_pooling_layer.map_w - kernel_w + 1, s2_pooling_layer.map_h - kernel_h + 1, false);

	// Pooling layer S4
	kernel_w = 1;
	kernel_h = 1;
	init_layer(&s4_pooling_layer, 1, 16, kernel_w, kernel_h, c3_conv_layer.map_w / 2, c3_conv_layer.map_h / 2, true);

	// Convolutional layer C5
	kernel_w = 5;
	kernel_h = 5;
	init_layer(&c5_conv_layer, 16, 120, kernel_w, kernel_h, s4_pooling_layer.map_w - kernel_w + 1, s4_pooling_layer.map_h - kernel_h + 1, false);

	// Output layer Out
	kernel_w = 1;
	kernel_h = 1;
	init_layer(&output_layer, 120, 10, kernel_w, kernel_h, 1, 1, false);

	// Training and testing
	clock_t start_time = 0;
	const int epoch = 50;
	for (int i = 0; i < epoch; i++)
	{
		printf("train epoch is %d ************************************************\n", i + 1);
		start_time = clock();
		train(train_sample, learning_rate);
		printf("train time is....%f s\n", (double)(clock() - start_time) / CLOCKS_PER_SEC);

		start_time = clock();
		predict(test_sample);
		printf("predict time is....%f s\n\n", (double)(clock() - start_time) / CLOCKS_PER_SEC);

		learning_rate *= 0.85;
	}

	// Release resources
	for (int i = 0; i < train_sample_count; i++)
	{
		free(train_sample[i].data);
		free(train_sample[i].label);
		train_sample[i].data = NULL;
		train_sample[i].label = NULL;
	}
	free(train_sample);

	for (int i = 0; i < test_sample_count; i++)
	{
		free(test_sample[i].data);
		free(test_sample[i].label);
		test_sample[i].data = NULL;
		test_sample[i].label = NULL;
	}
	free(test_sample);

	release_layer(&input_layer);
	release_layer(&c1_conv_layer);
	release_layer(&c3_conv_layer);
	release_layer(&c5_conv_layer);
	release_layer(&s2_pooling_layer);
	release_layer(&s4_pooling_layer);
	release_layer(&output_layer);

	system("pause");
	return 0;
}
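       train() and predict() are likewise omitted from this excerpt. Since conv_bprop and the other backward passes only accumulate gradients into dW and db, train() must apply a parameter update after each mini-batch; a plain mini-batch SGD step consistent with that design (a sketch under that assumption, not necessarily the project's exact update rule) would be:

void update_params(Layer *layer, double learning_rate)
{
	// Gradient-descent step on the kernels, then clear the accumulators
	// so the next mini-batch starts from zero.
	int kernel_size = layer->kernel_w * layer->kernel_h;
	for (int i = 0; i < layer->kernel_count; i++)
	{
		for (int k = 0; k < kernel_size; k++)
		{
			layer->kernel[i].W[k] -= learning_rate * layer->kernel[i].dW[k];
			layer->kernel[i].dW[k] = 0.0;
		}
	}

	// Same for the per-map biases.
	for (int i = 0; i < layer->map_count; i++)
	{
		layer->map[i].b -= learning_rate * layer->map[i].db;
		layer->map[i].db = 0.0;
	}
}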

       The training process is shown in the figure below; after just two epochs the accuracy already reaches 98.31%.
       [Figure: console output of the training run]

       Project download:

       Reference code:

       Training data download:



