MXNet MNIST LeNet

import gzip
import logging
import os
import struct
import urllib.request

import numpy as np
import mxnet as mx
from mxnet import nd
from mxnet.gluon import nn

# https://blog.csdn.net/weixin_38358654/article/details/79842612

def download_data(url, force_download=False):
    # If a local path was passed (as getTrainTestData does), use it directly.
    if os.path.exists(url) and not force_download:
        return url
    fname = url.split("/")[-1]
    if force_download or not os.path.exists(fname):
        # In Python 3, urlretrieve lives under urllib.request (Python 2 used urllib.urlretrieve)
        urllib.request.urlretrieve(url, fname)
    return fname

def read_data(label_url, image_url):
    with gzip.open(download_data(label_url), 'rb') as flbl:
        magic, num = struct.unpack(">II", flbl.read(8))  # IDX header: magic number, item count
        label = np.frombuffer(flbl.read(), dtype=np.uint8)
    with gzip.open(download_data(image_url), 'rb') as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))  # magic, count, rows, cols
        image = np.frombuffer(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
    return (label, image)

def getTrainTestData():
	train_data_path = 'MNIST_data/train-images-idx3-ubyte.gz'
	train_label_path = 'MNIST_data/train-labels-idx1-ubyte.gz'
	test_data_path = 'MNIST_data/t10k-images-idx3-ubyte.gz'
	test_label_path = 'MNIST_data/t10k-labels-idx1-ubyte.gz'

	train_label, train_data = read_data(image_url=train_data_path, label_url=train_label_path)
	test_label, test_data = read_data(image_url=test_data_path, label_url=test_label_path)

	# Add the channel axis and scale to [0, 1] so the layout matches
	# mx.test_utils.get_mnist(): (N, 1, 28, 28), float32.
	train_data = train_data.reshape(-1, 1, 28, 28).astype(np.float32) / 255
	test_data = test_data.reshape(-1, 1, 28, 28).astype(np.float32) / 255

	print('shape of train_data:', train_data.shape, ", ", type(train_data))
	print('shape of train_label:', train_label.shape, ", ", type(train_label))
	print('shape of test_data:', test_data.shape, ", ", type(test_data))
	print('shape of test_label:', test_label.shape, ", ", type(test_label))
	
	return (train_data, train_label, test_data, test_label)

def getTrainTestDataFirstTime():
	# On a first run, download MNIST via MXNet's helper:
	mnist = mx.test_utils.get_mnist()  # fetch the handwritten-digit dataset
	
	train_image = mnist['train_data']
	train_image_label = mnist['train_label']
	test_image = mnist['test_data']
	test_image_label = mnist['test_label']

	return(train_image, train_image_label, test_image, test_image_label)
	
'''
Having downloaded MNIST and pulled out the raw arrays, how do we turn them into
the format the training stage actually needs? The print output above shows that
each image is already 28x28, single-channel grayscale. If we do not rescale the
images, the network input should be (batch_size, channel, height, width), so the
60000 training images and 10000 test images have to be served as iterators that
yield (60000 // batch_size) and (10000 // batch_size) blocks of shape
(batch_size, channel, height, width).

Why must training data be fed to MXNet as an iterator? 1) It is simple. 2) In
MXNet, unlike TensorFlow, the user cannot write an explicit for loop that pushes
data into the network. So how do we use MXNet's iterators correctly? The
iterator classes MXNet ships do not cover every need, and sometimes we have to
write our own.

Anyone familiar with MXNet knows its networks take two main kinds of input:
1) images, 2) ndarrays. For the former we can conveniently use MXNet's
im2rec.py script to pack all images into a .rec file and feed that file (itself
an iterator object) to the network. Generating a .rec file, however, is slow
and needs a lot of extra disk space. Can we avoid it? Yes: as mentioned above,
subclass DataIter and return an iterator object that yields a complete
(batch_size, channel, height, width) data block on every iteration, streaming
the data into the network. The full code follows.

Source: https://blog.csdn.net/weixin_38358654/article/details/79842612
'''
class Batch(object):
	# Minimal stand-in for mx.io.DataBatch: `data` and `label` are each a
	# list of NDArrays, which is the per-batch form mx.mod.Module expects.
	def __init__(self, data, label):
		self.data = data
		self.label = label
		self.pad = 0  # every batch we yield is full, so no padding
	
class Inter(mx.io.DataIter):
	def __init__(self, batch_size, train_data, train_label):
		super(Inter, self).__init__()
		self.batch_size = batch_size
		self.begin = 0
		self.index = 0
		self.train_data = train_data
		self.train_label = train_label
		self.train_count = len(train_data)
		
		assert len(train_data) == len(train_label), 'data/label length mismatch'
		assert (self.train_count >= self.batch_size) and (self.batch_size > 0), 'invalid batch_size'
		self.train_batches = self.train_count // self.batch_size
		
	# Module.bind() reads these shape descriptions from the iterator:
	@property
	def provide_data(self):
		return [mx.io.DataDesc('data', (self.batch_size,) + self.train_data.shape[1:])]

	@property
	def provide_label(self):
		return [mx.io.DataDesc('softmax_label', (self.batch_size,))]
		
	def __iter__(self):
		return self
	
	def reset(self):
		self.begin = 0
		self.index = 0
	
	def next(self):
		if self.iter_next():
			return self.getdata()
		else:
			raise StopIteration
	
	def __next__(self):
		return self.next()
	
	def iter_next(self):
		return self.begin < self.train_batches
	
	def get_batch_images_labels(self):
		data = self.train_data[self.index:(self.index + self.batch_size), :, :, :]
		label = self.train_label[self.index:(self.index + self.batch_size)]
		return data, label
	
	def getdata(self):
		images, labels = self.get_batch_images_labels()  # fetch the next slice sequentially
		data_all = [mx.nd.array(images)]
		label_all = [mx.nd.array(labels)]
		self.index += self.batch_size
		self.begin += 1
		return Batch(data_all, label_all)
	
	# Remaining DataIter interface methods are not needed here:
	def getlabel(self):
		pass

	def getindex(self):
		return None

	def getpad(self):
		return 0
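
def demoIter():
	# Minimal smoke test for the custom iterator (a sketch; assumes the
	# MNIST_data/ files used by getTrainTestData() are present): print the
	# first batch's shapes, then rewind the iterator for reuse.
	train_data, train_label, _, _ = getTrainTestData()
	it = Inter(100, train_data, train_label)
	for batch in it:
		print(batch.data[0].shape, batch.label[0].shape)  # (100, 1, 28, 28) (100,)
		break
	it.reset()  # rewind before the next pass, e.g. at each epoch boundary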

# Build the LeNet-5 network with the symbol API:
def get_net(class_num, bn_mom=0.99, filter_list=(6, 16)):
	data = mx.sym.Variable('data')
	# Batch normalization on the raw input
	data_bn = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=1e-5, momentum=bn_mom, name='bn_input')
	
	# Layer 1: convolution
	layer_1 = mx.sym.Convolution(data=data_bn, num_filter=filter_list[0], kernel=(5, 5), stride=(2, 2), \
									pad=(2, 2), no_bias=False, name="conv_layer_1")
									
	bn_layer_1 = mx.sym.BatchNorm(data=layer_1, fix_gamma=False, eps=1e-5, momentum=bn_mom, name='bn_layer_1')						
	a_bn_layer_1 = mx.sym.Activation(data=bn_layer_1, act_type='relu', name='relu_a_bn_layer_1')
	
	# Layer 2: convolution
	bn_layer_2 = mx.sym.BatchNorm(data=a_bn_layer_1, fix_gamma=True, eps=1e-5, momentum=bn_mom, name='bn_layer_2')
	conv_layer_2 = mx.sym.Convolution(data=bn_layer_2, num_filter=filter_list[1], kernel=(5, 5), \
										stride=(1, 1), pad=(0, 0), no_bias=False, name="conv_layer_2")
	bn_layer_2_1 = mx.sym.BatchNorm(data=conv_layer_2, fix_gamma=False, eps=1e-5, momentum=bn_mom, name='bn_layer_2_1')
	a_bn_layer_2 = mx.sym.Activation(data=bn_layer_2_1, act_type='relu', name='relu_a_a_bn_layer_2')
	
	# Pooling (subsampling) layer
	pooling_layer_2 = mx.symbol.Pooling(data=a_bn_layer_2, kernel=(5, 5), stride=(2, 2), pad=(2, 2), pool_type='max',name='pooling_layer_2')
	
	# Fully connected layers
	fc = mx.symbol.FullyConnected(data=pooling_layer_2, num_hidden=120, flatten=True, no_bias=False, name='fc')
	bn1_fc = mx.sym.BatchNorm(data=fc, fix_gamma=False, eps=1e-5, momentum=bn_mom, name='bn1_fc')
	fc1 = mx.symbol.FullyConnected(data=bn1_fc, num_hidden=84, flatten=True, no_bias=False, name='fc1')
	bn1_fc1 = mx.sym.BatchNorm(data=fc1, fix_gamma=False, eps=1e-5, momentum=bn_mom, name='bn1_fc1')
	
	fc2 = mx.symbol.FullyConnected(data=bn1_fc1, num_hidden=class_num, flatten=True, no_bias=False, name='fc2')
	bn1_fc2 = mx.sym.BatchNorm(data=fc2, fix_gamma=False, eps=1e-5, momentum=bn_mom, name='bn1_fc2')
	
	return mx.symbol.SoftmaxOutput(data=bn1_fc2, name='softmax')
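
def checkNetShapes():
	# A quick sanity check of the symbol graph under an assumed batch size
	# of 32; SoftmaxOutput infers the label shape from the data shape.
	sym = get_net(class_num=10)
	_, out_shapes, _ = sym.infer_shape(data=(32, 1, 28, 28))
	print('output shapes:', out_shapes)  # [(32, 10)]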
	
# Build the LeNet-5 network with Gluon blocks:
def createLenet_5():
	net = nn.Sequential()
	with net.name_scope(): 
		net.add(
			nn.BatchNorm(epsilon=1e-5, momentum=0.9),
			nn.Conv2D(channels=6, kernel_size=5, strides=2, padding=2, activation='relu'),
			nn.BatchNorm(epsilon=1e-5, momentum=0.9),
			nn.Conv2D(channels=16, kernel_size=5, strides=1, padding=0, activation='relu'),
			nn.BatchNorm(epsilon=1e-5, momentum=0.9),
			nn.AvgPool2D(pool_size=2, strides=2, padding=2),
			nn.Flatten(),
			nn.BatchNorm(epsilon=1e-5, momentum=0.9),
			nn.Dense(120, activation='relu'),
			nn.BatchNorm(epsilon=1e-5, momentum=0.9),
			nn.Dense(84, activation='relu'),
			nn.BatchNorm(epsilon=1e-5, momentum=0.9),
			nn.Dense(10)
		)
	return net
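
def demoGluonNet():
	# Minimal sketch: initialize the Gluon net with random weights and push
	# one dummy batch through it to confirm the output shape.
	net = createLenet_5()
	net.initialize(mx.init.Xavier(), ctx=mx.cpu())
	out = net(nd.zeros((32, 1, 28, 28)))
	print('gluon output shape:', out.shape)  # (32, 10)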
		
# 3. Build the LeNet-5 forward pass directly with MXNet ndarrays (mx.nd, as distinct from NumPy arrays):
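
# The original post applied batch normalization inside createNet by mixing
# mx.sym.BatchNorm and nd.BatchNorm_v1 calls into imperative code, which cannot
# run (symbol operators cannot consume NDArrays, and the nd batch-norm operator
# also needs gamma/beta/moving-stat arguments). As a minimal stand-in, the
# helper below normalizes each channel with the current mini-batch statistics
# (plain, parameter-free batch normalization).
def batch_norm(x, eps=1e-5):
	# Average over the batch (and spatial dims for 4-D inputs), per channel.
	axes = (0, 2, 3) if len(x.shape) == 4 else (0,)
	mean = x.mean(axis=axes, keepdims=True)
	var = ((x - mean) ** 2).mean(axis=axes, keepdims=True)
	return (x - mean) / nd.sqrt(var + eps)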
def createNet(X):
	ctx = mx.cpu()  # compute device

	# First convolution layer: 6 output channels, 5x5 kernel
	W1 = nd.random_normal(shape=(6, 1, 5, 5), scale=.1, ctx=ctx)
	b1 = nd.zeros(W1.shape[0], ctx=ctx)

	# Second convolution layer: 16 output channels, 5x5 kernel
	W2 = nd.random_normal(shape=(16, 6, 5, 5), scale=.1, ctx=ctx)
	b2 = nd.zeros(W2.shape[0], ctx=ctx)

	# First fully connected layer (16 * 5 * 5 = 400 inputs after pooling)
	W3 = nd.random_normal(shape=(400, 120), scale=.1, ctx=ctx)
	b3 = nd.zeros(W3.shape[1], ctx=ctx)

	# Second fully connected layer
	W4 = nd.random_normal(shape=(W3.shape[1], 84), scale=.1, ctx=ctx)
	b4 = nd.zeros(W4.shape[1], ctx=ctx)

	# Third (output) fully connected layer
	W5 = nd.random_normal(shape=(W4.shape[1], 10), scale=.1, ctx=ctx)
	b5 = nd.zeros(W5.shape[1], ctx=ctx)

	params = [W1, b1, W2, b2, W3, b3, W4, b4, W5, b5]
	for param in params:
		param.attach_grad()

	# Forward pass
	X = X.as_in_context(W1.context)

	# Batch-normalize the input
	bn_X = batch_norm(X)

	# First convolution layer (stride 2, pad 2, matching get_net above)
	h1_conv = nd.Convolution(data=bn_X, weight=W1, bias=b1, kernel=W1.shape[2:],
							num_filter=W1.shape[0], stride=(2, 2), pad=(2, 2))
	h1_activation = nd.relu(batch_norm(h1_conv))

	# Second convolution layer
	h1_conv2 = nd.Convolution(data=batch_norm(h1_activation), weight=W2, bias=b2,
							kernel=W2.shape[2:], num_filter=W2.shape[0])
	h2_activation = nd.relu(batch_norm(h1_conv2))

	# Pooling (subsampling) layer: output is 16 x 5 x 5 = 400 features per image
	pooling_layer_2 = nd.Pooling(data=h2_activation, kernel=(5, 5), stride=(2, 2), pad=(2, 2), pool_type='max')

	# Flatten
	fla = nd.flatten(data=pooling_layer_2)

	# Fully connected layer 1
	fullcollect_layer = nd.dot(fla, W3) + b3
	relu_bn_fullcollect_layer = nd.relu(batch_norm(fullcollect_layer))

	# Fully connected layer 2
	fullcollect_layer_2 = nd.dot(relu_bn_fullcollect_layer, W4) + b4
	relu_bn_fullcollect_layer_2 = nd.relu(batch_norm(fullcollect_layer_2))

	# Fully connected layer 3 (output)
	fullcollect_layer_3 = nd.dot(relu_bn_fullcollect_layer_2, W5) + b5
	relu_bn_fullcollect_layer_3 = nd.relu(batch_norm(fullcollect_layer_3))

	print('Network structure:')
	print('first conv layer:', h1_activation.shape)
	print('second conv layer:', h2_activation.shape)
	print('pooling layer:', pooling_layer_2.shape)
	print('first fully connected layer:', relu_bn_fullcollect_layer.shape)
	print('second fully connected layer:', relu_bn_fullcollect_layer_2.shape)
	print('output layer:', relu_bn_fullcollect_layer_3.shape)

	return relu_bn_fullcollect_layer_3
	
def accuracy(output, label):
	return nd.mean(output.argmax(axis=1) == label).asscalar()
	
def evaluate_accuracy(_test_data, net, ctx, eval_data_batch_count):
	acc = 0.
	for batch in _test_data:
		data = batch.data[0].as_in_context(ctx)
		label = batch.label[0].as_in_context(ctx)
		output = net(data)
		acc += accuracy(output, label)
	return acc / eval_data_batch_count
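
def demoEvaluate():
	# Sketch: score the (untrained) Gluon net on the test set via the custom
	# iterator; `train_batches` is the iterator's number of full batches.
	_, _, test_data, test_label = getTrainTestData()
	net = createLenet_5()
	net.initialize(mx.init.Xavier(), ctx=mx.cpu())
	eval_it = Inter(100, test_data, test_label)
	print('test accuracy:', evaluate_accuracy(eval_it, net, mx.cpu(), eval_it.train_batches))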
	
def main():
	saved_model_path = "."
	train_test_log_save_path = 'resnet_log.log'
	
	batch_size = 500
	learning_rate = 0.005
	momentum = 0.9
	num_epoch = 10
	ctx = mx.cpu()  # compute device
	
	logging.basicConfig(level=logging.INFO,
						format='%(message)s',
						datefmt='%a, %d %b %Y %H:%M:%S',
						filename=train_test_log_save_path,
						filemode='w'
	)
	
	train_data, train_label, test_data, test_label = getTrainTestData()
	data_train = Inter(batch_size, train_data, train_label)        # training-set iterator
	_eval_data = Inter(batch_size * 2, test_data, test_label)      # test-set iterator

	train_data_batch_count = train_data.shape[0] // batch_size
	eval_data_batch_count = test_data.shape[0] // (batch_size * 2)
	
	softmax_out = get_net(class_num=10, bn_mom=0.99, filter_list=(6, 16))
	model = mx.mod.Module(symbol=softmax_out,
						context=ctx,
						data_names=['data'],
						label_names=['softmax_label'])

	model.fit(data_train,
			eval_data=_eval_data,
			optimizer='sgd',
			initializer=mx.init.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2),
			eval_metric=['acc', 'ce'],
			optimizer_params={'learning_rate': learning_rate, 'momentum': momentum},
			batch_end_callback=mx.callback.Speedometer(batch_size, 1),
			epoch_end_callback=mx.callback.do_checkpoint(saved_model_path),
			num_epoch=num_epoch)
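
	# The checkpoint files written by do_checkpoint can be reloaded later for
	# inference (a sketch; assumes training reached the final epoch):
	sym, arg_params, aux_params = mx.model.load_checkpoint(saved_model_path, num_epoch)
	print('reloaded', saved_model_path, 'with', len(arg_params), 'parameter arrays')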

	
if __name__ == "__main__":
	main()

 
