A CNN Flower Recognition System Demo Built with TensorFlow

Demo Showcase

Login and Registration

(Screenshot: login screen)
(Screenshot: registration prompt)
(Screenshot: registration screen)

Main Page

(Screenshot: main page)
(Screenshot: after adding an image)
(Screenshot: delete dialog)

Model Training

(Screenshot: training the model)

Recognition

(Screenshot: recognition)

Neural Network

Defining the CNN architecture
A convolutional neural network: two conv + pooling stages, two fully connected layers, and a softmax classifier.
Key code:

import tensorflow as tf

# Define the function inference, which builds the CNN:
# (conv + pooling) * 2, fully connected * 2, softmax classifier.
def inference(images, batch_size, n_classes):
    # Convolutional layer 1
    # 64 3x3 kernels over the 3 input channels, stride 1, 'SAME' padding, ReLU activation.
    with tf.variable_scope('conv1') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[3, 3, 3, 64], stddev=1.0, dtype=tf.float32),
                              name='weights', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[64]),
                             name='biases', dtype=tf.float32)
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    # Pooling layer 1
    # 3x3 max pooling with stride 2, followed by lrn() (local response
    # normalization), which helps training.

    with tf.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')

    # Convolutional layer 2
    # 16 3x3 kernels (16 output channels); padding='SAME' keeps the output the
    # same spatial size as the input; ReLU activation.
    with tf.variable_scope('conv2') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 16], stddev=0.1, dtype=tf.float32),
                              name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[16]),
                             name='biases', dtype=tf.float32)

        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    # Pooling layer 2
    # lrn() first, then 3x3 max pooling with stride 1.
    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME', name='pooling2')

    # Fully connected layer 3
    # 128 neurons; the previous pooling output is reshaped to one row per example, ReLU activation.
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.Variable(tf.truncated_normal(shape=[dim, 128], stddev=0.005, dtype=tf.float32),
                              name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[128]),
                             name='biases', dtype=tf.float32)

        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # Fully connected layer 4
    # 128 neurons, ReLU activation.
    with tf.variable_scope('local4') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[128, 128], stddev=0.005, dtype=tf.float32),
                              name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[128]),
                             name='biases', dtype=tf.float32)

        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')

    # Dropout layer (left disabled in the original code)
    #    with tf.variable_scope('dropout') as scope:
    #        drop_out = tf.nn.dropout(local4, 0.8)

    # Softmax regression layer
    # A linear map from the FC output to one score (logit) per class; the
    # softmax itself is applied inside the loss function.
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[128, n_classes], stddev=0.005, dtype=tf.float32),
                              name='weights', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[n_classes]),
                             name='biases', dtype=tf.float32)
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')

    return softmax_linear
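
# Shape walk-through (added sketch, not in the original post): how tensor
# shapes flow through inference() for the 64x64 RGB inputs used by the
# training script below.
#   conv1  (3x3x3 -> 64, stride 1, SAME):  [batch, 64, 64, 64]
#   pool1  (3x3, stride 2, SAME):          [batch, 32, 32, 64]
#   conv2  (3x3x64 -> 16, stride 1, SAME): [batch, 32, 32, 16]
#   pool2  (3x3, stride 1, SAME):          [batch, 32, 32, 16]
#   local3 (flatten 32*32*16 = 16384 -> 128), local4 (128 -> 128)
#   softmax_linear (128 -> n_classes logits)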

# -----------------------------------------------------------------------------
# Loss computation
# Inputs: logits, the network output; labels, the ground-truth class indices
#         (integers in [0, n_classes), here 0-3 for the four flower types)
# Returns: loss, the loss value
def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss
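
# Label-format sketch (added example with made-up numbers, not project code):
# sparse_softmax_cross_entropy_with_logits expects integer class indices,
# not one-hot vectors. For instance:
#   logits = tf.constant([[2.0, 0.5, 0.1, 0.1],
#                         [0.2, 0.1, 3.0, 0.3]])  # [batch=2, n_classes=4]
#   labels = tf.constant([0, 2])                   # one class index per example
#   xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
#   sess.run(xent) returns one loss per example; reduce_mean gives the scalar loss.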


# --------------------------------------------------------------------------
# Loss optimization
# Inputs: loss; learning_rate, the learning rate
# Returns: train_op, the training op; pass it to sess.run() to train the model
def trainning(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


# -----------------------------------------------------------------------
# Evaluation / accuracy computation
# Inputs: logits, the network output; labels, the ground-truth class indices
# Returns: accuracy, the average accuracy for the current step, i.e. the
#          fraction of images in the batch that were classified correctly
def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
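
# Accuracy sketch (added example with made-up numbers, not project code):
# tf.nn.in_top_k(logits, labels, 1) reports, per example, whether the true
# class has the highest logit; casting to float and averaging gives the
# batch accuracy. For instance:
#   logits = tf.constant([[2.0, 0.5, 0.1, 0.1],
#                         [0.2, 0.1, 3.0, 0.3],
#                         [1.0, 2.0, 0.1, 0.1]])
#   labels = tf.constant([0, 2, 3])              # third example is misclassified
#   correct = tf.nn.in_top_k(logits, labels, 1)  # [True, True, False]
#   sess.run(tf.reduce_mean(tf.cast(correct, tf.float16)))  # ~0.67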

Training

Key code:

import os

import numpy as np
import tensorflow as tf

import input_data  # the project's data-loading module
import model       # the model and ops defined above

class Train:
	path, train_dir, logs_train_dir = None, None, None

	def __init__(self):
		self.path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
		self.train_dir = self.path + '/input_data'  # directory the training samples are read from
		self.logs_train_dir = self.path + '/save'  # directory for logs and checkpoints

	def train(self, BATCH_SIZE=20, MAX_STEP=1000, learning_rate=0.0001):
		# Variable declarations
		N_CLASSES = 4  # four flower types
		IMG_W = 64  # resize images; larger sizes make training take longer
		IMG_H = 64
		CAPACITY = 200
		# Build the batches
		# train, train_label = input_data.get_files(train_dir)
		train, train_label, val, val_label = input_data.get_files(self.train_dir, 0.3)
		# Training data and labels
		train_batch, train_label_batch = input_data.get_batch(train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
		# Validation data and labels
		val_batch, val_label_batch = input_data.get_batch(val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

		# Training ops
		train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
		train_loss = model.losses(train_logits, train_label_batch)
		train_op = model.trainning(train_loss, learning_rate)
		train_acc = model.evaluation(train_logits, train_label_batch)

		# Validation ops
		test_logits = model.inference(val_batch, BATCH_SIZE, N_CLASSES)
		test_loss = model.losses(test_logits, val_label_batch)
		test_acc = model.evaluation(test_logits, val_label_batch)

		# Merge all log summaries
		summary_op = tf.summary.merge_all()

		# Create a session
		sess = tf.Session()
		# Create a writer for the log files
		train_writer = tf.summary.FileWriter(self.logs_train_dir, sess.graph)
		# Create a saver to store the trained model
		saver = tf.train.Saver()
		# Initialize all variables
		sess.run(tf.global_variables_initializer())
		# Queue coordination
		coord = tf.train.Coordinator()
		threads = tf.train.start_queue_runners(sess=sess, coord=coord)
		# Run the batch training loop
		try:
			print('Batch size: {}, max steps: {}, learning rate: {}'.format(BATCH_SIZE, MAX_STEP, learning_rate))
			# Run MAX_STEP training steps, one batch per step
			for step in np.arange(MAX_STEP + 1):
				if coord.should_stop():
					break
				_, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

				# Every 10 steps, print the current loss and accuracy and write a log summary
				if step % 10 == 0:
					print('Step: %d, loss: %.2f, training accuracy: %.2f%%' % (step, tra_loss, tra_acc * 100.0))
					summary_str = sess.run(summary_op)
					train_writer.add_summary(summary_str, step)
				# Save the trained model at the final step
				if step == MAX_STEP:
					checkpoint_path = os.path.join(self.logs_train_dir, 'model.ckpt')
					saver.save(sess, checkpoint_path, global_step=step)
		except tf.errors.OutOfRangeError:
			print('Training limit reached; training finished')
		finally:
			coord.request_stop()
			coord.join(threads)
			sess.close()
			
if __name__ == '__main__':
	Train().train()
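
The recognition step is only shown as screenshots above. As a rough sketch (my assumptions, not the project's actual code), a prediction script could restore the checkpoint saved by Train and classify a single image as below; the 64x64 input size and the four classes follow the training script, while the class names are placeholders.

import numpy as np
import tensorflow as tf
from PIL import Image

import model

def classify(image_path, logs_train_dir, n_classes=4):
	# Placeholder class names; the real order depends on input_data.get_files().
	classes = ['class_0', 'class_1', 'class_2', 'class_3']
	# Resize to the 64x64 RGB shape the network was trained on.
	image = Image.open(image_path).convert('RGB').resize((64, 64))
	image_array = np.asarray(image, dtype=np.float32).reshape(1, 64, 64, 3)

	with tf.Graph().as_default():
		x = tf.placeholder(tf.float32, shape=[1, 64, 64, 3])
		logits = model.inference(x, batch_size=1, n_classes=n_classes)
		probs = tf.nn.softmax(logits)
		saver = tf.train.Saver()
		with tf.Session() as sess:
			# Restore the latest checkpoint written by Train.train()
			ckpt = tf.train.get_checkpoint_state(logs_train_dir)
			saver.restore(sess, ckpt.model_checkpoint_path)
			prediction = sess.run(probs, feed_dict={x: image_array})
			idx = int(np.argmax(prediction))
			print('Predicted: %s (%.2f%%)' % (classes[idx], prediction[0][idx] * 100))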

Demo Download

If you like this project, please give it a star. Thanks!
https://github.com/JJJiangYH/Flower-Distinguish
