Getting Started with TensorFlow

import os
import numpy as np
import tensorflow as tf
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework.graph_util import convert_variables_to_constants
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.metrics import confusion_matrix, classification_report  # classification metrics used during validation
tf.logging.set_verbosity(tf.logging.ERROR)
# !pip install gast==0.2.2  # downgrade gast if TF 1.x emits gast-related warnings

class FLAGS:
    # Number of classes
    num_classes = 10
    # Mini-batch size
    batch_size = 100
    # Number of training epochs
    global_epoch = 100
    # Directory where the best frozen models are saved
    model_dir = "best_model"

# Load the dataset. one_hot=True encodes each label as a vector with a single 1 and 0 elsewhere;
# the data is downloaded automatically into the MNIST_data directory on first run.
mnist=input_data.read_data_sets("MNIST_data",one_hot=True)
print(mnist.train.num_examples, mnist.test.num_examples)

# Number of mini-batches per epoch (integer division)
n_batch=mnist.train.num_examples//FLAGS.batch_size
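
# A quick sanity check (illustrative, not required for training): with one_hot=True each
# label is a length-10 vector containing a single 1, and each image is a flattened
# 28*28 = 784 vector scaled to [0, 1].
print(mnist.train.images.shape, mnist.train.labels.shape)  # (55000, 784) (55000, 10)
print(mnist.train.labels[0])   # one-hot label, e.g. [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
print(n_batch)                 # 55000 // 100 = 550 mini-batches per epoch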

# Input placeholder: each image is a flattened 28*28 = 784 vector
inputs = tf.placeholder(tf.float32, [None,784], name='inputs')
# One-hot ground-truth labels, one of 10 classes
y_true = tf.placeholder(tf.float32, [None,10], name='ground_truth')
# Global step counter, incremented by the optimizer
global_step = tf.get_variable( 'global_step', [], initializer=tf.constant_initializer(0), trainable=False)
# Training/inference switch for batch normalization and dropout
is_training = tf.placeholder(tf.bool, name="phase_train")


# Model definition: two conv+BN+ReLU blocks, each followed by 2x2 max pooling, then two dense layers
def conv2d(x, filters, kernel_size=3, strides=1, is_training=True):
    x = tf.layers.conv2d(inputs=x, filters=filters, kernel_size=kernel_size, strides=strides, padding='same')
    x = tf.layers.batch_normalization(inputs=x, training=is_training)
    return tf.nn.relu(x)  # ReLU activation

def maxpool2d(x, strides=2):
    # Max pooling layer: pool_size is the window size, strides is the step
    return tf.layers.max_pooling2d(inputs=x, pool_size=2, strides=strides, padding='same')

def conv_net(inputs, num_classes=10, is_training=True):
    inputs = tf.reshape(inputs, shape=[-1, 28, 28, 1])  # reshape flat vectors back to 28x28x1 images
    conv1 = conv2d(inputs, 32, kernel_size=3, strides=1, is_training=is_training)
    conv1 = maxpool2d(conv1, strides=2)
    
    conv2 = conv2d(conv1, 64, kernel_size=3, strides=1, is_training=is_training)
    conv2 = maxpool2d(conv2, strides=2)
    
    fc1 = tf.reshape(conv2, [-1,7*7*64])
    fc1 = tf.layers.dense(fc1, 1024, activation=tf.nn.relu)
    fc1 = tf.layers.dropout(fc1, rate=0.3, training=is_training)
        
    end_point = tf.layers.dense(fc1, num_classes)  # logits; softmax is applied inside the loss
    return end_point
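
# Shape check (an illustrative sketch built in a throwaway graph, so it does not add
# variables to the model graph): two stride-2 poolings shrink 28x28 feature maps to 7x7
# with 64 channels, which is why conv_net flattens to 7*7*64 = 3136 features before the
# dense layers.
with tf.Graph().as_default():
    probe = tf.reshape(tf.placeholder(tf.float32, [None, 784]), [-1, 28, 28, 1])
    probe = maxpool2d(conv2d(probe, 32))  # -> (?, 14, 14, 32)
    probe = maxpool2d(conv2d(probe, 64))  # -> (?, 7, 7, 64)
    print(probe.shape)                    # (?, 7, 7, 64)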

# Model output: logits and the predicted class index
end_point = conv_net(inputs, num_classes=FLAGS.num_classes, is_training=is_training)
result_cls = tf.argmax(end_point, axis=-1)
# Name the prediction op so it can be fetched by name from the frozen graph
result_cls = tf.identity(result_cls, name="result_cls")

# Accuracy: fraction of predictions that match the one-hot labels
# (tf.metrics.accuracy is a streaming alternative that accumulates across runs
#  and requires local-variable initialization)
#accuracy = tf.metrics.accuracy(labels=tf.argmax(y_true, axis=-1), predictions=result_cls)[1]
accuracy = tf.reduce_mean(tf.cast(tf.equal(result_cls, tf.argmax(y_true, axis=-1)), tf.float32))

# Loss: softmax cross-entropy between the one-hot labels and the logits
# (cross-entropy measures the discrepancy between the true and predicted distributions)
loss = tf.losses.softmax_cross_entropy(onehot_labels=y_true, logits=end_point)

# Batch norm's moving mean/variance are refreshed through the UPDATE_OPS collection, so the
# train op must run after those updates; the moving statistics themselves are not optimized
# by gradient descent and do not belong in the optimizer's var_list.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(
        loss, var_list=tf.trainable_variables(), global_step=global_step)
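
# What softmax cross-entropy computes, shown with NumPy on a single example (a sketch of
# the formula -sum(y_true * log(softmax(logits))), not the library implementation):
logits_ex = np.array([2.0, 1.0, 0.1])
y_true_ex = np.array([1.0, 0.0, 0.0])
probs_ex = np.exp(logits_ex) / np.sum(np.exp(logits_ex))  # softmax
print(-np.sum(y_true_ex * np.log(probs_ex)))              # ~0.417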


with tf.Session() as sess:
    best_score = 0.998  # only save a frozen model when the test accuracy reaches this threshold
    
    # Initialize global and local variables
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    sess.run(init)
    
    # Loop over the whole training set
    for epoch in range(FLAGS.global_epoch):
        # One pass over the training images, one mini-batch at a time
        for batch in range(n_batch):
            # Fetch one mini-batch: batch_xs are images, batch_ys are one-hot labels
            batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
            # Run one Adam training step on this batch
            [__train_op, __total_loss, __accuracy, __global_step] = sess.run([train_op, loss, accuracy, global_step], 
                                                                             feed_dict={inputs: batch_xs, y_true: batch_ys, is_training:True})
        # Accuracy on the full training set at the end of this epoch
        acc_train = sess.run(accuracy, feed_dict={inputs: mnist.train.images, y_true: mnist.train.labels, is_training: False})
        # Accuracy on the test set at the end of this epoch
        acc_test = sess.run(accuracy, feed_dict={inputs: mnist.test.images, y_true: mnist.test.labels, is_training: False})
        # Report the epoch number, last batch loss and both accuracies
        print("Iter:{:3d}, loss: {:.4f}, TrainAcc: {:.4f}, TestAcc: {:.4f}".format(epoch, __total_loss, acc_train, acc_test))

        # Every 5 epochs, run a per-class evaluation on the validation set
        if epoch % 5 == 0:
            [output_cls] = sess.run([result_cls], feed_dict={inputs: mnist.validation.images, y_true: mnist.validation.labels, is_training: False})
            y_pred_cls = output_cls
            y_true_cls = np.argmax(mnist.validation.labels, axis=-1)
            # Confusion matrix: rows are true classes, columns are predicted classes
            conf_mat = confusion_matrix(np.squeeze(y_true_cls), np.squeeze(y_pred_cls))
            # Overall validation accuracy is the trace of the confusion matrix over its sum
            val_acc = np.trace(conf_mat) / np.sum(conf_mat)
            print("Validation accuracy: {:.4f}".format(val_acc))
            # Label each class with its confusion-matrix row so the report also shows where errors go
            target_names = ['Class {} {}'.format(i, conf_mat[i]) for i in range(len(conf_mat))]
            print("\n" + classification_report(y_true_cls, y_pred_cls, target_names=target_names))
        
        # Save a frozen copy of the model whenever the test accuracy reaches a new best
        if acc_test >= best_score:
            best_score = max(best_score, acc_test)
            if not os.path.exists(FLAGS.model_dir): 
                os.makedirs(FLAGS.model_dir)
            # Freeze the graph: convert variables to constants, keeping only the ops needed for result_cls
            constant_graph = convert_variables_to_constants(sess, sess.graph_def, ['result_cls'])
            with tf.gfile.FastGFile(os.path.join(FLAGS.model_dir, 'best_model_{:.4f}_{:}.pb'.format(acc_test, epoch)), mode='wb') as f:
                f.write(constant_graph.SerializeToString())
                print("save best model!")
