Handwritten Digit Recognition with a Multilayer Neural Network

import tensorflow as tf
import os
import matplotlib.pyplot as plt
import numpy as np

# Download and read the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets("./MNIST_data/",one_hot=True)
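The input_data.read_data_sets helper is deprecated in later TensorFlow 1.x releases (it triggers the warnings shown in the output log below). A minimal alternative sketch, assuming a TensorFlow 1.x build that ships tf.keras, loads MNIST through tf.keras.datasets and builds the flattened images and one-hot labels by hand; it is shown only for reference, and the rest of this post keeps the deprecated helper:

# Alternative loading path (sketch only, not used below)
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
train_x = train_x.reshape(-1, 784).astype("float32") / 255.0  # flatten 28x28 images to 784-dim vectors in [0, 1]
train_y_onehot = np.eye(10)[train_y]  # one-hot encode the integer labels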

# Build the model

# Define a fully connected layer: output = activation(x·W + b)
def fcn_layer(inputs,
              input_dim,
              output_dim,
              activation=None):
    w=tf.Variable(tf.truncated_normal([input_dim,output_dim],stddev=0.1))  # weights, truncated-normal init
    b=tf.Variable(tf.zeros([output_dim]))  # biases, initialized to zero
    xwb=tf.matmul(inputs,w)+b  # linear transform x*W + b

    if activation is None:
        outputs=xwb
    else:
        outputs=activation(xwb)
    return outputs

# Placeholders for the input data
x=tf.placeholder(tf.float32,[None,784],name="X")  # each image has 28*28=784 pixels
image_shaped_input=tf.reshape(x,[-1,28,28,1])
tf.summary.image("input",image_shaped_input)  # log input images to TensorBoard
y=tf.placeholder(tf.float32,[None,10],name="Y")  # 10 classes
# Hidden layer sizes
H1_NN=256
H2_NN=64
H3_NN=32

h1=fcn_layer(inputs=x,
             input_dim=784,
             output_dim=H1_NN,
             activation=tf.nn.relu)

h2=fcn_layer(inputs=h1,
             input_dim=H1_NN,
             output_dim=H2_NN,
             activation=tf.nn.relu)

h3=fcn_layer(inputs=h2,
             input_dim=H2_NN,
             output_dim=H3_NN,
             activation=tf.nn.relu)

forword=fcn_layer(inputs=h3,
             input_dim=H3_NN,
             output_dim=10,
             activation=None)
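For reference, the layer sizes chosen above give roughly 220 thousand trainable parameters; a quick sanity check with plain Python arithmetic:

# Trainable parameters: weights plus biases of the four fully connected layers
n_params = (784*H1_NN + H1_NN) + (H1_NN*H2_NN + H2_NN) + (H2_NN*H3_NN + H3_NN) + (H3_NN*10 + 10)
print(n_params)  # 219818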

tf.summary.histogram("forword",forword)  # log the output-layer logits to TensorBoard
pred=tf.nn.softmax(forword)  # softmax probabilities, used only for prediction/accuracy; the loss is built from the raw logits

# # Build the hidden layer (earlier single-hidden-layer version, kept for reference)
# H1_NN=256
# w1=tf.Variable(tf.random_normal([784,H1_NN]),name="W1")
# b1=tf.Variable(tf.zeros([H1_NN]),name="B1")
# y1=tf.nn.relu(tf.matmul(x,w1)+b1)
#
# # Build the output layer
# w2=tf.Variable(tf.random_normal([H1_NN,10]),name="W2")
# b2=tf.Variable(tf.zeros([10]),name="B2")
#
# forword=tf.matmul(y1,w2)+b2  # forward computation
# pred=tf.nn.softmax(forword)  # class probabilities

# Training hyperparameters
train_epochs=40  # number of training epochs
batch_size=50  # samples per training step (batch size)
total_batch=int(mnist.train.num_examples/batch_size)  # batches per epoch
display_step=1  # how often (in epochs) to print progress
learning_rate=0.01  # learning rate
save_step=5  # save a checkpoint every 5 epochs
ckpt_dir="./ckpt_dir/"  # checkpoint directory
if not os.path.exists(ckpt_dir):
    os.makedirs(ckpt_dir)


# Loss function
# Computing log(pred) directly is numerically unstable, so the loss is built from the raw logits instead
# loss_function=tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred),reduction_indices=1))  # cross-entropy on the softmax output
loss_function=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=forword,labels=y))
tf.summary.scalar("loss",loss_function)

# Optimizer
optimizer=tf.train.AdamOptimizer(learning_rate).minimize(loss_function)

# Accuracy
corr_prediction=tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy=tf.reduce_mean(tf.cast(corr_prediction,tf.float32))  # cast booleans to float and average
tf.summary.scalar("acc",accuracy)

saver=tf.train.Saver()
# Create the session and initialize variables
# Record the training start time
from time import time
startTime=time()

sess=tf.Session()
init=tf.global_variables_initializer()
sess.run(init)

merged_summary_op=tf.summary.merge_all()
writer=tf.summary.FileWriter("./log/",sess.graph)
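Once the script has run, the summaries written by this FileWriter can be inspected by starting TensorBoard on the same directory, e.g. tensorboard --logdir=./log, and opening the address it prints in a browser.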

# Train the model
for epoch in range(train_epochs):
    for batch in range(total_batch):
        xs, ys = mnist.train.next_batch(batch_size)  # read the next batch
        sess.run(optimizer, feed_dict={x: xs, y: ys})  # run one training step

        summary_str=sess.run(merged_summary_op,feed_dict={x: xs, y: ys})  # generate the summaries
        writer.add_summary(summary_str,epoch)  # write them to the log file (the epoch number is used as the step)

    # After the total_batch batches of an epoch, compute loss and accuracy on the validation set (not batched)
    loss, acc = sess.run([loss_function, accuracy],
                         feed_dict={x: mnist.validation.images, y: mnist.validation.labels})
    # Print progress for this epoch
    if (epoch + 1) % display_step == 0:
        print("Train Epoch:", '%02d' % (epoch + 1), "Loss=", "{:.9f}".format(loss),
              " Accuracy=", "{:.4f}".format(acc))
    if (epoch + 1) % save_step == 0:
        saver.save(sess,os.path.join(ckpt_dir,"mnist_h256_model_{:06d}.ckpt".format(epoch+1)))
        print("mnist_h256_model_{:06d}.ckpt saved".format(epoch+1))
        
saver.save(sess,os.path.join(ckpt_dir,"mnist_h256_model.ckpt"))
print("Model saved")

# Show the total training time
duration=time()-startTime
print("Train Finished takes:","{:.2f}".format(duration))

# Evaluate the model
# Test set
accu_test =  sess.run(accuracy,feed_dict={x: mnist.test.images, y: mnist.test.labels})
print("Test Accuracy:",accu_test)

# Validation set
accu_validation =  sess.run(accuracy,feed_dict={x: mnist.validation.images, y: mnist.validation.labels})
print("Validation Accuracy:",accu_validation)

# Training set
accu_train =  sess.run(accuracy,feed_dict={x: mnist.train.images, y: mnist.train.labels})
print("Train Accuracy:",accu_train)

# Make predictions
# pred is a one-hot style probability vector, so take argmax to get the digit 0~9
prediction_result = sess.run(tf.argmax(pred, 1),feed_dict={x: mnist.test.images})

# Show the first 10 predictions
print(prediction_result[0:10])

# Visualization helper: show images with their labels and predictions
def plot_images_labels_prediction(images,  # list of images
                                  labels,  # list of labels
                                  prediction,  # list of predicted values
                                  index,  # index of the first image to show
                                  num=10):  # show 10 images by default
    fig = plt.gcf()  # get the current figure (Get Current Figure)
    fig.set_size_inches(10, 12)  # set the figure size in inches (1 inch = 2.54 cm)
    if num > 25:
        num = 25  # show at most 25 subplots
    for i in range(0, num):
        ax = plt.subplot(5, 5, i + 1)  # select the subplot to draw on

        ax.imshow(np.reshape(images[index], (28, 28)),  # show the image at position index
                  cmap='binary')

        title = "label=" + str(np.argmax(labels[index]))  # build the title for this subplot
        if len(prediction) > 0:
            title += ",predict=" + str(prediction[index])

        ax.set_title(title, fontsize=10)  # show the title above the image
        ax.set_xticks([])  # hide the axis ticks
        ax.set_yticks([])
        index += 1
    plt.show()

# Visualize test-set predictions
plot_images_labels_prediction(mnist.test.images,
                              mnist.test.labels,
                              prediction_result, 10, 25)


# Find the misclassified samples

# Compare predictions with the true labels: True where they match, False where they differ
compare_lists = prediction_result == np.argmax(mnist.test.labels, 1)
print(compare_lists)

# Collect the indices of the misclassified samples
err_lists = [i for i in range(len(compare_lists)) if compare_lists[i] == False]
print(err_lists, len(err_lists))
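Since compare_lists is a NumPy boolean array, the same index list can also be obtained with a single vectorized call (an equivalent sketch, not used above):

err_indices = np.where(compare_lists == False)[0]  # indices where prediction != label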

# Helper that prints every misclassified sample
def print_predict_errs(labels,  # list of labels
                       prediction):  # list of predicted values
    count = 0
    compare_lists = (prediction == np.argmax(labels, 1))
    err_lists = [i for i in range(len(compare_lists)) if compare_lists[i] == False]
    for x in err_lists:
        print("index=" + str(x) +
              " 标签值=", np.argmax(labels[x]),
              "预测值=", prediction[x])
        count = count + 1
    print("总计:" + str(count))

# Print the misclassified samples
print_predict_errs(labels=mnist.test.labels,
                   prediction=prediction_result)
# Visualize some of the misclassified samples
plot_images_labels_prediction(mnist.test.images,
                              mnist.test.labels,
                              prediction_result, 460, 10)

Output of a run (also saved to file)

WARNING:tensorflow:From D:/PycharmProjects/12313/venv/spe.py:8: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From C:\Users\86159\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From C:\Users\86159\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting ./MNIST_data/train-images-idx3-ubyte.gz
Extracting ./MNIST_data/train-labels-idx1-ubyte.gz
WARNING:tensorflow:From C:\Users\86159\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
WARNING:tensorflow:From C:\Users\86159\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting ./MNIST_data/t10k-images-idx3-ubyte.gz
Extracting ./MNIST_data/t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From C:\Users\86159\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From C:\Users\86159\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\framework\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
2020-11-02 10:06:33.950217: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
Train Epoch: 01 Loss= 0.033373673  Accuracy= 0.9488
Train Epoch: 02 Loss= 0.021994267  Accuracy= 0.9662
Train Epoch: 03 Loss= 0.017937912  Accuracy= 0.9722
Train Epoch: 04 Loss= 0.022437602  Accuracy= 0.9684
Train Epoch: 05 Loss= 0.021898262  Accuracy= 0.9710
mnist_h256_model_000005.ckpt saved
Train Epoch: 06 Loss= 0.019241231  Accuracy= 0.9774
Train Epoch: 07 Loss= 0.022323193  Accuracy= 0.9700
Train Epoch: 08 Loss= 0.024318960  Accuracy= 0.9722
Train Epoch: 09 Loss= 0.019099472  Accuracy= 0.9734
Train Epoch: 10 Loss= 0.023970880  Accuracy= 0.9728
mnist_h256_model_000010.ckpt saved
Train Epoch: 11 Loss= 0.020766426  Accuracy= 0.9746
Train Epoch: 12 Loss= 0.021221180  Accuracy= 0.9754
Train Epoch: 13 Loss= 0.026324116  Accuracy= 0.9772
Train Epoch: 14 Loss= 0.022307105  Accuracy= 0.9748
Train Epoch: 15 Loss= 0.023436131  Accuracy= 0.9746
mnist_h256_model_000015.ckpt saved
Train Epoch: 16 Loss= 0.025960576  Accuracy= 0.9754
Train Epoch: 17 Loss= 0.025602039  Accuracy= 0.9770
Train Epoch: 18 Loss= 0.022416592  Accuracy= 0.9776
Train Epoch: 19 Loss= 0.023123918  Accuracy= 0.9782
Train Epoch: 20 Loss= 0.024619967  Accuracy= 0.9770
mnist_h256_model_000020.ckpt saved
Train Epoch: 21 Loss= 0.030413935  Accuracy= 0.9730
Train Epoch: 22 Loss= 0.029267039  Accuracy= 0.9776
Train Epoch: 23 Loss= 0.023128647  Accuracy= 0.9776
Train Epoch: 24 Loss= 0.028852930  Accuracy= 0.9740
Train Epoch: 25 Loss= 0.018726118  Accuracy= 0.9798
mnist_h256_model_000025.ckpt saved
Train Epoch: 26 Loss= 0.030474311  Accuracy= 0.9748
Train Epoch: 27 Loss= 0.034199715  Accuracy= 0.9724
Train Epoch: 28 Loss= 0.029555725  Accuracy= 0.9760
Train Epoch: 29 Loss= 0.028389521  Accuracy= 0.9746
Train Epoch: 30 Loss= 0.030048430  Accuracy= 0.9766
WARNING:tensorflow:From C:\Users\86159\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\training\saver.py:966: remove_checkpoint (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.
Instructions for updating:
Use standard file APIs to delete files with this prefix.
mnist_h256_model_000030.ckpt saved
Train Epoch: 31 Loss= 0.031847879  Accuracy= 0.9704
Train Epoch: 32 Loss= 0.034973588  Accuracy= 0.9748
Train Epoch: 33 Loss= 0.027522055  Accuracy= 0.9746
Train Epoch: 34 Loss= 0.022573495  Accuracy= 0.9778
Train Epoch: 35 Loss= 0.038066167  Accuracy= 0.9750
mnist_h256_model_000035.ckpt saved
Train Epoch: 36 Loss= 0.035617530  Accuracy= 0.9774
Train Epoch: 37 Loss= 0.029843180  Accuracy= 0.9764
Train Epoch: 38 Loss= 0.030613486  Accuracy= 0.9794
Train Epoch: 39 Loss= 0.035416018  Accuracy= 0.9734
Train Epoch: 40 Loss= 0.039067239  Accuracy= 0.9780
mnist_h256_model_000040.ckpt saved
Model saved
Train Finished takes: 139.56
Test Accuracy: 0.9773
Validation Accuracy: 0.978
Train Accuracy: 0.9954727
[7 2 1 0 4 1 4 9 5 9]
[ True  True  True ...  True  True  True]
[33, 92, 124, 149, 151, 175, 247, 259, 321, 359, 445, 449, 495, 582, 619, 674, 684, 720, 740, 813, 842, 846, 882, 924, 938, 951, 956, 965, 1014, 1039, 1107, 1112, 1166, 1182, 1194, 1226, 1232, 1242, 1247, 1260, 1290, 1315, 1328, 1364, 1393, 1395, 1433, 1441, 1500, 1527, 1530, 1549, 1553, 1554, 1654, 1681, 1686, 1716, 1717, 1790, 1800, 1828, 1901, 1910, 1969, 1981, 2035, 2053, 2073, 2098, 2109, 2118, 2130, 2135, 2162, 2182, 2272, 2280, 2292, 2293, 2326, 2369, 2387, 2422, 2454, 2462, 2542, 2597, 2607, 2648, 2654, 2720, 2721, 2730, 2769, 2829, 2896, 2921, 2927, 2939, 2953, 2970, 3023, 3073, 3117, 3225, 3405, 3422, 3441, 3451, 3475, 3503, 3520, 3549, 3558, 3567, 3597, 3662, 3674, 3688, 3751, 3778, 3780, 3796, 3817, 3838, 3850, 3853, 3906, 3941, 3976, 4027, 4078, 4140, 4154, 4163, 4176, 4212, 4224, 4248, 4289, 4315, 4344, 4374, 4425, 4477, 4487, 4497, 4511, 4536, 4552, 4571, 4575, 4639, 4740, 4751, 4761, 4763, 4807, 4823, 4874, 4880, 4890, 4956, 4966, 5140, 5331, 5457, 5586, 5642, 5676, 5749, 5842, 5936, 5955, 5973, 6011, 6023, 6024, 6045, 6059, 6101, 6157, 6555, 6571, 6572, 6576, 6597, 6628, 6641, 6755, 7216, 7233, 7514, 7552, 7606, 7850, 7859, 7918, 7921, 8020, 8094, 8198, 8325, 8339, 8408, 9009, 9015, 9019, 9024, 9209, 9385, 9517, 9587, 9634, 9664, 9669, 9679, 9700, 9729, 9745, 9749, 9770, 9839, 9867, 9888, 9975] 227
index=33 标签值= 4 预测值= 0
index=92 标签值= 9 预测值= 4
index=124 标签值= 7 预测值= 4
index=149 标签值= 2 预测值= 8
index=151 标签值= 9 预测值= 8
index=175 标签值= 7 预测值= 9
index=247 标签值= 4 预测值= 2
index=259 标签值= 6 预测值= 0
index=321 标签值= 2 预测值= 7
index=359 标签值= 9 预测值= 8
index=445 标签值= 6 预测值= 0
index=449 标签值= 3 预测值= 5
index=495 标签值= 8 预测值= 0
index=582 标签值= 8 预测值= 2
index=619 标签值= 1 预测值= 8
index=674 标签值= 5 预测值= 3
index=684 标签值= 7 预测值= 3
index=720 标签值= 5 预测值= 8
index=740 标签值= 4 预测值= 9
index=813 标签值= 9 预测值= 8
index=842 标签值= 7 预测值= 0
index=846 标签值= 7 预测值= 0
index=882 标签值= 9 预测值= 4
index=924 标签值= 2 预测值= 7
index=938 标签值= 3 预测值= 5
index=951 标签值= 5 预测值= 7
index=956 标签值= 1 预测值= 2
index=965 标签值= 6 预测值= 0
index=1014 标签值= 6 预测值= 5
index=1039 标签值= 7 预测值= 8
index=1107 标签值= 9 预测值= 8
index=1112 标签值= 4 预测值= 6
index=1166 标签值= 3 预测值= 8
index=1182 标签值= 6 预测值= 8
index=1194 标签值= 7 预测值= 8
index=1226 标签值= 7 预测值= 2
index=1232 标签值= 9 预测值= 4
index=1242 标签值= 4 预测值= 9
index=1247 标签值= 9 预测值= 8
index=1260 标签值= 7 预测值= 1
index=1290 标签值= 3 预测值= 5
index=1315 标签值= 3 预测值= 5
index=1328 标签值= 7 预测值= 8
index=1364 标签值= 8 预测值= 2
index=1393 标签值= 5 预测值= 3
index=1395 标签值= 2 预测值= 3
index=1433 标签值= 8 预测值= 1
index=1441 标签值= 8 预测值= 0
index=1500 标签值= 7 预测值= 1
index=1527 标签值= 1 预测值= 5
index=1530 标签值= 8 预测值= 7
index=1549 标签值= 4 预测值= 2
index=1553 标签值= 9 预测值= 3
index=1554 标签值= 9 预测值= 8
index=1654 标签值= 2 预测值= 3
index=1681 标签值= 3 预测值= 7
index=1686 标签值= 8 预测值= 5
index=1716 标签值= 7 预测值= 8
index=1717 标签值= 8 预测值= 0
index=1790 标签值= 2 预测值= 8
index=1800 标签值= 6 预测值= 9
index=1828 标签值= 3 预测值= 8
index=1901 标签值= 9 预测值= 4
index=1910 标签值= 5 预测值= 9
index=1969 标签值= 6 预测值= 0
index=1981 标签值= 6 预测值= 8
index=2035 标签值= 5 预测值= 3
index=2053 标签值= 4 预测值= 9
index=2073 标签值= 5 预测值= 6
index=2098 标签值= 2 预测值= 0
index=2109 标签值= 3 预测值= 8
index=2118 标签值= 6 预测值= 0
index=2130 标签值= 4 预测值= 9
index=2135 标签值= 6 预测值= 1
index=2162 标签值= 5 预测值= 8
index=2182 标签值= 1 预测值= 2
index=2272 标签值= 8 预测值= 0
index=2280 标签值= 3 预测值= 9
index=2292 标签值= 9 预测值= 5
index=2293 标签值= 9 预测值= 0
index=2326 标签值= 0 预测值= 5
index=2369 标签值= 5 预测值= 8
index=2387 标签值= 9 预测值= 1
index=2422 标签值= 6 预测值= 0
index=2454 标签值= 6 预测值= 5
index=2462 标签值= 2 预测值= 0
index=2542 标签值= 6 预测值= 0
index=2597 标签值= 5 预测值= 3
index=2607 标签值= 7 预测值= 8
index=2648 标签值= 9 预测值= 5
index=2654 标签值= 6 预测值= 1
index=2720 标签值= 9 预测值= 0
index=2721 标签值= 6 预测值= 0
index=2730 标签值= 7 预测值= 4
index=2769 标签值= 9 预测值= 8
index=2829 标签值= 5 预测值= 9
index=2896 标签值= 8 预测值= 0
index=2921 标签值= 3 预测值= 2
index=2927 标签值= 3 预测值= 2
index=2939 标签值= 9 预测值= 5
index=2953 标签值= 3 预测值= 5
index=2970 标签值= 5 预测值= 8
index=3023 标签值= 8 预测值= 5
index=3073 标签值= 1 预测值= 2
index=3117 标签值= 5 预测值= 9
index=3225 标签值= 7 预测值= 9
index=3405 标签值= 4 预测值= 9
index=3422 标签值= 6 预测值= 0
index=3441 标签值= 7 预测值= 8
index=3451 标签值= 7 预测值= 9
index=3475 标签值= 3 预测值= 0
index=3503 标签值= 9 预测值= 1
index=3520 标签值= 6 预测值= 4
index=3549 标签值= 3 预测值= 2
index=3558 标签值= 5 预测值= 0
index=3567 标签值= 8 预测值= 5
index=3597 标签值= 9 预测值= 3
index=3662 标签值= 8 预测值= 0
index=3674 标签值= 8 预测值= 5
index=3688 标签值= 6 预测值= 0
index=3751 标签值= 7 预测值= 3
index=3778 标签值= 5 预测值= 6
index=3780 标签值= 4 预测值= 0
index=3796 标签值= 2 预测值= 8
index=3817 标签值= 2 预测值= 4
index=3838 标签值= 7 预测值= 0
index=3850 标签值= 9 预测值= 4
index=3853 标签值= 6 预测值= 0
index=3906 标签值= 1 预测值= 3
index=3941 标签值= 4 预测值= 2
index=3976 标签值= 7 预测值= 8
index=4027 标签值= 7 预测值= 1
index=4078 标签值= 9 预测值= 8
index=4140 标签值= 8 预测值= 2
index=4154 标签值= 9 预测值= 4
index=4163 标签值= 9 预测值= 0
index=4176 标签值= 2 预测值= 7
index=4212 标签值= 1 预测值= 8
index=4224 标签值= 9 预测值= 7
index=4248 标签值= 2 预测值= 8
index=4289 标签值= 2 预测值= 7
index=4315 标签值= 5 预测值= 8
index=4344 标签值= 9 预测值= 3
index=4374 标签值= 5 预测值= 8
index=4425 标签值= 9 预测值= 4
index=4477 标签值= 0 预测值= 6
index=4487 标签值= 7 预测值= 8
index=4497 标签值= 8 预测值= 7
index=4511 标签值= 9 预测值= 0
index=4536 标签值= 6 预测值= 5
index=4552 标签值= 3 预测值= 8
index=4571 标签值= 6 预测值= 8
index=4575 标签值= 4 预测值= 0
index=4639 标签值= 8 预测值= 9
index=4740 标签值= 3 预测值= 5
index=4751 标签值= 4 预测值= 6
index=4761 标签值= 9 预测值= 4
index=4763 标签值= 5 预测值= 6
index=4807 标签值= 8 预测值= 0
index=4823 标签值= 9 预测值= 4
index=4874 标签值= 9 预测值= 5
index=4880 标签值= 0 预测值= 6
index=4890 标签值= 8 预测值= 3
index=4956 标签值= 8 预测值= 4
index=4966 标签值= 7 预测值= 8
index=5140 标签值= 3 预测值= 5
index=5331 标签值= 1 预测值= 6
index=5457 标签值= 1 预测值= 0
index=5586 标签值= 8 预测值= 0
index=5642 标签值= 1 预测值= 5
index=5676 标签值= 4 预测值= 8
index=5749 标签值= 8 预测值= 5
index=5842 标签值= 4 预测值= 7
index=5936 标签值= 4 预测值= 9
index=5955 标签值= 3 预测值= 8
index=5973 标签值= 3 预测值= 8
index=6011 标签值= 3 预测值= 8
index=6023 标签值= 3 预测值= 9
index=6024 标签值= 8 预测值= 3
index=6045 标签值= 3 预测值= 8
index=6059 标签值= 3 预测值= 8
index=6101 标签值= 1 预测值= 8
index=6157 标签值= 9 预测值= 8
index=6555 标签值= 8 预测值= 9
index=6571 标签值= 9 预测值= 7
index=6572 标签值= 1 预测值= 8
index=6576 标签值= 7 预测值= 1
index=6597 标签值= 0 预测值= 9
index=6628 标签值= 1 预测值= 9
index=6641 标签值= 8 预测值= 5
index=6755 标签值= 8 预测值= 9
index=7216 标签值= 0 预测值= 6
index=7233 标签值= 3 预测值= 5
index=7514 标签值= 8 预测值= 0
index=7552 标签值= 8 预测值= 9
index=7606 标签值= 7 预测值= 1
index=7850 标签值= 5 预测值= 0
index=7859 标签值= 5 预测值= 6
index=7918 标签值= 5 预测值= 6
index=7921 标签值= 8 预测值= 2
index=8020 标签值= 1 预测值= 8
index=8094 标签值= 2 预测值= 8
index=8198 标签值= 2 预测值= 4
index=8325 标签值= 0 预测值= 6
index=8339 标签值= 8 预测值= 6
index=8408 标签值= 8 预测值= 5
index=9009 标签值= 7 预测值= 2
index=9015 标签值= 7 预测值= 2
index=9019 标签值= 7 预测值= 2
index=9024 标签值= 7 预测值= 2
index=9209 标签值= 2 预测值= 8
index=9385 标签值= 8 预测值= 5
index=9517 标签值= 9 预测值= 4
index=9587 标签值= 9 预测值= 4
index=9634 标签值= 0 预测值= 1
index=9664 标签值= 2 预测值= 7
index=9669 标签值= 4 预测值= 7
index=9679 标签值= 6 预测值= 8
index=9700 标签值= 2 预测值= 8
index=9729 标签值= 5 预测值= 6
index=9745 标签值= 4 预测值= 2
index=9749 标签值= 5 预测值= 6
index=9770 标签值= 5 预测值= 0
index=9839 标签值= 2 预测值= 8
index=9867 标签值= 2 预测值= 8
index=9888 标签值= 6 预测值= 0
index=9975 标签值= 3 预测值= 8
总计:227

(Figure: 25 test images starting at index 10, each titled with its label and predicted digit)
(Figure: 10 test images starting at index 460, each titled with its label and predicted digit)
