TensorFlow 2.0 Notes (4): Extending the Neural Network Boilerplate

Peking University MOOC: TF2.0 Notes

These are my lecture notes, kept for future reference (most of the content comes from the course slides).

I. Extensions to the neural network boilerplate

① Build your own dataset to apply the network to your own domain
② Data augmentation to enlarge the dataset
③ Checkpointing: save and reload the model so training can resume after an interruption
④ Parameter extraction: write the trained weights to a text file
⑤ Plot acc/loss curves to inspect training progress
⑥ An application that recognizes objects in user-supplied images

1. Building your own dataset

Looking at the label file: the first column, value[0], is the image filename; the second column, value[1], is the label.

import tensorflow as tf
from PIL import Image
import numpy as np
import os
# Build the dataset yourself: write a function that replaces the two lines below
# mnist = tf.keras.datasets.mnist
# (x_train, y_train), (x_test, y_test) = mnist.load_data()


train_path = './fashion_image_label/fashion_train_jpg_60000/'
train_txt = './fashion_image_label/fashion_train_jpg_60000.txt'
x_train_savepath = './fashion_image_label/fashion_x_train.npy'   # file that stores the input features
y_train_savepath = './fashion_image_label/fahion_y_train.npy'

test_path = './fashion_image_label/fashion_test_jpg_10000/'
test_txt = './fashion_image_label/fashion_test_jpg_10000.txt'
x_test_savepath = './fashion_image_label/fashion_x_test.npy'
y_test_savepath = './fashion_image_label/fashion_y_test.npy'


def generateds(path, txt):
    f = open(txt, 'r')
    contents = f.readlines()  # read the file line by line
    f.close()
    x, y_ = [], []   # lists work like dynamic arrays; grow them with append
    for content in contents:  # process one line at a time
        value = content.split()  # split on whitespace into a list
        img_path = path + value[0]   # the image filename is value[0]
        img = Image.open(img_path)
        # mode "1" is pure black-and-white;
        # mode "L" is 8-bit grayscale: 0 is black, 255 is white, values in between are shades of gray.
        img = np.array(img.convert('L'))
        img = img / 255.
        x.append(img)  # x is a list of images
        y_.append(value[1])  # the label is value[1]
        print('loading : ' + content)

    x = np.array(x)  # convert the lists to np.array
    y_ = np.array(y_)
    y_ = y_.astype(np.int64)  # convert the labels to int64
    return x, y_

# If the saved feature/label files exist, load them directly; otherwise generate the dataset
if os.path.exists(x_train_savepath) and os.path.exists(y_train_savepath) and os.path.exists(
        x_test_savepath) and os.path.exists(y_test_savepath):
    print('-------------Load Datasets-----------------')
    x_train_save = np.load(x_train_savepath)
    y_train = np.load(y_train_savepath)
    x_test_save = np.load(x_test_savepath)
    y_test = np.load(y_test_savepath)
    x_train = np.reshape(x_train_save, (len(x_train_save), 28, 28))
    x_test = np.reshape(x_test_save, (len(x_test_save), 28, 28))
else:
    print('-------------Generate Datasets-----------------')
    x_train, y_train = generateds(train_path, train_txt)
    x_test, y_test = generateds(test_path, test_txt)

    print('-------------Save Datasets-----------------')
    x_train_save = np.reshape(x_train, (len(x_train), -1))  # the number of rows is fixed; -1 lets NumPy infer the number of columns
    x_test_save = np.reshape(x_test, (len(x_test), -1))
    np.save(x_train_savepath, x_train_save)
    np.save(y_train_savepath, y_train)
    np.save(x_test_savepath, x_test_save)
    np.save(y_test_savepath, y_test)

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1)
model.summary()

2. Data augmentation

Data augmentation applies simple deformations to the images so the model can cope with distortions caused by different camera angles.

Data augmentation (to enlarge the dataset):

image_gen_train = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale = factor that every pixel value is multiplied by,
    rotation_range = range of random rotation angles in degrees,
    width_shift_range = range of random horizontal shifts,
    height_shift_range = range of random vertical shifts,
    horizontal_flip = whether to flip images horizontally at random,
    zoom_range = range of random zoom, i.e. [1-n, 1+n])

image_gen_train.fit(x_train)
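
A minimal sketch of how these options fit into the training loop (the concrete values are example settings, not from the slides): ImageDataGenerator expects a channel dimension, so the 28x28 grayscale images are reshaped to (N, 28, 28, 1), and model.fit then consumes the augmented batches produced by .flow().

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# ImageDataGenerator works on 4-D input, so add a single channel dimension
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)

image_gen_train = ImageDataGenerator(
    rescale=1. / 1.,         # keep pixel values unchanged (already normalized)
    rotation_range=45,       # rotate randomly by up to 45 degrees
    width_shift_range=.15,   # shift horizontally by up to 15% of the width
    height_shift_range=.15,  # shift vertically by up to 15% of the height
    horizontal_flip=False,   # no horizontal flipping
    zoom_range=0.5)          # zoom randomly within [0.5, 1.5]
image_gen_train.fit(x_train)

# feed the augmented batches to the model instead of the raw arrays
model.fit(image_gen_train.flow(x_train, y_train, batch_size=32),
          epochs=5, validation_data=(x_test, y_test), validation_freq=1)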

3. Checkpointing and resuming training

Checkpointing: if training is interrupted for some reason, the current training state needs to be saved so that training can pick up where it left off next time.

# Restoring the model:

model.load_weights(path/filename)

When a ckpt file is written, an index file is generated automatically, so checking whether the index file exists tells us whether model parameters have already been saved.

checkpoint_save_path = "./checkpoint/fashion.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):  # if the index file exists, parameters have been saved before
    print('-------------load the model-----------------')
    model.load_weights(checkpoint_save_path)    # restore the saved weights

 

# Saving the model:

cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=path/filename,          # where to store the checkpoint
    save_weights_only=True/False,    # whether to save only the weights
    save_best_only=True/False)       # whether to keep only the best result

history = model.fit( callbacks=[cp_callback] )

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import numpy as np
import os

# fashion = tf.keras.datasets.fashion_mnist
# (x_train, y_train), (x_test, y_test) = fashion.load_data()

###############################################################
# (1) The code below replaces the two commented-out lines above
# by reading the image files directly
train_path = './fashion_image_label/fashion_train_jpg_60000/'
train_txt = './fashion_image_label/fashion_train_jpg_60000.txt'
x_train_savepath = './fashion_image_label/fashion_x_train.npy'   # file that stores the input features
y_train_savepath = './fashion_image_label/fahion_y_train.npy'

test_path = './fashion_image_label/fashion_test_jpg_10000/'
test_txt = './fashion_image_label/fashion_test_jpg_10000.txt'
x_test_savepath = './fashion_image_label/fashion_x_test.npy'
y_test_savepath = './fashion_image_label/fashion_y_test.npy'
def generateds(path, txt):
    f = open(txt, 'r')
    contents = f.readlines()  # read the file line by line
    f.close()
    x, y_ = [], []   # lists work like dynamic arrays; grow them with append
    for content in contents:  # process one line at a time
        value = content.split()  # split on whitespace into a list
        img_path = path + value[0]   # the image filename is value[0]
        img = Image.open(img_path)
        # mode "1" is pure black-and-white;
        # mode "L" is 8-bit grayscale: 0 is black, 255 is white, values in between are shades of gray.
        img = np.array(img.convert('L'))
        img = img / 255.
        x.append(img)  # x is a list of images
        y_.append(value[1])  # the label is value[1]
        print('loading : ' + content)

    x = np.array(x)  # convert the lists to np.array
    y_ = np.array(y_)
    y_ = y_.astype(np.int64)  # convert the labels to int64
    return x, y_

# If the saved feature/label files exist, load them directly; otherwise generate the dataset
if os.path.exists(x_train_savepath) and os.path.exists(y_train_savepath) and os.path.exists(
        x_test_savepath) and os.path.exists(y_test_savepath):
    print('-------------Load Datasets-----------------')
    x_train_save = np.load(x_train_savepath)
    y_train = np.load(y_train_savepath)
    x_test_save = np.load(x_test_savepath)
    y_test = np.load(y_test_savepath)
    x_train = np.reshape(x_train_save, (len(x_train_save), 28, 28))
    x_test = np.reshape(x_test_save, (len(x_test_save), 28, 28))
else:
    print('-------------Generate Datasets-----------------')
    x_train, y_train = generateds(train_path, train_txt)
    x_test, y_test = generateds(test_path, test_txt)

    print('-------------Save Datasets-----------------')
    x_train_save = np.reshape(x_train, (len(x_train), -1))  # the number of rows is fixed; -1 lets NumPy infer the number of columns
    x_test_save = np.reshape(x_test, (len(x_test), -1))
    np.save(x_train_savepath, x_train_save)
    np.save(y_train_savepath, y_train)
    np.save(x_test_savepath, x_test_save)
    np.save(y_test_savepath, y_test)

#################################################################

# pixel values were already normalized to [0, 1] in generateds(), so no further scaling is needed here

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

#################################################################
# The part added in this example: load and save checkpoints
checkpoint_save_path = "./checkpoint/fashion.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):  # if the index file exists, parameters have been saved before
    print('-------------load the model-----------------')
    model.load_weights(checkpoint_save_path)    # restore the saved weights

# save the model during training
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,  # save path
                                                 save_weights_only=True,         # save only the weights
                                                 save_best_only=True)            # keep only the best result

history = model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1,
                    callbacks=[cp_callback])
#################################################################
model.summary()

4. Parameter extraction: saving the weights to a text file

Extracting trainable parameters: model.trainable_variables returns the model's trainable parameters.
np.set_printoptions(threshold=...) sets how many array elements NumPy prints before truncating the output with an ellipsis; use threshold=np.inf to print everything.
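
A minimal sketch of how the two calls are combined (the same pattern appears in the full listing below; './weights.txt' is just an example output path):

import numpy as np

np.set_printoptions(threshold=np.inf)  # never truncate printed arrays

print(model.trainable_variables)       # list of tf.Variable objects (kernels and biases)

# write each trainable variable's name, shape, and values to a text file
with open('./weights.txt', 'w') as file:
    for v in model.trainable_variables:
        file.write(str(v.name) + '\n')
        file.write(str(v.shape) + '\n')
        file.write(str(v.numpy()) + '\n')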

5. Visualizing acc/loss to check training results

history = model.fit(
    training features, training labels, batch_size=, epochs=,
    validation_split = fraction of the training data to use for validation,
    validation_data = validation set,
    validation_freq = how often, in epochs, to evaluate on the validation set)

While fit runs, the following quantities are recorded in history:

training-set loss: loss

validation-set loss: val_loss

training-set accuracy: sparse_categorical_accuracy

validation-set accuracy: val_sparse_categorical_accuracy

They can be retrieved through history.history.
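
For example (assuming the model was compiled with metrics=['sparse_categorical_accuracy'] as above; the exact set and order of keys may vary with the TensorFlow version):

history = model.fit(x_train, y_train, batch_size=32, epochs=5,
                    validation_data=(x_test, y_test), validation_freq=1)

print(history.history.keys())
# e.g. dict_keys(['loss', 'sparse_categorical_accuracy', 'val_loss', 'val_sparse_categorical_accuracy'])

acc = history.history['sparse_categorical_accuracy']  # training accuracy per epoch
val_loss = history.history['val_loss']                # validation loss per epoch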

import tensorflow as tf
import os
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt

np.set_printoptions(threshold=np.inf)

# mnist = tf.keras.datasets.mnist
# (x_train, y_train), (x_test, y_test) = mnist.load_data()

#################################################################################
# Replaces the two commented-out lines above: load the dataset from the image files
train_path = './mnist_image_label/mnist_train_jpg_60000/'
train_txt = './mnist_image_label/mnist_train_jpg_60000.txt'
x_train_savepath = './mnist_image_label/mnist_x_train.npy'
y_train_savepath = './mnist_image_label/mnist_y_train.npy'

test_path = './mnist_image_label/mnist_test_jpg_10000/'
test_txt = './mnist_image_label/mnist_test_jpg_10000.txt'
x_test_savepath = './mnist_image_label/mnist_x_test.npy'
y_test_savepath = './mnist_image_label/mnist_y_test.npy'
def generateds(path, txt):
    f = open(txt, 'r')  # open the txt file read-only
    contents = f.readlines()  # read all lines of the file
    f.close()  # close the txt file
    x, y_ = [], []  # create empty lists
    for content in contents:  # process one line at a time
        value = content.split()  # split on whitespace: image path is value[0], label is value[1]
        img_path = path + value[0]  # build the full image path and filename
        img = Image.open(img_path)  # read the image
        img = np.array(img.convert('L'))  # convert to an 8-bit grayscale np.array
        img = img / 255.  # normalize the data (preprocessing)
        x.append(img)  # append the normalized image to list x
        y_.append(value[1])  # append the label to list y_
        print('loading : ' + content)  # print a progress message
    x = np.array(x)  # convert to np.array
    y_ = np.array(y_)  # convert to np.array
    y_ = y_.astype(np.int64)  # convert to 64-bit integers
    return x, y_  # return input features x and labels y_

if os.path.exists(x_train_savepath) and os.path.exists(y_train_savepath) and os.path.exists(
        x_test_savepath) and os.path.exists(y_test_savepath):
    print('-------------Load Datasets-----------------')
    x_train_save = np.load(x_train_savepath)
    y_train = np.load(y_train_savepath)
    x_test_save = np.load(x_test_savepath)
    y_test = np.load(y_test_savepath)
    x_train = np.reshape(x_train_save, (len(x_train_save), 28, 28))
    x_test = np.reshape(x_test_save, (len(x_test_save), 28, 28))
else:
    print('-------------Generate Datasets-----------------')
    x_train, y_train = generateds(train_path, train_txt)
    x_test, y_test = generateds(test_path, test_txt)

    print('-------------Save Datasets-----------------')
    x_train_save = np.reshape(x_train, (len(x_train), -1))
    x_test_save = np.reshape(x_test, (len(x_test), -1))
    np.save(x_train_savepath, x_train_save)
    np.save(y_train_savepath, y_train)
    np.save(x_test_savepath, x_test_save)
    np.save(y_test_savepath, y_test)
#################################################################################
# pixel values were already normalized to [0, 1] in generateds(), so no further scaling is needed here

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

checkpoint_save_path = "./checkpoint/mnist.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------load the model-----------------')
    model.load_weights(checkpoint_save_path)

cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
                                                 save_weights_only=True,
                                                 save_best_only=True)

history = model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1,
                    callbacks=[cp_callback])
model.summary()

print(model.trainable_variables)
file = open('./weights.txt', 'w')
for v in model.trainable_variables:
    file.write(str(v.name) + '\n')
    file.write(str(v.shape) + '\n')
    file.write(str(v.numpy()) + '\n')
file.close()


###############################################    show   ###############################################
# Extract the training metrics recorded by model.fit() during training
# and plot the acc and loss curves for the training and validation sets
acc = history.history['sparse_categorical_accuracy']   # training accuracy
val_acc = history.history['val_sparse_categorical_accuracy']  # validation accuracy
loss = history.history['loss']    # training loss
val_loss = history.history['val_loss']    # validation loss

# split the figure into one row and two columns; draw the first subplot
plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')  # plot acc and val_acc
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()  # show the legend
# draw the second subplot
plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')  # plot loss and val_loss
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()

Result: the Training and Validation Accuracy curve and the Training and Validation Loss curve (figures omitted).
