TensorFlow2.0 深度学习入门与实践 日月光华 学习笔记(二)

tf.data输入模块

import tensorflow as tf

# Download Fashion-MNIST (60k train / 10k test 28x28 grayscale images, 10 classes).
(train_image, train_label), (test_image, test_label) = tf.keras.datasets.fashion_mnist.load_data()

# Normalize pixel values from [0, 255] to [0, 1].
train_image = train_image / 255
test_image = test_image / 255

# Build a tf.data pipeline by zipping separate image and label datasets.
ds_train_img = tf.data.Dataset.from_tensor_slices(train_image)
ds_train_lab = tf.data.Dataset.from_tensor_slices(train_label)
ds_train = tf.data.Dataset.zip((ds_train_img, ds_train_lab))
# Shuffle (buffer of 1000), repeat indefinitely, and emit batches of 32.
ds_train = ds_train.shuffle(1000).repeat().batch(32)

# Alternative construction: pass both tensors to from_tensor_slices at once.
ds_test = tf.data.Dataset.from_tensor_slices((test_image, test_label))
# The test set needs no shuffling or repeating; batching only.
ds_test = ds_test.batch(32)

# Simple fully-connected classifier: flatten -> hidden ReLU layer -> 10-way softmax.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))

# Integer class labels -> sparse categorical crossentropy.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])

# Steps per epoch = sample count // batch size (needed because the dataset repeats forever).
steps_per_epoch = train_image.shape[0] // 32
validation_steps = test_image.shape[0] // 32

# Train and evaluate.
model.fit(ds_train, epochs=5, steps_per_epoch=steps_per_epoch,
          validation_data=ds_test, validation_steps=validation_steps)

卷积神经网络

cnn架构
卷积层 conv2d
非线性变化层 relu/sigmoid/tanh
池化层 pooling2d
全连接层 w*x+b
卷积层参数
ksize 卷积核的大小
strides 卷积核移动跨度
padding 边缘填充

import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt

(train_image, train_label), (test_image, test_label) = tf.keras.datasets.fashion_mnist.load_data()

# Normalize to [0, 1]; without this the raw 0-255 pixel values slow and
# destabilize training (the dense-network script above normalizes, this one didn't).
train_image = train_image / 255
test_image = test_image / 255

# Conv2D expects rank-4 input (batch, height, width, channels);
# add a trailing channel axis so each image is (28, 28, 1).
train_image = np.expand_dims(train_image, -1)
test_image = np.expand_dims(test_image, -1)

# Small CNN: conv -> max-pool -> conv -> global average pool -> 10-way softmax.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, (3, 3), input_shape=train_image.shape[1:], activation='relu'))
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(10, activation='softmax'))

# Integer class labels -> sparse categorical crossentropy.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])

# Train, keeping per-epoch metrics in `history`.
history = model.fit(train_image, train_label, epochs=30, validation_data=(test_image, test_label))

# history.history.keys() lists the recorded metric names.

# Plot train-vs-validation accuracy and loss curves.
plt.plot(history.epoch, history.history['acc'], label='acc')
plt.plot(history.epoch, history.history['val_acc'], label='val_acc')
plt.plot(history.epoch, history.history['loss'], label='loss')
plt.plot(history.epoch, history.history['val_loss'], label='val_loss')
plt.legend()  # labels are only rendered if legend() is called
plt.show()

tf.data,卷积神经网络 综合实例

import tensorflow as tf
import matplotlib.pyplot as plt
import pathlib
import random

# Collect every image path under data_dir (expects one subdirectory per class,
# i.e. a layout like data_dir/<class_name>/<image>.jpg).
data_dir = '...'  # TODO: set to the dataset root directory
data_root = pathlib.Path(data_dir)
all_image_path = [str(path) for path in data_root.glob('*/*')]
random.shuffle(all_image_path)
image_count = len(all_image_path)

# Map class-directory names to integer indices (sorted for a stable encoding).
label_names = sorted(item.name for item in data_root.glob('*/'))
label_to_index = {name: index for index, name in enumerate(label_names)}

# Label each image by its parent directory name
# (pathlib.Path('.../cls/2.jpg').parent.name -> 'cls').
all_image_label = [label_to_index[pathlib.Path(p).parent.name] for p in all_image_path]
# Inverse mapping, used to display a class name for a predicted index.
index_to_label = {v: k for k, v in label_to_index.items()}

# Image preprocessing function for the tf.data pipeline.
def load_preprosess_image(image_path):
    """Read a JPEG file from disk and return a (256, 256, 3) float32 tensor scaled to [0, 1]."""
    raw = tf.io.read_file(image_path)
    decoded = tf.image.decode_jpeg(raw, channels=3)
    resized = tf.image.resize(decoded, [256, 256])
    return tf.cast(resized, tf.float32) / 255

#检验是否正确
# plt.imshow(load_preprosess_image(all_image_path[100]))
# plt.show()


# Build the (image, label) dataset: decode lazily via map, then zip pairs together.
path_ds = tf.data.Dataset.from_tensor_slices(all_image_path)
image_dataset = path_ds.map(load_preprosess_image)
label_dataset = tf.data.Dataset.from_tensor_slices(all_image_label)
dataset = tf.data.Dataset.zip((image_dataset, label_dataset))

# Hold out 20% of the (already shuffled) images for validation.
test_count = int(image_count * 0.2)
train_count = image_count - test_count
train_dataset = dataset.skip(test_count)
test_dataset = dataset.take(test_count)
batch_size = 32
# Shuffle BEFORE repeat so each epoch is one full, distinct pass over the data;
# repeat-then-shuffle mixes samples across epoch boundaries.
train_dataset = train_dataset.shuffle(buffer_size=train_count).repeat().batch(batch_size)
test_dataset = test_dataset.batch(batch_size)

# VGG-style binary classifier: stacked 3x3 conv + batch-norm blocks with
# max-pooling between stages, global average pooling, and a sigmoid head.
layers = tf.keras.layers

def _conv_bn(filters, **conv_kwargs):
    # One 3x3 ReLU convolution followed by batch normalization.
    return [layers.Conv2D(filters, (3, 3), activation='relu', **conv_kwargs),
            layers.BatchNormalization()]

model = tf.keras.Sequential(
    _conv_bn(64, input_shape=(256, 256, 3))
    + _conv_bn(64)
    + [layers.MaxPooling2D()]
    + _conv_bn(128)
    + _conv_bn(128)
    + [layers.MaxPooling2D()]
    + _conv_bn(256)
    + _conv_bn(256)
    + [layers.MaxPooling2D()]
    + _conv_bn(512)
    + [layers.MaxPooling2D()]
    + _conv_bn(512)
    + [layers.MaxPooling2D()]
    + _conv_bn(1024)
    + [layers.GlobalAveragePooling2D(),
       layers.Dense(1024, activation='relu'),
       layers.BatchNormalization(),
       layers.Dense(256, activation='relu'),
       layers.BatchNormalization(),
       # Single sigmoid unit: this dataset is treated as a two-class problem.
       layers.Dense(1, activation='sigmoid')]
)

# Binary classification head (sigmoid) -> binary crossentropy loss.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
steps_per_epoch = train_count // batch_size
validation_steps = test_count // batch_size

# Train; the train dataset repeats forever, so steps_per_epoch bounds each epoch.
history = model.fit(train_dataset, epochs=10, steps_per_epoch=steps_per_epoch,
                    validation_data=test_dataset, validation_steps=validation_steps)
# history.history.keys() lists the recorded metric names.

# Plot train-vs-validation accuracy and loss curves.
plt.plot(history.epoch, history.history['acc'], label='acc')
plt.plot(history.epoch, history.history['val_acc'], label='val_acc')
plt.plot(history.epoch, history.history['loss'], label='loss')
plt.plot(history.epoch, history.history['val_loss'], label='val_loss')
plt.legend()  # labels are only rendered if legend() is called
plt.show()
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值