Getting started with deep learning using Keras: binary image classification


# Import libraries
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import os
import numpy as np
import glob
# D:/JetBrains/PycharmProjectsuntitled/data1
# Collect the image file paths
train_image_path =glob.glob('../data1/train/*/*.jpg')
test_image_path =glob.glob('../data1/test/*/*.jpg')
# glob.glob() returns a list of all file paths matching the pattern
# e.g. one matched path here looks like "../data1/train\\cat\\0.jpg" (Windows-style separators)
# # Assign labels (list-comprehension version, commented out here)
# train_image_label =[int(p.split('\\')[1 ]=='cat' )for p in train_image_path]
# test_image_label =[int(p.split('\\')[1 ]=='cat' )for p in test_image_path]

# Or assign labels with explicit loops (this pattern also extends to multi-class labeling)
train_image_label=[]
for p in train_image_path:
    if(p.split('\\')[1]=='cat'):
        train_image_label.append(1)
    else:
        train_image_label.append(0)

test_image_label=[]
for p in test_image_path:
    if(p.split('\\')[1]=='cat'):
        test_image_label.append(1)
    else:
        test_image_label.append(0)

# int(p.split('\\')[1]=='cat'): splitting p on '\\' puts the class folder name at index 1, which is either 'cat' or 'dog'
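# Worked example (a sketch with a hypothetical path in the Windows form shown above, not part of the script):
# >>> p = '../data1/train\\cat\\0.jpg'
# >>> p.split('\\')
# ['../data1/train', 'cat', '0.jpg']   # index 1 is the class folder
# >>> int(p.split('\\')[1] == 'cat')
# 1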

# (Commented-out) earlier version of the image-loading function
# def load_image(path,label):
#     image=tf.io.read_file(path)
#     image=tf.image.decode_jpeg(image,channels=3)
#     image=tf.image.resize(image,[60,80])# note the image size
#     image=tf.cast(image,tf.float32)
#     image=image/255
#     return image,label

# Functions that load and preprocess the dataset images
def train_load_image(path, label):
    image = tf.io.read_file(path)
    # Read the raw file
    image = tf.image.decode_jpeg(image, channels=1)
    # Decode the JPEG into a uint8 tensor; channels=1 keeps a single grayscale channel
    image = tf.image.resize(image, [60, 72])
    # Resize to 60x72 (note the image size)
    image = tf.image.random_crop(image, [50, 60, 1])
    # Randomly crop to 50x60 (data augmentation to reduce overfitting)
    image = tf.image.random_flip_left_right(image)
    # Random horizontal flip
    image = tf.image.random_flip_up_down(image)
    # Random vertical flip
    image = tf.image.random_brightness(image, 0.3)
    # Random brightness adjustment; 0.3 is the maximum delta
    image = tf.image.random_contrast(image, 0.2, 0.4)
    # Random contrast adjustment; the first argument is the image to transform
    image = tf.cast(image, tf.float32)
    # Note: tf.image.resize already returns float32, so this cast is effectively a no-op
    image = image / 255
    # Normalize to [0, 1]
    # The cast + normalization can also be done with tf.image.convert_image_dtype()
    return image, label
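# A minimal alternative sketch (not used in this script): tf.image.convert_image_dtype does the
# cast and the [0, 255] -> [0, 1] rescaling in one step, but only when converting from an integer
# dtype, so it would have to be applied right after decode_jpeg while the tensor is still uint8:
# image = tf.image.convert_image_dtype(image, tf.float32)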

def test_load_image(path, label):
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=1)
    image = tf.image.resize(image, [50, 60])  # note: same size as the training crop
    image = tf.cast(image, tf.float32)
    image = image / 255
    return image, label


# When there is too much data to load into memory at once,
# tf.data.Dataset can be used to build a streaming input pipeline
# that is then fed to the model for training (model.fit(dataset))

# Create the datasets
train_image_ds =tf.data.Dataset.from_tensor_slices((train_image_path ,train_image_label))
test_image_ds =tf.data.Dataset.from_tensor_slices((test_image_path ,test_image_label))
# Each dataset element pairs one image path with its label
# https://blog.csdn.net/Dr_jiabin/article/details/93366661

AUTOTUNE =tf.data.experimental.AUTOTUNE
# AUTOTUNE lets tf.data pick the degree of parallelism (and prefetch buffer size) at runtime
train_image_ds =train_image_ds.map(train_load_image ,num_parallel_calls=AUTOTUNE)
test_image_ds =test_image_ds.map(test_load_image ,num_parallel_calls=AUTOTUNE)
# tf.data.Dataset.map(map_func, num_parallel_calls=None)
# map_func: a user-defined function that takes an element of the original Dataset and returns the corresponding element of the new Dataset
# num_parallel_calls: number of elements to process in parallel; by default elements are processed sequentially
# https://dothinking.github.io/2020-02-27-TensorFlow%E8%87%AA%E5%AE%9A%E4%B9%89%E6%95%B0%E6%8D%AE%E9%9B%86%EF%BC%9Atf.data.Dataset/

BATCH_SIZE =32
# Batch size: the number of training samples drawn from the training set for each training step
# https://www.cnblogs.com/key1994/p/11898304.html

#==========================================================================================
train_count = len(train_image_path)
# Number of training samples

train_image_ds = train_image_ds.shuffle(train_count).batch(BATCH_SIZE)
# shuffle(buffer_size, seed=None, reshuffle_each_iteration=None)
# buffer_size: size of the shuffle buffer. Elements are filled into the buffer in order and drawn from it at random, so a larger buffer_size shuffles more thoroughly at the cost of memory and speed.
# seed: random seed used for shuffling
# reshuffle_each_iteration: whether to reshuffle automatically on every pass over the data (default True), so different epochs do not see the Dataset in the same order

# batch(batch_size, drop_remainder=False)
# batch_size: size of each batch
# drop_remainder: whether to drop the final batch when the number of samples is not evenly divisible by batch_size; kept by default

train_image_ds =train_image_ds.prefetch(AUTOTUNE)
# Dataset.prefetch(buffer_size) overlaps data preparation on the CPU with training on the GPU
# buffer_size: the maximum number of elements to buffer while prefetching

test_image_ds =test_image_ds.batch(BATCH_SIZE)
test_image_ds =test_image_ds.prefetch(AUTOTUNE)
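# Optional sanity check (a sketch): pull a single batch to confirm the shapes the model will see.
# for imgs, labels in train_image_ds.take(1):
#     print(imgs.shape, labels.shape)   # expected: (32, 50, 60, 1) (32,)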

# Define the model: a small LeNet-style CNN (the second script below builds a larger VGG-style model)
# https://tensorflow.google.cn/api_docs/python/tf/keras/layers/AveragePooling2D?hl=zh-cn
model =keras.Sequential()
# Build the model with the Keras Sequential API (PyTorch or low-level TensorFlow would also work, though the frameworks differ considerably)
# Layers are stacked one after another
# The first layer must declare the input shape
model.add(tf.keras.layers.Conv2D(filters=6,kernel_size=(5 ,5),input_shape=(50,60,1),activation='relu'))
# https://blog.csdn.net/godot06/article/details/105054657
# Convolutional layer
model.add(tf.keras.layers.MaxPool2D(padding="same",pool_size=(2,2)))
# Pooling layer

model.add(tf.keras.layers.Conv2D(16,kernel_size=(5,5),activation='relu'))
model.add(tf.keras.layers.MaxPool2D(padding="same",pool_size=(2,2)))

model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(120, activation='relu'))
model.add(tf.keras.layers.Dense(1))
model.summary()

## Define the loss function (from_logits=True because the final Dense layer has no activation and therefore outputs raw logits)
loss_func =tf.keras.losses.BinaryCrossentropy(from_logits=True)
# tf.keras.losses.BinaryCrossentropy(
#     from_logits=False, label_smoothing=0.0, axis=-1,
#     reduction=losses_utils.ReductionV2.AUTO, name='binary_crossentropy'
# )
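# Quick check of what from_logits=True means (a sketch, not part of the training script): the loss
# applies the sigmoid internally, so the two calls below should give the same value.
# logits = tf.constant([[2.0], [-1.0]])
# labels = tf.constant([[1.0], [0.0]])
# tf.keras.losses.BinaryCrossentropy(from_logits=True)(labels, logits)
# tf.keras.losses.BinaryCrossentropy(from_logits=False)(labels, tf.sigmoid(logits))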

# Define the optimizer
optimizer =tf.keras.optimizers.Adam()
# keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
# learning_rate: float >= 0, the learning rate (the older Keras argument name was lr)
# beta_1: float, 0 < beta < 1, usually close to 1; exponential decay rate for the first-moment estimates
# beta_2: float, 0 < beta < 1, usually close to 1; exponential decay rate for the second-moment estimates
# epsilon: float >= 0, fuzz factor; if None it defaults to K.epsilon(). A very small number that prevents division by zero in the implementation.
# When training with gradient descent, the learning rate is a key hyperparameter,
# because it controls how quickly learning proceeds (it can be seen as the step size).
# If the learning rate is too large, the optimizer may overshoot the optimum,
# while if it is too small, optimization becomes inefficient and training takes much longer,
# so the learning rate matters a great deal for performance.
# keras.optimizers.Adam() addresses this by maintaining adaptive, per-parameter learning rates
# derived from running estimates of the first and second moments of the gradients,
# which usually works well without manually tuning a decay schedule.
# https://blog.csdn.net/I_canjnu/article/details/106035640
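# A hedged sketch of setting the learning rate explicitly (TF2 uses learning_rate rather than lr),
# optionally with a decay schedule; the script above simply keeps the Adam defaults.
# lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
#     initial_learning_rate=1e-3, decay_steps=1000, decay_rate=0.96)
# optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)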

# Define the metrics
epoch_train_loss =tf.keras.metrics.Mean('train_loss')
# tf.keras.metrics.Mean(
#     name='mean', dtype=None
# )
# name: (optional) string name of the metric instance.
# dtype: (optional) data type of the metric result.

epoch_train_acc =tf.keras.metrics.Accuracy('train_acc')
# tf.keras.metrics.Accuracy(
#     name='accuracy', dtype=None
# )
# name: (optional) string name of the metric instance.
# dtype: (optional) data type of the metric result.

epoch_test_loss =tf.keras.metrics.Mean('test_loss')
epoch_test_acc =tf.keras.metrics.Accuracy('test_acc')
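# How these metric objects behave (a small sketch): every call accumulates state, result() returns
# the running value, and reset_states() clears it again.
# m = tf.keras.metrics.Accuracy()
# m.update_state([1, 0, 1], [1, 1, 1])
# print(m.result().numpy())   # 0.6666667 -- two of the three predictions match
# m.reset_states()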

## Define the training step
def train_step(model, images, labels):
    # tf.GradientTape(
    #     persistent=False, watch_accessed_variables=True
    # )
    with tf.GradientTape() as t:
        pred = model(images)
        batch_loss = loss_func(labels, pred)
    # print(pred)
    grads = t.gradient(batch_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    epoch_train_loss(batch_loss)
    pred_val =[]
    for p in pred:
        if p> 0:
            pred_val.append(1)
        else:
            pred_val.append(0)
    # print(pred_val)
    # print(labels)
    epoch_train_acc(labels, tf.cast(pred_val, tf.int32))

# The model's last layer outputs raw logits, so predictions are thresholded at 0; if a sigmoid were added to the output layer, values above 0.5 would map to 1 and the rest to 0
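# A vectorized alternative to the Python loops above (a sketch producing the same 0/1 values):
# threshold the raw logits at 0 with tensor ops instead of iterating element by element.
# pred_val = tf.cast(tf.reshape(pred, [-1]) > 0, tf.int32)
# epoch_train_acc(labels, pred_val)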
## Define the test step
def test_step(model, images, labels):
    pred = model(images)
    batch_loss = loss_func(labels, pred)
    epoch_test_loss(batch_loss)
    # print(pred)
    pred_val = []
    for p in pred:
        if p > 0:
            pred_val.append(1)
        else:
            pred_val.append(0)
    # print(pred_val)
    # print(labels)
    epoch_test_acc(labels, tf.cast(pred_val, tf.int32))


all_train_loss_result_list = []
all_train_acc_result_list = []

all_test_loss_result_list = []
all_test_acc_result_list = []

## Training loop
num_epochs = 30
for epoch in range(num_epochs):
    batch_num = 0
    for imgs, labels in train_image_ds:
        batch_num = batch_num + 1
        train_step(model, imgs, labels)
        # print('train_batch:{} finish'.format(batch_num))

    all_train_loss_result_list.append(epoch_train_loss.result())
    all_train_acc_result_list.append(epoch_train_acc.result())

    print('Epoch:{}, loss:{}, acc:{:.3f}'.format(epoch+1, epoch_train_loss.result(), epoch_train_acc.result()))

    epoch_train_loss.reset_states()
    epoch_train_acc.reset_states()
    # reset_states() clears the accumulated metric state

    batch_num = 0
    for imgs, labels in test_image_ds:
        batch_num = batch_num + 1
        test_step(model, imgs, labels)
        # print('test_batch:{} finish'.format(batch_num))

    all_test_loss_result_list.append(epoch_test_loss.result())
    all_test_acc_result_list.append(epoch_test_acc.result())
    print('Epoch:{}, val_loss:{}, val_acc:{:.3f}'.format(epoch+1, epoch_test_loss.result(), epoch_test_acc.result()))
    epoch_test_loss.reset_states()
    epoch_test_acc.reset_states()

plt.plot(range(1,num_epochs+1),all_train_acc_result_list,label='acc')
plt.plot(range(1,num_epochs+1),all_test_acc_result_list,label='val_acc')
plt.legend()
plt.show()

plt.plot(range(1,num_epochs+1),all_train_loss_result_list,label='loss')
plt.plot(range(1,num_epochs+1),all_test_loss_result_list,label='val_loss')
plt.legend()
plt.show()

model.save('51fit')
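# The trained model can later be reloaded without re-training (a sketch; '51fit' is the directory
# written by model.save above):
# reloaded = tf.keras.models.load_model('51fit')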


def load_img(path):
    img_raw = tf.io.read_file(path)
    img_tensor = tf.image.decode_jpeg(img_raw, channels=1)
    img_tensor = tf.image.resize(img_tensor, [50, 60])
    img_tensor = tf.cast(img_tensor, tf.float32)
    img_tensor = img_tensor / 255
    return img_tensor


def predict(path):
    img = load_img(path)
    img = tf.expand_dims(img, axis=0)
    result = model.predict(img)
    print(result)
    if (result > 0):
        print('cat=1')
    else:
        print('dog=0')



predict('00.jpg')
predict('0.jpg')
predict('1.jpg')
predict('11.jpg')




# ==========================================================================================
# Second script: a larger VGG-style model trained on 256x256 RGB images
# ==========================================================================================
# Import libraries
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import os
import numpy as np
import glob
# D:/JetBrains/PycharmProjectsuntitled/data1
# Collect the image file paths
train_image_path =glob.glob('../data1/train/*/*.jpg')
test_image_path =glob.glob('../data1/test/*/*.jpg')
# glob.glob() returns a list of all file paths matching the pattern
# e.g. one matched path here looks like "../data1/train\\cat\\0.jpg" (Windows-style separators)
# Assign labels (1 = cat, 0 = dog)
train_image_label = [int(p.split('\\')[1] == 'cat') for p in train_image_path]
test_image_label = [int(p.split('\\')[1] == 'cat') for p in test_image_path]

# # Or assign labels with explicit loops (commented out; this pattern also extends to multi-class labeling)
# train_image_label=[]
# for p in train_image_path:
#     if(p.split('\\')[1]=='cat'):
#         train_image_label.append(1)
#     else:
#         train_image_label.append(0)
#
# test_image_label=[]
# for p in test_image_path:
#     if(p.split('\\')[1]=='cat'):
#         test_image_label.append(1)
#     else:
#         test_image_label.append(0)

# int(p.split('\\')[1]=='cat'): splitting p on '\\' puts the class folder name at index 1, which is either 'cat' or 'dog'

# (Commented-out) earlier version of the image-loading function
# def load_image(path,label):
#     image=tf.io.read_file(path)
#     image=tf.image.decode_jpeg(image,channels=3)
#     image=tf.image.resize(image,[60,80])# note the image size
#     image=tf.cast(image,tf.float32)
#     image=image/255
#     return image,label

# Functions that load and preprocess the dataset images
def train_load_image(path, label):
    image = tf.io.read_file(path)
    # Read the raw file
    image = tf.image.decode_jpeg(image, channels=3)
    # Decode the JPEG into a uint8 tensor; channels=3 yields an RGB image
    image = tf.image.resize(image, [360, 360])
    # Resize to 360x360
    image = tf.image.random_crop(image, [256, 256, 3])
    # Randomly crop to 256x256 (data augmentation to reduce overfitting)
    image = tf.image.random_flip_left_right(image)
    # Random horizontal flip
    image = tf.image.random_flip_up_down(image)
    # Random vertical flip
    image = tf.image.random_brightness(image, 0.3)
    # Random brightness adjustment; 0.3 is the maximum delta
    image = tf.image.random_contrast(image, 0.2, 0.4)
    # Random contrast adjustment; the first argument is the image to transform
    image = tf.cast(image, tf.float32)
    # Note: tf.image.resize already returns float32, so this cast is effectively a no-op
    image = image / 255
    # Normalize to [0, 1]
    # The cast + normalization can also be done with tf.image.convert_image_dtype()
    return image, label

def test_load_image(path, label):
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [256, 256])  # note: same size as the training crop
    image = tf.cast(image, tf.float32)
    image = image / 255
    return image, label


# When there is too much data to load into memory at once,
# tf.data.Dataset can be used to build a streaming input pipeline
# that is then fed to the model for training (model.fit(dataset))

# Create the datasets
train_image_ds =tf.data.Dataset.from_tensor_slices((train_image_path ,train_image_label))
test_image_ds =tf.data.Dataset.from_tensor_slices((test_image_path ,test_image_label))
# Each dataset element pairs one image path with its label
# https://blog.csdn.net/Dr_jiabin/article/details/93366661

AUTOTUNE =tf.data.experimental.AUTOTUNE
# AUTOTUNE lets tf.data pick the degree of parallelism (and prefetch buffer size) at runtime
train_image_ds =train_image_ds.map(train_load_image ,num_parallel_calls=AUTOTUNE)
test_image_ds =test_image_ds.map(test_load_image ,num_parallel_calls=AUTOTUNE)
# tf.data.Dataset.map(map_func, num_parallel_calls=None)
# map_func: a user-defined function that takes an element of the original Dataset and returns the corresponding element of the new Dataset
# num_parallel_calls: number of elements to process in parallel; by default elements are processed sequentially
# https://dothinking.github.io/2020-02-27-TensorFlow%E8%87%AA%E5%AE%9A%E4%B9%89%E6%95%B0%E6%8D%AE%E9%9B%86%EF%BC%9Atf.data.Dataset/

BATCH_SIZE =16
# Batch size: the number of training samples drawn from the training set for each training step
# https://www.cnblogs.com/key1994/p/11898304.html
train_count = len(train_image_path)
# Number of training samples

train_image_ds = train_image_ds.shuffle(train_count).batch(BATCH_SIZE)
# shuffle(buffer_size, seed=None, reshuffle_each_iteration=None)
# buffer_size: size of the shuffle buffer. Elements are filled into the buffer in order and drawn from it at random, so a larger buffer_size shuffles more thoroughly at the cost of memory and speed.
# seed: random seed used for shuffling
# reshuffle_each_iteration: whether to reshuffle automatically on every pass over the data (default True), so different epochs do not see the Dataset in the same order

# batch(batch_size, drop_remainder=False)
# batch_size: size of each batch
# drop_remainder: whether to drop the final batch when the number of samples is not evenly divisible by batch_size; kept by default

train_image_ds =train_image_ds.prefetch(AUTOTUNE)
# Dataset.prefetch(buffer_size) overlaps data preparation on the CPU with training on the GPU
# buffer_size: the maximum number of elements to buffer while prefetching

test_image_ds =test_image_ds.batch(BATCH_SIZE)
test_image_ds =test_image_ds.prefetch(AUTOTUNE)

# Define the model: a VGG-style CNN
# https://tensorflow.google.cn/api_docs/python/tf/keras/layers/AveragePooling2D?hl=zh-cn
model =keras.Sequential()
# Build the model with the Keras Sequential API (PyTorch or low-level TensorFlow would also work, though the frameworks differ considerably)
# Layers are stacked one after another
# The first layer must declare the input shape
model.add(tf.keras.layers.Conv2D(filters=64,kernel_size=(3 ,3),input_shape=(256,256,3),activation='relu'))
# https://blog.csdn.net/godot06/article/details/105054657
model.add(tf.keras.layers.Conv2D(filters=64,kernel_size=(3 ,3),padding="same",activation='relu'))
# Convolutional layer
model.add(tf.keras.layers.MaxPool2D(padding="same"))
# Pooling layer

model.add(tf.keras.layers.Conv2D(filters=128,kernel_size=(3 ,3),padding="same",activation='relu'))
model.add(tf.keras.layers.Conv2D(filters=128,kernel_size=(3 ,3) ,padding="same",activation='relu'))
model.add(tf.keras.layers.MaxPool2D(padding="same"))

model.add(tf.keras.layers.Conv2D(filters=256,kernel_size=(3 ,3) ,padding="same",activation='relu'))
model.add(tf.keras.layers.Conv2D(filters=256,kernel_size=(3 ,3) ,padding="same",activation='relu'))
model.add(tf.keras.layers.MaxPool2D(padding="same"))

model.add(tf.keras.layers.Conv2D(filters=512,kernel_size=(3 ,3) ,padding="same",activation='relu'))
model.add(tf.keras.layers.Conv2D(filters=512,kernel_size=(3 ,3) ,padding="same",activation='relu'))
model.add(tf.keras.layers.Conv2D(filters=512,kernel_size=(3 ,3) ,padding="same",activation='relu'))
model.add(tf.keras.layers.Conv2D(filters=512,kernel_size=(3 ,3) ,padding="same",activation='relu'))
model.add(tf.keras.layers.Conv2D(filters=512,kernel_size=(3 ,3) ,padding="same",activation='relu'))
model.add(tf.keras.layers.Conv2D(filters=512,kernel_size=(3 ,3) ,padding="same",activation='relu'))
model.add(tf.keras.layers.Conv2D(filters=512,kernel_size=(3 ,3) ,padding="same",activation='relu'))
model.add(tf.keras.layers.MaxPool2D(padding="same"))

model.add(tf.keras.layers.GlobalAveragePooling2D())
# Fully connected layers
model.add(tf.keras.layers.Dense(256,activation='relu'))
model.add(tf.keras.layers.Dense(1))

model.summary()
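# A more compact way to build the same stack of Conv blocks (a sketch with an equivalent layer
# structure, not the author's original code):
# model = keras.Sequential()
# model.add(tf.keras.layers.Conv2D(64, (3, 3), input_shape=(256, 256, 3), activation='relu'))
# for filters, n_convs in [(64, 1), (128, 2), (256, 2), (512, 7)]:
#     for _ in range(n_convs):
#         model.add(tf.keras.layers.Conv2D(filters, (3, 3), padding='same', activation='relu'))
#     model.add(tf.keras.layers.MaxPool2D(padding='same'))
# model.add(tf.keras.layers.GlobalAveragePooling2D())
# model.add(tf.keras.layers.Dense(256, activation='relu'))
# model.add(tf.keras.layers.Dense(1))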

## Define the loss function (from_logits=True because the final Dense layer has no activation and therefore outputs raw logits)
loss_func =tf.keras.losses.BinaryCrossentropy(from_logits=True)
# tf.keras.losses.BinaryCrossentropy(
#     from_logits=False, label_smoothing=0.0, axis=-1,
#     reduction=losses_utils.ReductionV2.AUTO, name='binary_crossentropy'
# )

# Define the optimizer
optimizer =tf.keras.optimizers.Adam()
# keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
# learning_rate: float >= 0, the learning rate (the older Keras argument name was lr)
# beta_1: float, 0 < beta < 1, usually close to 1; exponential decay rate for the first-moment estimates
# beta_2: float, 0 < beta < 1, usually close to 1; exponential decay rate for the second-moment estimates
# epsilon: float >= 0, fuzz factor; if None it defaults to K.epsilon(). A very small number that prevents division by zero in the implementation.
# When training with gradient descent, the learning rate is a key hyperparameter,
# because it controls how quickly learning proceeds (it can be seen as the step size).
# If the learning rate is too large, the optimizer may overshoot the optimum,
# while if it is too small, optimization becomes inefficient and training takes much longer,
# so the learning rate matters a great deal for performance.
# keras.optimizers.Adam() addresses this by maintaining adaptive, per-parameter learning rates
# derived from running estimates of the first and second moments of the gradients,
# which usually works well without manually tuning a decay schedule.
# https://blog.csdn.net/I_canjnu/article/details/106035640

# Define the metrics
epoch_train_loss =tf.keras.metrics.Mean('train_loss')
# tf.keras.metrics.Mean(
#     name='mean', dtype=None
# )
# name: (optional) string name of the metric instance.
# dtype: (optional) data type of the metric result.

epoch_train_acc =tf.keras.metrics.Accuracy('train_acc')
# tf.keras.metrics.Accuracy(
#     name='accuracy', dtype=None
# )
# name: (optional) string name of the metric instance.
# dtype: (optional) data type of the metric result.

epoch_test_loss =tf.keras.metrics.Mean('test_loss')
epoch_test_acc =tf.keras.metrics.Accuracy('test_acc')

## Define the training step
def train_step(model, images, labels):
    # tf.GradientTape(
    #     persistent=False, watch_accessed_variables=True
    # )
    with tf.GradientTape() as t:
        pred = model(images)
        batch_loss = loss_func(labels, pred)
    # print(pred)
    grads = t.gradient(batch_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    epoch_train_loss(batch_loss)
    pred_val =[]
    for p in pred:
        if p> 0:
            pred_val.append(1)
        else:
            pred_val.append(0)
    # print(pred_val)
    # print(labels)
    epoch_train_acc(labels, tf.cast(pred_val, tf.int32))

# The model's last layer outputs raw logits, so predictions are thresholded at 0; if a sigmoid were added to the output layer, values above 0.5 would map to 1 and the rest to 0
## Define the test step
def test_step(model, images, labels):
    pred = model(images)
    batch_loss = loss_func(labels, pred)
    epoch_test_loss(batch_loss)
    # print(pred)
    pred_val = []
    for p in pred:
        if p > 0:
            pred_val.append(1)
        else:
            pred_val.append(0)
    # print(pred_val)
    # print(labels)
    epoch_test_acc(labels, tf.cast(pred_val, tf.int32))


all_train_loss_result_list = []
all_train_acc_result_list = []

all_test_loss_result_list = []
all_test_acc_result_list = []

## Training loop
num_epochs = 2
for epoch in range(num_epochs):
    batch_num = 0
    for imgs, labels in train_image_ds:
        batch_num = batch_num + 1
        train_step(model, imgs, labels)
        # print('train_batch:{} finish'.format(batch_num))

    all_train_loss_result_list.append(epoch_train_loss.result())
    all_train_acc_result_list.append(epoch_train_acc.result())

    print('Epoch:{}, loss:{}, acc:{:.3f}'.format(epoch+1, epoch_train_loss.result(), epoch_train_acc.result()))

    epoch_train_loss.reset_states()
    epoch_train_acc.reset_states()
    # reset_states() clears the accumulated metric state

    batch_num = 0
    for imgs, labels in test_image_ds:
        batch_num = batch_num + 1
        test_step(model, imgs, labels)
        # print('test_batch:{} finish'.format(batch_num))

    all_test_loss_result_list.append(epoch_test_loss.result())
    all_test_acc_result_list.append(epoch_test_acc.result())
    print('Epoch:{}, val_loss:{}, val_acc:{:.3f}'.format(epoch+1, epoch_test_loss.result(), epoch_test_acc.result()))
    epoch_test_loss.reset_states()
    epoch_test_acc.reset_states()

# plt.plot(range(1,num_epochs+1),all_train_acc_result_list,label='acc')
# plt.plot(range(1,num_epochs+1),all_test_acc_result_list,label='val_acc')
# plt.legend()
# plt.show()
#
# plt.plot(range(1,num_epochs+1),all_train_loss_result_list,label='loss')
# plt.plot(range(1,num_epochs+1),all_test_loss_result_list,label='val_loss')
# plt.legend()
# plt.show()

model.save('0_or_1_256')


def load_img(path):
    img_raw = tf.io.read_file(path)
    img_tensor = tf.image.decode_jpeg(img_raw, channels=3)
    img_tensor = tf.image.resize(img_tensor, [256, 256])
    img_tensor = tf.cast(img_tensor, tf.float32)
    img_tensor = img_tensor / 255
    return img_tensor


def predict(path):
    img = load_img(path)
    img = tf.expand_dims(img, axis=0)
    result = model.predict(img)
    print(result)
    if (result > 0):
        print('cat=1')
    else:
        print('dog=0')



predict('00.jpg')
predict('0.jpg')
predict('1.jpg')
predict('11.jpg')


