import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import os
import numpy as np
import glob
# Collect image paths; the class label comes from the parent directory name.
train_image_path = glob.glob('../data1/train/*/*.jpg')
test_image_path = glob.glob('../data1/test/*/*.jpg')


def label_from_path(p):
    """Return 1 for a 'cat' image, 0 otherwise.

    Reads the parent directory name via os.path, which handles both '/'
    and '\\' separators; the original split on '\\' and raised IndexError
    on POSIX paths.
    """
    return int(os.path.basename(os.path.dirname(p)) == 'cat')


train_image_label = [label_from_path(p) for p in train_image_path]
test_image_label = [label_from_path(p) for p in test_image_path]
def train_load_image(path, label):
    """Load, augment and normalize one grayscale training image.

    Args:
        path: scalar string tensor, path to a jpeg file.
        label: passed through unchanged.

    Returns:
        (image, label) where image is a float32 [50, 60, 1] tensor in [0, 1].
    """
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=1)
    image = tf.image.resize(image, [60, 72])
    image = tf.image.random_crop(image, [50, 60, 1])
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_flip_up_down(image)
    # Normalize BEFORE the photometric jitter: random_brightness adds an
    # absolute delta, so +/-0.3 on [0, 255] pixel values was effectively
    # a no-op in the original ordering.
    image = tf.cast(image, tf.float32) / 255
    image = tf.image.random_brightness(image, 0.3)
    image = tf.image.random_contrast(image, 0.2, 0.4)
    # Jitter can push values outside [0, 1]; clamp so the network always
    # sees a consistent input range.
    image = tf.clip_by_value(image, 0.0, 1.0)
    return image, label
def test_load_image(path, label):
    """Deterministically load one evaluation image.

    Decodes the jpeg as a single grayscale channel, resizes to the network
    input size and scales pixels into [0, 1]; no augmentation is applied.
    """
    raw = tf.io.read_file(path)
    img = tf.image.decode_jpeg(raw, channels=1)
    img = tf.image.resize(img, [50, 60])
    return tf.cast(img, tf.float32) / 255, label
# Input pipelines: shuffled + batched for training, plain batching for
# evaluation; prefetch overlaps preprocessing with training.
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32

train_image_ds = (
    tf.data.Dataset.from_tensor_slices((train_image_path, train_image_label))
    .map(train_load_image, num_parallel_calls=AUTOTUNE)
    .shuffle(len(train_image_path))  # buffer covers the whole train set
    .batch(BATCH_SIZE)
    .prefetch(AUTOTUNE)
)
test_image_ds = (
    tf.data.Dataset.from_tensor_slices((test_image_path, test_image_label))
    .map(test_load_image, num_parallel_calls=AUTOTUNE)
    .batch(BATCH_SIZE)
    .prefetch(AUTOTUNE)
)
# LeNet-style CNN: two conv/pool stages, then a small dense head emitting
# a single raw logit (binary cat-vs-dog classification).
model = keras.Sequential([
    keras.layers.Conv2D(6, (5, 5), activation='relu', input_shape=(50, 60, 1)),
    keras.layers.MaxPool2D(pool_size=(2, 2), padding="same"),
    keras.layers.Conv2D(16, (5, 5), activation='relu'),
    keras.layers.MaxPool2D(pool_size=(2, 2), padding="same"),
    keras.layers.Flatten(),
    keras.layers.Dense(120, activation='relu'),
    keras.layers.Dense(1),  # logit; the loss uses from_logits=True
])
model.summary()
# Logit-based binary cross-entropy, Adam, and per-epoch running metrics.
loss_func = keras.losses.BinaryCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam()

epoch_train_loss = keras.metrics.Mean('train_loss')
epoch_train_acc = keras.metrics.Accuracy('train_acc')
epoch_test_loss = keras.metrics.Mean('test_loss')
epoch_test_acc = keras.metrics.Accuracy('test_acc')
def train_step(model, images, labels):
    """Run one optimization step and update the training metrics.

    Bug fix: the batch loss was being accumulated into epoch_test_loss,
    so the reported per-epoch train loss never reflected training batches.
    """
    with tf.GradientTape() as t:
        pred = model(images)
        batch_loss = loss_func(labels, pred)
    grads = t.gradient(batch_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    epoch_train_loss(batch_loss)  # was epoch_test_loss in the original
    # logit > 0  <=>  sigmoid(logit) > 0.5  <=>  predicted class 1 (cat).
    # Flatten the (batch, 1) logits so Accuracy compares elementwise with
    # the (batch,) labels instead of broadcasting.
    pred_val = tf.cast(tf.reshape(pred, [-1]) > 0, tf.int32)
    epoch_train_acc(labels, pred_val)
def test_step(model, images, labels):
    """Evaluate one batch, accumulating the test loss and accuracy metrics."""
    logits = model(images)
    epoch_test_loss(loss_func(labels, logits))
    # Threshold each logit at 0 (== probability 0.5 after the sigmoid).
    predicted = [1 if logit > 0 else 0 for logit in logits]
    epoch_test_acc(labels, tf.cast(predicted, tf.int32))
# Per-epoch metric histories for the plots below.
all_train_loss_result_list = []
all_train_acc_result_list = []
all_test_loss_result_list = []
all_test_acc_result_list = []
num_epochs = 30

for epoch in range(num_epochs):
    # --- training pass ---
    for imgs, labels in train_image_ds:  # dead batch_num counter removed
        train_step(model, imgs, labels)
    all_train_loss_result_list.append(epoch_train_loss.result())
    all_train_acc_result_list.append(epoch_train_acc.result())
    print('Epoch:{},loss:{},acc{:.3f}'.format(
        epoch + 1, epoch_train_loss.result(), epoch_train_acc.result()))
    epoch_train_loss.reset_states()
    epoch_train_acc.reset_states()

    # --- evaluation pass ---
    for imgs, labels in test_image_ds:
        test_step(model, imgs, labels)
    all_test_loss_result_list.append(epoch_test_loss.result())
    all_test_acc_result_list.append(epoch_test_acc.result())
    print('Epoch:{},loss:{},acc{:.3f}'.format(
        epoch + 1, epoch_test_loss.result(), epoch_test_acc.result()))
    epoch_test_loss.reset_states()
    epoch_test_acc.reset_states()
# Plot accuracy curves, then loss curves, then persist the trained model.
epochs_axis = range(1, num_epochs + 1)
plt.plot(epochs_axis, all_train_acc_result_list, label='acc')
plt.plot(epochs_axis, all_test_acc_result_list, label='val_acc')
plt.legend()
plt.show()
plt.plot(epochs_axis, all_train_loss_result_list, label='loss')
plt.plot(epochs_axis, all_test_loss_result_list, label='val_loss')
plt.legend()
plt.show()
model.save('51fit')
def load_img(path):
    """Read one jpeg from disk as a normalized float32 [50, 60, 1] tensor."""
    raw = tf.io.read_file(path)
    tensor = tf.image.decode_jpeg(raw, channels=1)
    tensor = tf.image.resize(tensor, [50, 60])
    return tf.cast(tensor, tf.float32) / 255
def predict(path):
    """Classify a single image file, printing the raw logit and the label."""
    batch = tf.expand_dims(load_img(path), axis=0)  # add a batch dimension
    result = model.predict(batch)
    print(result)
    # Positive logit => sigmoid > 0.5 => cat.
    print('cat=1' if result > 0 else 'dog=0')
# Spot-check a few sample files.
for sample in ('00.jpg', '0.jpg', '1.jpg', '11.jpg'):
    predict(sample)
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import os
import numpy as np
import glob
# Rebuild the path lists and labels for the RGB / 256x256 run.
train_image_path = glob.glob('../data1/train/*/*.jpg')
test_image_path = glob.glob('../data1/test/*/*.jpg')
# Label from the parent directory name (cat -> 1, else 0). os.path handles
# both '/' and '\\' separators; the original split('\\')[1] raised
# IndexError on POSIX paths.
train_image_label = [int(os.path.basename(os.path.dirname(p)) == 'cat')
                     for p in train_image_path]
test_image_label = [int(os.path.basename(os.path.dirname(p)) == 'cat')
                    for p in test_image_path]
def train_load_image(path, label):
    """Load, augment and normalize one RGB training image.

    Args:
        path: scalar string tensor, path to a jpeg file.
        label: passed through unchanged.

    Returns:
        (image, label) with image a float32 [256, 256, 3] tensor in [0, 1].
    """
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [360, 360])
    image = tf.image.random_crop(image, [256, 256, 3])
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_flip_up_down(image)
    # Normalize before the photometric jitter: random_brightness adds an
    # absolute delta, so +/-0.3 on [0, 255] pixel values was effectively
    # a no-op in the original ordering.
    image = tf.cast(image, tf.float32) / 255
    image = tf.image.random_brightness(image, 0.3)
    image = tf.image.random_contrast(image, 0.2, 0.4)
    image = tf.clip_by_value(image, 0.0, 1.0)  # jitter can leave [0, 1]
    return image, label
def test_load_image(path, label):
    """Deterministically load one RGB evaluation image.

    Decodes the jpeg with three channels, resizes to the network input
    size and scales pixels into [0, 1]; no augmentation is applied.
    """
    raw = tf.io.read_file(path)
    img = tf.image.decode_jpeg(raw, channels=3)
    img = tf.image.resize(img, [256, 256])
    return tf.cast(img, tf.float32) / 255, label
# Input pipelines for the RGB run; smaller batch because the images and
# the model below are much larger.
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 16

train_image_ds = (
    tf.data.Dataset.from_tensor_slices((train_image_path, train_image_label))
    .map(train_load_image, num_parallel_calls=AUTOTUNE)
    .shuffle(len(train_image_path))  # buffer covers the whole train set
    .batch(BATCH_SIZE)
    .prefetch(AUTOTUNE)
)
test_image_ds = (
    tf.data.Dataset.from_tensor_slices((test_image_path, test_image_label))
    .map(test_load_image, num_parallel_calls=AUTOTUNE)
    .batch(BATCH_SIZE)
    .prefetch(AUTOTUNE)
)
# VGG-flavoured stack: conv widths double per stage and each later stage
# ends in a 2x2 max-pool; global average pooling feeds a small dense head
# that emits one raw logit.
model = keras.Sequential()
model.add(keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(256, 256, 3)))
model.add(keras.layers.Conv2D(64, (3, 3), padding="same", activation='relu'))
model.add(keras.layers.MaxPool2D(padding="same"))
# (filters, number of consecutive conv layers) per stage
for filters, repeats in ((128, 2), (256, 2), (512, 7)):
    for _ in range(repeats):
        model.add(keras.layers.Conv2D(filters, (3, 3), padding="same", activation='relu'))
    model.add(keras.layers.MaxPool2D(padding="same"))
model.add(keras.layers.GlobalAveragePooling2D())
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(1))  # logit; the loss uses from_logits=True
model.summary()
# Logit-based binary cross-entropy, Adam, and per-epoch running metrics.
loss_func = keras.losses.BinaryCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam()

epoch_train_loss = keras.metrics.Mean('train_loss')
epoch_train_acc = keras.metrics.Accuracy('train_acc')
epoch_test_loss = keras.metrics.Mean('test_loss')
epoch_test_acc = keras.metrics.Accuracy('test_acc')
def train_setp(model, images, labels):
    """Run one optimization step and update the training metrics.

    (Misspelled name kept as-is: the training loop below calls train_setp.)

    Bug fix: the batch loss was being accumulated into epoch_test_loss,
    so the reported per-epoch train loss never reflected training batches.
    """
    with tf.GradientTape() as t:
        pred = model(images)
        batch_loss = loss_func(labels, pred)
    grads = t.gradient(batch_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    epoch_train_loss(batch_loss)  # was epoch_test_loss in the original
    # logit > 0  <=>  sigmoid(logit) > 0.5  <=>  predicted class 1 (cat).
    # Flatten the (batch, 1) logits so Accuracy compares elementwise with
    # the (batch,) labels instead of broadcasting.
    pred_val = tf.cast(tf.reshape(pred, [-1]) > 0, tf.int32)
    epoch_train_acc(labels, pred_val)
def test_step(model, images, labels):
    """Evaluate one batch, accumulating the test loss and accuracy metrics."""
    logits = model(images)
    epoch_test_loss(loss_func(labels, logits))
    # Threshold each logit at 0 (== probability 0.5 after the sigmoid).
    predicted = [1 if logit > 0 else 0 for logit in logits]
    epoch_test_acc(labels, tf.cast(predicted, tf.int32))
# Per-epoch metric histories.
all_train_loss_result_list = []
all_train_acc_result_list = []
all_test_loss_result_list = []
all_test_acc_result_list = []
num_epochs = 2

for epoch in range(num_epochs):
    # --- training pass ---
    for imgs, labels in train_image_ds:  # dead batch_num counter removed
        train_setp(model, imgs, labels)
    # Bug fix: the train loss was being appended to all_test_loss_result_list,
    # corrupting both histories.
    all_train_loss_result_list.append(epoch_train_loss.result())
    all_train_acc_result_list.append(epoch_train_acc.result())
    print('Epoch:{},loss:{},acc{:.3f}'.format(
        epoch + 1, epoch_train_loss.result(), epoch_train_acc.result()))
    epoch_train_loss.reset_states()
    epoch_train_acc.reset_states()

    # --- evaluation pass ---
    for imgs, labels in test_image_ds:
        test_step(model, imgs, labels)
    all_test_loss_result_list.append(epoch_test_loss.result())
    all_test_acc_result_list.append(epoch_test_acc.result())
    print('Epoch:{},loss:{},acc{:.3f}'.format(
        epoch + 1, epoch_test_loss.result(), epoch_test_acc.result()))
    epoch_test_loss.reset_states()
    epoch_test_acc.reset_states()

model.save('0_or_1_256')
def load_img(path):
    """Read one jpeg from disk as a normalized float32 [256, 256, 3] tensor."""
    raw = tf.io.read_file(path)
    tensor = tf.image.decode_jpeg(raw, channels=3)
    tensor = tf.image.resize(tensor, [256, 256])
    return tf.cast(tensor, tf.float32) / 255
def predict(path):
    """Classify a single image file, printing the raw logit and the label."""
    batch = tf.expand_dims(load_img(path), axis=0)  # add a batch dimension
    result = model.predict(batch)
    print(result)
    # Positive logit => sigmoid > 0.5 => cat.
    print('cat=1' if result > 0 else 'dog=0')
# Spot-check a few sample files.
for sample in ('00.jpg', '0.jpg', '1.jpg', '11.jpg'):
    predict(sample)