Writing CV models with Keras

import tensorflow as tf
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense,GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
import os
from keras.utils import plot_model
from matplotlib import pyplot as plt
tf.__version__   # '1.9.0'
import keras as k
k.__version__    # '2.2.0'

Using TensorBoard in Keras (via callbacks)

RUN = RUN + 1 if 'RUN' in locals() else 1   # locals() returns a dict of all local variables in the current scope

LOG_DIR = model_save_path + '/training_logs/run{}'.format(RUN)  # model_save_path: defined elsewhere
LOG_FILE_PATH = LOG_DIR + '/checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5'   # where the training logs and checkpoint .hdf5 files are stored

from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping

tensorboard = TensorBoard(log_dir=LOG_DIR, write_images=True)
checkpoint = ModelCheckpoint(filepath=LOG_FILE_PATH, monitor='val_loss', verbose=1, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1)

history = model.fit_generator(generator=gen.generate(True), steps_per_epoch=int(gen.train_batches / 4),  # gen: a custom data generator defined elsewhere
                              validation_data=gen.generate(False), validation_steps=int(gen.val_batches / 4),
                              epochs=EPOCHS, verbose=1, callbacks=[tensorboard, checkpoint, early_stopping])
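
With the logs written under LOG_DIR, TensorBoard can then be launched from a terminal, e.g. tensorboard --logdir=<model_save_path>/training_logs, and viewed in the browser.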

Fine-tuning: first freeze the base model and train the new top layers; once they are stable, unfreeze part of the base and train those weights again.

base_model=InceptionV3(weights='imagenet',include_top=False)
base_model.summary()
plot_model(base_model,to_file='InceptionV3.png')

x=base_model.output
x=GlobalAveragePooling2D()(x)
x=Dense(1024,activation='relu')(x)
predictions=Dense(2,activation='softmax')(x)
model=Model(inputs=base_model.input,outputs=predictions)
model.summary()
plot_model(model,to_file='InceptionV3_new.png')
# Freeze the feature-extraction layers (the layers taken from InceptionV3)
for layer in base_model.layers:
    layer.trainable=False
model.compile(optimizer='rmsprop',loss='categorical_crossentropy')

train_datagen=ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
train_generator=train_datagen.flow_from_directory(
    '',  # dataset path/train (one subfolder per class)
    target_size=(150,150),
    batch_size=32,
    class_mode='categorical')

test_datagen=ImageDataGenerator(rescale=1./255)
validation_generator=test_datagen.flow_from_directory(
    'path',  # dataset path/validation
    target_size=(150,150),
    batch_size=32,
    class_mode='categorical')
    
model.fit_generator(
    train_generator,
    steps_per_epoch=2000,
    epochs=1,
    validation_data=validation_generator,
    validation_steps=800)
# Freeze the first 249 layers and fine-tune the remaining layers (the top two inception blocks)
for i,layer in enumerate(base_model.layers):
    print(i,layer.name)
for layer in model.layers[:249]:
    layer.trainable=False
for layer in model.layers[249:]:
    layer.trainable=True
# Recompile the model
from keras.optimizers import SGD
model.compile(optimizer=SGD(lr=0.0001,momentum=0.9),loss='categorical_crossentropy')

model.fit_generator(
    train_generator,
    steps_per_epoch=2000,
    epochs=1,
    validation_data=validation_generator,
    validation_steps=800)
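
After the second round of training, the whole fine-tuned model can be saved to a single file for later inference (the filename here is just an example):

model.save('inceptionv3_finetuned.h5')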
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import image
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import cv2
import yaml
from tensorflow.keras.models import model_from_yaml
batch_size=16


#tf.keras
l=tf.keras.layers

# Below is an AlexNet model
model=Sequential()
model.add(l.Conv2D(filters=96,kernel_size=(11,11),strides=(4,4),padding='valid',\
                  input_shape=(227,227,3),activation='relu'))
model.add(l.BatchNormalization())
model.add(l.MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='valid'))

model.add(l.Conv2D(256,(5,5),(1,1),padding='same',activation='relu'))
model.add(l.BatchNormalization())
model.add(l.MaxPooling2D((3,3),(2,2),padding='valid'))

model.add(l.Conv2D(384,(3,3),(1,1),padding='same',activation='relu'))

model.add(l.Conv2D(384,(3,3),(1,1),padding='same',activation='relu'))

model.add(l.Conv2D(256,(3,3),(1,1),padding='same',activation='relu'))
model.add(l.MaxPooling2D((3,3),(2,2),padding='valid'))
model.add(l.Flatten())
model.add(l.Dense(4096,activation='relu'))
model.add(l.Dropout(0.5))
model.add(l.Dense(4096,activation='relu'))
model.add(l.Dropout(0.5))
model.add(l.Dense(1000,activation='relu'))
model.add(l.Dropout(0.5))
model.add(l.Dense(2,activation='softmax'))
model.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])

datagen=ImageDataGenerator(samplewise_center=True,rescale=1.0/255)
train_generator=datagen.flow_from_directory(
                '',  # parent folder containing the per-class subfolders
                classes=['cat','dog'],
                target_size=(227,227),
                class_mode='categorical',
                batch_size=batch_size)
validation_generator=datagen.flow_from_directory(
                    '',  # validation folder, same per-class layout
                    classes=['cat','dog'],
                    target_size=(227,227),
                    class_mode='categorical',
                    batch_size=batch_size)
model.fit_generator(generator=train_generator,steps_per_epoch=20000//16,epochs=10,
                   validation_data=validation_generator,validation_steps=2496//16)
yaml_string=model.to_yaml()  # save the model architecture to a YAML file
open('./model_architecture.yaml','w').write(yaml_string)
model.save_weights('./model.h5')  # save the model weights
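
A minimal sketch of reloading the saved model later (this is what model_from_yaml, imported above, is for):

with open('./model_architecture.yaml') as f:
    model = model_from_yaml(f.read())  # rebuild the architecture from YAML
model.load_weights('./model.h5')       # restore the trained weights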

imgs=[]
img=cv2.imread('test.jpg')
img=cv2.resize(img,(227,227))
imgs.append(img)
a=np.array(imgs)
result=model.predict(a)  # class probabilities (note: ideally apply the same preprocessing as during training)
idx=np.argmax(result)

if idx==0:
    print('cat\n')
else:
    print('dog\n')
cv2.imshow('image',img)
cv2.waitKey(0)
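
Which index corresponds to which class is determined by the generator; since classes=['cat','dog'] was passed explicitly, index 0 is cat and index 1 is dog. A quick check (a sketch):

print(train_generator.class_indices)  # e.g. {'cat': 0, 'dog': 1}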


import numpy as np
import os
from shutil import copyfile,rmtree
# Data preprocessing: convert the raw flower dataset into the directory layout Keras expects, shown below. Each subfolder represents one class.
'''
data/
    train/
        class1/
            img1
            img2
            ...
        class2/
            img1
            ...
    validation/
        class1/
            img1
            img2
            ...
        class2/
            img1
            ...
    test/
        class1/
            img1
            img2
            ...
        class2/
            img1
            ...
'''

data_folder = '/Users/shidanlifuhetian/All/data/flowers17/'  # raw image directory
new_data_folder = './flowers17/'  # the new folder

# add label first
file_a = open(data_folder+'files.txt',mode='r')
text = file_a.readlines()
file_a.close()
labels = []
file_b = open('./newtext.txt',mode='a')
for i,item in enumerate(text):
    if i%80 ==0:
        class_num = i//80
    t = item.split('\n')
    newtext = t[0]+' flower_'+chr(65+class_num)+'\n'
    print(newtext)
    file_b.write(newtext)
file_b.close()
# split dataset
if os.path.exists(new_data_folder):
    rmtree(new_data_folder)
train_size = 800
val_size = int((1360-train_size)/2)
test_size = int(val_size)

np.random.seed(0)  # fix the seed so the shuffle is reproducible across runs

label_file = open('newtext.txt')

labels = label_file.readlines()
np.random.shuffle(labels)

current_i = 0
def save_images(current_i,phase,d_size):
    if phase == 'train':
        dst_folder = new_data_folder+'train/'
    elif phase == 'test':
        dst_folder = new_data_folder+'test/'
    elif phase == 'validation':
        dst_folder = new_data_folder+'validation/'
    else:
        print('phase error')
        exit()

    for i in range(current_i,current_i+d_size):
        item = labels[i]
        r = item.split(' ')
        img_full_path = data_folder+r[0]
        img_class = r[1].split('\n')[0]
        img_new_path = dst_folder+img_class+'/'+r[0]

        if not os.path.exists(dst_folder+img_class):
            os.makedirs(dst_folder+img_class)
        copyfile(img_full_path,img_new_path)
        print(img_new_path,' copied')
    current_i = i + 1  # advance past the last index used, so the next phase starts fresh

    return current_i

new_i = save_images(current_i=0,phase='train',d_size=train_size)
new_i = save_images(current_i=new_i,phase='test',d_size=test_size)
new_i = save_images(current_i=new_i,phase='validation',d_size=val_size)
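
A quick sanity check (a sketch) that each split received the expected number of images:

for phase in ['train', 'validation', 'test']:
    n = sum(len(files) for _, _, files in os.walk(new_data_folder + phase))
    print(phase, n)  # expect 800 / 280 / 280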
from keras.applications import VGG16
train_dir=os.path.join(base_dir,'train')  # base_dir: root of the dataset prepared above
validation_dir=os.path.join(base_dir,'validation')
conv_base=VGG16(weights='imagenet',
               include_top=False,
               input_shape=(150,150,3))
from keras import layers
from keras import models
model=models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256,activation='relu'))
model.add(layers.Dense(1,activation='sigmoid'))
conv_base.trainable=False
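
As a sanity check, freezing conv_base should leave only the two new Dense layers trainable (a sketch; 4 weight tensors: 2 kernels + 2 biases):

print(len(model.trainable_weights))  # 4 after freezing, 30 before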
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
train_datagen=ImageDataGenerator(\
            rescale=1./255,
            rotation_range=40,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True,
            fill_mode='nearest')
test_datagen=ImageDataGenerator(rescale=1./255)
train_generator=train_datagen.flow_from_directory(
                train_dir,
                target_size=(150,150),
                batch_size=20,
                class_mode='binary')
validation_generator=test_datagen.flow_from_directory(
                    validation_dir,
                    target_size=(150,150),
                    batch_size=20,
                    class_mode='binary')
model.compile(loss='binary_crossentropy',
             optimizer=optimizers.RMSprop(lr=2e-5),
             metrics=['acc'])
history=model.fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=30,
        validation_data=validation_generator,
        validation_steps=100)
import matplotlib.pyplot as plt
acc=history.history['acc']
val_acc=history.history['val_acc']
loss=history.history['loss']
val_loss=history.history['val_loss']
epochs=range(1,len(acc)+1)
plt.plot(epochs,acc,'bo',label='Training acc')
plt.plot(epochs,val_acc,'b',label='Validation acc')
plt.legend()
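
The loss curves can be plotted the same way (completing the figure code above):

plt.figure()
plt.plot(epochs,loss,'bo',label='Training loss')
plt.plot(epochs,val_loss,'b',label='Validation loss')
plt.legend()
plt.show()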


from keras import backend as K
from keras.models import load_model
model=load_model('')  # path to a saved model file
layer_1=K.function([model.layers[0].input],[model.layers[1].output])  # map the input to the first layer's output
f1=layer_1([input_image])[0]  # input_image: a preprocessed batch of shape (1, h, w, 3)
for _ in range(32):
    show_img=f1[:,:,:,_]
    show_img.shape=[149,149]
    plt.subplot(4,8,_+1)
    plt.imshow(show_img,cmap='gray')
    plt.axis('off')
plt.show()

import numpy as np
#from processor import process_image
from keras.models import load_model
from keras import backend as K
import matplotlib.pyplot as plt
import cv2
from keras.preprocessing.image import img_to_array, load_img
import numpy as np

def process_image(image, target_shape):
    #"""Given an image, process it and return the array."""
    # Load the image.
    h, w, _ = target_shape
    image = load_img(image, target_size=(h, w))

    # Turn it into numpy, normalize and return.
    img_arr = img_to_array(image)
    x = (img_arr / 255.).astype(np.float32)

    return x

def main():
    model = load_model('inception.026-1.07.hdf5')  # replace with your own model file
    # Get all our test images.
    image='img03.jpg'
    images=cv2.imread('img03.jpg')
#     cv2.imshow("Image", images)
#     cv2.waitKey(0)
    # Turn the image into an array.
    image_arr = process_image(image, (299, 299, 3))  # resize the image to the input size expected by the loaded model
    image_arr = np.expand_dims(image_arr, axis=0)
 
    # 设置可视化的层
    layer_1 = K.function([model.layers[0].input], [model.layers[1].output])
    f1 = layer_1([image_arr])[0]
    plt.figure(figsize=(10, 10))
    for _ in range(32):
        show_img = f1[:, :, :, _]
        show_img.shape = [149, 149]
        plt.subplot(4, 8, _ + 1)  # one figure, a 4x8 grid of feature maps
        plt.imshow(show_img, cmap='gray')
        plt.axis('off')
    plt.show()
    # conv layer: 299
    layer_1 = K.function([model.layers[0].input], [model.layers[299].output])
    f1 = layer_1([image_arr])[0]
    plt.figure(figsize=(10, 10))
    for _ in range(81):
        show_img = f1[:, :, :, _]
        show_img.shape = [8, 8]
        plt.subplot(9, 9, _ + 1)
        plt.imshow(show_img, cmap='gray')
        plt.axis('off')
    plt.show()
    print('This is the end !')
 
if __name__ == '__main__':
    main()
