Training a Model with TensorFlow + VGG16

I. Training the model:
Key points for training the model:
1. Build a dataset from your own images.
2. Convert the labels to one-hot encoding.
3. Define the VGG16 network.
4. Train the model.
5. Save the model.
Image format: the images are 224x224, and each file is named with its label first (before an underscore), so the label can be read straight from the filename when the image is loaded.
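For example, with this naming scheme the label and its one-hot vector can be recovered from the filename alone. A minimal sketch (the filename "0_cat.jpg" is made up for illustration; the parsing matches read_data() in the script below):

import numpy as np

fname = "0_cat.jpg"                      # hypothetical file: label 0 before the underscore
label = int(fname.split("_")[0])         # -> 0, the same parsing read_data() uses
one_hot = np.zeros(3, dtype=np.float32)  # 3 classes, as in the training script
one_hot[label] = 1.0
print(one_hot)                           # [1. 0. 0.]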

TensorFlow code:
import tensorflow as tf
import os
from PIL import Image
import numpy as np

# Convert a list of integer labels into one-hot vectors of length num,
# e.g. getOneHot([0, 2], 3) -> [[1,0,0],[0,0,1]].
def getOneHot(labels, num):
    a = []
    for i in labels:
        k = np.zeros(num)
        k[i] = 1
        a.append(k)
    return np.array(a,dtype=np.float32)

# Load every image under data_dir, scale pixels to [0, 1], and parse the
# label from the filename prefix before the first underscore.
def read_data(data_dir):
    datas = []
    labels = []
    fpaths = []
    for fname in os.listdir(data_dir):
        fpath = os.path.join(data_dir, fname)
        fpaths.append(fpath)
        image = Image.open(fpath)
        data = np.array(image, dtype=np.float32) / 255.0
        label = int(fname.split("_")[0])
        datas.append(data)
        labels.append(label)

    datas = np.array(datas)
    labels = np.array(labels)
    print("shape of datas: {}\tshape of labels: {}".format(datas.shape, labels.shape))
    return fpaths, datas, labels

# kernel x kernel convolution + ReLU; the input channel count is inferred from the input tensor.
def conv2d(conv_input, name, kernel, batch_output, height, width):
    batch_input = conv_input.get_shape()[-1].value
    W = tf.Variable(tf.truncated_normal([kernel, kernel, batch_input, batch_output], stddev=0.1))
    bia = tf.Variable(tf.zeros([batch_output], dtype=tf.float32))  # one zero-initialized bias per output channel
    conv_2d = tf.nn.conv2d(conv_input, W, strides=[1, height, width, 1], padding='SAME') + bia
    return tf.nn.relu(conv_2d, name=name)

# fh x fw max pooling with the given strides.
def max_pool_2x2(x, name, fh, fw, height, width):
    return tf.nn.max_pool(x, ksize=[1, fh, fw, 1], strides=[1, height, width, 1], padding='SAME', name=name)

# Fully connected layer; activation=False leaves the output linear so the
# final logits layer is not passed through ReLU.
def fc_op(fc_input, name, batch_output, activation=True):
    batch_input = fc_input.get_shape()[-1].value
    W = tf.Variable(tf.truncated_normal([batch_input, batch_output], stddev=0.1))
    bia = tf.Variable(tf.zeros([batch_output], dtype=tf.float32))  # one zero-initialized bias per output unit
    fc = tf.matmul(fc_input, W) + bia
    return tf.nn.relu(fc, name=name) if activation else tf.identity(fc, name=name)

# VGG16: 13 convolutional layers in 5 blocks, followed by 3 fully connected layers.
def getVGG16(datas_input, keep_prob):
    #block1 -- outputs 112x112x64
    conv1_1 = conv2d(datas_input, "conv1_1", 3, 64, 1, 1)
    conv1_2 = conv2d(conv1_1, "conv1_2", 3, 64, 1, 1)
    pool1 = max_pool_2x2(conv1_2, "pool1", 2, 2, 2, 2)

    #block2 -- outputs 56x56x128
    conv2_1 = conv2d(pool1, "conv2_1", 3, 128, 1, 1)
    conv2_2 = conv2d(conv2_1, "conv2_2", 3, 128, 1, 1)
    pool2 = max_pool_2x2(conv2_2, "pool2", 2, 2, 2, 2)

    #block3 -- outputs 28x28x256
    conv3_1 = conv2d(pool2, "conv3_1", 3, 256, 1, 1)
    conv3_2 = conv2d(conv3_1, "conv3_2", 3, 256, 1, 1)
    conv3_3 = conv2d(conv3_2, "conv3_3", 3, 256, 1, 1)
    pool3 = max_pool_2x2(conv3_3, "pool3", 2, 2, 2, 2)

    #block4 -- outputs 14x14x512
    conv4_1 = conv2d(pool3, "conv4_1", 3, 512, 1, 1)
    conv4_2 = conv2d(conv4_1, "conv4_2", 3, 512, 1, 1)
    conv4_3 = conv2d(conv4_2, "conv4_3", 3, 512, 1, 1)
    pool4 = max_pool_2x2(conv4_3, "pool4", 2, 2, 2, 2)

    #block5 -- outputs 7x7x512
    conv5_1 = conv2d(pool4, "conv5_1", 3, 512, 1, 1)
    conv5_2 = conv2d(conv5_1, "conv5_2", 3, 512, 1, 1)
    conv5_3 = conv2d(conv5_2, "conv5_3", 3, 512, 1, 1)
    pool5 = max_pool_2x2(conv5_3, "pool5", 2, 2, 2, 2)

    # flatten
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

    # fully connected
    fc6 = fc_op(resh1, "fc6", 4096)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
    fc7 = fc_op(fc6_drop, "fc7", 4096)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")
    fc8 = fc_op(fc7_drop, "fc8", 3, activation=False)  # logits: linear output, 3 classes

    return fc8

if __name__ == "__main__":
    learning_rate = 0.001
    display_step = 5
    train_steps = 150
    keep_prob = 0.5
    data_dir = "./images"

    fpaths, datas, labels = read_data(data_dir)
    labels = getOneHot(labels, 3)

    datas_placeholder = tf.placeholder(tf.float32, [None, 224, 224, 3])
    y_ = tf.placeholder(tf.float32, [None, 3])
    logits = getVGG16(datas_placeholder, keep_prob)

    # y_conv = tf.nn.softmax(logits, name='output')
    # # alternative loss function and optimizer
    # cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv)))
    # train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))
    train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # accuracy, computed the same way as for a softmax regression model
    correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # save the trained weights; otherwise they are lost when the program exits
    saveFile = 'mnist_model.ckpt'
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # initialize all variables
        sess.run(tf.global_variables_initializer())
        for i in range(train_steps):
            sess.run(train_step, feed_dict={datas_placeholder: datas, y_: labels})
            if i % display_step == 0:
                loss, acc = sess.run([cost, accuracy], feed_dict={datas_placeholder: datas, y_: labels})
                print("step %d, loss %g, training accuracy %g" % (i, loss, acc))
        # persist the trained weights to the checkpoint file
        saver.save(sess, saveFile)
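
Once the checkpoint has been written, it can be restored in a separate script to classify new images. A minimal inference sketch, assuming getVGG16 from the training script above is importable and that "test.jpg" is a 224x224 RGB test image (both are illustrative assumptions, not part of the original post):

import tensorflow as tf
import numpy as np
from PIL import Image

# rebuild the same graph as in training so variable names match the checkpoint;
# getVGG16 is assumed to be imported from the training script above
datas_placeholder = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits = getVGG16(datas_placeholder, 1.0)  # keep_prob = 1.0 disables dropout at inference time
prediction = tf.argmax(logits, 1)

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, 'mnist_model.ckpt')           # checkpoint saved by the training script
    image = Image.open("test.jpg")                    # hypothetical 224x224 test image
    data = np.array(image, dtype=np.float32) / 255.0
    print(sess.run(prediction, feed_dict={datas_placeholder: [data]}))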

If you have any questions, feel free to join my QQ group: 109530447
