tensorflow-VGG

Takeaways:

My machine is too weak: with this many parameters, it ran out of memory.

Keras, on the other hand, is really great.
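To see where the memory goes, here is a back-of-the-envelope count of just the fully connected head of the TensorFlow model below (three 2x2 poolings take the 27x27 input down to 4x4 with 256 channels):

# rough parameter count for the FC head of the model below
flat = 4 * 4 * 256                 # 27 -> 14 -> 7 -> 4 spatial size after three poolings
fc1 = flat * 4096 + 4096           # first dense layer: ~16.8M parameters
fc2 = 4096 * 4096 + 4096           # second dense layer: another ~16.8M
print(fc1 + fc2)                   # ~33.6M parameters in two layers alone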

References:

https://blog.csdn.net/weixin_43624538/article/details/84563093

https://blog.csdn.net/shankezh/article/details/87814520

https://blog.csdn.net/m0_37917271/article/details/82286252

Code:

TensorFlow version, there may still be bugs = =

import tensorflow as tf
import cv2
import numpy as np
import os
from PIL import Image
from sklearn.model_selection import train_test_split
class VGG(object):
    def __init__(self):
        self.n_input = 27*27*3
        self.n_classes = 2
        self.batch_size = 10
        self.training_iters = 20  # the loop condition below is step*batch_size < training_iters, so this budget allows only one step
        self.display_step = 20    # with a single step, the progress print below never fires
        self.learning_rate = 0.001

    def conv2d(self,x,filter,k_size,stride=[1,1],padding='SAME',activation = tf.nn.relu,scope='conv2d'):
        return tf.layers.conv2d(inputs=x,filters=filter,kernel_size=k_size,
                                strides=stride,padding=padding,name=scope,activation=activation)

    def maxpool2d(self,x,pool_size=[2,2],stride=[2,2],padding='SAME',scope='maxpool2d'):
        return tf.layers.max_pooling2d(inputs=x,pool_size=pool_size,strides=stride,padding=padding,name=scope)

    def dropoutx(self,x,d_rate):
        # rate is the fraction of units to drop; note tf.layers.dropout only
        # applies it when training=True (the default False makes this a no-op)
        return tf.layers.dropout(x,rate=d_rate)

    def norm(self, x, l_size, bias=1.0, alpha=0.001 / 9.0, beta=0.75, scope='norm'):
        return tf.nn.lrn(x, l_size, bias=bias, alpha=alpha, beta=beta, name=scope)

    def set_net(self,x,d_rate=0.8):
        # a truncated VGG: three conv blocks instead of the paper's five,
        # since three 2x2 poolings already take a 27x27 input down to 4x4
        x = tf.reshape(x,[-1,27,27,3])

        net = self.conv2d(x,filter=64,k_size=[3,3],scope='conv1_1')
        net = self.conv2d(net,filter=64,k_size=[3,3],scope='conv1_2')
        net = self.maxpool2d(net,scope='pool1')

        net = self.conv2d(net, filter=128, k_size=[3, 3], scope='conv2_1')  # takes pool1's output, not x
        net = self.conv2d(net, filter=128, k_size=[3, 3], scope='conv2_2')
        net = self.maxpool2d(net, scope='pool2')

        net = self.conv2d(net, filter=256, k_size=[3, 3], scope='conv3_1')  # takes pool2's output, not x
        net = self.conv2d(net, filter=256, k_size=[3, 3], scope='conv3_2')
        net = self.conv2d(net, filter=256, k_size=[3, 3], scope='conv3_3')
        net = self.maxpool2d(net, scope='pool3')

        shape = net.get_shape()
        print(shape)
        flat_len = shape[1].value*shape[2].value*shape[3].value  # don't shadow the built-in len
        print(flat_len)
        net = tf.reshape(net,[-1, flat_len])
        net = tf.layers.dense(net,4096,activation=tf.nn.relu,use_bias=True,name='fc1')
        net = self.dropoutx(net,d_rate)
        net = tf.layers.dense(net,4096,activation=tf.nn.relu,use_bias=True,name='fc2')
        net = self.dropoutx(net,d_rate)
        # no activation on the output layer: softmax_cross_entropy_with_logits
        # expects raw logits, and a ReLU here would zero out negative ones
        out = tf.layers.dense(net,self.n_classes,use_bias=True,name='fc3')
        return out

    def vgg_prediction(self, X, Y, scope='vgg'):
        X_train, X_valid, y_train, y_valid = train_test_split(X, Y, test_size=0.2)
        x = tf.placeholder(tf.float32, [None, self.n_input])
        y = tf.placeholder(tf.float32, [None, self.n_classes])

        pred = self.set_net(x)  # raw logits, not yet normalized
        a = tf.nn.softmax(pred)  # softmax-normalized probabilities

        # loss and training step
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))  # y must be one-hot
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(cost)  # minimize the loss
        # evaluation ops
        correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # initialize all variables (initialize_all_variables is deprecated)
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            step = 1
            # keep training until the iteration budget runs out (no convergence check!)
            while step * self.batch_size < self.training_iters:
                # slice out the next mini-batch
                batch_xs = X_train[self.batch_size*(step-1):self.batch_size*step]
                batch_ys = y_train[self.batch_size*(step-1):self.batch_size*step]
                batch_xs = np.reshape(batch_xs, (-1, 27*27*3))
                sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
                if step % self.display_step == 0:
                    # report minibatch loss and accuracy every display_step steps
                    acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys})
                    loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})
                    print("Iter " + str(step * self.batch_size) + ", Minibatch Loss= " + "{:.6f}".format(
                        loss) + ", Training Accuracy = " + "{:.5f}".format(acc))
                step += 1
            print("Optimization Finished!")
            # validation accuracy on (at most) the first 256 held-out samples
            X_valid = np.reshape(X_valid, (-1, 27 * 27 * 3))
            print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: X_valid[:256], y: y_valid[:256]}))
            # softmax output for one sample; the slice is left-inclusive, right-exclusive
            print("Testing Result:", sess.run(a, feed_dict={x: X_valid[63:64], y: y_valid[63:64]}))
            print(y_valid[63:64])
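
For reference, here is a minimal sketch of how the class above might be driven, using stand-in random data (in practice X would come from your own image loader); the labels are one-hot encoded to match the [None, n_classes] placeholder:

import numpy as np

X = np.random.rand(100, 27, 27, 3).astype(np.float32)  # stand-in images
labels = np.random.randint(0, 2, size=100)              # stand-in 0/1 class ids
Y = np.eye(2)[labels]                                   # one-hot, shape (100, 2)

vgg = VGG()
vgg.vgg_prediction(X, Y)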




 Keras version, so much nicer:

import cv2
import numpy as np
import os
from PIL import Image
valid_exts = [".jpg",".gif",".png",".tga", ".jpeg"]
n = 800
cwd = os.getcwd()
width = 128
paths = ["images/cats", "images/dogs"]  # a list, not a set: set iteration order is not guaranteed, which would scramble the labels
nclass = len(paths)
X = np.zeros((n,width,width,3),dtype=np.uint8)
Y = np.zeros((n,),dtype=np.uint8)
imgcnt = 0
for i,relpath in enumerate(paths):
    fullpath = cwd + "/" + relpath
    print(fullpath)
    flist = os.listdir(fullpath)
    for f in flist:
        if os.path.splitext(f)[1].lower() not in valid_exts:
            continue
        path = os.path.join(fullpath, f)
        img = Image.open(path)
        img = img.convert("RGB")
        img = np.array(img)
        #print(img.shape)
        X[imgcnt] = cv2.resize(img,(width,width))
        Y[imgcnt] = i
        imgcnt += 1
        
print(imgcnt)
X, Y = X[:imgcnt], Y[:imgcnt]  # drop unused preallocated rows if fewer than n images were found
import random
import matplotlib.pyplot as plt

%matplotlib inline
%config InlineBackend.figure_format = 'retina'

plt.figure(figsize=(12,10))
for i in range(12):
    random_index = random.randint(0,n-1)
    plt.subplot(3,4,1+i)
    plt.imshow(X[random_index])
    plt.title(['cat','dog'][Y[random_index]])  # index 0 = images/cats, 1 = images/dogs
from sklearn.model_selection import train_test_split
X_train,X_valid,y_train,y_valid = train_test_split(X,Y,test_size=0.2)
from keras.layers import *
from keras.models import *
inputs = Input((width,width,3))
x = inputs
for i, layer_num in enumerate([2,3,3,3]):
    for j in range(layer_num):
        x = Conv2D(32*2**i,3,padding='same')(x)  # no activation here: BatchNorm then ReLU follow
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    x = MaxPooling2D(2)(x)
x = GlobalAveragePooling2D()(x)
x = Dropout(0.5)(x)
x = Dense(1,activation='sigmoid')(x)

model = Model(inputs,x)
model.compile(optimizer='adam',
             loss='binary_crossentropy',
             metrics=['accuracy'])
# note: X is uint8 in [0, 255]; scaling to [0, 1] first (X = X / 255.) usually trains better
h = model.fit(X_train,y_train,batch_size=128,epochs=20,validation_data=(X_valid,y_valid))
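
Once fit() returns, the training curves and the sigmoid outputs can be inspected directly; a small sketch (0.5 is the usual decision threshold for a sigmoid binary classifier):

# training curves recorded by fit()
plt.plot(h.history['loss'], label='train loss')
plt.plot(h.history['val_loss'], label='valid loss')
plt.legend()

# held-out evaluation and a few predictions
score = model.evaluate(X_valid, y_valid, batch_size=128)  # [loss, accuracy]
print('valid loss, valid acc:', score)
probs = model.predict(X_valid[:8])                        # sigmoid outputs in [0, 1]
preds = (probs > 0.5).astype(int).ravel()                 # threshold at 0.5
print(preds, y_valid[:8])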

 
