TensorFlow in Practice: VGG

What is implemented here is not the actual VGGNet, but an ordinary convolutional network with a VGG-style structure. Note also that "VGG" does not refer to a single model but to a family of models controlled by different configuration parameters; the same is true of ResNet.
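
To make the "family of models" point concrete: the original VGG paper defines configurations (VGG-11/13/16/19) that differ only in how many 3x3 conv layers sit between the pooling layers. Below is a minimal sketch of such a config-driven builder in Keras; the cfg lists follow the commonly cited 'A' (VGG-11) and 'D' (VGG-16) configurations, the function name is illustrative, and the classifier head is simplified for CIFAR-10 (real VGG ends in two 4096-unit dense layers).

from tensorflow import keras

# Channel count per conv layer; 'M' marks a 2x2 max-pool.
VGG_CFGS = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
          512, 512, 512, 'M', 512, 512, 512, 'M'],
}

def build_vgg(cfg_name, input_shape=(32, 32, 3), num_classes=10):
    model = keras.models.Sequential()
    model.add(keras.layers.InputLayer(input_shape=input_shape))
    for v in VGG_CFGS[cfg_name]:
        if v == 'M':
            model.add(keras.layers.MaxPool2D(pool_size=2))
        else:
            model.add(keras.layers.Conv2D(v, 3, padding='same',
                                          activation='relu'))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(num_classes, activation='softmax'))
    return model

model = build_vgg('D')  # VGG-16-style; build_vgg('A') gives VGG-11-style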

1. TF 1.0

import tensorflow as tf
import os
import pickle
import numpy as np

CIFAR_DIR = "./cifar-10-batches-py"
print(os.listdir(CIFAR_DIR))

def load_data(filename):
    """read data from data file."""
    with open(filename, 'rb') as f:
        data = pickle.load(f, encoding='bytes')
        return data[b'data'], data[b'labels']

# A lightweight stand-in for tf.data.Dataset: serves CIFAR-10 in shuffled batches.
class CifarData:
    def __init__(self, filenames, need_shuffle):
        all_data = []
        all_labels = []
        for filename in filenames:
            data, labels = load_data(filename)
            all_data.append(data)
            all_labels.append(labels)
        self._data = np.vstack(all_data)
        self._data = self._data / 127.5 - 1  # scale pixels from [0, 255] to [-1, 1]
        self._labels = np.hstack(all_labels)
        print(self._data.shape)
        print(self._labels.shape)
        
        self._num_examples = self._data.shape[0]
        self._need_shuffle = need_shuffle
        self._indicator = 0
        if self._need_shuffle:
            self._shuffle_data()
            
    def _shuffle_data(self):
        # [0,1,2,3,4,5] -> [5,3,2,4,0,1]
        p = np.random.permutation(self._num_examples)
        self._data = self._data[p]
        self._labels = self._labels[p]
    
    def next_batch(self, batch_size):
        """return batch_size examples as a batch."""
        end_indicator = self._indicator + batch_size
        if end_indicator > self._num_examples:
            if self._need_shuffle:
                self._shuffle_data()
                self._indicator = 0
                end_indicator = batch_size
            else:
                raise Exception("have no more examples")
        if end_indicator > self._num_examples:
            raise Exception("batch size is larger than all examples")
        batch_data = self._data[self._indicator: end_indicator]
        batch_labels = self._labels[self._indicator: end_indicator]
        self._indicator = end_indicator
        return batch_data, batch_labels

train_filenames = [os.path.join(CIFAR_DIR, 'data_batch_%d' % i) for i in range(1, 6)]
test_filenames = [os.path.join(CIFAR_DIR, 'test_batch')]

train_data = CifarData(train_filenames, True)
test_data = CifarData(test_filenames, False)

x = tf.placeholder(tf.float32, [None, 3072])
y = tf.placeholder(tf.int64, [None])
# y holds integer class labels, e.g. [0, 5, 6, 3]
x_image = tf.reshape(x, [-1, 3, 32, 32])
# each 3072-dim row is 3 channels x 32 x 32 pixels, stored channels-first (CHW)
x_image = tf.transpose(x_image, perm=[0, 2, 3, 1])
# NCHW -> NHWC, the channels-last layout tf.layers.conv2d expects by default

# conv1: each filter produces a feature map (the "output image" of that filter)
conv1_1 = tf.layers.conv2d(x_image,
                           32, # output channel number
                           (3,3), # kernel size
                           padding = 'same',
                           activation = tf.nn.relu,
                           name = 'conv1_1')

conv1_2 = tf.layers.conv2d(conv1_1,
                           32, # output channel number
                           (3,3), # kernel size
                           padding = 'same',
                           activation = tf.nn.relu,
                           name = 'conv1_2')

pooling1 = tf.layers.max_pooling2d(conv1_2,
                                   (2, 2), # kernel size
                                   (2, 2), # stride
                                   name = 'pool1')

conv2_1 = tf.layers.conv2d(pooling1,
                           32, # output channel number
                           (3,3), # kernel size
                           padding = 'same',
                           activation = tf.nn.relu,
                           name = 'conv2_1')

conv2_2 = tf.layers.conv2d(conv2_1,
                           32, # output channel number
                           (3,3), # kernel size
                           padding = 'same',
                           activation = tf.nn.relu,
                           name = 'conv2_2')
pooling2 = tf.layers.max_pooling2d(conv2_2,
                                   (2, 2), # kernel size
                                   (2, 2), # stride
                                   name = 'pool2')

conv3_1 = tf.layers.conv2d(pooling2,
                           32, # output channel number
                           (3,3), # kernel size
                           padding = 'same',
                           activation = tf.nn.relu,
                           name = 'conv3_1')

conv3_2 = tf.layers.conv2d(conv3_1,
                           32, # output channel number
                           (3,3), # kernel size
                           padding = 'same',
                           activation = tf.nn.relu,
                           name = 'conv3_2')

pooling3 = tf.layers.max_pooling2d(conv3_2,
                                   (2, 2), # kernel size
                                   (2, 2), # stride
                                   name = 'pool3')
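
# After three 2x2 pools the 32x32 input is down to 4x4, so the flatten below
# yields 4 * 4 * 32 = 512 features feeding the 10-way classifier.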

flatten = tf.layers.flatten(pooling3)
y_ = tf.layers.dense(flatten, 10)

loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
# y_ -> softmax
# y  -> one-hot
# loss = -sum(y * log(softmax(y_)))  (see the NumPy sketch after this listing)

# predicted class indices
predict = tf.argmax(y_, 1)
# per-example correctness mask, e.g. [1,0,1,1,1,0,0,0]
correct_prediction = tf.equal(predict, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))

with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

init = tf.global_variables_initializer()
batch_size = 20
train_steps = 10000
test_steps = 100

# after 10k training steps: ~73.4% test accuracy
with tf.Session() as sess:
    sess.run(init)
    for i in range(train_steps):
        batch_data, batch_labels = train_data.next_batch(batch_size)
        loss_val, acc_val, _ = sess.run(
            [loss, accuracy, train_op],
            feed_dict={
                x: batch_data,
                y: batch_labels})
        if (i+1) % 100 == 0:
            print('[Train] Step: %d, loss: %4.5f, acc: %4.5f' 
                  % (i+1, loss_val, acc_val))
        if (i+1) % 1000 == 0:
            test_data = CifarData(test_filenames, False)
            all_test_acc_val = []
            for j in range(test_steps):
                test_batch_data, test_batch_labels \
                    = test_data.next_batch(batch_size)
                test_acc_val = sess.run(
                    [accuracy],
                    feed_dict = {
                        x: test_batch_data, 
                        y: test_batch_labels
                    })
                all_test_acc_val.append(test_acc_val)
            test_acc = np.mean(all_test_acc_val)
            print('[Test ] Step: %d, acc: %4.5f' % (i+1, test_acc))
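
For reference, tf.losses.sparse_softmax_cross_entropy above applies softmax to the logits and averages the negative log-probability of each true class. A minimal NumPy sketch of the same computation (function and variable names are illustrative):

import numpy as np

def sparse_softmax_cross_entropy(logits, labels):
    """Mean cross-entropy for integer labels; logits has shape (batch, classes)."""
    shifted = logits - logits.max(axis=1, keepdims=True)  # subtract max for numerical stability
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    return -log_probs[np.arange(len(labels)), labels].mean()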

2. TF 2.0

import tensorflow as tf
import os
import pickle
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import sklearn
import pandas as pd
import sys
import time

from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
    
CIFAR_DIR = "cifar-10-batches-py"
print(os.listdir(CIFAR_DIR))
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)

def load_data(filename):
    """read data from data file."""
    with open(filename, 'rb') as f:
        data = pickle.load(f, encoding='bytes')
        return data[b'data'], data[b'labels']
    
train_filenames = [os.path.join(CIFAR_DIR, 'data_batch_%d' % i) for i in range(1, 6)]
test_filenames = [os.path.join(CIFAR_DIR, 'test_batch')]
all_filenames = train_filenames + test_filenames

all_train_data = []
all_train_labels = []
all_test_data = []
all_test_labels = []
for filename in all_filenames:
    data, labels = load_data(filename)
    if filename.startswith(os.path.join(CIFAR_DIR, 'data')):
        all_train_data.append(data)
        all_train_labels.append(labels)
    else:
        all_test_data.append(data)
        all_test_labels.append(labels)
# stack once after the loop instead of re-stacking on every iteration
train_data = np.vstack(all_train_data)
train_labels = np.hstack(all_train_labels)
test_data = np.vstack(all_test_data)
test_labels = np.hstack(all_test_labels)
print(train_data.shape)

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()

train_data_scaled = scaler.fit_transform(
    train_data.astype(np.float32).reshape(-1, 1)).reshape(-1, 3, 32, 32)
test_data_scaled = scaler.transform(
    test_data.astype(np.float32).reshape(-1, 1)).reshape(-1, 3, 32, 32)

print(train_data_scaled.shape)
print(test_data_scaled.shape)
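
# Note: because of the reshape(-1, 1), StandardScaler fits one global mean and
# std over every pixel of every image. A sketch of the equivalent computation:
#   mu, sigma = train_data.astype(np.float32).mean(), train_data.astype(np.float32).std()
#   train_data_scaled = ((train_data.astype(np.float32) - mu) / sigma).reshape(-1, 3, 32, 32)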

train_data_scaled = tf.transpose(train_data_scaled, perm=[0, 2, 3, 1])
test_data_scaled = tf.transpose(test_data_scaled, perm=[0, 2, 3, 1])

print(train_data_scaled.shape)
print(test_data_scaled.shape)
"""
model = keras.models.Sequential([
    keras.layers.Dense(100,input_shape=train_data.shape[1:], activation='relu'),
    keras.layers.Dense(100, activation='relu'),
    keras.layers.Dense(50, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
"""
model = keras.models.Sequential()
# filters=32: the number of output channels, i.e. how many convolution kernels this layer has
model.add(keras.layers.Conv2D(filters=32, kernel_size=3,
                              padding='same',
                              activation='relu',
                              input_shape=train_data_scaled.shape[1:]))
model.add(keras.layers.Conv2D(filters=32, kernel_size=3,
                              padding='same',
                              activation='relu'))

model.add(keras.layers.MaxPool2D(pool_size=2))

model.add(keras.layers.Conv2D(filters=64, kernel_size=3,
                              padding='same',
                              activation='relu'))
model.add(keras.layers.Conv2D(filters=64, kernel_size=3,
                              padding='same',
                              activation='relu'))

model.add(keras.layers.MaxPool2D(pool_size=2))

model.add(keras.layers.Conv2D(filters=128, kernel_size=3,
                              padding='same',
                              activation='relu'))
model.add(keras.layers.Conv2D(filters=128, kernel_size=3,
                              padding='same',
                              activation='relu'))

model.add(keras.layers.MaxPool2D(pool_size=2))

model.add(keras.layers.Flatten())

model.add(keras.layers.Dense(10, activation="softmax"))

model.compile(loss="sparse_categorical_crossentropy",
              #optimizer = keras.optimizers.SGD(0.001),
              optimizer = "sgd",
              metrics = ["accuracy"])

model.summary()
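
# For reference: 'same' padding preserves the 32x32 spatial size and each pool
# halves it (32 -> 16 -> 8 -> 4), so Flatten outputs 4 * 4 * 128 = 2048 features
# and the Dense layer holds 2048 * 10 + 10 = 20,490 parameters; the six conv
# layers bring the total to 307,498 trainable parameters.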

history = model.fit(train_data_scaled, train_labels, epochs=100,
                    validation_data=(test_data_scaled, test_labels))

# plot how loss and accuracy evolve over training
def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 3)
    plt.show()

plot_learning_curves(history)
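
After training, the test-set loss and accuracy can be reported with the standard Keras call (a usage sketch):

test_loss, test_acc = model.evaluate(test_data_scaled, test_labels)
print('test accuracy: %.4f' % test_acc)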

 
