1. Implementing traffic-sign recognition with a convolutional neural network in TensorFlow, and plotting the accuracy and loss curves

The listing below targets TensorFlow 1.x (it uses tf.contrib, placeholders, and explicit sessions) and reads the dataset from pickle files whose dicts expose 'features' and 'labels' keys.

import pickle
import numpy as np
from collections import Counter
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.contrib.layers import flatten
import matplotlib.pyplot as plt
training_file = 'train.p'
test_file = 'test.p'
with open(training_file, 'rb') as f:
    train = pickle.load(f)
with open(test_file, 'rb') as f:
    test = pickle.load(f)
x_train, y_train = train['features'], train['labels']
x_test, y_test = test['features'], test['labels']
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
c = Counter(y_train)  # class distribution of the training labels
print(c.items())
# Scale pixel values to [-0.5, 0.5]
x_train_norm = x_train.astype(float) / 255 - 0.5
x_test = x_test.astype(float) / 255 - 0.5
x_train, x_valid, y_train, y_valid = train_test_split(x_train_norm, y_train, test_size=0.2, random_state=2275)
print('training dataset:', x_train.shape, y_train.shape)
print('validation dataset:', x_valid.shape, y_valid.shape)
EPOCHS = 20
BATCH_SIZE = 128
dropout = 0.70  # keep probability for tf.nn.dropout (fraction of units kept)


def LeNet(x, add_dropout=False):
    mu = 0
    sigma = 0.1
    # input [32,32,3]    output[28,28,6]
    conv1_w = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_w, strides=[1, 1, 1, 1], padding='VALID', name='conv1') + conv1_b
    conv1 = tf.nn.relu(conv1)
    # shape[14,14,6]
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    conv2_w = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    # [10,10,16]
    conv2 = tf.nn.conv2d(conv1, conv2_w, strides=[1, 1, 1, 1], padding='VALID', name='conv2') + conv2_b
    conv2 = tf.nn.relu(conv2)
    # [5,5,16]
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # 400
    fc0 = flatten(conv2)
    fc1_w = tf.Variable(tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    # 120
    fc1 = tf.matmul(fc0, fc1_w) + fc1_b
    fc1 = tf.nn.relu(fc1)
    if add_dropout:
        fc1 = tf.nn.dropout(fc1, keep_prob=dropout)
    fc2_w = tf.Variable(tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    # 84
    fc2 = tf.matmul(fc1, fc2_w) + fc2_b
    fc2 = tf.nn.relu(fc2)
    if add_dropout:
        fc2 = tf.nn.dropout(fc2, keep_prob=dropout)
    fc3_w = tf.Variable(tf.truncated_normal(shape=(84, 43), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_w) + fc3_b
    regularizers = (tf.nn.l2_loss(conv1_w) + tf.nn.l2_loss(conv2_w) + tf.nn.l2_loss(fc1_w)
                    + tf.nn.l2_loss(fc2_w) + tf.nn.l2_loss(fc3_w))
    return logits, regularizers
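
The shape comments inside LeNet follow from the 'VALID'-padding size formula: out = (in - kernel) / stride + 1. A quick standalone sanity check of that arithmetic (my addition, not part of the original post):

def valid_out(size, kernel, stride=1):
    # Output length of a 'VALID' convolution or pooling along one dimension.
    return (size - kernel) // stride + 1

assert valid_out(32, 5) == 28             # conv1: 32x32x3 -> 28x28x6
assert valid_out(28, 2, stride=2) == 14   # pool1: 28x28 -> 14x14
assert valid_out(14, 5) == 10             # conv2: 14x14 -> 10x10
assert valid_out(10, 2, stride=2) == 5    # pool2: 10x10 -> 5x5; flatten gives 5*5*16 = 400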


x = tf.placeholder(dtype=tf.float32, shape=(None, 32, 32, 3))
y = tf.placeholder(dtype=tf.int32, shape=(None,))
num_labels = 43
one_hot_y = tf.one_hot(y, num_labels)
rate = 0.001
factor = 5e-4
logits, regularizers = LeNet(x, True)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
loss_operation += factor * regularizers
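# Note: tf.nn.l2_loss(t) returns sum(t ** 2) / 2, so this line adds
# (factor / 2) * (sum of all squared weights) -- plain L2 weight decay with factor = 5e-4.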
optimizer = tf.train.AdamOptimizer(learning_rate=rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
softmax = tf.nn.softmax(logits)
prediction = tf.argmax(logits, 1)
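
One caveat: keep_prob is baked into the graph as a Python constant, so dropout also fires during evaluation and testing, slightly depressing every reported accuracy. The usual TF 1.x remedy is a placeholder that defaults to 1.0; a minimal sketch (this keep_prob placeholder is my addition, not part of the original code):

# Sketch only -- wire dropout through a placeholder so it can be switched off:
#   keep_prob = tf.placeholder_with_default(1.0, shape=(), name='keep_prob')
#   ... inside LeNet:   fc1 = tf.nn.dropout(fc1, keep_prob=keep_prob)
#   ... training step:  sess.run(training_operation,
#                                feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
#   ... evaluation:     omit keep_prob from feed_dict; the default 1.0 disables dropout.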


def evaluate(x_data, y_data):
    num_examples = len(x_data)
    total_accuracy = 0
    total_loss = 0
    pred = []
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = x_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        loss = sess.run(loss_operation, feed_dict={x: batch_x, y: batch_y})
        predictions = sess.run(prediction, feed_dict={x: batch_x, y: batch_y})
        pred.append(predictions)
        total_accuracy += (accuracy * len(batch_x))
        total_loss += (loss * len(batch_x))
    return total_accuracy / num_examples, pred, total_loss / num_examples
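
A small optional tweak, not in the original post: the three separate sess.run calls above each execute a full forward pass over the batch, so evaluation does roughly three times the necessary work. They can be fused into a single call:

# Equivalent fetches in one forward pass per batch:
#   accuracy, loss, predictions = sess.run(
#       [accuracy_operation, loss_operation, prediction],
#       feed_dict={x: batch_x, y: batch_y})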


# Train the model
val_accuracy = []
train_accuracy = []
val_loss = []
train_loss = []
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(x_train)
    print('Training...')
    for i in range(EPOCHS):
        x_train, y_train = shuffle(x_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = x_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        validation_accuracy, _, validation_loss = evaluate(x_valid, y_valid)
        training_accuracy, _, training_loss = evaluate(x_train, y_train)
        val_accuracy.append(validation_accuracy)
        val_loss.append(validation_loss)
        train_accuracy.append(training_accuracy)
        train_loss.append(training_loss)
        print('EPOCH{}...'.format(i+1))
        print('Validation Accuracy = {:.3f}'.format(validation_accuracy))
        print()
    saver.save(sess, './lenet')
    print('Model saved')
# Plot the accuracy and loss curves
fig = plt.figure(figsize=(20, 10))
a = fig.add_subplot(121)
line_one, = plt.plot(val_accuracy, label='Validation')
line_two, = plt.plot(train_accuracy, label='Training')
plt.ylabel('Accuracy values')
plt.xlabel('No. of epochs')
plt.legend(handles=[line_one, line_two])
a = fig.add_subplot(122)
line_one, = plt.plot(val_loss, label='Validation')
line_two, = plt.plot(train_loss, label='Training')
plt.ylabel('Loss values')
plt.xlabel('No. of epochs')
a.legend(handles=[line_one, line_two])
plt.show()

with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    test_accuracy, pred, _ = evaluate(x_test, y_test)
    print('Test Accuracy = {:.3f}'.format(test_accuracy))

# Per-class precision and recall on the test set
import itertools
new_pred = list(itertools.chain.from_iterable(pred))  # flatten the per-batch prediction arrays
new_pred2 = np.array(new_pred)  # convert to a NumPy array for sklearn
print(len(y_test), len(new_pred2))
from sklearn.metrics import classification_report
print(classification_report(y_test, new_pred2))
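
If you also want to see which signs get mistaken for which, a confusion matrix complements the per-class report (an optional extra, not in the original post):

from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, new_pred2)  # rows: true labels, columns: predicted labels
print(cm.shape)  # (43, 43) -- one row/column per sign class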
