Deep Learning: Implementing a Neuron and a Single-Layer Neural Network

Using TensorFlow 1.14 and the CIFAR-10 dataset.

Code

Initialization

import tensorflow as tf
import pickle as pk
import numpy as np
import os

CIFAR_DIR = './cifar-10-batches-py'

Dataset overview

data_batch_1 through data_batch_5 make up the training set; test_batch is the test set.

os.listdir(CIFAR_DIR)
Output
['batches.meta',
 'data_batch_1',
 'data_batch_2',
 'data_batch_3',
 'data_batch_4',
 'data_batch_5',
 'readme.html',
 'test_batch']
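
For reference, the human-readable class names live in batches.meta. A minimal sketch, assuming the standard CIFAR-10 python format, which stores them under the b'label_names' key:

with open(os.path.join(CIFAR_DIR, "batches.meta"), "rb") as f:
    meta = pk.load(f, encoding='bytes')  # same bytes decoding as the data batches
print(meta[b'label_names'])  # e.g. [b'airplane', b'automobile', ..., b'truck']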

Let's look at the images' shape, the labels, and so on.

with open(os.path.join(CIFAR_DIR, "data_batch_1"), "rb") as f:
    data = pk.load(f, encoding='bytes')  # pickle.load defaults to ASCII decoding; these files must be read as bytes
    print(type(data))  # <class 'dict'>
    print(data.keys())  # dict_keys([b'batch_label', b'labels', b'data', b'filenames'])
    
    print(type(data[b'batch_label']))  # <class 'bytes'>
    print(type(data[b'labels']))  # <class 'list'>
    print(type(data[b'data']))  # <class 'numpy.ndarray'>
    print(type(data[b'filenames']))  # <class 'list'>
    print("----------------------")
    print(data[b'batch_label'])  # b'training batch 1 of 5'
    print(data[b'labels'][0:2])  # [6, 9]
    print(data[b'data'].shape)  # 10000 images, each 32*32*3 = 3072 values
    print(data[b'data'][0:2])  
    print(data[b'filenames'][0:2])  # [b'leptodactylus_pentadactylus_s_000004.png', b'camion_s_000148.png']
Output
<class 'dict'>
dict_keys([b'batch_label', b'labels', b'data', b'filenames'])
<class 'bytes'>
<class 'list'>
<class 'numpy.ndarray'>
<class 'list'>
----------------------
b'training batch 1 of 5'
[6, 9]
(10000, 3072)
[[ 59  43  50 ... 140  84  72]
 [154 126 105 ... 139 142 144]]
[b'leptodactylus_pentadactylus_s_000004.png', b'camion_s_000148.png']

Let's open one image and take a look; the resolution is a bit low…

img_arr = data[b'data'][6666]
img_arr = img_arr.reshape((3, 32, 32))  # each image is 3072 values: the first 32*32 are the R channel, then G, then B, so split into 3 channels first, then reshape each to 32*32
img_arr = img_arr.transpose((1, 2, 0))  # transpose to (32, 32, 3); now each row of 32 pixels carries 3 channel values, and 32 rows make one image

import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
%matplotlib inline
plt.imshow(img_arr)

(Figure: the 32×32 CIFAR-10 image rendered by plt.imshow)
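
The same split-then-transpose trick works on the whole batch at once; a small sketch:

all_imgs = data[b'data'].reshape(-1, 3, 32, 32)  # (10000, 3, 32, 32), channels first
all_imgs = all_imgs.transpose(0, 2, 3, 1)        # (10000, 32, 32, 3), channels last
plt.imshow(all_imgs[6666])                       # same image as above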

Data preprocessing

  • Define a class for data preprocessing
  • Set up the training dataset

This could also be implemented with the tf.data.Dataset API; a rough sketch follows.
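
A hedged sketch (TF 1.14), where images and labels stand for the numpy arrays that the preprocessing below produces:

def make_dataset(images, labels, batch_size=20, shuffle=True):
    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    if shuffle:
        ds = ds.shuffle(buffer_size=len(labels))  # reshuffle each epoch
    ds = ds.batch(batch_size).repeat()  # cycle through the data indefinitely
    return ds.make_one_shot_iterator().get_next()  # (batch_images, batch_labels) tensors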

def load_data(filename):
    """Read the image data and labels from one batch file."""
    with open(filename, 'rb') as f:
        data = pk.load(f, encoding='bytes')
        return data[b'data'], data[b'labels']

class CifarData:
    """Handles the CIFAR-10 data."""
    def __init__(self, filenames, need_shuffle):
        """
        filenames: the training or test batch files
        need_shuffle: whether to reshuffle after one full pass over the data.
                      The test set does not need shuffling.
        """
        all_data = []  # stores the individual image vectors
        all_labels = []  # stores the image labels
        for filename in filenames:
            data, labels = load_data(filename)
            for item, label in zip(data, labels):
                if label in [0, 1]:  # binary classification: keep only labels 0 and 1
                    all_data.append(item)  # add the images whose label is 0 or 1
                    all_labels.append(label)

        self._data = np.vstack(all_data)  # stack all_data into a (num_images, 3072) matrix
#         self._data = self._data / 255
        self._data = self._data / 127.5 - 1  # normalize into the range [-1, 1]
        print(self._data.shape)  # test

        self._labels = np.hstack(all_labels)  # flatten into one long vector
        print(self._labels.shape)  # test

        self._num_examples = self._data.shape[0]  # number of images
        self._need_shuffle = need_shuffle  # shuffle the training set (helps generalization); the test set doesn't need it
        self._indicator = 0  # current position: batches are fed to the graph one at a time, and _indicator marks how far we have iterated
        if self._need_shuffle:
            self._shuffle_data()

    def _shuffle_data(self):
        """Shuffle the data."""
        p = np.random.permutation(self._num_examples)  # a random permutation of 0 ... _num_examples
        self._data = self._data[p]  # reorder the data by the random permutation
        self._labels = self._labels[p]  # apply the same shuffle as _data

    def next_batch(self, batch_size):  # number of examples per batch
        """Return batch_size examples as a batch."""
        if batch_size > self._num_examples:  # the batch is bigger than the whole dataset
            raise Exception("batch size is larger than all examples")

        end_indicator = self._indicator + batch_size  # end position of this batch
        if end_indicator > self._num_examples:
            if self._need_shuffle:  # shuffling allowed: reshuffle and reuse the data
                self._shuffle_data()
                self._indicator = 0
                end_indicator = batch_size  # 0 + batch_size
            else:  # no more data
                raise Exception("have no more examples. if this is the first batch, then the batch size is larger than all examples.")

        batch_data = self._data[self._indicator: end_indicator]  # batch_size worth of data
        batch_labels = self._labels[self._indicator: end_indicator]  # the matching labels
#         self._indicator += batch_size
        self._indicator = end_indicator  # advance the indicator
        return batch_data, batch_labels

train_filenames = [os.path.join(CIFAR_DIR, "data_batch_%d" % i) for i in range(1, 6)]
test_filenames = [os.path.join(CIFAR_DIR, "test_batch")]

train_data = CifarData(train_filenames, need_shuffle=True)
test_data = CifarData(test_filenames, need_shuffle=False)  # the test set does not need shuffling

############## test ###############
batch_data, batch_labels = train_data.next_batch(10)
print(batch_data.shape)
print(batch_data)
print(batch_labels)
Output
(10000, 3072)  # training data
(10000,)  # training labels
(2000, 3072)  # test data
(2000,)  # test labels
(10, 3072)  # one batch of 10 images
[[ 1.          1.          1.         ...  1.          1.
   1.        ]
 [-0.35686275 -0.43529412 -0.42745098 ... -0.03529412  0.12156863
   0.12156863]
 [ 0.09019608  0.01960784  0.12156863 ... -0.82745098 -0.85882353
  -0.54509804]
 ...
 [ 0.05098039  0.15294118  0.10588235 ...  0.33333333  0.31764706
   0.30196078]
 [ 0.08235294 -0.12941176 -0.12156863 ...  0.27843137  0.24705882
   0.23921569]
 [ 0.5372549   0.42745098  0.56862745 ... -0.25490196 -0.25490196
  -0.2627451 ]]
[1 0 0 0 0 0 0 0 1 1]

Building the model graph

  1. Define the input data $x$ and its labels $y$
  2. Initialize the weights $W$ (the parameters to be trained) and the bias $b$
  3. Compute $x \cdot W + b$ and convert the result to a probability
  4. Define the loss function and the accuracy
  5. Define an optimizer that minimizes the loss
x = tf.placeholder(tf.float32, [None, 3072])  # images
y = tf.placeholder(tf.int64, [None])  # image labels
# None means the number of images is not fixed yet; it can vary because the batch size is adjustable
# placeholders reserve slots so the graph structure can be built first


# shape [3072, 1]
w = tf.get_variable('w', [x.get_shape()[-1], 1],  # first dim matches the last dim of x
                   initializer = tf.random_normal_initializer(0, 1))  # initialize from a normal distribution
# shape [1, ]
b = tf.get_variable('b', [1],  # bias; its size matches the second dim of w
                   initializer = tf.constant_initializer(0.0))  # initialize with a constant


# shape [None, 3072] * [3072, 1] = [None, 1]
y_ = tf.matmul(x, w) + b  # matrix multiply
# shape [None, 1]
predict_y_1 = tf.nn.sigmoid(y_)  # turn y_ into a probability
# shape [None] -> [None, 1]
y_reshaped = tf.reshape(y, (-1, 1))  # reshape to the same shape so they can be compared
y_reshaped_float = tf.cast(y_reshaped, tf.float32)  # TF is strict about dtypes


# mean squared error
loss = tf.reduce_mean(tf.square(y_reshaped_float - predict_y_1))


# accuracy
predict = predict_y_1 > 0.5  # bool tensor, like [True, False, True, True, ...]
correct_prediction = tf.equal(tf.cast(predict, tf.int64), y_reshaped)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))

# define a gradient-descent training op
with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)  # learning rate 0.001
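
As an aside, cross-entropy pairs more naturally with a sigmoid output than MSE does; a sketch of a drop-in alternative (tf.nn.sigmoid_cross_entropy_with_logits applies the sigmoid internally, so it takes the raw logits y_, not predict_y_1):

ce_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=y_reshaped_float, logits=y_))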

Neuron

init = tf.global_variables_initializer()  # op that runs the initialization
batch_size = 20  # feed 20 images at a time
train_steps = 100000  # number of training steps
test_steps = 100  # the test set has 2000 images; at 20 per batch that is exactly 100 batches (the data isn't shuffled, so this must match exactly)
# a single neuron
with tf.Session() as sess:
    sess.run(init)
    for i in range(train_steps):
        batch_data, batch_labels = train_data.next_batch(batch_size)
        loss_val, acc_val, _ = sess.run(
            [loss, accuracy, train_op],  # compute loss and accuracy, then train; without train_op this would only evaluate, not train
            feed_dict={
                x: batch_data,
                y: batch_labels,
            })
        
        if (i+1) % 500 == 0:
            print("[Train] Step: %d, loss: %4.5f, acc: %4.5f" \
                 % (i+1, loss_val, acc_val))
            
        # check accuracy on the test set
        if (i+1) % 5000 == 0:
            test_data = CifarData(test_filenames, need_shuffle=False)  # without shuffling the data gets used up, so recreate CifarData each time
            all_test_acc_val = []  # collect the per-batch accuracies
            for j in range(test_steps):
                test_batch_data, test_batch_labels = test_data.next_batch(batch_size)
                test_acc_val = sess.run(
                    [accuracy], feed_dict = {
                        x: test_batch_data,
                        y: test_batch_labels
                    }
                )
                all_test_acc_val.append(test_acc_val)
            
            test_acc = np.mean(all_test_acc_val)
            print("[Test] Step: %d, acc: %4.5f" % (i+1, test_acc))
Training output
[Train] Step: 500, loss: 0.24986, acc: 0.75000
[Train] Step: 1000, loss: 0.24355, acc: 0.75000
[Train] Step: 1500, loss: 0.15000, acc: 0.85000
[Train] Step: 2000, loss: 0.33586, acc: 0.65000
[Train] Step: 2500, loss: 0.10048, acc: 0.90000
[Train] Step: 3000, loss: 0.39996, acc: 0.60000
[Train] Step: 3500, loss: 0.14202, acc: 0.85000
[Train] Step: 4000, loss: 0.14709, acc: 0.85000
[Train] Step: 4500, loss: 0.20128, acc: 0.80000
[Train] Step: 5000, loss: 0.10128, acc: 0.90000
(2000, 3072)
(2000,)
.......
(2000,)
[Test] Step: 95000, acc: 0.70000
[Train] Step: 95500, loss: 0.25005, acc: 0.75000
[Train] Step: 96000, loss: 0.05000, acc: 0.95000
[Train] Step: 96500, loss: 0.20000, acc: 0.80000
[Train] Step: 97000, loss: 0.05003, acc: 0.95000
[Train] Step: 97500, loss: 0.25051, acc: 0.75000
[Train] Step: 98000, loss: 0.10230, acc: 0.90000
[Train] Step: 98500, loss: 0.19962, acc: 0.80000
[Train] Step: 99000, loss: 0.15157, acc: 0.85000
[Train] Step: 99500, loss: 0.20032, acc: 0.80000
[Train] Step: 100000, loss: 0.10221, acc: 0.90000
(2000, 3072)
(2000,)
[Test] Step: 100000, acc: 0.90000

Single-layer neural network

We need to modify the original CifarData constructor so that it no longer keeps only two of the labels.

class CifarData:
    def __init__(self, filenames, need_shuffle):
        all_data = []
        all_labels = []
        for filename in filenames:  # data covering all ten classes
            data, labels = load_data(filename)
            all_data.append(data)
            all_labels.append(labels)
        self._data = np.vstack(all_data)  # stack all_data into a (num_images, 3072) matrix
        self._data = self._data / 127.5 - 1  # normalize
        self._labels = np.hstack(all_labels) 
        self._num_examples = self._data.shape[0]
        self._need_shuffle = need_shuffle
        self._indicator = 0
        if self._need_shuffle:
            self._shuffle_data()

The model graph needs its loss function and accuracy updated.

x = tf.placeholder(tf.float32, [None, 3072])  # None: the number of images is not fixed
y = tf.placeholder(tf.int64, [None])

# shape: [3072, 10]
w = tf.get_variable('w', [x.get_shape()[-1], 10], 
                   initializer = tf.random_normal_initializer(0, 1))

# shape: [10, ]
b = tf.get_variable('b', [10],
                   initializer = tf.random_normal_initializer(0, 1))

# shape: [None, 3072] * [3072, 10] = [None, 10]
y_ = tf.matmul(x, w) + b

# squared-error loss (kept for reference)
"""
# something like [[0.01, 0.9, ..., 0.03], [...], ...]; each row holds 10 values, the probability of each label
predict_y = tf.nn.softmax(y_) # e^x / sum(e^x) 
y_one_hot = tf.one_hot(y, depth=10, dtype=tf.float32)  # convert to one-hot encoding
loss = tf.reduce_mean(tf.square(y_one_hot - predict_y))
"""
# cross-entropy loss
loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
# steps performed internally: 1. softmax(y_); 2. one_hot(y); 3. loss = -sum(y * log(y_))

# shape: [None,]
predict = tf.argmax(y_, 1)  # prediction: the index of the largest value along y_'s second dimension
correct_prediction = tf.equal(predict, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))

with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
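
To make the three steps in the comment above concrete, here is the same loss written out by hand (a sketch; the fused op is more numerically stable, so tiny differences are expected):

probs = tf.nn.softmax(y_)  # 1. softmax over the 10 logits
y_hot = tf.one_hot(y, depth=10, dtype=tf.float32)  # 2. one-hot encode the labels
manual_loss = tf.reduce_mean(  # 3. loss = -sum(y * log(y_))
    -tf.reduce_sum(y_hot * tf.log(probs + 1e-10), axis=1))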

The training code does not need to change.
If you hit an error, see the notes under "Other" below.

Notes

  • Images should generally be normalized: raw pixel values are large, so a sigmoid fed such inputs saturates toward one side, which ultimately causes vanishing gradients (see the sketch below).
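
A tiny numeric sketch of that saturation effect:

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

for z in [1.0, 10.0, 100.0]:  # un-normalized pixel inputs can push z this high
    s = sigmoid(z)
    print(z, s, s * (1 - s))  # sigmoid'(z) = s(1-s) shrinks toward 0 as z grows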

Other

  • Dataset download
    Link: https://pan.baidu.com/s/1sI90OHBjWvuJZDmmdj9pFA
    Extraction code: cnwp
  • InternalError: 2 root error(s) found. Both are Internal: Blas GEMM launch failed
    Solution: add
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = 0.5)
    config = tf.ConfigProto(allow_soft_placement = True, gpu_options = gpu_options)
    config.gpu_options.allow_growth = True
     
    sess0 = tf.InteractiveSession(config = config)
    