Learning TensorFlow

Linear Regression

import matplotlib.pyplot as plt  # for plotting
import numpy as np  # for numerical arrays
import tensorflow.compat.v1 as tf

def prepare_data():
    # Use np.linspace to generate 100 evenly spaced points in [-1, 1]
    x_data = np.linspace(-1, 1, 100)
    # y = 2x + 1 + noise, where the noise has the same shape as x_data
    y_data = 2 * x_data + 1.0 + np.random.randn(*x_data.shape) * 0.4

    # Scatter plot of the generated data:
    # plt.scatter(x_data, y_data)
    # Plot the target linear function y = 2x + 1:
    # plt.plot(x_data, 2 * x_data + 1.0, color='red', linewidth=3)
    # plt.show()
    return x_data, y_data

def model(x, w, b):
    # Linear model: y = w * x + b
    return tf.multiply(x, w) + b

def train(x_data, y_data, sess, optimizer, loss_function):
    # Train for train_epochs rounds with SGD (one sample per step)
    loss = 0.0
    for epoch in range(train_epochs):
        for xs, ys in zip(x_data, y_data):
            _, loss = sess.run([optimizer, loss_function], feed_dict={x: xs, y: ys})
        print("Epoch %d, loss: %f" % (epoch + 1, loss))
        b0temp = b.eval(session=sess)
        w0temp = w.eval(session=sess)
        plt.plot(x_data, w0temp * x_data + b0temp)  # plot the current fitted line
    plt.show()

def infer(x_test):
    predict = sess.run(pred, feed_dict={x: x_test})
    print("Predicted value: %f" % predict)
    target = 2 * x_test + 1.0
    print("Target value: %f" % target)

tf.disable_eager_execution()
# Number of training epochs
train_epochs = 10
# Learning rate
learning_rate = 0.05
# Granularity for displaying the loss value
display_step = 10
# Slope of the linear function, variable w
w = tf.Variable(1.0, name="w0")
# Intercept of the linear function, variable b
b = tf.Variable(0.0, name="b0")
# Placeholders for the training data: x is the feature, y is the label
x = tf.placeholder("float", name="x")
y = tf.placeholder("float", name="y")
if __name__ == '__main__':
    x_data, y_data = prepare_data()
    # pred is the predicted value (forward pass)
    pred = model(x, w, b)
    # Mean squared error as the loss function
    loss_function = tf.reduce_mean(tf.square(y - pred))
    # Gradient descent optimizer
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    train(x_data, y_data, sess, optimizer, loss_function)

    infer(3.21)
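
Since the data were generated from y = 2x + 1 plus noise, a quick sanity check on the trained w and b is the closed-form least-squares solution. Below is a standalone sketch that regenerates the same synthetic data with numpy; it is not part of the script above:

import numpy as np

x_data = np.linspace(-1, 1, 100)
y_data = 2 * x_data + 1.0 + np.random.randn(*x_data.shape) * 0.4

# Solve min ||A @ [w, b] - y||^2, where A has columns [x, 1]
A = np.stack([x_data, np.ones_like(x_data)], axis=1)
(w_hat, b_hat), *_ = np.linalg.lstsq(A, y_data, rcond=None)
print("least-squares w: %.3f, b: %.3f" % (w_hat, b_hat))  # roughly 2 and 1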


Boston Housing Price Prediction

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
import tensorflow.compat.v1 as tf

def prepare_data():
    # Read the data file
    df = pd.read_csv("data/boston.csv", header=0)
    # Convert the DataFrame to a numpy array
    df = np.array(df.values)
    # Min-max (0-1) normalization of the feature columns 0..11
    # (a vectorized version is sketched after this script)
    for i in range(12):
        df[:, i] = (df[:, i] - df[:, i].min()) / (df[:, i].max() - df[:, i].min())
    # x_data: the first 12 (normalized) feature columns
    x_data = df[:, :12]
    # y_data: the last column, the label
    y_data = df[:, 12]

    return x_data, y_data

# w and x are matrices here, so use matmul (not multiply or *)
def model(x, w, b):
    return tf.matmul(x, w) + b

def train():
    loss_list = []  # record the average loss of each epoch
    xvalues, yvalues = x_data, y_data
    for epoch in range(train_epochs):
        loss_sum = 0.0
        for xs, ys in zip(xvalues, yvalues):
            xs = xs.reshape(1, 12)
            ys = ys.reshape(1, 1)
            _, loss = sess.run([optimizer, loss_function], feed_dict={x: xs, y: ys})
            loss_sum = loss_sum + loss
        # Reshuffle the sample order so the next epoch does not see the
        # samples in the same sequence
        xvalues, yvalues = shuffle(xvalues, yvalues)

        b0temp = b.eval(session=sess)
        w0temp = w.eval(session=sess)
        loss_average = loss_sum / len(y_data)
        loss_list.append(loss_average)  # one entry per epoch
        print("epoch=", epoch + 1, "loss=", loss_average, "b=", b0temp, "w=", w0temp)
    plt.plot(loss_list)
    plt.show()

def infer(n):
    x_test = x_data[n]
    x_test = x_test.reshape(1, 12)
    predict = sess.run(pred, feed_dict={x: x_test})
    print("Predicted value: %f" % predict)

    target = y_data[n]
    print("Label value: %f" % target)

tf.disable_eager_execution()

if __name__ == '__main__':
    x_data, y_data = prepare_data()

    # Define a name scope for the model graph
    with tf.name_scope("Model"):
        x = tf.placeholder(tf.float32, [None, 12], name="X")  # 12 feature columns
        y = tf.placeholder(tf.float32, [None, 1], name="Y")   # 1 label column
        # w is initialized to random values of shape (12, 1)
        w = tf.Variable(tf.random_normal([12, 1], stddev=0.01), name="W")
        # b is initialized to 1.0
        b = tf.Variable(1.0, name="b")
        # Forward pass: the prediction node
        pred = model(x, w, b)

    # Number of training epochs
    train_epochs = 50
    # Learning rate
    learning_rate = 0.01
    # Loss function: mean squared error
    loss_function = tf.reduce_mean(tf.pow(y - pred, 2))
    # Gradient descent optimizer
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)
    sess = tf.Session()
    # Initialize all variables
    init = tf.global_variables_initializer()
    sess.run(init)

    train()
    infer(348)
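
The per-column loop in prepare_data works, but the same min-max normalization can be written as one broadcasted numpy expression. A minimal vectorized sketch, assuming df is the numpy array built from boston.csv as above:

import numpy as np

def normalize_features(df):
    # Min-max normalize the 12 feature columns in one broadcasted step
    features = df[:, :12]
    col_min = features.min(axis=0)  # per-column minima, shape (12,)
    col_max = features.max(axis=0)  # per-column maxima, shape (12,)
    x_data = (features - col_min) / (col_max - col_min)
    y_data = df[:, 12]  # the label column
    return x_data, y_data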

Handwritten Digit Recognition (MNIST)

import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
import input_data

def train():
    # Training loop
    for epoch in range(train_epochs):
        for batch in range(total_batch):
            xs, ys = mnist.train.next_batch(batch_size)  # fetch a batch of data
            sess.run(optimizer, feed_dict={x: xs, y: ys})  # run one training step

        # After total_batch batches, compute loss and accuracy on the
        # validation data; the validation set is not batched
        loss, acc = sess.run([loss_function, accuracy],
                             feed_dict={x: mnist.validation.images, y: mnist.validation.labels})
        # Print training progress
        if (epoch + 1) % display_step == 0:
            print("Train Epoch:", '%02d' % (epoch + 1), "Loss=", "{:.9f}".format(loss),
                  " Accuracy=", "{:.4f}".format(acc))

    print("Train Finished!")

def accu(sess):
    # Test set accuracy
    accu_test = sess.run(accuracy,
                         feed_dict={x: mnist.test.images, y: mnist.test.labels})
    print("Test Accuracy:", accu_test)

    # Validation set accuracy
    accu_validation = sess.run(accuracy,
                               feed_dict={x: mnist.validation.images, y: mnist.validation.labels})
    print("Validation Accuracy:", accu_validation)

    # Training set accuracy
    accu_train = sess.run(accuracy,
                          feed_dict={x: mnist.train.images, y: mnist.train.labels})
    print("Train Accuracy:", accu_train)

tf.disable_eager_execution()


def plot_image(image):
    plt.imshow(image.reshape(28, 28), cmap='binary')
    plt.show()

def plot_images_labels_prediction(images,  # list of images
                                  labels,  # list of labels
                                  prediction,  # list of predicted values
                                  index,  # start displaying from this index
                                  num=10):  # show 10 images by default
    fig = plt.gcf()  # get the current figure
    fig.set_size_inches(10, 12)  # figure size in inches
    if num > 25:
        num = 25  # show at most 25 subplots
    for i in range(0, num):
        ax = plt.subplot(5, 5, i + 1)  # select the subplot to draw on

        ax.imshow(np.reshape(images[index], (28, 28)),  # show the image at position index
                  cmap='binary')

        title = "label=" + str(np.argmax(labels[index]))  # build the subplot title
        if len(prediction) > 0:
            title += ",predict=" + str(prediction[index])

        ax.set_title(title, fontsize=10)  # show the title
        ax.set_xticks([])  # hide the axis ticks
        ax.set_yticks([])
        index += 1
    plt.show()

if __name__ == '__main__':
    # MNIST download notes: https://blog.csdn.net/Skymelu/article/details/105567348
    mnist = input_data.read_data_sets("data/mnist/", one_hot=True)

    # Placeholders for x and y
    # Each MNIST image has 28*28 = 784 pixels
    x = tf.placeholder(tf.float32, [None, 784], name="X")
    # Digits 0-9 => 10 classes
    y = tf.placeholder(tf.float32, [None, 10], name="Y")
    # Model variables
    W = tf.Variable(tf.random_normal([784, 10]), name="W")
    b = tf.Variable(tf.zeros([10]), name="b")

    forward = tf.matmul(x, W) + b  # forward pass (logits)
    pred = tf.nn.softmax(forward)  # softmax classification

    # Training parameters
    train_epochs = 50  # number of epochs
    batch_size = 100   # samples per batch
    total_batch = int(mnist.train.num_examples / batch_size)  # batches per epoch
    display_step = 1   # print frequency
    learning_rate = 0.01  # learning rate

    # Cross-entropy loss (a numerically safer variant is sketched after this script)
    loss_function = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))

    # Gradient descent optimizer
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)

    # Accuracy metric
    # Compare the predicted class tf.argmax(pred, 1) with the true class tf.argmax(y, 1)
    # argmax returns the index of the maximum value; axis 1 means along the columns
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Cast the booleans to floats and average them to get the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    sess = tf.Session()  # create the session
    init = tf.global_variables_initializer()  # variable initializer
    sess.run(init)

    train()
    accu(sess)

    # pred is a probability distribution over the 10 classes,
    # so take argmax to recover the digit 0-9
    prediction_result = sess.run(tf.argmax(pred, 1),
                                 feed_dict={x: mnist.test.images})
    plot_images_labels_prediction(mnist.test.images, mnist.test.labels, prediction_result, 10, 25)
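
One caveat about the loss above: -reduce_sum(y * tf.log(pred)) produces -inf (and then NaN gradients) whenever the softmax output contains an exact zero. A numerically safer sketch, reusing the forward logits and the y placeholder from the script, lets TensorFlow fuse the softmax and the cross-entropy into one stable op:

# Compute the cross-entropy directly from the logits; pred stays
# tf.nn.softmax(forward) and is then used only for prediction.
loss_function = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=forward, labels=y))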

CIFAR Dataset Prediction

import urllib.request
import tarfile
import tensorflow.compat.v1 as tf
import matplotlib.pyplot as plt
import os
import numpy as np
import pickle as p
from time import time

def load_CIFAR_batch(filename):
    """Load a single batch of CIFAR-10."""
    with open(filename, 'rb') as f:
        # Each sample consists of a label followed by the image data:
        # <1 x label><3072 x pixel> (3072 = 32x32x3)
        # ...
        # <1 x label><3072 x pixel>
        data_dict = p.load(f, encoding='bytes')
        images = data_dict[b'data']
        labels = data_dict[b'labels']

        # Reshape the raw data to BCWH (batch, channel, width, height)
        images = images.reshape(10000, 3, 32, 32)
        # TensorFlow expects image data as BWHC,
        # so move the channel axis C to the last dimension
        images = images.transpose(0, 2, 3, 1)

        labels = np.array(labels)

        return images, labels


def load_CIFAR_data(data_dir):
    """Load the full CIFAR-10 data set."""
    images_train = []
    labels_train = []
    for i in range(5):
        f = os.path.join(data_dir, 'data_batch_%d' % (i + 1))
        print('loading ', f)
        # load_CIFAR_batch() returns one batch of images and labels
        image_batch, label_batch = load_CIFAR_batch(f)
        images_train.append(image_batch)
        labels_train.append(label_batch)
        del image_batch, label_batch
    # Concatenate the five batches once, after the loop
    Xtrain = np.concatenate(images_train)
    Ytrain = np.concatenate(labels_train)

    Xtest, Ytest = load_CIFAR_batch(os.path.join(data_dir, 'test_batch'))
    print('finished loading CIFAR-10 data')

    # Return training images/labels and test images/labels
    return Xtrain, Ytrain, Xtest, Ytest

def plot_images_labels_prediction(images, labels, prediction, idx, num=10):
    fig = plt.gcf()
    fig.set_size_inches(12, 6)
    if num > 10:
        num = 10
    for i in range(0, num):
        ax = plt.subplot(2, 5, 1 + i)
        ax.imshow(images[idx], cmap='binary')

        title = str(i) + ',' + label_dict[labels[idx]]
        if len(prediction) > 0:
            title += '=>' + label_dict[prediction[idx]]

        ax.set_title(title, fontsize=10)

        idx += 1
    plt.show()

# Weight initializer
def weight(shape):
    # tf.Variable creates a variable that is updated during training;
    # initialize it with a truncated normal distribution, stddev 0.1
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name='W')

# Bias initializer
# Initialized to 0.1
def bias(shape):
    return tf.Variable(tf.constant(0.1, shape=shape), name='b')

# Convolution op
# Stride 1, padding 'SAME': the output keeps the input width and height
def conv2d(x, W):
    # tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, name=None)
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# Max-pooling op
# Stride 2: halves both the width and the height
def max_pool_2x2(x):
    # tf.nn.max_pool(value, ksize, strides, padding, name=None)
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
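
# Shape bookkeeping for the network defined below (a quick check, not graph code):
#   input   32x32x3
#   conv_1  (3x3, SAME, stride 1) -> 32x32x32,  pool_1 -> 16x16x32
#   conv_2  (3x3, SAME, stride 1) -> 16x16x64,  pool_2 -> 8x8x64
#   flatten -> 8*8*64 = 4096,  fc -> 128,  output -> 10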

# Label dictionary: the class name for each numeric label
label_dict = {0: "airplane", 1: "automobile", 2: "bird", 3: "cat", 4: "deer",
              5: "dog", 6: "frog", 7: "horse", 8: "ship", 9: "truck"}

tf.disable_eager_execution()

if __name__ == '__main__':
    # Download the data set if it is not already present
    url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    filepath = 'data/cifar-10-python.tar.gz'
    if not os.path.isfile(filepath):
        result = urllib.request.urlretrieve(url, filepath)
        print('downloaded:', result)
    else:
        print('Data file already exists.')

    # Extract the archive
    if not os.path.exists("data/cifar-10-batches-py"):
        tfile = tarfile.open("data/cifar-10-python.tar.gz", 'r:gz')
        result = tfile.extractall('data/')
        print('Extracted to ./data/cifar-10-batches-py/')
    else:
        print('Directory already exists.')

    # Load the data (only after the download/extract step above)
    data_dir = 'data/cifar-10-batches-py/'
    Xtrain, Ytrain, Xtest, Ytest = load_CIFAR_data(data_dir)

    # Show a few test images with their labels
    plot_images_labels_prediction(Xtest, Ytest, [], 1, 10)

    print('training data shape:', Xtrain.shape)
    print('training labels shape:', Ytrain.shape)
    print('test data shape:', Xtest.shape)
    print('test labels shape:', Ytest.shape)

    # Inspect the image data: the first pixel of the first image
    print(Xtrain[0][0][0])

    # Normalize pixel values to [0, 1]
    Xtrain_normalize = Xtrain.astype('float32') / 255.0
    Xtest_normalize = Xtest.astype('float32') / 255.0

    # Inspect the same pixel after normalization
    print(Xtrain_normalize[0][0][0])

    # Inspect the label data
    print(Ytrain[:10])

    # One-hot encode the labels
    from sklearn.preprocessing import OneHotEncoder

    encoder = OneHotEncoder(sparse=False)

    yy = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]
    encoder.fit(yy)
    Ytrain_reshape = Ytrain.reshape(-1, 1)
    Ytrain_onehot = encoder.transform(Ytrain_reshape)
    Ytest_reshape = Ytest.reshape(-1, 1)
    Ytest_onehot = encoder.transform(Ytest_reshape)

    # Inspect the encoded labels
    print(Ytrain_onehot.shape)
    print(Ytrain[:5])
    print(Ytrain_onehot[:5])
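    # For example, a label value of 3 becomes the 10-element vector
    # [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.], with a single 1 at the class index.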

    tf.reset_default_graph()

    # Define the network
    # Input layer
    # 32x32 images with 3 channels (RGB)
    with tf.name_scope('input_layer'):
        x = tf.placeholder('float', shape=[None, 32, 32, 3], name="x")

    # First convolutional layer
    # Input channels: 3, output channels: 32; with SAME padding the
    # image size stays 32x32
    with tf.name_scope('conv_1'):
        W1 = weight([3, 3, 3, 32])  # [k_width, k_height, input_chn, output_chn]
        b1 = bias([32])  # matches output_chn
        conv_1 = conv2d(x, W1) + b1
        conv_1 = tf.nn.relu(conv_1)

    # First pooling layer
    # Shrinks 32x32 to 16x16; pooling keeps the channel count at 32
    with tf.name_scope('pool_1'):
        pool_1 = max_pool_2x2(conv_1)

    # Second convolutional layer
    # Input channels: 32, output channels: 64; the image size stays 16x16
    with tf.name_scope('conv_2'):
        W2 = weight([3, 3, 32, 64])
        b2 = bias([64])
        conv_2 = conv2d(pool_1, W2) + b2
        conv_2 = tf.nn.relu(conv_2)

    # Second pooling layer
    # Shrinks 16x16 to 8x8; pooling keeps the channel count at 64
    with tf.name_scope('pool_2'):
        pool_2 = max_pool_2x2(conv_2)

    # Fully connected layer
    # Flatten the 64 8x8 feature maps from pool_2 into a vector of
    # length 64*8*8 = 4096, feeding 128 neurons
    with tf.name_scope('fc'):
        W3 = weight([4096, 128])  # 128 neurons
        b3 = bias([128])
        flat = tf.reshape(pool_2, [-1, 4096])
        h = tf.nn.relu(tf.matmul(flat, W3) + b3)
        h_dropout = tf.nn.dropout(h, keep_prob=0.8)

    # Output layer
    # 10 neurons, one per class 0-9; keep the raw logits separate so the
    # loss below can be computed from them
    with tf.name_scope('output_layer'):
        W4 = weight([128, 10])
        b4 = bias([10])
        forward = tf.matmul(h_dropout, W4) + b4
        pred = tf.nn.softmax(forward)

    with tf.name_scope("optimizer"):
        # Placeholder for the labels
        y = tf.placeholder("float", shape=[None, 10], name="label")
        # Loss function: softmax cross-entropy computed from the logits
        # (passing the already-softmaxed pred here would apply softmax twice)
        loss_function = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=forward, labels=y))
        # Optimizer
        optimizer = tf.train.AdamOptimizer(learning_rate=0.0001) \
            .minimize(loss_function)

    with tf.name_scope("evaluation"):
        correct_prediction = tf.equal(tf.argmax(pred, 1),
                                      tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    train_epochs = 25
    batch_size = 50
    total_batch = int(len(Xtrain) / batch_size)
    epoch_list = []
    accuracy_list = []
    loss_list = []

    # Non-trainable variable tracking the completed epochs, so training can resume
    epoch = tf.Variable(0, name='epoch', trainable=False)

    startTime = time()

    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)

    # Directory for checkpoint files
    ckpt_dir = "CIFAR10_log/"
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    # Create the saver
    saver = tf.train.Saver(max_to_keep=1)

    # If a checkpoint exists, restore the variables from the latest one
    ckpt = tf.train.latest_checkpoint(ckpt_dir)
    if ckpt is not None:
        saver.restore(sess, ckpt)  # restore all parameters
        # From here the model can be used for prediction, or training can continue
    else:
        print("Training from scratch.")

    # Resume from the recorded epoch
    start = sess.run(epoch)
    print("Training starts from {} epoch.".format(start + 1))


    def get_train_batch(number, batch_size):
        # Slice out the number-th batch of normalized images and one-hot labels
        return Xtrain_normalize[number * batch_size:(number + 1) * batch_size], \
            Ytrain_onehot[number * batch_size:(number + 1) * batch_size]

    # Training loop
    for ep in range(start, train_epochs):
        for i in range(total_batch):
            batch_x, batch_y = get_train_batch(i, batch_size)  # fetch a batch
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})  # one training step

            if i % 100 == 0:
                print("Step {}".format(i), "finished")

        # After total_batch batches, estimate loss and accuracy on the
        # last training batch (no separate validation set is used here)
        loss, acc = sess.run([loss_function, accuracy], feed_dict={x: batch_x, y: batch_y})
        epoch_list.append(ep + 1)
        loss_list.append(loss)
        accuracy_list.append(acc)

        # Print training progress
        print("Train Epoch:", '%02d' % (sess.run(epoch) + 1),
              "Loss = ", "{:.6f}".format(loss), "Accuracy = ", acc)

        # Save a checkpoint and record the completed epoch
        saver.save(sess, ckpt_dir + "CIFAR10_cnn_model.ckpt", global_step=ep + 1)
        sess.run(epoch.assign(ep + 1))

    # Report the total running time
    duration = time() - startTime
    print("Train Finished takes : ", duration)

    # Plot the loss curve
    fig = plt.gcf()
    fig.set_size_inches(4, 2)
    plt.plot(epoch_list, loss_list, label='loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['loss'], loc='upper right')
    # Plot the accuracy curve
    plt.plot(epoch_list, accuracy_list, label="accuracy")
    fig = plt.gcf()
    fig.set_size_inches(4, 2)
    plt.ylim(0.1, 1)
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend()
    plt.show()

    # Compute test-set accuracy batch by batch
    test_total_batch = int(len(Xtest_normalize) / batch_size)
    test_acc_sum = 0.0
    for i in range(test_total_batch):
        test_image_batch = Xtest_normalize[i * batch_size:(i + 1) * batch_size]
        test_label_batch = Ytest_onehot[i * batch_size:(i + 1) * batch_size]
        test_batch_acc = sess.run(accuracy, feed_dict={x: test_image_batch, y: test_label_batch})
        test_acc_sum += test_batch_acc
    test_acc = float(test_acc_sum / test_total_batch)
    print("Test accuracy:{:.6f}".format(test_acc))
    # Predict the first 10 test images
    test_pred = sess.run(pred, feed_dict={x: Xtest_normalize[:10]})
    prediction_result = sess.run(tf.argmax(test_pred, 1))
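    # One way to inspect the results: reuse the plot_images_labels_prediction
    # helper defined earlier to show the first 10 test images together with
    # their true labels and the predicted classes.
    plot_images_labels_prediction(Xtest, Ytest, prediction_result, 0, 10)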