tensorflow编程实战

1:线性回归模型

下列代码中,首先生成了一组训练数据,然后定义了一个线性回归模型,并使用梯度下降优化器来最小化损失函数。在训练过程中,将训练数据逐个输入模型,并进行参数更新。最后,使用训练好的模型对测试数据进行预测,并可视化结果。

import tensorflow as tf
import numpy as np

# BUG FIX: tf.placeholder / tf.Session / tf.train.* were removed from the
# default namespace in TF 2.x; run the original graph-mode workflow through
# the compat.v1 shim with eager execution disabled.
tf.compat.v1.disable_eager_execution()

# Generate noisy linear training data: y = 2x + Gaussian noise (sigma=0.3).
x_train = np.linspace(-1, 1, 100)
y_train = 2 * x_train + np.random.randn(*x_train.shape) * 0.3

# Graph inputs and trainable parameters of the line y = W*x + b.
X = tf.compat.v1.placeholder(tf.float32)
Y = tf.compat.v1.placeholder(tf.float32)
W = tf.Variable(tf.random.normal([1]), name="weight")
b = tf.Variable(tf.zeros([1]), name="bias")

# Linear model prediction.
Y_pred = tf.add(tf.multiply(X, W), b)

# Mean-squared-error loss.
cost = tf.reduce_mean(tf.square(Y_pred - Y))

# Plain gradient descent on the loss.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

# Variable initializer op.
init = tf.compat.v1.global_variables_initializer()

# Train: one SGD step per sample (stochastic, not mini-batch), 20 epochs.
with tf.compat.v1.Session() as sess:
    sess.run(init)
    for epoch in range(20):
        for x, y in zip(x_train, y_train):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        if (epoch + 1) % 10 == 0:
            # Report the loss over the full training set, not a single sample.
            c = sess.run(cost, feed_dict={X: x_train, Y: y_train})
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))
    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: x_train, Y: y_train})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b))
    # Visualize the fitted line against the raw data.
    import matplotlib.pyplot as plt
    plt.plot(x_train, y_train, 'ro', label='Original data')
    plt.plot(x_train, sess.run(W) * x_train + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

(运行结果截图:此处原有插图,提取时丢失)

2:实现卷积神经网络做图像分类

使用CIFAR-10数据集,该数据集包含10个类别的60000张32x32的彩色图像。我们使用了Conv2D层和MaxPooling2D层来构建卷积神经网络,以及Flatten层和全连接层来进行分类。我们使用SparseCategoricalCrossentropy损失函数,这个损失函数适用于有多个类别的分类问题。我们使用fit函数训练模型,并使用evaluate函数在测试集上评估模型的准确性

import tensorflow as tf
from tensorflow.keras import datasets, layers, models

# Load CIFAR-10: 60000 32x32 RGB images spread over 10 classes.
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()

# Scale pixel values from [0, 255] into [0, 1].
train_images, test_images = train_images / 255.0, test_images / 255.0

# Three conv blocks followed by a small dense classification head.
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10),  # raw logits; softmax is folded into the loss below
])

# from_logits=True because the final layer emits unnormalized logits.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# Train for 10 epochs, validating on the held-out test split each epoch.
history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))

# Final accuracy on the test set.
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(test_acc)

(运行结果截图:此处原有插图,提取时丢失)

3:LSTM实现情感分类

使用IMDB数据集,该数据集包含50000个电影评论,其中每个评论都被标记为正面或负面。使用LSTM模型,处理每个评论的词向量序列,并根据评论的情感进行分类。使用train_test_split函数将数据集分为训练集和测试集,并使用fit函数训练模型。最后,对新的评论进行情感分类,并输出分类结果。在程序运行时,可以看到分类结果,
如果新评论是正面的,程序会输出

This is a positive review.

否则输出

This is a negative review.
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.layers import Embedding, LSTM, Dense
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt

# Load the IMDB review dataset, keeping only the 10000 most frequent words,
# and pad/truncate every review to a fixed length of 500 tokens.
max_features = 10000
maxlen = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

# Embedding -> single LSTM -> sigmoid binary sentiment classifier.
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(LSTM(32, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train, validating on the test split each epoch.
history = model.fit(x_train, y_train, batch_size=64, epochs=5, validation_data=(x_test, y_test))

# BUG FIX: with metrics=['accuracy'], TF 2.x records history under the keys
# 'accuracy' / 'val_accuracy'; the original 'acc' / 'val_acc' raise KeyError.
plt.plot(history.history['accuracy'], label='train_acc')
plt.plot(history.history['val_accuracy'], label='val_acc')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Classify a new review. Fixes over the original:
# - fetch imdb.get_word_index() once instead of twice per word;
# - lowercase tokens: the word index is all-lowercase, so "This" was silently
#   dropped before;
# - shift indices by +3 (load_data reserves 0-2 for padding/start/unknown)
#   and drop words whose shifted index falls outside the num_words vocabulary.
word_index = imdb.get_word_index()
new_review = "This movie is terrible and boring"
token_ids = [word_index[w] + 3 for w in new_review.lower().split() if w in word_index]
token_ids = [i for i in token_ids if i < max_features]
new_review = sequence.pad_sequences([token_ids], maxlen=maxlen)
prediction = model.predict(new_review)[0][0]

# Report the predicted sentiment.
if prediction >= 0.5:
    print("This is a positive review.")
else:
    print("This is a negative review.")

(运行结果截图:此处原有插图,提取时丢失)

4:用GAN生成手写数字

使用MNIST数据集,该数据集包含手写数字图像。所以使用GAN模型,生成手写数字图像。定义生成器模型、判别器模型和GAN模型,并使用Adam优化器和二元交叉熵损失函数

import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten
from tensorflow.keras.layers import BatchNormalization, Activation, Conv2DTranspose, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
import numpy as np
import matplotlib.pyplot as plt
# 定义生成器模型
def build_generator(z_dim):
    """Build the MLP generator: noise vector (z_dim,) -> 28x28x1 image in [-1, 1]."""
    net = Sequential()
    # Three widening Dense->BatchNorm->ReLU blocks.
    for i, units in enumerate((256, 512, 1024)):
        if i == 0:
            net.add(Dense(units, input_dim=z_dim))
        else:
            net.add(Dense(units))
        net.add(BatchNormalization())
        net.add(Activation('relu'))
    # tanh output matches training images scaled to [-1, 1].
    net.add(Dense(28*28, activation='tanh'))
    net.add(Reshape((28, 28, 1)))
    return net
# 定义判别器模型
def build_discriminator(img_shape):
    """Build the CNN discriminator: image -> probability that it is real."""
    net = Sequential()
    # First downsampling conv block (no BatchNorm directly on the input).
    net.add(Conv2D(32, kernel_size=3, strides=2, input_shape=img_shape, padding='same'))
    net.add(Activation('relu'))
    # Two further strided conv blocks with BatchNorm.
    for filters in (64, 128):
        net.add(Conv2D(filters, kernel_size=3, strides=2, padding='same'))
        net.add(BatchNormalization())
        net.add(Activation('relu'))
    # Flatten and squash to a single real/fake probability.
    net.add(Flatten())
    net.add(Dense(1, activation='sigmoid'))
    return net
# 定义GAN模型
def build_gan(generator, discriminator):
    """Stack generator + frozen discriminator into the combined GAN model.

    Freezing the discriminator here means that training the combined model
    updates only the generator's weights.
    """
    # NOTE(review): trainable takes effect at compile time in Keras — the
    # discriminator must be compiled for its own training BEFORE this call,
    # otherwise it is compiled frozen and never learns. Verify the call order
    # at the call site.
    discriminator.trainable = False
    model = Sequential()
    model.add(generator)
    model.add(discriminator)
    return model
# Load MNIST digits and scale pixels to [-1, 1] to match the generator's tanh.
(x_train, y_train), (_, _) = mnist.load_data()
x_train = (x_train.astype(np.float32) - 127.5) / 127.5
x_train = np.expand_dims(x_train, axis=3)

# Hyperparameters: latent dimension and image shape.
z_dim = 100
img_shape = (28, 28, 1)

# Build the models.
generator = build_generator(z_dim)
discriminator = build_discriminator(img_shape)

# BUG FIX: compile the discriminator BEFORE build_gan() sets
# discriminator.trainable = False. Keras freezes trainability at compile
# time, so the original order compiled the discriminator frozen and it
# never learned.
discriminator.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
gan = build_gan(generator, discriminator)
gan.compile(loss='binary_crossentropy', optimizer=Adam())

# Adversarial training loop.
epochs = 10000
batch_size = 128
sample_interval = 1000
for epoch in range(epochs):
    # Sample a random batch of real images.
    idx = np.random.randint(0, x_train.shape[0], batch_size)
    real_imgs = x_train[idx]
    # Sample latent noise and generate a batch of fake images.
    noise = np.random.normal(0, 1, (batch_size, z_dim))
    fake_imgs = generator.predict(noise)
    # Train the discriminator: real images -> 1, fake images -> 0.
    d_loss_real = discriminator.train_on_batch(real_imgs, np.ones((batch_size, 1)))
    d_loss_fake = discriminator.train_on_batch(fake_imgs, np.zeros((batch_size, 1)))
    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
    # Train the generator (via the frozen-discriminator GAN) to make the
    # discriminator output 1 for generated images.
    g_loss = gan.train_on_batch(noise, np.ones((batch_size, 1)))
    # Periodically report losses and show a 5x5 grid of generated samples.
    if epoch % sample_interval == 0:
        print('Epoch %d, Discriminator Loss: %f, Generator Loss: %f' % (epoch, d_loss[0], g_loss))
        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, z_dim))
        gen_imgs = generator.predict(noise)
        gen_imgs = 0.5 * gen_imgs + 0.5  # rescale from [-1, 1] back to [0, 1]
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1
        plt.show()

(生成的手写数字样例图:此处原有插图,提取时丢失)

5:实现自编码器

使用MNIST数据集,该数据集包含手写数字图像。使用自编码器模型,对手写数字图像进行压缩和重建。定义输入层、编码层、解码层和输出层,并使用Adadelta优化器和二元交叉熵损失函数编译自编码器模型。使用训练集进行训练,并在测试集上评估模型性能。最后,使用自编码器模型重建测试集图像,并可视化原始图像和重建的图像。

import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adadelta
import numpy as np
import matplotlib.pyplot as plt

# Load MNIST and flatten each 28x28 image into a 784-vector in [0, 1].
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), np.prod(x_train.shape[1:])))
x_test = np.reshape(x_test, (len(x_test), np.prod(x_test.shape[1:])))

# Single-hidden-layer autoencoder: 784 -> 128 (encoder) -> 784 (decoder).
input_img = Input(shape=(784,))
encoded = Dense(128, activation='relu')(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input_img, decoded)

# Standalone encoder and decoder views sharing the autoencoder's weights.
encoder = Model(input_img, encoded)
encoded_input = Input(shape=(128,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(encoded_input, decoder_layer(encoded_input))

# BUG FIX: the string 'adadelta' uses TF 2.x's default learning rate (0.001),
# with which this model barely learns in 50 epochs. The classic Keras
# autoencoder tutorial relied on Adadelta's old default lr=1.0 — set it
# explicitly to restore the intended training behavior.
autoencoder.compile(optimizer=Adadelta(learning_rate=1.0), loss='binary_crossentropy')

# Train the autoencoder to reconstruct its own input.
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))

# Reconstruct the test images through the trained autoencoder.
decoded_imgs = autoencoder.predict(x_test)

# Show originals (top row) against reconstructions (bottom row).
n = 10  # number of images to display
plt.figure(figsize=(20, 4))
for i in range(n):
    # Original image.
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # Reconstructed image.
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()

(原始图像与重建图像对比图:此处原有插图,提取时丢失)

  • 2
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值