tensorflow笔记2
示例:有一堆 x1、x2 数据以及对应的标签 y(0,1),若再给出一组 x1、x2 数据,请你预测出 y 的值
将x1和x2分别作为横纵坐标把数据可视化成数据点,标签为1的点标为红色,为0的点为蓝色,让神经网络画出一条线区分红色点和蓝色点。
思路: 先用神经网络拟合出输入特征x和输出标签y的函数关系,然后生成网格覆盖这些点,把网格的交点(横纵坐标)作为输入送入训练好的神经网络模型中,
神经网络会为每一个坐标生成一个预测值,区分输出偏向1还是0,把输出预测值为0.5的线标出颜色,这条线就是区分0和1的线了。
# Train a small MLP (2 -> 11 -> 1, ReLU hidden layer) on 2-D points with
# binary labels read from dot.csv, then visualize the learned decision
# boundary as the contour where the network's output equals 0.5.
import tensorflow as tf
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd

# Load the data: columns x1, x2 are the features, y_c is the 0/1 label.
df = pd.read_csv('dot.csv')
x_data = np.array(df[['x1', 'x2']])
y_data = np.array(df['y_c'])
x_train = x_data.reshape(-1, 2)
y_train = y_data.reshape(-1, 1)
# Scatter-plot colors: label 1 -> red, label 0 -> blue.
Y_c = [['red' if y else 'blue'] for y in y_train]

x_train = tf.cast(x_train, tf.float32)
y_train = tf.cast(y_train, tf.float32)
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)

# Trainable parameters: one hidden layer with 11 units.
w1 = tf.Variable(tf.random.normal([2, 11]), dtype=tf.float32)
b1 = tf.Variable(tf.constant(0.01, shape=[11]))
w2 = tf.Variable(tf.random.normal([11, 1]), dtype=tf.float32)
b2 = tf.Variable(tf.constant(0.01, shape=[1]))

lr = 0.005
epochs = 800  # renamed from `epoch` so the loop variable no longer shadows it

for epoch in range(epochs):
    # Fresh names for the batch tensors so they do not shadow the full
    # x_train/y_train tensors defined above.
    for step, (x_batch, y_batch) in enumerate(train_db):
        with tf.GradientTape() as tape:
            # Forward pass.
            h1 = tf.nn.relu(tf.matmul(x_batch, w1) + b1)
            y = tf.matmul(h1, w2) + b2
            # MSE data loss plus L2 regularization on the weights
            # (biases are deliberately not regularized).
            loss_mse = tf.reduce_mean(tf.square(y_batch - y))
            loss_regularization = tf.reduce_sum(
                [tf.nn.l2_loss(w1), tf.nn.l2_loss(w2)])
            loss = loss_mse + 0.03 * loss_regularization
        # Plain SGD: update each variable from its gradient.
        variables = [w1, b1, w2, b2]
        grads = tape.gradient(loss, variables)
        for var, grad in zip(variables, grads):
            var.assign_sub(lr * grad)
    if epoch % 20 == 0:
        print('epoch:', epoch, 'loss:', float(loss))

print("*******predict*******")
# Cover the data region with a grid and evaluate the trained network at
# every grid point in one batched forward pass (equivalent to, but much
# faster than, the original one-point-at-a-time loop).
xx, yy = np.mgrid[-3:3:.1, -3:3:.1]
grid = tf.cast(np.c_[xx.ravel(), yy.ravel()], tf.float32)
h1 = tf.nn.relu(tf.matmul(grid, w1) + b1)
probs = np.array(tf.matmul(h1, w2) + b2).reshape(xx.shape)

# Plot the labeled points and the 0.5 level set of the network output,
# which is the learned boundary between the two classes.
x1 = x_data[:, 0]
x2 = x_data[:, 1]
plt.scatter(x1, x2, color=np.squeeze(Y_c))
plt.contour(xx, yy, probs, levels=[.5])
plt.show()
。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。
用6步法实现鸢尾花分类
# Iris classification with the Keras "six-step" workflow:
# import -> load data -> build model -> compile -> fit -> summary.
import tensorflow as tf
from sklearn import datasets
import numpy as np

x_train = datasets.load_iris().data
y_train = datasets.load_iris().target

# Shuffle features and labels with the same seed so the pairs stay aligned.
np.random.seed(116)
np.random.shuffle(x_train)
np.random.seed(116)
np.random.shuffle(y_train)
tf.random.set_seed(116)

# Single softmax layer: 4 iris features -> 3 class probabilities,
# with L2 regularization on the kernel.
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(3, activation='softmax',
                          kernel_regularizer=tf.keras.regularizers.l2())
])

# `learning_rate` replaces the deprecated `lr` argument of SGD.
# The metric string below was truncated in the original and is restored here.
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

# batch_size: samples fed to the network per step; epochs: 500 passes over
# the data; validation_split: the last 20% of the (already shuffled) data is
# held out as a validation set, evaluated every 20 epochs.
model.fit(x_train, y_train, batch_size=32, epochs=500,
          validation_split=0.2, validation_freq=20)
model.summary()
。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。
Sequential可以搭建出上层输出就是下层输入的顺序网络结构,针对非顺序、跳连的网络结构,我们选择用class类封装一个网络结构。
init函数准备出搭建神经网络所需要的各种积木,call函数调用搭建好的积木,实现前向传播。
class类实现鸢尾花分类
# Iris classification with the network defined as a tf.keras.Model subclass:
# __init__ declares the layers ("building blocks"), call() wires the forward
# pass. This structure also supports non-sequential / skip-connected nets.
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import Model
from sklearn import datasets
import numpy as np

x_train = datasets.load_iris().data
y_train = datasets.load_iris().target

# Same-seed shuffles keep features and labels paired.
np.random.seed(116)
np.random.shuffle(x_train)
np.random.seed(116)
np.random.shuffle(y_train)
tf.random.set_seed(116)


class IrisModel(Model):
    """Single softmax layer mapping 4 iris features to 3 class scores."""

    def __init__(self):
        super(IrisModel, self).__init__()
        # 3 output classes, L2 regularization on the kernel.
        self.d1 = Dense(3, activation='softmax',
                        kernel_regularizer=tf.keras.regularizers.l2())

    def call(self, x):
        # Forward pass: just the single dense layer.
        y = self.d1(x)
        return y


model = IrisModel()
# `learning_rate` replaces the deprecated `lr` argument of SGD.
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])
# Hold out the last 20% of the shuffled data for validation, checked
# every 20 epochs.
model.fit(x_train, y_train, batch_size=32, epochs=500,
          validation_split=0.2, validation_freq=20)
model.summary()