# 1 自定义前向传播和训练 (custom forward propagation and training)
import tensorflow as tf
from sklearn import datasets
from matplotlib import pyplot as plt
import numpy as np
# Load the Iris dataset once (the original fetched it twice) and split
# out features (150x4) and integer class labels (150,).
iris = datasets.load_iris()
x_data = iris.data
y_data = iris.target

# Shuffle features and labels with the SAME seed: re-seeding with 116
# before each shuffle applies the identical permutation to both arrays,
# so rows stay aligned with their labels.
np.random.seed(116)
np.random.shuffle(x_data)
np.random.seed(116)
np.random.shuffle(y_data)
tf.random.set_seed(116)

# Hold out the last 30 samples for testing; the first 120 train.
x_train = x_data[:-30]
y_train = y_data[:-30]
x_test = x_data[-30:]
y_test = y_data[-30:]

# Cast features to float32 so matmul with the float32 weights is valid.
x_train = tf.cast(x_train, tf.float32)
x_test = tf.cast(x_test, tf.float32)

# Batch into mini-batches of 32 for training and evaluation.
train_bd = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
test_bd = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)

# Single dense layer: 4 input features -> 3 class scores.
w1 = tf.Variable(tf.random.truncated_normal([4, 3], stddev=0.1))
b1 = tf.Variable(tf.random.truncated_normal([3], stddev=0.1))

lr = 0.1                  # learning rate for the manual SGD updates
train_loss_results = []   # mean training loss per epoch (for plotting)
test_acc = []             # test accuracy per epoch (for plotting)
epoch = 500               # number of training epochs
loss_all = 0              # running loss accumulator within one epoch
# Training loop: one manual SGD step per mini-batch, then evaluate
# accuracy on the held-out test set after every epoch.
for epoch in range(epoch):  # NOTE: reuses `epoch` as the loop counter
    num_batches = 0
    # Batch variables renamed (were `x_train`/`y_train`), so the loop no
    # longer clobbers the full training tensors defined above.
    for step, (x_batch, y_batch) in enumerate(train_bd):
        with tf.GradientTape() as tape:
            # Forward pass: dense layer + softmax over the 3 classes.
            y = tf.matmul(x_batch, w1) + b1
            y = tf.nn.softmax(y)
            # One-hot encode the integer labels; MSE loss against the
            # softmax probabilities.
            y_ = tf.one_hot(y_batch, depth=3)
            loss = tf.reduce_mean(tf.square(y_ - y))
        loss_all += loss.numpy()
        num_batches = step + 1
        # Manual gradient-descent update of the weights and bias.
        grads = tape.gradient(loss, [w1, b1])
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
    # Average over the actual number of batches. The original divided by
    # a hard-coded 4, which is only correct for 120 samples / batch 32.
    print("epoch{},loss:{}".format(epoch, loss_all / num_batches))
    train_loss_results.append(loss_all / num_batches)
    loss_all = 0

    # Evaluate accuracy on the test set with the current parameters.
    total_correct, total_number = 0, 0
    for x_batch, y_batch in test_bd:
        y = tf.matmul(x_batch, w1) + b1
        y = tf.nn.softmax(y)
        # Predicted class = index of the largest probability.
        pred = tf.argmax(y, axis=1)
        pred = tf.cast(pred, dtype=y_batch.dtype)
        correct = tf.cast(tf.equal(pred, y_batch), dtype=tf.int32)
        total_correct += int(tf.reduce_sum(correct))
        total_number += x_batch.shape[0]
    acc = total_correct / total_number
    test_acc.append(acc)
    print("Test_acc:", acc)  # fixed output typo ("Teat_acc")
    print("…………………………………………………………")
def _plot_curve(values, title, ylabel, label):
    # Draw one per-epoch metric curve and display it in its own window.
    plt.title(title)
    plt.xlabel("epoch")
    plt.ylabel(ylabel)
    plt.plot(values, label=label)
    plt.legend()
    plt.show()

# Training loss per epoch, then test accuracy per epoch.
_plot_curve(train_loss_results, "损失曲线", "loss", "$loss$")
_plot_curve(test_acc, "准确率", "accuracy", "$Accuracy$")
# 2 tf.keras.models.Sequential (same classifier via the Sequential API)
import tensorflow as tf
from sklearn import datasets
import numpy as np
# Load the Iris dataset once (the original fetched it twice), then
# shuffle features and labels with the same seed so rows stay aligned.
iris = datasets.load_iris()
x_train = iris.data
y_train = iris.target
np.random.seed(116)
np.random.shuffle(x_train)
np.random.seed(116)
np.random.shuffle(y_train)
tf.random.set_seed(116)

# Two-layer network: hidden ReLU layer of 10 units, softmax output over
# the 3 classes with L2 regularization on the output kernel.
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(10, activation=tf.nn.relu),
    tf.keras.layers.Dense(3, activation='softmax',
                          kernel_regularizer=tf.keras.regularizers.l2()),
])

# `learning_rate` replaces the deprecated `lr` argument, which recent
# Keras releases reject. Loss takes probabilities (from_logits=False)
# because the output layer already applies softmax.
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

# 20% of the data is held out for validation, checked every 20 epochs.
model.fit(x_train, y_train, batch_size=32, epochs=500,
          validation_split=0.2, validation_freq=20)
model.summary()
# 3 class 方法 (same classifier via Model subclassing)
import tensorflow as tf
from sklearn import datasets
import numpy as np
from tensorflow.keras.layers import Dense
from tensorflow.keras import Model
# Load the Iris dataset once (the original fetched it twice).
iris = datasets.load_iris()
x_train = iris.data
y_train = iris.target

# Shuffle features and labels with the same seed so rows stay aligned.
np.random.seed(116)
np.random.shuffle(x_train)
np.random.seed(116)
np.random.shuffle(y_train)
tf.random.set_seed(116)
class IrisModel(Model):
    """Two-layer Iris classifier: Dense(10, relu) -> Dense(3, softmax)."""

    def __init__(self):
        super(IrisModel, self).__init__()
        self.d1 = Dense(10, activation="relu")
        # Softmax (the original used sigmoid) so the three outputs form a
        # probability distribution, which the caller's
        # SparseCategoricalCrossentropy(from_logits=False) loss expects —
        # and which matches the Sequential version of this model.
        self.d2 = Dense(3, activation="softmax",
                        kernel_regularizer=tf.keras.regularizers.l2())

    def call(self, x):
        """Forward pass: hidden ReLU layer, then softmax class probabilities."""
        hidden = self.d1(x)
        return self.d2(hidden)
model = IrisModel()
# `learning_rate` replaces the deprecated `lr` argument, which recent
# Keras releases reject. Loss takes probabilities (from_logits=False)
# because the model's output layer applies an activation.
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=["sparse_categorical_accuracy"])

# 20% of the data is held out for validation, checked every 20 epochs.
model.fit(x_train, y_train, batch_size=32, epochs=500,
          validation_split=0.2, validation_freq=20)
model.summary()