import tensorflow as tf
if __name__ == '__main__':
    # --- tf.Variable basics ---
    v = tf.Variable(0.0)   # <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.0>
    v.assign(5)            # numpy=5.0
    v.assign_add(1)        # numpy=6.0
    v.read_value()         # tf.Tensor(6.0, shape=(), dtype=float32)

    # --- Automatic differentiation ---
    w = tf.Variable([[1.0]])
    with tf.GradientTape() as t:  # tape records operations automatically (variables are watched by default)
        loss = w * w
    grad = t.gradient(loss, w)    # tf.Tensor([[2.]], shape=(1, 1), dtype=float32)

    w = tf.constant(3.0)          # constants are NOT watched by default
    with tf.GradientTape() as t:
        t.watch(w)                # must watch the constant explicitly
        loss = w * w
    grad = t.gradient(loss, w)    # tf.Tensor(6.0, shape=(), dtype=float32)

    w = tf.constant(3.0)
    with tf.GradientTape(persistent=True) as t:  # persistent=True: gradient() may be called several times
        t.watch(w)
        y = w * w
        z = y * y
    dy_dw = t.gradient(y, w)      # tf.Tensor(6.0, shape=(), dtype=float32)
    dz_dw = t.gradient(z, w)      # tf.Tensor(108.0, shape=(), dtype=float32)
    del t                         # release the resources held by the persistent tape

    # --- MNIST data pipeline ---
    (train_image, train_labels), _ = tf.keras.datasets.mnist.load_data()
    train_image = tf.expand_dims(train_image, -1)         # (60000, 28, 28) -> (60000, 28, 28, 1)
    train_image = tf.cast(train_image / 255, tf.float32)  # scale pixels to [0, 1]
    train_labels = tf.cast(train_labels, tf.int64)
    dataset = tf.data.Dataset.from_tensor_slices(
        (train_image, train_labels)
    )
    # BUGFIX: .repeat() removed -- an unbounded repeat() makes `for ... in dataset`
    # an infinite iterator, so every "epoch" loop below would never finish.
    dataset = dataset.shuffle(10000).batch(32)

    model = tf.keras.Sequential(
        [
            tf.keras.layers.Conv2D(16, [3, 3], activation='relu', input_shape=(28, 28, 1)),
            tf.keras.layers.MaxPool2D(),
            tf.keras.layers.Conv2D(16, [3, 3], activation='relu'),
            tf.keras.layers.GlobalMaxPooling2D(),
            tf.keras.layers.Dense(10)  # raw logits -- no softmax here
        ]
    )

    # --- Custom training loop ---
    optimizer = tf.keras.optimizers.Adam()
    # from_logits=True because the final Dense layer emits logits, not probabilities
    loss_func = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    features, labels = next(iter(dataset))  # shapes: (32, 28, 28, 1) and (32,)

    def loss(model, x, y):
        """Forward pass plus sparse cross-entropy loss for one batch."""
        y_ = model(x)  # predicted logits
        return loss_func(y, y_)

    def train_one_sample(model, images, labels):
        """One optimization step: compute the batch loss, backprop, apply gradients."""
        with tf.GradientTape() as t:
            loss_step = loss(model, images, labels)
        grads = t.gradient(loss_step, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

    def train():
        """Train for 5 epochs, one pass over the dataset per epoch."""
        for epoch in range(5):
            for (batch, (images, labels)) in enumerate(dataset):
                train_one_sample(model, images, labels)
            print('Epoch{} is finished'.format(epoch))

    train()

    # --- tf.keras.metrics: aggregation helpers ---
    m = tf.keras.metrics.Mean()
    m([10, 20, 30, 40])
    print(m.result().numpy())  # 25.0
    m.reset_states()           # reset the accumulated state

    # BUGFIX: metric classes are instantiated with an optional name --
    # `tf.keras.metrics['train_loss']` raised TypeError (module is not subscriptable).
    train_loss = tf.keras.metrics.Mean('train_loss')
    # BUGFIX: accuracy is tracked by SparseCategoricalAccuracy (a metric class,
    # called with the name), not SparseCategoricalCrossentropy[...] (a loss).
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')

    def train_one_sample(model, images, labels):
        """One optimization step that also updates the running loss/accuracy metrics."""
        with tf.GradientTape() as t:
            pred = model(images)
            # compute the loss from `pred` directly -- the original called
            # loss(model, ...) here, running the forward pass a second time
            loss_step = loss_func(labels, pred)
        grads = t.gradient(loss_step, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_loss(loss_step)
        train_accuracy(labels, pred)

    def train():
        """Train for 5 epochs, reporting mean loss and accuracy per epoch."""
        for epoch in range(5):
            # start every epoch with fresh metric state, otherwise the
            # reported numbers accumulate across epochs
            train_loss.reset_states()
            train_accuracy.reset_states()
            for (batch, (images, labels)) in enumerate(dataset):
                train_one_sample(model, images, labels)
            print('Epoch{} loss is {} accuracy is {}'.format(epoch,
                                                             train_loss.result(),
                                                             train_accuracy.result()))