TensorFlow and Neural Networks

Data Loading
import tensorflow as tf
from tensorflow.keras import datasets

# x: [60000, 28, 28]
# y: [60000]
(x, y), (x_val, y_val) = datasets.mnist.load_data()
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)
y = tf.one_hot(y, depth=10)
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
train_dataset = train_dataset.shuffle(60000).batch(100)

# An alternative: keep x, y raw and do the preprocessing inside the pipeline
def preprocess(x, y):
    x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
    y = tf.convert_to_tensor(y, dtype=tf.int32)
    y = tf.one_hot(y, depth=10)
    return x, y

train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
train_dataset = train_dataset.map(preprocess).batch(32)
# Other Dataset utilities:
# train_dataset.shuffle(n): shuffle with buffer size n (here, the dataset size)
# train_dataset.batch(200): set the batch size to 200
res = next(iter(train_dataset))
res[0].shape, res[1].shape  # [32, 28, 28], [32, 10] for MNIST

# Putting it together, the usual pattern:
db = tf.data.Dataset.from_tensor_slices((x, y))
db = db.map(preprocess).shuffle(10000).batch(batchsz)
for epoch in range(epochs):
    for step, (x, y) in enumerate(db):
        # forward pass, loss, and gradient update go here; a full sketch follows
        pass
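# A minimal sketch of the loop body above, assuming a toy one-layer model;
# `model` and `optimizer` are illustrative stand-ins, and `db` yields one-hot
# labels as produced by `preprocess`:
model = tf.keras.layers.Dense(10)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
for step, (x, y) in enumerate(db):
    x = tf.reshape(x, [-1, 28 * 28])        # flatten [b, 28, 28] -> [b, 784]
    with tf.GradientTape() as tape:
        logits = model(x)                   # forward pass
        loss = tf.reduce_mean(
            tf.losses.categorical_crossentropy(y, logits, from_logits=True))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    if step % 100 == 0:
        print(step, 'loss:', float(loss))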
Fully Connected Layers
net = tf.keras.layers.Dense(512)
x = tf.random.normal([4,784])		# [4,784]
out = net(x)						# [4,512]
net.kernel.shape					# [784,512]
net.bias.shape						# [512]

# build() creates the weights for a given input shape without a forward pass
net = tf.keras.layers.Dense(512)
net.build(input_shape=(2, 4))		# kernel: [4, 512], bias: [512]

from tensorflow import keras

x = tf.random.normal([2, 3])
model = keras.Sequential([
    keras.layers.Dense(2, activation='relu'),
    keras.layers.Dense(2, activation='relu'),
    keras.layers.Dense(2)
])
model.build(input_shape=[None, 3])
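# Once built, the model can be called on a batch directly:
out = model(x)          # [2, 3] -> [2, 2]
model.summary()         # per-layer output shapes and parameter counts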
Loss Computation
# MSE (ground truth first, then predictions)
tf.reduce_mean(tf.losses.MSE(y, out))
# Cross-entropy
tf.losses.categorical_crossentropy([0, 1, 0, 0], [0.25, 0.25, 0.25, 0.25])
tf.losses.binary_crossentropy([1], [0.1])	# single-output binary classification
# Note: 1. the ground truth must be one-hot encoded
#       2. pass from_logits=True when feeding raw logits
tf.losses.categorical_crossentropy(tf.one_hot(y, depth=3), logits, from_logits=True)
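# Why from_logits=True matters: TF fuses softmax and cross-entropy into one
# numerically stable op. A small sketch (shapes and values are illustrative):
x = tf.random.normal([1, 784])
w = tf.random.normal([784, 2])
b = tf.zeros([2])
logits = x @ w + b
# stable: softmax is applied internally
loss_stable = tf.losses.categorical_crossentropy([[0., 1.]], logits, from_logits=True)
# risky: a manual softmax can saturate to exactly 0 or 1 and break the log
prob = tf.math.softmax(logits, axis=1)
loss_manual = tf.losses.categorical_crossentropy([[0., 1.]], prob)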
Gradient Descent
w = tf.constant(1.)
x = tf.constant(2.)
with tf.GradientTape() as tape:		# context that records ops for autodiff
    tape.watch([w])                 # track a tensor that is not a tf.Variable
    y = x * w
grad = tape.gradient(y, [w])		# dy/dw

# By default a tape is released after one gradient call; persistent=True allows several:
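# A minimal sketch, reusing w = 1. and x = 2. from above:
with tf.GradientTape(persistent=True) as tape:
    tape.watch([w])
    y = x * w
    y2 = y ** 2
grad1 = tape.gradient(y, [w])    # dy/dw  = x = 2.0
grad2 = tape.gradient(y2, [w])   # dy2/dw = 2*y*x = 8.0
del tape                         # release the resources a persistent tape holds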
Gradients of Loss Functions
# MSE gradient (with a toy setup so the snippet runs standalone)
x = tf.random.normal([2, 4])
w = tf.random.normal([4, 3])
b = tf.zeros([3])
y = tf.constant([2, 0])                  # integer class labels
with tf.GradientTape() as tape:
    tape.watch([w, b])
    prob = tf.nn.softmax(x @ w + b, axis=1)
    loss = tf.reduce_mean(tf.losses.MSE(tf.one_hot(y, depth=3), prob))
grads = tape.gradient(loss, [w, b])      # differentiate the loss, not y
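# The same pattern with a cross-entropy loss (a sketch; same x, w, b, y):
with tf.GradientTape() as tape:
    tape.watch([w, b])
    logits = x @ w + b
    loss = tf.reduce_mean(tf.losses.categorical_crossentropy(
        tf.one_hot(y, depth=3), logits, from_logits=True))
grads = tape.gradient(loss, [w, b])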
Visualization
  • TensorBoard (TensorFlow)
  • Visdom (PyTorch)
# pip install tensorboard
# 1. From the working directory, run: tensorboard --logdir logs
#    (it serves a web UI on a port, 6006 by default)
# 2. Open localhost:6006 in a browser
# 3. Create a summary writer
import datetime
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
log_dir = 'logs/' + current_time
summary_writer = tf.summary.create_file_writer(log_dir)
# Scalars
with summary_writer.as_default():
    tf.summary.scalar('loss', float(loss), step=epoch)
    tf.summary.scalar('accuracy', float(train_accuracy), step=epoch)
# A single image
with summary_writer.as_default():
    tf.summary.image("sample:", sample_img, step=0)
# Multiple images
sample_imgs = tf.reshape(sample_imgs, [-1, 28, 28, 1])
with summary_writer.as_default():
    tf.summary.image("samples:", sample_imgs, max_outputs=25, step=0)
# Multiple images combined into a single grid image
figure = image_grid(sample_imgs)
with summary_writer.as_default():
    tf.summary.image("samples:", plot_to_image(figure), step=0)
Keras High-Level API
  • dataset
  • layers
  • losses
  • metrics
  • optimizers
# metrics
from tensorflow.keras import metrics

acc_meter = metrics.Accuracy()
loss_meter = metrics.Mean()
acc_meter.update_state(y, pred)
loss_meter.update_state(loss)
print('loss:', loss_meter.result().numpy())
# Reset the accumulated state
acc_meter.reset_states()
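# A sketch of the accuracy meter over a test set; `network` and `db_test`
# are assumed from the surrounding examples, with one-hot labels:
acc_meter.reset_states()
for x, y in db_test:
    logits = network(x)
    pred = tf.argmax(logits, axis=1)
    acc_meter.update_state(tf.argmax(y, axis=1), pred)
print('acc:', acc_meter.result().numpy())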
# compile / fit / evaluate / predict
############################ Training
# compile specifies the optimizer, the loss, and the metrics to track
network.compile(optimizer=optimizers.Adam(learning_rate=0.01),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True))
# fit specifies the number of epochs
network.fit(db, epochs=10)
############################ Testing
network.compile(optimizer=optimizers.Adam(learning_rate=0.01),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
# Validate every 2 epochs; db_val is the validation set, db_test the test set
network.fit(db, epochs=10, validation_data=db_val, validation_freq=2)
network.evaluate(db_test)

# network.predict(x) <=> network(x)
Custom Network Layers
  • keras.Sequential
  • Custom layers subclass keras.layers.Layer; custom models subclass keras.Model
# keras.Sequential
network = Sequential([layers.Dense(256, activation='relu'),
                      layers.Dense(64, activation='relu'),
                      layers.Dense(10)])
network.build(input_shape=(None, 28*28))
network.summary()
# All trainable parameters: model.trainable_variables
# model(x) invokes model.__call__(x), which in turn invokes our own call method
# A custom model must implement __init__ and call, and inherit from keras.Model

class MyDense(layers.Layer):
    def __init__(self, in_dim, out_dim):
        super(MyDense, self).__init__()
        self.kernel = self.add_weight('w', [in_dim, out_dim])
        self.bias = self.add_weight('b', [out_dim])

    def call(self, inputs, training=None):
        out = inputs @ self.kernel + self.bias
        return out

class MyModel(keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.fc1 = MyDense(28*28, 256)
        self.fc2 = MyDense(256, 64)
        self.fc3 = MyDense(64, 10)

    def call(self, inputs, training=None):
        x = self.fc1(inputs)
        x = tf.nn.relu(x)
        x = self.fc2(x)     # feed the previous activation, not the raw inputs
        x = tf.nn.relu(x)
        x = self.fc3(x)
        return x
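# Using the custom model like a built-in one (a sketch; assumes `db` yields
# flattened [b, 784] inputs with one-hot labels):
network = MyModel()
network.compile(optimizer=optimizers.Adam(learning_rate=0.01),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
network.fit(db, epochs=5)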
Saving and Loading Models
# save_weights: persist only the parameters, not the architecture
network = Sequential()
network.compile()
network.fit()
network.evaluate()

network.save_weights('./weights.ckpt')
del network

# Rebuild the same architecture, then restore the weights
network = Sequential()
network.compile()
network.load_weights('./weights.ckpt')
network.evaluate()
# save: persist the whole model (architecture + weights) in one file
network.save('model.h5')
network = tf.keras.models.load_model('model.h5')
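# A third option (a sketch): the SavedModel format, which can be deployed
# without the original Python class definitions:
tf.saved_model.save(network, 'saved_model/')
restored = tf.saved_model.load('saved_model/')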
Cross Validation
# Re-split train/validation randomly at every epoch
for epoch in range(epochs):
    idx = tf.range(60000)
    idx = tf.random.shuffle(idx)
    x_train, y_train = tf.gather(x, idx[:50000]), tf.gather(y, idx[:50000])
    x_val, y_val = tf.gather(x, idx[-10000:]), tf.gather(y, idx[-10000:])
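    # Loop body (a sketch): rebuild the datasets from the new split and run
    # one epoch; `preprocess`, `network`, and `batchsz` as defined earlier
    db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train)).map(preprocess).batch(batchsz)
    db_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)).map(preprocess).batch(batchsz)
    network.fit(db_train, epochs=1)
    network.evaluate(db_val)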
    
# Alternatively, let Keras hold out 10% itself and validate every 2 epochs.
# Note validation_split requires tensor/array inputs, not a tf.data.Dataset:
network.fit(x, y, epochs=6, validation_split=0.1, validation_freq=2)
Other Topics
  • Regularization
  • Momentum and learning rate
  • Dropout
# Regularization
keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001), activation=tf.nn.relu)
# Flexible regularization: add an L2 penalty to the loss by hand
loss = ...  # base loss, e.g. cross-entropy
loss_regularization = []
for p in network.trainable_variables:
    loss_regularization.append(tf.nn.l2_loss(p))
loss_regularization = tf.reduce_sum(tf.stack(loss_regularization))
loss = loss + 0.0001 * loss_regularization

# Momentum
optimizer = SGD(learning_rate=0.02, momentum=0.9)

# Learning-rate decay: anneal the rate by hand each epoch
optimizer = SGD(learning_rate=0.1)
for epoch in range(100):
    optimizer.learning_rate = 0.1 * (100 - epoch) / 100
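
# Built-in alternative (a sketch): a decay schedule handled by the optimizer
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.1, decay_steps=1000, decay_rate=0.96)
optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule)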
    
# Dropout
layers.Dense(256, activation='relu')
layers.Dropout(0.3)	# drop 30% of the activations during training
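# Dropout behaves differently at train and test time; pass the training flag
# explicitly when calling a model that contains Dropout layers (a sketch):
out = network(x, training=True)     # dropout active
out = network(x, training=False)    # dropout disabled for evaluation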