Goal: gradient accumulation, to "enlarge" the effective GPU memory (i.e. simulate a large batch size on limited memory).
Known problems:
1. Plain gradient accumulation easily runs out of memory.
2. Gradient accumulation combined with gradient clipping runs out of memory even more easily.
3. After the gradients are accumulated, the update step oscillates heavily and disturbs the model badly (the code below sums the gradients rather than averaging them, so the effective step size grows with the number of accumulated steps).
Unresolved:
1. How to process each per-step gradient during accumulation (e.g. per-step clipping); see the sketches below.
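A note on problems 1 and 2: the usage code below keeps every per-step gradient as a separate numpy array on the host until the update, so memory grows linearly with the accumulation length. A common alternative, sketched here under the same TF 1.x assumptions (build_accum_ops, accum_ops, and zero_ops are hypothetical names, not from the original), accumulates in place inside the graph with one non-trainable buffer per variable:

import tensorflow as tf

def build_accum_ops(optimizer, loss, var_list):
    grads_and_vars = [(g, v) for (g, v) in
                      optimizer.compute_gradients(loss, var_list=var_list)
                      if g is not None]
    # One persistent, non-trainable buffer per variable.
    buffers = [tf.Variable(tf.zeros_like(v), trainable=False)
               for (_, v) in grads_and_vars]
    # Add each mini-batch gradient into its buffer in place.
    accum_ops = [buf.assign_add(g) for buf, (g, _) in zip(buffers, grads_and_vars)]
    # Reset the buffers after an update.
    zero_ops = [buf.assign(tf.zeros_like(buf)) for buf in buffers]
    # Apply the accumulated (summed) gradients.
    train_op = optimizer.apply_gradients(
        [(buf, v) for buf, (_, v) in zip(buffers, grads_and_vars)])
    return accum_ops, zero_ops, train_op

Run accum_ops once per mini-batch, train_op once every N batches, then zero_ops to reset. Per-step processing (the unresolved item above) can be inserted on g before assign_add, and dividing each buffer by N before apply_gradients would average rather than sum.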
import tensorflow as tf  # TF 1.x API (compute_gradients, placeholder)

# Definition
def gradients_add(self, optimizer, loss, var_list):
    '''
    :param optimizer: the optimizer
    :param loss: loss tensor
    :param var_list: variables to differentiate with respect to
    :return: train_op, gradient, grads_holder
    '''
    with tf.name_scope('gradient'):
        # First pass may contain None gradients for variables the loss does not use.
        gradient_all = optimizer.compute_gradients(loss, var_list=var_list)
        # Keep only the variables that actually receive a gradient.
        grads_vars = [v for (g, v) in gradient_all if g is not None]
        # Second pass: the same gradients, now without None entries.
        gradient = optimizer.compute_gradients(loss, grads_vars)
        # One placeholder per gradient; the accumulated values are fed in at update time.
        grads_holder = [(tf.placeholder(tf.float32, shape=g.get_shape()), v)
                        for (g, v) in gradient]
        train_op = optimizer.apply_gradients(grads_holder)
    return train_op, gradient, grads_holder
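For problem 2 and the unresolved item, one option is to clip inside the graph rather than on the accumulated host-side arrays, so no extra gradient copies are created. A minimal sketch under the same TF 1.x assumptions; gradients_add_clipped and clip_norm are hypothetical names, with clip_norm=5.0 echoing the commented-out value in the original usage code:

def gradients_add_clipped(self, optimizer, loss, var_list, clip_norm=5.0):
    # Same two-pass construction as gradients_add, then global-norm clipping
    # applied to the placeholder tensors before apply_gradients.
    gradient_all = optimizer.compute_gradients(loss, var_list=var_list)
    grads_vars = [v for (g, v) in gradient_all if g is not None]
    gradient = optimizer.compute_gradients(loss, grads_vars)
    grads_holder = [(tf.placeholder(tf.float32, shape=g.get_shape()), v)
                    for (g, v) in gradient]
    # tf.clip_by_global_norm returns (clipped_list, global_norm).
    clipped, _ = tf.clip_by_global_norm([g for (g, _) in grads_holder], clip_norm)
    train_op = optimizer.apply_gradients(
        [(cg, v) for cg, (_, v) in zip(clipped, grads_holder)])
    return train_op, gradient, grads_holder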
# Usage
accum_steps = 10                 # accumulate gradients over 10 mini-batches
grads_list = []                  # per-step gradient values (numpy arrays)
for step in range(1, num_steps + 1):     # num_steps: total training steps
    # One sess.run per mini-batch; returns [(grad_value, var_value), ...].
    grads_list.append(sess.run(gradient))
    if step % accum_steps == 0:
        feed = {}
        for i in range(len(grads_holder)):
            placeholder = grads_holder[i][0]
            # Sum the i-th gradient over the accumulated steps; dividing by
            # accum_steps here would average instead and damp the oscillation
            # of problem 3. tf.clip_by_global_norm works on tensors, not on
            # these numpy values, hence the in-graph clipping variant above.
            feed[placeholder] = sum(g[i][0] for g in grads_list)
        _ = sess.run(train_op, feed_dict=feed)
        grads_list = []
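On the host side, grads_list above still holds accum_steps full gradient copies per variable, which is the likely source of problem 1. A sketch of a running-sum variant, assuming the same sess, gradient, grads_holder, train_op, num_steps, and accum_steps as above; the division by accum_steps averages, addressing problem 3:

running = None                   # one numpy array per variable, not one per step
for step in range(1, num_steps + 1):
    g_step = sess.run(gradient)              # [(grad_value, var_value), ...]
    if running is None:
        running = [g for (g, _) in g_step]
    else:
        running = [r + g for r, (g, _) in zip(running, g_step)]
    if step % accum_steps == 0:
        # Average before feeding so the effective step size stays constant.
        feed = {grads_holder[i][0]: running[i] / accum_steps
                for i in range(len(grads_holder))}
        sess.run(train_op, feed_dict=feed)
        running = None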