# Tensorflow语法:
out = tf.clip_by_value(input, minvalue, maxvalue)

# Pytorch语法:
out = torch.clamp(input, min, max, out=None)
计算输出相对于输入的梯度之和
# Tensorflow语法:
grad = tf.gradients(ys, xs)[0]

# Pytorch语法:
grad = torch.autograd.grad(ys, xs)[0]
禁用梯度计算的上下文管理器
# Tensorflow语法:暂时停止在此磁带上记录操作。在此上下文管理器处于活动状态时执行的操作将不会记录在磁带上。这对于减少跟踪所有计算所使用的内存非常有用。
x = tf.constant(4.0)
with tf.GradientTape() as tape:
    with tape.stop_recording():
        y = x ** 2
dy_dx = tape.gradient(y, x)
print(dy_dx)  # None

# Pytorch语法:
>>> x = torch.tensor([1.], requires_grad=True)
>>> with torch.no_grad():
...     y = x * 2
>>> y.requires_grad
False
>>> @torch.no_grad()
... def doubler(x):
...     return x * 2
>>> z = doubler(x)
>>> z.requires_grad
False
# Tensorflow语法:
tf.eye(num_rows)
# Construct one identity matrix.
tf.eye(2) ==> [[1., 0.], [0., 1.]]
# Construct a batch of 3 identity matrices, each 2 x 2.
# batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
batch_identity = tf.eye(2, batch_shape=[3])
# Construct one 2 x 3 "identity" matrix
tf.eye(2, num_columns=3) ==> [[1., 0., 0.], [0., 1., 0.]]

# Pytorch语法:
torch.eye(num_rows)
>>> torch.eye(3)
tensor([[1., 0., 0.],
        [0., 1., 0.],
        [0., 0., 1.]])
应用布尔掩码到张量上
# Tensorflow语法:
tf.boolean_mask(tensor, mask, axis=None, name='boolean_mask')
tensor = [0, 1, 2, 3]  # 1-D example
mask = np.array([True, False, True, False])
out = tf.boolean_mask(tensor, mask)
# out: tf.Tensor([0 2], shape=(2,), dtype=int32)
tensor = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]  # 2-D example
mask = np.array([[True, False, False], [False, True, False], [False, False, True]])
out = tf.boolean_mask(tensor, mask)
# out: tf.Tensor([0 4 8], shape=(3,), dtype=int32)

# Pytorch语法:
torch.masked_select(input, mask)
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.3552, -2.3825, -0.8297,  0.3477],
        [-1.2035,  1.2252,  0.5002,  0.6248],
        [ 0.1307, -2.0608,  0.1244,  2.0139]])
>>> mask = x.ge(0.5)
>>> mask
tensor([[False, False, False, False],
        [False,  True,  True,  True],
        [False, False, False,  True]])
>>> torch.masked_select(x, mask)
tensor([1.2252, 0.5002, 0.6248, 2.0139])