import torch

# Element-wise arithmetic: operators vs. their torch.* function equivalents.
a = torch.rand(3, 4)
b = torch.rand(4)  # broadcasts across each row of a

print("a+b:\t", a + b)

# a + b == torch.add(a, b)
c = torch.add(a, b)
print("c:\t", c)

# a - b == torch.sub(a, b)
d = torch.all(torch.eq(a - b, torch.sub(a, b)))
print("d:\t", d)

# a / b == torch.div(a, b)
e = torch.all(torch.eq(a / b, torch.div(a, b)))
print("e:\t", e)
import torch

# Matrix multiplication on 2-D tensors.
# The list literal uses floats so the tensor dtype is float32 (mm needs matching dtypes).
a = torch.tensor([[3., 3.], [3., 3.]])
b = torch.ones(2, 2)
print("b:\t", b)

# torch.mm: strictly 2-D matrix multiply.
c = torch.mm(a, b)
print("c:\t", c)

# torch.matmul (and the @ operator) are the general forms.
d = torch.matmul(a, b)
print("d:\t", d)
print("a@b:\t", a @ b)
import torch

# Linear-layer style projection: reduce the feature dimension from 784 to 512.
a = torch.rand(4, 784)
x = torch.rand(4, 784)    # batch of 4 samples, 784 features each
w = torch.rand(512, 784)  # weight stored as (out_features, in_features)

# (4, 784) @ (784, 512) -> (4, 512)
c = (x @ w.t()).shape
print("c:\t", c)
# expected output: c: torch.Size([4, 512])
import torch

# Batched matrix multiplication on >2-D tensors.
a = torch.rand(4, 3, 28, 64)
b = torch.rand(4, 3, 64, 32)

# torch.mm() only supports 2-D matrices, so this would raise RuntimeError:
# c = torch.mm(a, b).shape
# print("C:\t", c)

# torch.matmul multiplies the last two dims and treats the leading dims as batch.
d = torch.matmul(a, b).shape
print("d:\t", d)

# Broadcasting: e's size-1 batch dim expands to match a's batch dim of 3.
e = torch.rand(4, 1, 64, 32)
f = torch.matmul(a, e).shape
print("e:\t", f)
import torch

# Statistics and clamping, gradient-clipping style.
grad = torch.rand(2, 3) * 15  # values in [0, 15)
print("grad:\t", grad)

a = grad.max()
print("a:\t", a)

b = grad.median()
print("b:\t", b)

# clamp(min): floor every element at 10.
c = grad.clamp(10)
print("c:\t", c)

# clamp(min, max): restrict every element to [0, 10].
d = grad.clamp(0, 10)
print("d:\t", d)