# torch.randn()、torch.mean()、torch.pow()、torch.matmul()

### torch.mean()

torch.mean(input) 输出 input 各个元素的均值。不指定任何参数就是所有元素的算术平均值；指定 dim 参数可以计算每一行或者每一列的算术平均数。

a = torch.randn(3)     # 1-D tensor: 3 samples from N(0, 1)
b = torch.randn(1, 3)  # 2-D tensor: a 1x3 matrix of samples from N(0, 1)
print(a)
print(b)
torch.mean(a)          # no dim given -> arithmetic mean over all elements

tensor([-1.0737, -0.8689, -0.9553])
tensor([[-0.4005, -0.6812,  0.0958]])

tensor(-0.9659)

a = torch.randn(4, 4)
print(a)
# Reduce over dim 0 (down each column); keepdim keeps the result 2-D -> shape (1, 4)
c = torch.mean(a, dim=0, keepdim=True)
print(c)
# Reduce over dim 1 (across each row); keepdim keeps the result 2-D -> shape (4, 1)
d = torch.mean(a, dim=1, keepdim=True)
print(d)

tensor([[ 0.2378, -1.1380,  0.7964, -0.1413],
[ 0.4622, -1.7003, -1.1628,  0.8930],
[-2.0379, -1.7137,  0.6423, -0.2026],
[ 0.3512, -0.1251, -0.8315,  2.2642]])

tensor([[-0.2467, -1.1693, -0.1389,  0.7033]])

tensor([[-0.0612],
[-0.3770],
[-0.8280],
[ 0.4147]])

### torch.pow()

a = torch.tensor(3)
b = torch.pow(a, 2)  # scalar tensor raised to 2 -> tensor(9)
print(b)
c = torch.randn(4)
print(c)
d = torch.pow(c, 2)  # element-wise square of a 1-D tensor
print(d)

tensor(9)
tensor([ 0.0923,  0.7006, -0.2963,  0.6543])
tensor([0.0085, 0.4909, 0.0878, 0.4282])

### torch.matmul()

torch.matmul 是做矩阵乘法。注意：当两个输入都是一维张量时，torch.matmul 计算的是点积（内积），结果是一个标量张量，如下例中 1*3 + 2*4 + 3*5 = 26。

a = torch.tensor([1, 2, 3])
b = torch.tensor([3, 4, 5])
# Both operands are 1-D, so matmul is the dot product: 1*3 + 2*4 + 3*5 = 26
torch.matmul(a, b)

tensor(26)

### torch.ones_like()

torch.ones_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor

import torch

# NOTE: the original used torch.autograd.Variable, which is deprecated since
# PyTorch 0.4 (and was not even imported here). A plain tensor created with
# requires_grad=True tracks gradients the same way.
m = torch.tensor([[2.0, 3.0]], requires_grad=True)  # build a 1 x 2 matrix
n = torch.zeros(1, 2)  # a same-sized zero matrix to hold the results
print(m)
print(n)
# Compute new values of n from m; the in-place writes record a CopySlices
# grad_fn on n, so gradients can flow back to m.
n[0, 0] = m[0, 0] ** 2
n[0, 1] = m[0, 1] ** 3
print(n[0, 0])
print(n)

tensor([[2., 3.]], requires_grad=True)
tensor([[0., 0.]])
tensor([[ 4., 27.]], grad_fn=<CopySlices>)
 n.backward(torch.ones_like(n))
# 相当于n.backward(torch.FloatTensor([[1,1]]))
print(m.grad)

tensor([[ 4., 27.]])

07-17 4090
06-03 3555
02-05 3150
08-14 1万+
04-12 4171
05-10 2万+