where
# Advanced tensor operations: where and gather
import torch
# where example
cond = torch.tensor([[0.6769, 0.7271],
                     [0.8884, 0.4163]])
a = torch.tensor([[0., 0.],
                  [0., 0.]])
b = torch.tensor([[1., 1.],
                  [1., 1.]])
print(torch.where(cond > 0.5, a, b))
# tensor([[0., 0.],
#         [0., 1.]])
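# where(cond, a, b) picks elementwise from a where cond is True, else from b.
# A minimal illustrative sketch (not from the original notes): the same op
# reproduces ReLU by zeroing out negative entries.
t = torch.tensor([-1.0, 0.5, -0.2, 2.0])
print(torch.where(t > 0, t, torch.zeros_like(t)))
# tensor([0.0000, 0.5000, 0.0000, 2.0000])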
gather
prob = torch.rand(4, 10)
print(prob)
# tensor([[0.3942, 0.9214, 0.0140, 0.3338, 0.6830, 0.9334, 0.3668, 0.3390, 0.6481, 0.8863],
#         [0.1298, 0.1974, 0.7513, 0.7331, 0.8322, 0.7839, 0.3067, 0.6788, 0.9944, 0.3597],
#         [0.4434, 0.0887, 0.3435, 0.2912, 0.6668, 0.6734, 0.6875, 0.6746, 0.8056, 0.4545],
#         [0.1018, 0.2697, 0.5875, 0.9712, 0.7195, 0.5911, 0.4615, 0.2659, 0.0606, 0.9976]])
idx = prob.topk(dim=1, k=3)
print(idx)
# torch.return_types.topk(
# values=tensor([[0.9334, 0.9214, 0.8863],
#                [0.9944, 0.8322, 0.7839],
#                [0.8056, 0.6875, 0.6746],
#                [0.9976, 0.9712, 0.7195]]),
# indices=tensor([[5, 1, 9],
#                 [8, 4, 5],
#                 [8, 6, 7],
#                 [9, 3, 4]]))
idx = idx[1]  # topk returns a (values, indices) named tuple; [1] selects the indices tensor
print(idx)
# tensor([[5, 1, 9],
#         [8, 4, 5],
#         [8, 6, 7],
#         [9, 3, 4]])
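# Indexing the tuple by position works, but unpacking it is clearer.
# Equivalent sketch, reusing prob from above:
values, idx = prob.topk(k=3, dim=1)
# or access the field by name: idx = prob.topk(k=3, dim=1).indices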
label = torch.arange(10) + 100
print(label)
# tensor([100, 101, 102, 103, 104, 105, 106, 107, 108, 109])
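# Note: expand returns a broadcasted (4, 10) view of label without copying
# memory, which is why it pairs cheaply with gather below.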
result = torch.gather(label.expand(4, 10), dim=1, index=idx.long())
print(result)
# tensor([[105, 101, 109],
#         [108, 104, 105],
#         [108, 106, 107],
#         [109, 103, 104]])
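# For dim=1, gather follows the rule out[i][j] = input[i][index[i][j]].
# A minimal sanity check of that rule (illustrative, reusing label and idx):
expanded = label.expand(4, 10)
manual = torch.stack([expanded[i][idx[i]] for i in range(4)])
print(torch.equal(manual, result))
# True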
mse_loss
import torch
import torch.nn as nn
mse_loss = nn.MSELoss()
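# nn.MSELoss defaults to reduction='mean'; the tensors below have a single
# element, so mean and sum coincide and mse = (1 - w*x)^2 exactly.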
# What is a gradient? A core concept in neural networks.
x = torch.ones(1)
print(x)
# tensor([1.])
w = torch.full([1], 2.)
print(w)
# tensor([2.])
w.requires_grad_()
print(w)
# tensor([2.], requires_grad=True)
mse = mse_loss(torch.ones(1), x*w)
print(mse)
# tensor(1., grad_fn=<MseLossBackward>)
print(torch.autograd.grad(mse, [w]))
# (tensor([2.]),)
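# This matches the analytic derivative: mse = (1 - w*x)^2, so
# d(mse)/dw = 2*(w*x - 1)*x = 2*(2*1 - 1)*1 = 2.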
# Second method: backward() accumulates the gradient into w.grad.
# The autograd.grad call above freed the computation graph, so rebuild mse
# first (or pass retain_graph=True to autograd.grad).
mse = mse_loss(torch.ones(1), x*w)
mse.backward()
print(w.grad)
# tensor([2.])
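# backward() accumulates into .grad, which is why training loops zero the
# gradients between steps. A minimal sketch reusing x and w from above:
mse = mse_loss(torch.ones(1), x*w)  # rebuild the graph for a fresh backward
mse.backward()
print(w.grad)
# tensor([4.])  <- the new 2. was added on top of the previous 2.
w.grad.zero_()  # clear the accumulated gradient
mse = mse_loss(torch.ones(1), x*w)
mse.backward()
print(w.grad)
# tensor([2.])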