import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
A = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.int32)
print(A)
print(A.device)         # still on the CPU at this point
A = A.to(device)        # move A to `device`, i.e. the CUDA device when one is available
print(A.device)
print(A.cpu().device)   # A.cpu() returns a copy of A on the CPU
print(A.cpu().numpy())  # a CUDA tensor cannot be converted to numpy directly; copy it to the CPU first
print(A.device)         # A itself is still on the CUDA device
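# A minimal sketch (the tensor names below are illustrative): the same pattern applies when
# creating tensors directly on the target device; converting to numpy always goes through
# host (CPU) memory.
import numpy as np
B = torch.ones(2, 3, device=device)         # create the tensor directly on the target device
B_np = B.cpu().numpy()                      # copy to host memory before converting to numpy
B_back = torch.from_numpy(B_np).to(device)  # the round-tripped array can be moved back again
print(B_back.device)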
# Differentiating a scalar Tensor
# Compute the derivative of f(x) = a*x**2 + b*x + c
x = torch.tensor(-2.0, requires_grad=True)
a = torch.tensor(1.0)
b = torch.tensor(2.0)
c = torch.tensor(3.0)
y = a * torch.pow(x, 2) + b * x + c
y.backward()  # backward() stores the computed gradient in the .grad attribute of the leaf variable x
dy_dx = x.grad
print(dy_dx)
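# Sanity check (added sketch): analytically dy/dx = 2*a*x + b, so at x = -2 the gradient
# should be 2*1*(-2) + 2 = -2, which is exactly the value stored in x.grad.
analytic = 2 * a * x.detach() + b
print(torch.allclose(dy_dx, analytic))  # True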
# Differentiating a non-scalar Tensor
# Compute the derivative of f(x) = a*x**2 + b*x + c
x = torch.tensor([[-2.0, -1.0], [0.0, 1.0]], requires_grad=True)
a = torch.tensor(1.0)
b = torch.tensor(2.0)
c = torch.tensor(3.0)
gradient = torch.tensor([[1.0, 1.0], [1.0, 1.0]])
y = a * torch.pow(x, 2) + b * x + c
y.backward(gradient=gradient)  # for a non-scalar y, backward() needs a weight tensor of the same shape as y
dy_dx = x.grad
print(dy_dx)
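# An alternative sketch (assumed, not part of the original): torch.autograd.grad returns the
# gradient directly instead of accumulating it into x.grad; grad_outputs plays the same role
# as the gradient argument of backward().
x2 = torch.tensor([[-2.0, -1.0], [0.0, 1.0]], requires_grad=True)
y2 = a * torch.pow(x2, 2) + b * x2 + c
(dy_dx2,) = torch.autograd.grad(y2, x2, grad_outputs=gradient)
print(dy_dx2)  # same values as dy_dx above: 2*x + 2 elementwise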
# Reducing the non-scalar case to a scalar one
# Compute the derivative of f(x) = a*x**2 + b*x + c
x = torch.tensor([[-2.0, -1.0], [0.0, 1.0]], requires_grad=True)
a = torch.tensor(1.0)
b = torch.tensor(2.0)
c = torch.tensor(3.0)
gradient = torch.tensor([[1.0, 1.0], [1.0, 1.0]])
y = a * torch.pow(x, 2) + b * x + c
z = torch.sum(y * gradient)  # the weighted sum is a scalar, so plain backward() applies
z.backward()
dy_dx = x.grad
print(dy_dx)
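# Sanity check (added sketch): with all weights equal to 1, both approaches reduce to the
# elementwise derivative dy/dx = 2*a*x + b.
print(torch.allclose(dy_dx, 2 * a * x.detach() + b))  # True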
import torch
import torch.nn as nn
m = nn.Sigmoid()
loss = nn.BCELoss(reduction="none")         # size_average/reduce are deprecated; reduction="none" keeps per-element losses
input = torch.randn(3, requires_grad=True)  # a 1-D tensor of three samples from a standard normal distribution (mean 0, variance 1)
target = torch.empty(3).random_(2)          # random 0/1 targets
lossinput = m(input)
output = loss(lossinput, target)
print("输入值:")
print(lossinput)
print("输出的目标值:")
print(target)
print("计算loss的结果:")
print(output)
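# A verification sketch (added): per-element binary cross entropy computed by hand should match
# nn.BCELoss(reduction="none"), and nn.BCEWithLogitsLoss fuses the sigmoid with the loss, which
# is the numerically safer choice in practice.
manual = -(target * torch.log(lossinput) + (1 - target) * torch.log(1 - lossinput))
print(torch.allclose(output, manual))  # True (up to floating-point error)
loss_with_logits = nn.BCEWithLogitsLoss(reduction="none")
print(torch.allclose(loss_with_logits(input, target), output))  # True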