Basic PyTorch computation

import torch
import torch.nn as nn
import torch.nn.functional as F

from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt

import numpy as np

torch.manual_seed(446)
np.random.seed(446)
# How torch differs from numpy
# numpy arrays cannot track gradients, but torch tensors can
x_numpy = np.array([0.1, 0.2, 0.3])
x_torch = torch.tensor([0.1, 0.2, 0.3])
print('x_numpy', 'x_torch')
print(x_numpy, x_torch)

x_numpy x_torch
[0.1 0.2 0.3] tensor([0.1000, 0.2000, 0.3000])

# Convert between torch tensors and numpy arrays
print(torch.from_numpy(x_numpy), x_torch.numpy())

tensor([0.1000, 0.2000, 0.3000], dtype=torch.float64) [0.1 0.2 0.3]
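
Note that torch.from_numpy does not copy the data: the tensor and the NumPy array share the same memory buffer (as does Tensor.numpy() for CPU tensors). A quick check, using a fresh array so that x_numpy is left untouched:

# from_numpy shares memory with the source array; no copy is made
a_np = np.zeros(3)
a_t = torch.from_numpy(a_np)
a_np[0] = 7.0
print(a_t)   # tensor([7., 0., 0.], dtype=torch.float64): the change is visible through the tensor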

# Addition
y_numpy = np.array([3,4,5.])
y_torch = torch.tensor([3,4,5.])
print('x+y')
print(x_numpy + y_numpy, x_torch + y_torch)

x+y
[3.1 4.2 5.3] tensor([3.1000, 4.2000, 5.3000])

print("norm")
print(np.linalg.norm(x_numpy), torch.norm(x_torch))

norm
0.37416573867739417 tensor(0.3742)

# Use tensor.view() to reshape a tensor
# MNIST-sized images (28x28), here with 3 channels
N, C, W, H = 10000, 3, 28, 28
X = torch.randn((N, C, W, H))
print(X.shape)
print(X.view(N, C, 784).shape)
print(X.view(-1, C, 784).shape)

torch.Size([10000, 3, 28, 28])
torch.Size([10000, 3, 784])
torch.Size([10000, 3, 784])
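
view() requires the new shape to be compatible with the tensor's memory layout; reshape() behaves the same but silently copies when it must. A small sketch of the difference on a non-contiguous tensor:

# transpose produces a non-contiguous view, which view() cannot reshape
Xt = X.transpose(1, 3)              # shape (N, H, W, C)
print(Xt.is_contiguous())           # False
print(Xt.reshape(N, -1).shape)      # torch.Size([10000, 2352]); reshape copies when needed
# Xt.view(N, -1) would raise a RuntimeError here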

# Broadcasting: two dimensions are compatible when they are equal, or one of them is 1 or missing
x=torch.empty(5,1,4,1)
y=torch.empty(  3,1,1)
print((x+y).size())

torch.Size([5, 3, 4, 1])
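
Broadcasting compares shapes from the trailing dimension: each pair must be equal, or one of the two must be 1 or missing. The helper below (broadcast_shape is written here purely for illustration) reproduces the rule and matches the result above:

def broadcast_shape(s1, s2):
    # pad the shorter shape with leading 1s, then take the max of each compatible pair
    s1, s2 = tuple(s1), tuple(s2)
    s1 = (1,) * (len(s2) - len(s1)) + s1 if len(s1) < len(s2) else s1
    s2 = (1,) * (len(s1) - len(s2)) + s2 if len(s2) < len(s1) else s2
    assert all(d1 == d2 or d1 == 1 or d2 == 1 for d1, d2 in zip(s1, s2)), "not broadcastable"
    return tuple(max(d1, d2) for d1, d2 in zip(s1, s2))

print(broadcast_shape((5, 1, 4, 1), (3, 1, 1)))   # (5, 3, 4, 1)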

# we set requires_grad=True to let PyTorch know to keep the graph
a = torch.tensor(2.0, requires_grad=True)
b = torch.tensor(1.0, requires_grad=True)
c = a + b
d = b + 1
e = c * d
print('c', c)
print('d', d)
print('e', e)

c tensor(3., grad_fn=<AddBackward0>)
d tensor(2., grad_fn=<AddBackward0>)
e tensor(6., grad_fn=<MulBackward0>)
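
Calling backward() on e propagates gradients through this graph. Since e = (a + b)(b + 1), we get de/da = b + 1 = 2 and de/db = (b + 1) + (a + b) = 5, which is what autograd should report:

e.backward()
print('a.grad', a.grad)   # tensor(2.)
print('b.grad', b.grad)   # tensor(5.)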

# Compute on the CPU or on the GPU
cpu = torch.device("cpu")
gpu = torch.device("cuda")

x = torch.rand(10)
print(x)
x = x.to(gpu)
print(x)
x = x.to(cpu)
print(x)

tensor([0.3959, 0.6177, 0.7256, 0.0971, 0.9186, 0.8277, 0.4409, 0.9344, 0.8967,
        0.1897])
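
The hard-coded x.to(gpu) above raises an error on a machine without CUDA. A guarded variant (a minimal sketch) falls back to the CPU:

# pick the GPU only if one is actually available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = torch.rand(10)
x = x.to(device)
print(x.device)      # cuda:0 on a GPU machine, otherwise cpu
x = x.to(cpu)
print(x.device)      # cpu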

# Differentiation with autograd
def f(x):
    return (x-2)**2

def fp(x):
    return 2*(x-2)

x = torch.tensor([1.0], requires_grad=True)

y = f(x)
y.backward()   

print(fp(x))
print('Pytorch\'s f\'(x):',x.grad)

tensor([-2.], grad_fn=<MulBackward0>)
Pytorch's f'(x): tensor([-2.])
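
With autograd supplying f'(x), minimizing f(x) = (x - 2)^2 by gradient descent takes only a few lines. A minimal sketch; the starting point, step size, and iteration count are arbitrary choices:

x = torch.tensor([5.0], requires_grad=True)
lr = 0.25
for _ in range(20):
    y = f(x)
    y.backward()                 # fills x.grad with f'(x)
    with torch.no_grad():
        x -= lr * x.grad         # gradient descent step
        x.grad.zero_()           # reset the gradient before the next iteration
print(x)                         # should be very close to the minimizer x = 2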

# Linear regression
d = 2
n = 50
X = torch.randn(n, d)
true_w = torch.tensor([[-1.0], [2.0]])
y = X @ true_w + torch.randn(n, 1) * 0.1
print('X shape', X.shape)
print('y shape', y.shape)
print('w shape', true_w.shape)

X shape torch.Size([50, 2])
y shape torch.Size([50, 1])
w shape torch.Size([2, 1])
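
Before reaching for nn.Linear, the weights can be fit directly with autograd and a hand-written gradient descent loop. A minimal sketch; the step size and number of iterations are arbitrary:

w = torch.zeros(d, 1, requires_grad=True)
step_size = 0.1
for _ in range(100):
    loss = torch.mean((X @ w - y) ** 2)   # mean squared error
    loss.backward()
    with torch.no_grad():
        w -= step_size * w.grad
        w.grad.zero_()
print('estimated w:', w.detach().view(-1))   # should be close to [-1, 2]
print('final loss:', loss.item())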

d_in = 3
d_out = 4
linear_module = nn.Linear(d_in, d_out)

example_tensor = torch.tensor([[1.,2,3], [4,5,6]])
transformed = linear_module(example_tensor)
print('example_tensor', example_tensor.shape)
print('transformed', transformed.shape)
print('w:', linear_module.weight)   # the layer's weight matrix
print('b:', linear_module.bias)     # the layer's bias vector

example_tensor torch.Size([2, 3])
transformed torch.Size([2, 4])
w: Parameter containing:
tensor([[ 0.5260,  0.4925, -0.0887],
        [ 0.3944,  0.4080,  0.2182],
        [-0.1409,  0.0518,  0.3034],
        [ 0.0913,  0.2452, -0.2616]], requires_grad=True)
b: Parameter containing:
tensor([0.5021, 0.0118, 0.1383, 0.4757], requires_grad=True)
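
nn.Linear implements the affine map x @ W^T + b, so the transformed output can be reproduced directly from the printed weight and bias:

manual = example_tensor @ linear_module.weight.t() + linear_module.bias
print(torch.allclose(manual, transformed))   # True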

# Activation functions
activation_fn = nn.ReLU()
example_tensor = torch.tensor([-1.0, 1.0, 0.0])
activated = activation_fn(example_tensor)
print('example_tensor', example_tensor)
print('activated', activated)

example_tensor tensor([-1., 1., 0.])
activated tensor([0., 1., 0.])
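
The same activations are also available in functional form via torch.nn.functional (imported above as F), which avoids constructing a module:

print(F.relu(example_tensor))      # tensor([0., 1., 0.]), identical to the module version
print(torch.tanh(example_tensor))  # element-wise tanh, for comparison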

# Compose layers with nn.Sequential
d_in = 3
d_hidden = 4
d_out = 1
model = torch.nn.Sequential(
    nn.Linear(d_in, d_hidden),
    nn.Tanh(),
    nn.Linear(d_hidden, d_out),
    nn.Sigmoid()
)

example_tensor = torch.tensor([[1.,2,3], [4,5,6]])
transformed = model(example_tensor)
print('transformed', transformed.shape)

transformed torch.Size([2, 1])

# Get the model's parameters
params = model.parameters()

for param in params:
    print(param)

Parameter containing:
tensor([[ 0.5431,  0.0524,  0.1126],
        [ 0.2683, -0.2361,  0.2769],
        [-0.1380,  0.5661, -0.1071],
        [-0.3357,  0.0848, -0.0454]], requires_grad=True)
Parameter containing:
tensor([-0.2858, -0.0707, -0.3168,  0.1860], requires_grad=True)
Parameter containing:
tensor([[-0.1412,  0.4412,  0.4086,  0.2844]], requires_grad=True)
Parameter containing:
tensor([-0.4175], requires_grad=True)

# Mean squared error loss
mse_loss_fn = nn.MSELoss()

input = torch.tensor([[0. ,0, 0]])
target = torch.tensor([[1. , 0, -1]])

loss = mse_loss_fn(input, target)

print(loss)

tensor(0.6667)
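
With the default reduction='mean', nn.MSELoss averages the squared differences over all elements: ((0-1)^2 + (0-0)^2 + (0-(-1))^2) / 3 = 2/3. This can be checked by hand:

print(torch.mean((input - target) ** 2))            # tensor(0.6667), same as above
print(nn.MSELoss(reduction='sum')(input, target))   # tensor(2.), sum instead of mean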

# Optimizers
# create a simple model
model = nn.Linear(1, 1)

# create a simple dataset
x_simple = torch.tensor([[1.]])
y_simple = torch.tensor([[2.]])

# create optimizer
optim = torch.optim.SGD(model.parameters(), lr=1e-2)

y_hat = model(x_simple)
print('model params before:', model.weight)
loss = mse_loss_fn(y_hat, y_simple)
optim.zero_grad()
loss.backward()
optim.step()
print('model params after:', model.weight)

model params before: Parameter containing:
tensor([[-0.5237]], requires_grad=True)
model params after: Parameter containing:
tensor([[-0.4818]], requires_grad=True)
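
Putting the pieces together (model, loss, and optimizer) gives a complete training loop. The sketch below fits the (X, y) linear regression data generated earlier; the learning rate and epoch count are arbitrary choices:

model = nn.Linear(d, 1)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = nn.MSELoss()
for epoch in range(100):
    y_hat = model(X)
    loss = loss_fn(y_hat, y)
    optim.zero_grad()     # clear old gradients
    loss.backward()       # compute new gradients
    optim.step()          # update the parameters
print('learned weight:', model.weight.detach().view(-1))   # should be close to [-1, 2]
print('learned bias:', model.bias.item())                  # should be close to 0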
