Summary
"""
PyTorch是什么?
================
这是一个基于python的科学计算程序包,针对两类用户:
- NumPy的替代品,使用gpu的力量
-深度学习研究平台,提供最大的灵活性和速度
张量
张量类似于NumPy的ndarrays,另外张量还可以用于GPU加速计算。
""'
from __future__ import print_function
import torch
###############################################################
# Construct a 5x3 matrix, uninitialized:
x = torch.empty(5, 3)
print(x)
>>>tensor([[7.0345e-43, 0.0000e+00, 0.0000e+00],
           [0.0000e+00, 0.0000e+00, 0.0000e+00],
           [0.0000e+00, 0.0000e+00, 0.0000e+00],
           [0.0000e+00, 1.6816e-44, 0.0000e+00],
           [0.0000e+00, 0.0000e+00, 0.0000e+00]])
###############################################################
# Construct a randomly initialized matrix:
x = torch.rand(5, 3)
print(x)
>>>tensor([[0.4082, 0.4602, 0.6282],
           [0.4219, 0.7871, 0.5646],
           [0.7035, 0.6932, 0.2241],
           [0.8516, 0.0043, 0.4181],
           [0.3937, 0.1077, 0.0528]])
###############################################################
# Construct a matrix filled with zeros, of dtype long:
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
>>>tensor([[0, 0, 0],
           [0, 0, 0],
           [0, 0, 0],
           [0, 0, 0],
           [0, 0, 0]])
###############################################################
# Construct a tensor directly from data:
x = torch.tensor([5.5, 3])
print(x)
>>>tensor([5.5000, 3.0000])
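###############################################################
# ``torch.tensor`` also accepts nested (multi-dimensional) data and an explicit
# dtype; a small illustrative sketch, not part of the original tutorial:
x = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float)  # 2x3 float tensor
print(x)
>>>tensor([[1., 2., 3.],
           [4., 5., 6.]])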
###############################################################
# or create a tensor based on an existing tensor. These methods will reuse
# properties of the input tensor, e.g. dtype, unless new values are provided by the user
x = x.new_ones(5, 3, dtype=torch.double) # new_* methods take in sizes
print(x)
>>>tensor([[1., 1., 1.],
           [1., 1., 1.],
           [1., 1., 1.],
           [1., 1., 1.],
           [1., 1., 1.]], dtype=torch.float64)
x = torch.randn_like(x, dtype=torch.float) # override dtype!
print(x) # result has the same size
>>>tensor([[-0.5215, 0.4511, -0.6932],
           [-1.4510, 0.4799, 1.3552],
           [ 0.0439, 1.5147, -0.8348],
           [-0.4401, 0.7001, -0.7771],
           [ 0.4279, -0.1357, -1.8610]])
###############################################################
# Get its size:
print(x.size())
>>>torch.Size([5, 3])
###############################################################
# .. note::
#     ``torch.Size`` is in fact a tuple, so it supports all tuple operations.
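#
# For example, the size can be unpacked like an ordinary tuple; a small
# illustrative sketch, not part of the original tutorial:
rows, cols = x.size()   # tuple unpacking works on torch.Size
print(rows, cols)
>>>5 3
###############################################################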
# Operations
# ^^^^^^^^^^
# There are multiple syntaxes for operations. In the following example, we
# will take a look at the addition operation.
#
# Addition: syntax 1
y = torch.rand(5, 3)
print(x + y)
>>>tensor([[ 1.8948e-01, 1.1501e+00, 2.3105e-01],
           [-8.8147e-01, 5.2951e-01, 2.1378e+00],
           [ 4.7292e-01, 1.9604e+00, 3.4334e-02],
           [-1.2493e-03, 7.5624e-01, -2.7771e-01],
           [ 1.2265e+00, 4.7505e-01, -1.1567e+00]])
###############################################################
# Addition: syntax 2
print(torch.add(x, y))
>>>tensor([[ 1.8948e-01, 1.1501e+00, 2.3105e-01],
           [-8.8147e-01, 5.2951e-01, 2.1378e+00],
           [ 4.7292e-01, 1.9604e+00, 3.4334e-02],
           [-1.2493e-03, 7.5624e-01, -2.7771e-01],
           [ 1.2265e+00, 4.7505e-01, -1.1567e+00]])
###############################################################
# Addition: providing an output tensor as argument
result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)
>>>tensor([[ 1.8948e-01, 1.1501e+00, 2.3105e-01],
           [-8.8147e-01, 5.2951e-01, 2.1378e+00],
           [ 4.7292e-01, 1.9604e+00, 3.4334e-02],
           [-1.2493e-03, 7.5624e-01, -2.7771e-01],
           [ 1.2265e+00, 4.7505e-01, -1.1567e+00]])
###############################################################
# Addition: in-place
# adds x to y
y.add_(x)
print(y)
>>>tensor([[ 1.8948e-01, 1.1501e+00, 2.3105e-01],
           [-8.8147e-01, 5.2951e-01, 2.1378e+00],
           [ 4.7292e-01, 1.9604e+00, 3.4334e-02],
           [-1.2493e-03, 7.5624e-01, -2.7771e-01],
           [ 1.2265e+00, 4.7505e-01, -1.1567e+00]])
###############################################################
# .. note::
#     Any operation that mutates a tensor in-place is post-fixed with an ``_``.
#     For example: ``x.copy_(y)``, ``x.t_()`` will change ``x``.
#
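# A small illustrative sketch of this convention (using a throwaway tensor so
# the values of ``x`` used below stay unchanged):
u = torch.ones(2, 2)
u.t_()       # transpose u in-place
u.mul_(3)    # multiply u by 3 in-place
print(u)
>>>tensor([[3., 3.],
           [3., 3.]])
###############################################################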
# You can use standard NumPy-like indexing!
print(x[:, 1])
>>>tensor([ 0.4511, 0.4799, 1.5147, 0.7001, -0.1357])
###############################################################
# Resizing: If you want to resize/reshape tensor, you can use ``torch.view``:
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8) # the size -1 is inferred from other dimensions
print(x.size(), y.size(), z.size())
>>>torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
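###############################################################
# Note that ``view`` returns a tensor that shares the underlying data with the
# original, so modifying the view also modifies the source; a small
# illustrative sketch, not part of the original tutorial:
v = torch.zeros(2, 2)
w = v.view(4)
w[0] = 7          # writes through to v as well
print(v)
>>>tensor([[7., 0.],
           [0., 0.]])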
###############################################################
# If you have a one element tensor, use ``.item()`` to get the value as a
# Python number
x = torch.randn(1)
print(x)
>>>tensor([-0.8630])
print(x.item())
>>>-0.8630334138870239
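###############################################################
# ``.item()`` works on any one-element tensor, e.g. the result of a reduction;
# a small illustrative sketch, not part of the original tutorial:
s = torch.ones(3, 3).sum()   # a 0-dimensional tensor holding 9.0
print(s.item())
>>>9.0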
###############################################################
# **Read later:**
#
# 100+ Tensor operations, including transposing, indexing, slicing,
# mathematical operations, linear algebra, random numbers, etc.,
# are described at <https://pytorch.org/docs/torch>.
#
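# As a brief taste of two such operations, a small illustrative sketch
# (not part of the original tutorial):
m = torch.ones(2, 3)
print(m.t().size())             # transpose
>>>torch.Size([3, 2])
print(torch.matmul(m, m.t()))   # matrix multiplication
>>>tensor([[3., 3.],
           [3., 3.]])
###############################################################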
# NumPy Bridge
# ------------
#
# Converting a Torch Tensor to a NumPy array and vice versa is a breeze.
#
# The Torch Tensor and NumPy array will share their underlying memory
# locations (if the Torch Tensor is on CPU), and changing one will change
# the other.
#
# Converting a Torch Tensor to a NumPy Array
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
a = torch.ones(5)
print(a)
>>>tensor([1., 1., 1., 1., 1.])
###############################################################
#
b = a.numpy()
print(b)
>>>[1. 1. 1. 1. 1.]
###############################################################
# See how the numpy array changed in value.
a.add_(1)
print(a)
>>>tensor([2., 2., 2., 2., 2.])
print(b)
>>>[2. 2. 2. 2. 2.]
###############################################################
# Converting NumPy Array to Torch Tensor
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# See how changing the np array changed the Torch Tensor automatically
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
>>>[2. 2. 2. 2. 2.]
print(b)
>>>tensor([2., 2., 2., 2., 2.], dtype=torch.float64)
###############################################################
# All the Tensors on the CPU except a CharTensor support converting to
# NumPy and back.
#
# CUDA Tensors
# ------------
# Tensors can be moved onto any device using the ``.to`` method.
#
# let us run this cell only if CUDA is available
# We will use ``torch.device`` objects to move tensors in and out of GPU
if torch.cuda.is_available():
    device = torch.device("cuda")          # a CUDA device object
    y = torch.ones_like(x, device=device)  # directly create a tensor on GPU
    x = x.to(device)                       # or just use strings ``.to("cuda")``
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # ``.to`` can also change dtype together!
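###############################################################
# A common device-agnostic follow-up pattern; a small illustrative sketch, not
# part of the original tutorial: pick the device once, then create tensors on
# it, so the same code also runs on the CPU when CUDA is unavailable.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
t = torch.ones(2, 2, device=device)   # created directly on the selected device
print(t.device)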