!nvidia-smi
Mon Aug 16 14:23:34 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 430.26       Driver Version: 430.26       CUDA Version: 10.2     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|===============================+======================+======================|
|   0  GeForce RTX 208...  Off  | 00000000:01:00.0 Off |                  N/A |
| 22%   32C    P8     1W / 250W |   9475MiB / 11018MiB |      0%      Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes:                                                       GPU Memory |
|  GPU       PID   Type   Process name                             Usage      |
|=============================================================================|
|    0      7024      C   ...t/anaconda3/envs/pytorch0.4/bin/python3  9465MiB |
+-----------------------------------------------------------------------------+
import torch
from torch import nn
torch.device('cpu'), torch.device('cuda')
(device(type='cpu'), device(type='cuda'))
torch.cuda.device_count()
1
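As a quick sanity check, torch.cuda.get_device_name reports the model of a given GPU; with the single device counted above it should match the GeForce RTX 208... entry from the nvidia-smi table.
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # prints the GPU model string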
def try_gpu(i=0):
    """Return gpu(i) if it exists; otherwise return cpu()."""
    if torch.cuda.device_count() >= i + 1:
        return torch.device(f'cuda:{i}')
    return torch.device('cpu')
def try_all_gpus():
    """Return a list of all available GPUs, or [cpu()] if there is no GPU."""
    devices = [torch.device(f'cuda:{i}')
               for i in range(torch.cuda.device_count())]
    return devices if devices else [torch.device('cpu')]
try_gpu(), try_gpu(10), try_all_gpus()
(device(type='cuda', index=0),
 device(type='cpu'),
 [device(type='cuda', index=0)])
x = torch.tensor([1, 2, 3])
x.device
device(type='cpu')
X = torch.ones(2, 3, device=try_gpu())
X
tensor([[1., 1., 1.],
        [1., 1., 1.]], device='cuda:0')
Y = torch.rand(2, 3, device=try_gpu(0))
Y
tensor([[0.7585, 0.3336, 0.2373],
        [0.6617, 0.2012, 0.7119]], device='cuda:0')
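Operands of a single operation must live on the same device; as a minimal check (Y is on cuda:0 here, while the earlier tensor x still lives on the CPU), mixing the two raises an error rather than copying data implicitly.
try:
    x + Y  # x is on the CPU, Y is on the GPU
except RuntimeError as e:
    print(e)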
Z = X.cuda(0)
print(X)
print(Z)
tensor([[1., 1., 1.],
        [1., 1., 1.]], device='cuda:0')
tensor([[1., 1., 1.],
        [1., 1., 1.]], device='cuda:0')
Y + Z
tensor([[1.7585, 1.3336, 1.2373],
        [1.6617, 1.2012, 1.7119]], device='cuda:0')
Z.cuda(0) is Z
True
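The same no-copy behavior holds for .to(): a small sketch, assuming Z sits on cuda:0 as above, shows that a transfer to the device a tensor already occupies returns the tensor itself, while moving it elsewhere allocates a new copy.
print(Z.to('cuda:0') is Z)  # True: already on cuda:0, nothing is copied
print(Z.to('cpu') is Z)     # False: the data is copied back to the CPU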
net = nn.Sequential(nn.Linear(3, 1))
net = net.to(device=try_gpu())
net(X)
tensor([[-0.1770],
        [-0.1770]], device='cuda:0', grad_fn=<ThAddmmBackward>)
net[0].weight.data.device
device(type='cuda', index=0)
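To feed new data through the model, the input has to sit on the same device as the parameters; a minimal sketch (the variable name x_input is illustrative) queries that device and copies the CPU tensor x over before the forward pass.
param_device = next(net.parameters()).device      # cuda:0 in this setup
x_input = x.to(device=param_device, dtype=torch.float32).reshape(1, 3)
net(x_input)                                      # runs entirely on the GPU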