动手学深度学习--关于pytorch

import torch
from torch import nn
from torch.nn import functional as F

# A small MLP assembled with nn.Sequential: 20 -> 256 -> ReLU -> 10.
net = nn.Sequential(
    nn.Linear(20, 256),
    nn.ReLU(),
    nn.Linear(256, 10),
)

# A dummy minibatch: 2 examples, 20 features each.
X = torch.rand(2, 20)
net(X)

class MLP(nn.Module):
    """A two-layer perceptron: 20 -> 256 (ReLU) -> 10.

    Bug fixes vs. the original: the base class is ``nn.Module`` (capital M;
    ``nn.module`` does not exist), and the functional ReLU is ``F.relu``
    (lowercase; ``F.ReLU`` does not exist).
    """

    def __init__(self):
        # Call the parent initializer so submodules/parameters get registered.
        super().__init__()
        self.hidden = nn.Linear(20, 256)  # hidden layer
        self.out = nn.Linear(256, 10)     # output layer

    def forward(self, X):
        # Hidden layer, ReLU non-linearity, then the output layer.
        return self.out(F.relu(self.hidden(X)))

net = MLP()  # instantiate the custom module
net(X)  # forward pass: net(X) dispatches to MLP.forward via __call__


class MySequential(nn.Module):
    """A minimal re-implementation of ``nn.Sequential``.

    Bug fixes vs. the original: the ``class`` keyword was misspelled
    (``clas``); ``forward`` was indented inside ``__init__`` and therefore
    never defined on the class; and ``_modules`` was keyed by the module
    object itself — ``nn.Module`` expects string keys, and two identical
    submodules would collide as dict keys.
    """

    def __init__(self, *args):
        super().__init__()
        # Register each child under a unique string key so PyTorch can
        # find its parameters and print/serialize the module correctly.
        for idx, block in enumerate(args):
            self._modules[str(idx)] = block

    def forward(self, X):
        # _modules is an ordered dict, so children run in insertion order.
        for block in self._modules.values():
            X = block(X)
        return X
net = MySequential(nn.Linear(20,256),nn.ReLU(),nn.Linear(256,10))  # same architecture as the nn.Sequential version
net(X)  # forward pass through the hand-rolled Sequential

class FixedHiddenMLP(nn.Module):
    """MLP with a constant (non-trainable) random hidden weight and one
    linear layer whose parameters are shared between two applications."""

    def __init__(self):
        super().__init__()
        # Fixed weights: drawn once at construction, never updated by
        # backprop (requires_grad=False).
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        hidden = self.linear(X)
        # Mix through the frozen weights plus a constant offset, then ReLU.
        hidden = F.relu(torch.mm(hidden, self.rand_weight) + 1)
        # The very same linear layer is applied again (parameter sharing).
        hidden = self.linear(hidden)
        # Data-dependent control flow: halve until the L1 norm is at most 1.
        while hidden.abs().sum() > 1:
            hidden = hidden / 2
        return hidden.sum()
net = FixedHiddenMLP()
net(X)  # returns a scalar: the sum of the repeatedly-halved output
		

读写文件

import torch
from torch import nn
from torch.nn import functional as F  # bug fix: module is `functional`, not `function`

# ---- save/load a single tensor ----
x = torch.arange(4)
torch.save(x, 'x-file')
# ---- save a list of tensors ----
y = torch.zeros(4)
torch.save([x, y], 'x-files')
# ---- save a dict of tensors ----
# Bug fix: dict literals use ':' between key and value, not '='.
mydict = {'x': x, 'y': y}
torch.save(mydict, 'mydict')
x2 = torch.load('x-file')
x2
#----存储模型的参数-------
class MLP(nn.Module):
    """Two-layer perceptron (20 -> 256 -> ReLU -> 10), used below to
    demonstrate saving and loading model parameters."""

    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        hidden_act = F.relu(self.hidden(x))
        return self.output(hidden_act)
net = MLP()
X = torch.randn(size=(2,20))
Y = net(X)
# Persist only the parameters (state_dict), not the module object itself.
torch.save(net.state_dict(),'mlp.params')

# Instantiate a fresh copy of the original MLP and load the stored
# parameters directly from the file.
clone = MLP()
clone.load_state_dict(torch.load('mlp.params'))
clone.eval()

# With identical parameters, the clone reproduces the original output.
Y_clone = clone(X)
Y_clone == Y

关于GPU

!nvidia-smi

import torch
from torch import nn

# NOTE(review): torch.cuda.device(...) here builds a CUDA device-context
# object, not a torch.device — presumably intentional for the demo; verify.
torch.device('cpu'),torch.cuda.device('cuda'),torch.cuda.device('cuda:1')
#-------- query the number of available GPUs --------------------
torch.cuda.device_count()

#-----允许我们在请求的GPU不存在的情况下运行代码---------
def try_gpu(i=0):
    """Return gpu(i) if it exists, otherwise return cpu().

    :param i: index of the requested GPU (default 0).
    :return: a ``torch.device`` — ``cuda:i`` when available, else ``cpu``.
    """
    # Bug fix: device_count is a function and must be called; comparing the
    # function object itself to an int raises a TypeError.
    if torch.cuda.device_count() >= i + 1:
        return torch.device(f'cuda:{i}')
    return torch.device('cpu')
	
def try_all_gpus():
    """Return all available GPUs, or [cpu(),] if there is no GPU.

    :return: non-empty list of ``torch.device`` objects.
    """
    # Bug fixes: the list comprehension was missing its closing bracket,
    # and the original returned the undefined name `device`.
    devices = [
        torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())
    ]
    return devices if devices else [torch.device('cpu')]

try_gpu(),try_gpu(10),try_all_gpus()

查询张量所在的设备

x = torch.tensor([1,2,3])
x.device  # tensors are created on the CPU by default

存储在GPU上,在同一个GPU上进行操作

# Create the tensor directly on the first GPU (or CPU fallback).
# Bug fix: torch.ones(23, ...) built a 1-D tensor of 23 elements, which the
# nn.Linear(3, 1) below cannot consume; the intended shape is (2, 3).
x = torch.ones(2, 3, device=try_gpu())
# Copy x to the second GPU — requires at least two GPUs, raises otherwise.
z = x.cuda(1)
net = nn.Sequential(nn.Linear(3, 1))
# Move the model's parameters to the same device as x before calling it.
net = net.to(device=try_gpu())
net(x)

#------ confirm the model parameters are stored on the same GPU ------
net[0].weight.data.device
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值