I. Fully Connected Layers
1. nn.Linear
nn.Linear creates w/b for you, no manual setup needed; chaining three layers turns torch.Size([1, 784]) into torch.Size([1, 10]), i.e. in 784 -> out 200, then 200 -> 200, then 200 -> 10.
import torch
import torch.nn as nn

x = torch.randn(1, 784)
x.shape                      # torch.Size([1, 784])
layer1 = nn.Linear(784, 200)
layer2 = nn.Linear(200, 200)
layer3 = nn.Linear(200, 10)
x = layer1(x)
x.shape                      # torch.Size([1, 200])
x = layer2(x)
x.shape                      # torch.Size([1, 200])
x = layer3(x)
x.shape                      # torch.Size([1, 10])
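As a quick sanity check (a minimal sketch, not part of the listing above), you can inspect the parameters nn.Linear created automatically; note that PyTorch stores the weight as [out_features, in_features]:
import torch.nn as nn

layer1 = nn.Linear(784, 200)
print(layer1.weight.shape)   # torch.Size([200, 784]): stored as [out, in]
print(layer1.bias.shape)     # torch.Size([200])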
2. Adding ReLU
Previously x went straight into the next layer; inserting the ReLU activation in between keeps the network nonlinear (without it, stacked linear layers collapse into a single linear map).
import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 784)
x.shape                      # torch.Size([1, 784])
layer1 = nn.Linear(784, 200)
layer2 = nn.Linear(200, 200)
layer3 = nn.Linear(200, 10)
x = layer1(x)
x = F.relu(x, inplace=True)  # activation after the first layer
x.shape                      # torch.Size([1, 200])
x = layer2(x)
x = F.relu(x, inplace=True)
x.shape                      # torch.Size([1, 200])
x = layer3(x)
x = F.relu(x, inplace=True)
x.shape                      # torch.Size([1, 10])
3. nn.ReLU vs. F.relu
Class-style API: nn.ReLU (capitalized) must be instantiated before it is called; for class-style modules, w/b are internal members that cannot be accessed directly, only through .parameters().
layer = nn.ReLU()
x = layer(x)
Function-style API: F.relu (lowercase) is a lower-level function and needs no instance.
x = F.relu(x, inplace=True)
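A minimal sketch contrasting the two styles side by side (the tensor here is illustrative):
import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 784)
# class-style: instantiate first; parameters live inside the module
fc = nn.Linear(784, 200)
for p in fc.parameters():
    print(p.shape)           # torch.Size([200, 784]), then torch.Size([200])
x = fc(x)
# function-style: stateless, applied directly without instantiation
x = F.relu(x, inplace=True)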
4. Encapsulating the Network Structure
The three layers above were separate; for stronger encapsulation we create our own class (the network structure), inheriting from nn.Module:
① __init__
② forward()
③ no backward() needed (autograd derives it)
# encapsulate the network structure
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(784, 200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 10),
            nn.LeakyReLU(inplace=True),
        )

    def forward(self, x):
        x = self.model(x)
        return x
import torch.optim as optim

# instantiate
net = MLP()
optimizer = optim.SGD(net.parameters(), lr=learning_rate)
criteon = nn.CrossEntropyLoss()
# in the train/test code below, logits = forward(data) becomes logits = net(data)
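A quick usage sketch (the dummy batch is illustrative): calling the instance runs forward() via nn.Module's __call__, and named_parameters() exposes the encapsulated w/b:
import torch

x = torch.randn(4, 784)          # dummy batch of 4 flattened images
logits = net(x)                  # __call__ dispatches to forward()
print(logits.shape)              # torch.Size([4, 10])
for name, p in net.named_parameters():
    print(name, p.shape)         # e.g. model.0.weight torch.Size([200, 784])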
II. Activation Functions and GPU Acceleration
1. Activation Functions
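The network above uses LeakyReLU, which, unlike ReLU, keeps a small slope for negative inputs so neurons cannot "die". A minimal sketch of the difference (the input values are illustrative; 0.01 is PyTorch's default negative_slope):
import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, -0.5, 0.0, 1.0])
print(F.relu(x))                              # tensor([0., 0., 0., 1.])
print(F.leaky_relu(x, negative_slope=0.01))   # tensor([-0.0200, -0.0050, 0.0000, 1.0000])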
2. GPU Acceleration
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # safer: falls back to CPU
device = torch.device('cuda:0')
net = MLP().to(device)
optimizer = optim.SGD(net.parameters(), lr=learning_rate)
criteon = nn.CrossEntropyLoss().to(device)
for epoch in range(epochs):
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.view(-1, 28*28)
        data, target = data.to(device), target.to(device)
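To confirm the model and data actually moved to the GPU (a quick check, assuming the setup above and an available CUDA device):
print(next(net.parameters()).device)   # cuda:0
print(data.device, target.device)      # cuda:0 cuda:0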
III. Testing
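The full test loop appears in Section IV; its core is taking the argmax over the 10 logits per sample and comparing against the labels. A minimal sketch with illustrative tensors:
import torch

logits = torch.tensor([[0.1, 2.5, 0.3],
                       [1.7, 0.2, 0.4]])   # 2 samples, 3 classes
target = torch.tensor([1, 2])
pred = logits.max(1)[1]                    # argmax per row: tensor([1, 0])
correct = pred.eq(target).sum()            # tensor(1): only sample 0 is right
print(correct.item() / len(target))        # 0.5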
IV. Multi-Class Classification with the Encapsulated Network + nn.LeakyReLU + GPU Acceleration
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

batch_size = 200
learning_rate = 0.001
epochs = 10

# load the data
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       # conventional MNIST mean/std
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True)
# the MLP model
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(784, 200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 10),
            nn.LeakyReLU(inplace=True),
        )

    def forward(self, x):
        x = self.model(x)
        return x
# device = torch.device('cuda:0')
# net = MLP().to(device)
# optimizer = optim.SGD(net.parameters(), lr=learning_rate)
# criteon = nn.CrossEntropyLoss().to(device)
net = MLP()
optimizer = optim.SGD(net.parameters(), lr=learning_rate)
criteon = nn.CrossEntropyLoss()
# train
for epoch in range(epochs):
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.view(-1, 28*28)           # flatten [B, 1, 28, 28] -> [B, 784]
        # data, target = data.to(device), target.to(device)  # uncomment for GPU
        logits = net(data)
        loss = criteon(logits, target)        # CrossEntropyLoss takes raw logits
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
# test
test_loss = 0
correct = 0
with torch.no_grad():                         # no gradients needed for evaluation
    for data, target in test_loader:
        data = data.view(-1, 28*28)
        # data, target = data.to(device), target.to(device)  # uncomment for GPU
        logits = net(data)
        test_loss += criteon(logits, target).item()
        pred = logits.max(1)[1]               # index of the largest logit = predicted class
        correct += pred.eq(target).sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
    test_loss, correct, len(test_loader.dataset),
    100. * correct / len(test_loader.dataset)))