The origin (0, 0) is at the top-left corner of the image.
After a convolution, the channel count c, width w, and height h can all change.
One convolution kernel produces exactly one output channel: a 3 x w x h input convolved with a single 3-channel kernel becomes 1 x w' x h'.
With multiple kernels, each kernel produces its own single-channel output, and the outputs are concatenated (cat) along the channel dimension: m kernels give m output channels.
The layer's full weight tensor therefore has shape m x n x kernel_w x kernel_h (output channels x input channels x kernel width x kernel height).
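A minimal sketch (my own, not from the original notes) confirming this view: convolving with all m kernels at once gives the same result as running each kernel separately and concatenating the single-channel outputs.

import torch
import torch.nn.functional as F

n, m = 3, 4                       # n input channels, m kernels
x = torch.randn(1, n, 8, 8)       # one input with n channels
weight = torch.randn(m, n, 3, 3)  # m kernels, each of shape n x 3 x 3

out = F.conv2d(x, weight)  # all m kernels at once -> m output channels

# equivalent: apply each kernel on its own (one output channel each), then cat
per_kernel = [F.conv2d(x, weight[i:i+1]) for i in range(m)]
out_cat = torch.cat(per_kernel, dim=1)

print(out.shape)                     # torch.Size([1, 4, 6, 6])
print(torch.allclose(out, out_cat))  # True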
import torch

in_channels, out_channels = 5, 10  # 5 input channels, 10 output channels
width, height = 100, 100
kernel_size = 3
batch_size = 1

input = torch.randn(batch_size,   # mini-batch size
                    in_channels,  # n
                    width,        # w
                    height)       # h

conv_layer = torch.nn.Conv2d(in_channels,   # n input channels
                             out_channels,  # m output channels
                             kernel_size=kernel_size)
'''
# variant with stride and zero padding:
conv_layer = torch.nn.Conv2d(in_channels,              # n input channels
                             out_channels,             # m output channels
                             kernel_size=kernel_size,  # kernel size
                             stride=2,                 # stride
                             padding=1,                # zero padding
                             bias=False)               # no bias term
'''
output = conv_layer(input)

print(input.shape)              # torch.Size([1, 5, 100, 100])
print(output.shape)             # torch.Size([1, 10, 98, 98])
print(conv_layer.weight.shape)  # torch.Size([10, 5, 3, 3]) = m x n x kernel x kernel
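The printed shapes follow the standard output-size formula w' = floor((w + 2*padding - kernel_size) / stride) + 1. A small helper (conv_out_size is my own name, not part of the original notes) to check both variants above:

def conv_out_size(size, kernel_size, stride=1, padding=0):
    # floor((size + 2*padding - kernel_size) / stride) + 1
    return (size + 2 * padding - kernel_size) // stride + 1

print(conv_out_size(100, 3))                       # 98 (default stride=1, padding=0)
print(conv_out_size(100, 3, stride=2, padding=1))  # 50 (the commented-out variant)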
Zero padding
Code:
import torch
input = [3, 4, 5, 6, 7,
         8, 9, 1, 2, 3,
         4, 5, 6, 7, 8,
         9, 0, 1, 2, 3,
         4, 5, 6, 7, 8]
input = torch.Tensor(input).view(1, 1, 5, 5)  # shape (1, n, w, h)
conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)
# conv_layer = torch.nn.Conv2d(1, 2, kernel_size=3, padding=1, bias=False)  # variant with 2 output channels
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)  # kernel weights, shape (m, n, 3, 3)
# kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9]).view(2, 1, 3, 3)  # weights for the 2-channel variant
conv_layer.weight.data = kernel.data
output = conv_layer(input)  # output shape (1, m, w', h')
print(output)
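With padding=1, the 3 x 3 convolution keeps the 5 x 5 input size. As a quick cross-check (my own sketch, reusing the input and kernel tensors defined above), the functional API gives the same result:

import torch.nn.functional as F
# same computation as conv_layer(input); padding=1 keeps the output at 5 x 5
print(F.conv2d(input, kernel, padding=1))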
Network architecture
Note: the flowchart from the lecture and the code below do not agree; the ReLU and pooling layers sit in different positions (see the sketch below for why this does not change the result).
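The two orderings produce identical outputs, because ReLU is monotone non-decreasing and therefore commutes with max pooling: relu(max(x)) == max(relu(x)). Pooling first is simply cheaper, since ReLU then runs on fewer elements. A quick check (my own sketch, not from the original notes):

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 8, 8)
a = F.relu(F.max_pool2d(x, 2))  # pool first, then ReLU
b = F.max_pool2d(F.relu(x), 2)  # ReLU first, then pool
print(torch.allclose(a, b))     # True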
Running the code on the GPU
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
# prepare dataset
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)
# design model using class
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))  # pool first, then ReLU
        '''
        x = self.pooling(F.relu(self.conv1(x)))
        x = self.pooling(F.relu(self.conv2(x)))  # ReLU first, then pool
        '''
        x = x.view(batch_size, -1)  # flatten from (n, 20, 4, 4) to (n, 320); -1 is inferred as 320
        # print("x.shape", x.shape)
        x = self.fc(x)
        return x
model = Net()
# to run the model on the GPU, add the following two lines
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)  # move all model parameters to the GPU
# ==========
# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
# training cycle forward, backward, update
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        # move the data to the GPU as well; data and model must be on the same device
        inputs, target = inputs.to(device), target.to(device)
        # ===============
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch+1, batch_idx+1, running_loss/300))
            running_loss = 0.0
def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %% ' % (100*correct/total))
    return correct/total
if __name__ == '__main__':
    epoch_list = []
    acc_list = []
    for epoch in range(10):
        train(epoch)
        acc = test()
        epoch_list.append(epoch)
        acc_list.append(acc)

    plt.plot(epoch_list, acc_list)
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.show()
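Where does the 320 in self.fc come from? Each 28 x 28 input shrinks as 28 -> 24 (conv1, 5 x 5) -> 12 (pool) -> 8 (conv2, 5 x 5) -> 4 (pool), ending with 20 channels, so 20 * 4 * 4 = 320. A standalone sketch (my own, using the same layer shapes) to confirm:

import torch

x = torch.randn(1, 1, 28, 28)
x = torch.nn.MaxPool2d(2)(torch.nn.Conv2d(1, 10, kernel_size=5)(x))   # (1, 10, 12, 12)
x = torch.nn.MaxPool2d(2)(torch.nn.Conv2d(10, 20, kernel_size=5)(x))  # (1, 20, 4, 4)
print(x.view(1, -1).shape)  # torch.Size([1, 320])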
Homework