目标检测小白系列!自存代码~

这是一个目标检测小白的自存代码~如果看官有目标检测领域好用的代码欢迎留言

torch搭建神经网络规范化代码

# 导入库
import torch
import torch.nn as nn
import torchvision
from torchvision import datasets,transforms
from torch.nn import functional as F
import torch.optim as optim

'''
**** Step1.网络定义****
'''
class Flatten(nn.Module):
    """Flatten a (N, *) tensor to (N, features), e.g. conv output -> linear input."""
    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, input):
        # BUG FIX: the original named this method `__forward`; the double
        # underscore triggers name mangling and nn.Module.__call__ never
        # dispatches to it, so the module raised NotImplementedError when used.
        return input.view(input.size(0), -1)
    
class MLP(nn.Module):
    """Three-layer fully-connected classifier for flattened 28x28 MNIST images.

    Input:  (N, 784) float tensor.
    Output: (N, 10) raw logits (no output activation) for CrossEntropyLoss.
    """
    def __init__(self):
        super(MLP, self).__init__()
        self.model = nn.Sequential()
        # Module names inside a Sequential must be unique or add_module raises.
        self.model.add_module('Linear1', torch.nn.Linear(784, 200))
        self.model.add_module('ReLU1', torch.nn.ReLU(inplace=True))
        self.model.add_module('Linear2', torch.nn.Linear(200, 200))
        self.model.add_module('ReLU2', torch.nn.ReLU(inplace=True))
        # BUG FIX: the original applied a ReLU after the final Linear layer.
        # Zeroing negative logits before CrossEntropyLoss distorts the softmax
        # and hampers training; the output layer now emits raw logits.
        self.model.add_module('Linear3', torch.nn.Linear(200, 10))

    def forward(self, x):
        x = self.model(x)
        return x
# Method 2 -------------------------------
# class MLP(nn.Module):
#     def __init__(self):
#         super(MLP , self).__init__()
#         self.model = nn.Sequential(
#             nn.Linear(784 , 200),
#             nn.ReLU(inplace = True),
#             nn.Linear(200 , 200),
#             nn.ReLU(inplace = True),
#             nn.Linear(200 , 10),
#             nn.ReLU(inplace = True)
#         )
#     def forward(self,x):
#         x = self.model(x)
#         return x
# Method 3 -------------------------------
# class Net3(torch.nn.Module):
#     def __init__(self):
#         super(Net3, self).__init__()
#         self.conv=torch.nn.Sequential()
#         self.conv.add_module("conv1",torch.nn.Conv2d(3, 32, 3, 1, 1))
#         self.conv.add_module("relu1",torch.nn.ReLU())
#         self.conv.add_module("pool1",torch.nn.MaxPool2d(2))
#         self.dense = torch.nn.Sequential()
#         self.dense.add_module("dense1",torch.nn.Linear(32 * 3 * 3, 128))
#         self.dense.add_module("relu2",torch.nn.ReLU())
#         self.dense.add_module("dense2",torch.nn.Linear(128, 10))
 
#     def forward(self, x):
#         conv_out = self.conv1(x)
#         res = conv_out.view(conv_out.size(0), -1)
#         out = self.dense(res)
#         return out
'''
**** Step2.参数定义****
'''
'''
Learning_rate,
		https://zhuanlan.zhihu.com/p/88640888
epochs,
	可以先设定一个固定的Epoch大小(100轮),一般当模型的loss不再持续减小,且精度不在10轮内提升,就可以提前停止训练了。(设置条件来停止epoch)
batch_size 
	语音、画面、自然语言不同任务时,一般最好的 batch size 分别为 8,16,32
从 batch size 以 128 为分界线。向下 (x0.5) 和向上 (x2) 训练后比较测试结果。若向下更好则再 x0.5,直到结果不再提升。
'''

# Training hyper-parameters (choices discussed in the notes above):
# 50 full passes over the data, SGD step size 1e-2, 64 samples per mini-batch.
epochs, learning_rate, batch_size = 50, 1e-2, 64
'''
**** Step3.数据载入****
'''
'''Build the MNIST training dataset and its mini-batch loader.'''
# 0.1307 / 0.3081 are the canonical MNIST per-channel mean and std.
_mnist_train_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.1307,), (0.3081,)),
])
train_db = datasets.MNIST('data/mnist_data',
                          train=True,
                          download=False,
                          transform=_mnist_train_transform)
train_loader = torch.utils.data.DataLoader(train_db,
                                           batch_size=batch_size,
                                           shuffle=True)
'''Build the MNIST test dataset and its mini-batch loader.'''
test_db = datasets.MNIST('data/mnist_data',
                         # BUG FIX: the original loaded the *training* split
                         # (train=True) as the test set, so the final "test"
                         # accuracy was measured on already-seen data.
                         train=False,
                         download=False,
                         transform=torchvision.transforms.Compose([
                             torchvision.transforms.ToTensor(),
                             torchvision.transforms.Normalize((0.1307,), (0.3081,))
                         ])
                         )
test_loader = torch.utils.data.DataLoader(
    test_db,
    batch_size=batch_size,
    # shuffling is unnecessary for evaluation; kept to preserve the original
    # loader configuration
    shuffle=True
)
'''Carve a 10k-sample validation set out of the training data.'''
print('the present train_db is {} , test_db is {}'.format(len(train_db) , len(test_db)))
# FIX: compute the split from the actual dataset length instead of the
# hard-coded [50000, 10000], which raises if the dataset size is not 60000.
_val_size = 10000
train_db , val_db = torch.utils.data.random_split(train_db , [len(train_db) - _val_size , _val_size])
print('the present train_db is {} , val_db is {}'.format(len(train_db) , len(val_db)))

'''Rebuild the training loader over the reduced training split.'''
train_loader = torch.utils.data.DataLoader(
    train_db,
    batch_size = batch_size,
    shuffle = True
)
'''Mini-batch loader for the validation split.'''
val_loader = torch.utils.data.DataLoader(
    val_db,
    batch_size = batch_size,
    shuffle = True
)
'''
**** Step4.数据训练 & 测试模型****
'''
#%%time
'''Device selection + network instantiation + optimizer + loss + LR schedule.'''
# BUG FIX / robustness: the original hard-coded 'cuda:0' and crashed on
# CPU-only machines; fall back to the CPU when CUDA is unavailable.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
net = MLP().to(device)
optimizer = optim.SGD(net.parameters(),
                      lr=learning_rate,
#                       momentum = 0.01,
#                       weight_decay = 0.01
                     )
criterion = nn.CrossEntropyLoss().to(device)
'''LR policy: shrink the learning rate by 10x (factor=0.1) when the
monitored validation loss stops improving.'''
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min' , 0.1)

'''Training loop with a validation pass at the end of every epoch.'''
for epoch in range(epochs):
    net.train()
    for batch_idx,(data,target) in enumerate(train_loader):
        data = data.view(-1,28*28)  # the MLP expects flat 784-d vectors
        data, target = data.to(device), target.to(device)
        logits = net(data)  # raw pre-softmax scores, shape (batch, 10)
        loss= criterion(logits,target)

        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        loss.backward()
        optimizer.step()  # apply the SGD update

        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

    '''Validation: accumulate loss/accuracy and drive the LR scheduler.'''
    net.eval()  # no-op for this plain MLP, but correct once dropout/BN appear
    val_loss = 0
    val_correct = 0
    with torch.no_grad():  # FIX: evaluation needs no autograd bookkeeping
        for data , target in val_loader:
            data = data.view(-1, 28*28)
            # FIX: use .to(device) consistently; the original's bare
            # target.cuda() crashed on CPU-only machines.
            data, target = data.to(device), target.to(device)
            logits = net(data)
            val_loss += criterion(logits, target).item()
            pred = logits.data.max(1)[1]  # argmax over logits = predicted class
            val_correct += pred.eq(target.data).sum()
    val_loss /= len(val_loader.dataset)
    print('\nVAL set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        val_loss, val_correct, len(val_loader.dataset),
        100. * val_correct / len(val_loader.dataset)))
    scheduler.step(val_loss)  # ReduceLROnPlateau monitors the validation loss
    print('=====the present learning epoch is {}======'.format(epoch+1))
    
'''Final evaluation on the held-out test set.'''
net.eval()
test_loss = 0
correct = 0
with torch.no_grad():  # FIX: inference only — skip autograd bookkeeping
    for data, target in test_loader:
        data = data.view(-1, 28 * 28)
        # FIX: .to(device) instead of the original target.cuda(), which
        # crashed on CPU-only machines.
        data, target = data.to(device), target.to(device)
        logits = net(data)
        test_loss += criterion(logits, target).item()

        pred = logits.data.max(1)[1]  # argmax over logits = predicted class
        correct += pred.eq(target.data).sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
    test_loss, correct, len(test_loader.dataset),
    100. * correct / len(test_loader.dataset)))

to be continued

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值