pytorch技巧

轻纱随风难解思念的伤,英雄伤心为谁几度情长

pytorch模型网络结构可视化:

安装:

apt-get install graphviz
pip3 install graphviz
pip3 install pydot-ng

测试:

import torch
from torchvision import models
from torchviz import make_dot

# Visualise the network: run one dummy forward pass and render the autograd graph.
model = models.alexnet()
x = torch.randn(1, 3, 224, 224)  # dummy input matching AlexNet's expected input size
vis_graph = make_dot(model(x),params=dict(model.named_parameters()))
vis_graph.view()  # renders the graph via graphviz and opens the resulting file

结果:

输出模型节点名称和大小:

# Print the name and shape of every learnable parameter
# (seq_model is the named nn.Sequential built in the next snippet).
for name, param in seq_model.named_parameters():
    print(name, param.shape)

定义网络的时候定义节点名称:

from collections import OrderedDict

# Give each layer an explicit name so named_parameters() / state_dict()
# keys read "hidden_linear.weight" instead of "0.weight".
seq_model = nn.Sequential(OrderedDict(
    hidden_linear=nn.Linear(1, 8),
    hidden_activation=nn.Tanh(),
    output_linear=nn.Linear(8, 1),
))

显卡设置:

# Restrict which GPUs PyTorch can see. The bare "/gpu:0" lines in the
# original were TensorFlow-style device labels pasted as code — a
# SyntaxError in Python — so they are comments here.

# Use a single GPU ("/gpu:0"):
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Use two GPUs ("/gpu:0" and "/gpu:1"):
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

查看模型每一层的输出尺寸:

from torchsummary import summary
# Print per-layer output shapes and parameter counts, Keras-style.
# input_size excludes the batch dimension; your_model/channels/H/W are placeholders.
summary(your_model, input_size=(channels, H, W))

梯度截断:

import torch.nn as nn

# One training step with gradient clipping.
# Clipping must happen AFTER backward() (gradients exist) and BEFORE step().
outputs = model(data)
loss= loss_fn(outputs, target)
optimizer.zero_grad()
loss.backward()
# Rescale all gradients in-place so their global L2 norm is at most max_norm.
nn.utils.clip_grad_norm_(model.parameters(), max_norm=20, norm_type=2)
optimizer.step()

parameters – 需要裁剪梯度的参数(可迭代的 Tensor 集合),会对其梯度整体做范数裁剪
max_norm – 梯度的最大范数
norm_type – 规定范数的类型,默认为L2

onehot 编码:

方法1:

import torch
class_num = 8
batch_size = 4

def one_hot(label):
    """Convert a 1-D tensor of class indices to a one-hot numpy array.

    Fixes vs. the original: ``label.resize_`` mutated the caller's tensor
    in place and hard-coded the global ``batch_size``; ``reshape`` is
    side-effect free and the batch size is taken from the input itself.

    Args:
        label: 1-D LongTensor of class indices in [0, class_num).
    Returns:
        numpy array of shape (len(label), class_num) with 0./1. entries.
    """
    index = label.reshape(-1, 1)                 # (N,) -> (N, 1) for scatter_
    zeros = torch.zeros(index.size(0), class_num)
    onehot = zeros.scatter_(1, index, 1)         # (dim, index, value)
    return onehot.numpy()                        # Tensor -> numpy

label = torch.LongTensor(batch_size).random_() % class_num  # random class ids
print(one_hot(label))

# output:
[[0. 0. 0. 1. 0. 0. 0. 0.]
 [0. 0. 0. 0. 1. 0. 0. 0.]
 [0. 0. 1. 0. 0. 0. 0. 0.]
 [0. 1. 0. 0. 0. 0. 0. 0.]]

方法2:

import torch.nn.functional as F
import torch

# Built-in one-hot encoding: torch.arange(5) % 3 gives tensor([0, 1, 2, 0, 1]).
tensor = torch.arange(5) % 3
one_hot = F.one_hot(tensor, num_classes=5)

# Result:
# tensor([[1, 0, 0, 0, 0],
#         [0, 1, 0, 0, 0],
#         [0, 0, 1, 0, 0],
#         [1, 0, 0, 0, 0],
#         [0, 1, 0, 0, 0]])

清理预先分配而未被占用显存:

# Release cached GPU memory that PyTorch reserved but is no longer using,
# making it visible to other processes; live tensors are not freed.
torch.cuda.empty_cache()

训练过程中val阶段,不进行梯度反向传递:

# Disable autograd during validation: no graph is built, saving memory and time.
with torch.no_grad():
    # model prediction code goes here
    pass

学习率衰减:

import torch.optim as optim
from torch.optim import lr_scheduler

# Set up once before training: decay the lr by a factor of 0.1 every 10 epochs.
optimizer = optim.Adam(net.parameters(), lr=0.001)
scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

# Training loop — two fixes vs. the original:
#  * ``for n in n_epoch`` iterates over an int and raises TypeError;
#    ``range(n_epoch)`` is what was meant.
#  * since PyTorch 1.1, scheduler.step() must be called AFTER the epoch's
#    optimizer.step() calls, otherwise the first lr value is skipped.
for epoch in range(n_epoch):
    ...  # train one epoch (forward / backward / optimizer.step())
    scheduler.step()
    ...

冻结某些层参数不更新:

3个注意的地方(1)requires_grad = False(2)optimizer过滤出只需要梯度的权重(3)固定BN层,fix bn

def fix_bn(m):
    """Put any BatchNorm module into eval mode so its running stats freeze."""
    if 'BatchNorm' in m.__class__.__name__:
        m.eval()

def _train(epoch, train_loader, model, optimizer, criterion, args):
    """Train the multi-label classifier for one epoch.

    Combines a classification loss (``criterion``) with a smooth-L1 term and a
    cosine-embedding term; accumulates per-class and mean accuracy for logging.

    NOTE(review): ``threshhold``, ``smoothL1`` and ``CustomCosineEmbeddingLoss``
    are globals defined elsewhere in the project — confirm they are in scope.
    """
    model.train()

    # Freeze-weights note 3: keep BatchNorm layers in eval mode while training.
    model.apply(fix_bn) # fix batchnorm
    print(model)

    # Running totals for the epoch.
    losses = 0.
    losses_classify = 0.
    acc = 0.
    acc9 = np.zeros(8)  # per-class correct counts (8 labels)
    total = 0.

    for idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()#torch.Size([64, 3, 224, 224]) torch.Size([64, 9])

        # Sigmoid here because criterion is BCELoss (expects probabilities).
        output = model(data)
        output = torch.sigmoid(output)
    

        #output = torch.where((output<threshhold) & (output>(1-threshhold)) , 1-target, output)



        # Binarise predictions and tally accuracy before the backward pass.
        pred = (output > threshhold).int().float()
        acc += (pred.eq(target).sum().item()/ target.size(1))
        acc9 += pred.eq(target).sum(0).cpu().numpy()
        total += target.size(0)

        optimizer.zero_grad()
        loss_classify = criterion(output, target)
        loss_smoothl1 = smoothL1(output, target) 

        #temp_tensor = torch.IntTensor([128,64,32,16,8,4,2,1]).cuda()
        #loss_sum = torch.abs(torch.sum(pred * temp_tensor) - torch.sum(target * temp_tensor))
        #loss_count = torch.abs(torch.sum(pred) - torch.sum(target))


        loss_cos = CustomCosineEmbeddingLoss(output, target)

        # Weighted sum of the three loss terms (cosine term doubled).
        loss = loss_classify + loss_smoothl1 + loss_cos *2
        #loss = loss_classify + loss_smoothl1
        # NOTE(review): accumulating the graph-attached ``loss`` tensor keeps
        # every iteration's autograd graph alive; ``loss.item()`` would be
        # cheaper — confirm before changing.
        losses += loss
        losses_classify +=loss_classify
        loss.backward()
        if args.gradient_clip > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradient_clip)
        optimizer.step()


        # Periodic progress log (skips idx == 0).
        if idx % args.print_intervals == 0 and idx != 0:
            print('[Epoch: {0:4d}], Loss: {1:.3f},  Loss classify: {2:.3f}, All Acc: {3}, Mean Acc: {4:.3f}, Correct {5} / Total {6}'.format(epoch,
                                                                                                 float(losses.detach().cpu().numpy()) / (idx + 1),
                                                                                                 float(losses_classify.detach().cpu().numpy()) / (idx + 1),
                                                                                                 acc9 / total * 100.,
                                                                                                 acc / total * 100.,
                                                                                                acc, total))



net = Net.CTPN()  # build the network
# Freeze-weights note 1: disable gradients for every parameter whose
# name appears in ``no_grad``; all other parameters stay trainable.
for param_name, param in net.named_parameters():
    param.requires_grad = param_name not in no_grad


…………
# Freeze-weights note 2: hand the optimizer only the trainable parameters.
trainable_params = [p for p in net.parameters() if p.requires_grad]
optimizer = optim.Adam(trainable_params, lr=0.01)
#optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)


# Checkpoint restore + train/eval driver.
# Fix: the fragment was pasted from inside a function and its first line lost
# an indentation level, leaving the ``if``/``else`` at different depths — a
# SyntaxError. Re-indented consistently at top level.
if args.checkpoints is not None:
    # Resume: load weights (strict=False tolerates missing/extra keys) and epoch.
    checkpoints = torch.load(os.path.join('checkpoints', args.checkpoints))
    model.load_state_dict(checkpoints['model_state_dict'], strict=False)
    #optimizer.load_state_dict(checkpoints['optimizer_state_dict'])
    start_epoch = checkpoints['global_epoch']
else:
    start_epoch = 1

if args.cuda:
    model = model.cuda()

if not args.evaluation:
    # BCELoss pairs with the sigmoid applied inside _train.
    criterion = torch.nn.BCELoss()
    #criterion = torch.nn.BCEWithLogitsLoss()
    #criterion = torch.nn.MultiLabelSoftMarginLoss()
    #lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2, eta_min=0.0001)

    # Track the best validation accuracy and checkpoint only on improvement.
    global_acc = 0.
    for epoch in range(start_epoch, args.epochs + 1):
        _train(epoch, train_loader, model, optimizer, criterion, args)
        best_acc = _eval(epoch, test_loader, model, args)
        if global_acc < best_acc:
            global_acc = best_acc
            save_checkpoint(best_acc, model, optimizer, args, epoch)

        #lr_scheduler.step()
        #print('Current Learning Rate: {}'.format(lr_scheduler.get_last_lr()))
else:
    _eval(start_epoch, test_loader, model, args)

不同层设置不同的学习率:

# Split parameters into two groups by name so each can get its own lr.
conv1_params, conv2_params = [], []

for layer_name, p in net.named_parameters():
    target_group = conv1_params if "convolution1" in layer_name else conv2_params
    target_group.append(p)

# Then hand the optimizer one dict per group:
optimizer = optim.Adam(
    [
        {"params": conv1_params, 'lr': 0.01},
        {"params": conv2_params, 'lr': 0.001},
    ],
    weight_decay=1e-3,
)

网络参数初始化:

方法1:

调用pytorch自身函数

# Built-in parameter initialisers. Fix: the non-underscore forms
# (init.eye, init.constant, ...) have been deprecated for years and are
# removed in recent PyTorch; the supported API is the in-place "_" variants.
torch.nn.init.eye_(tensor)
torch.nn.init.constant_(tensor, val)
torch.nn.init.normal_(tensor, mean=0, std=1)
torch.nn.init.uniform_(tensor, a=0, b=1)
torch.nn.init.xavier_uniform_(net1[0].weight)
torch.nn.init.kaiming_uniform_(tensor, a=0, mode='fan_in')
torch.nn.init.orthogonal_(tensor, gain=1)
torch.nn.init.sparse_(tensor, sparsity, std=0.01)

方法2:

调用numpy

# Initialise linear-layer weights from numpy.
for layer in net1.modules():
    if isinstance(layer, nn.Linear):  # only touch the linear layers
        param_shape = layer.weight.shape
        # Draw from N(0, 0.5). Fix: np.random.normal produces float64, and
        # torch.from_numpy preserves the dtype, silently turning the layer
        # into double precision and breaking forward passes with float32
        # inputs — cast back to float32.
        layer.weight.data = torch.from_numpy(
            np.random.normal(0, 0.5, size=param_shape)
        ).float()

3种模型保存方法:

方法1:

字典方式保存,最常用的一种

# Save: only the parameters (state_dict) — the recommended approach
torch.save(model.state_dict(), './parameter.pth')
# Load: re-create the model object first, then restore the weights into it
model = TheModelClass(...)
model.load_state_dict(torch.load('./parameter.pth'))

方法2:

直接保存

# Save: pickle the entire model object (ties the file to the class definition)
torch.save(model, './model.pth')
# Load: no class instantiation needed, but the class must be importable
model = torch.load('./model.pth')

方法3:

jit方式保存,可以供libtorch调用

model.load_state_dict(torch.load("***"))  # "***" = path to the saved state_dict
# eval() is mandatory before tracing/inference: it switches dropout and
# batch-norm layers to evaluation mode; skipping it gives inconsistent results.
model.eval()

.......

# Run an example input through the model so torch.jit.trace can record the graph
example = torch.rand(1,1,28,28)
traced_script_module = torch.jit.trace(model,example)

# Save the traced model; the .pt file can be loaded from C++ via libtorch
traced_script_module.save("./m.pt")

模型参数量,计算量统计(THOP: PyTorch-OpCounter):

https://github.com/Lyken17/pytorch-OpCounter

pip install thop

from torchvision.models import resnet50
from thop import profile
# Count MACs and parameters with THOP by profiling one dummy forward pass.
model = resnet50()
dummy_input = torch.randn(1, 3, 224, 224)  # renamed: `input` shadowed the builtin
macs, params = profile(model, inputs=(dummy_input, ))

模型各层输出特征图大小,参数量统计(torchsummary):

GitHub - sksq96/pytorch-summary: Model summary in PyTorch similar to `model.summary()` in Keras

pip install torchsummary
from torchsummary import summary
# Per-layer output shapes and parameter counts; input_size excludes the batch dim.
summary(your_model, input_size=(channels, H, W))

基于pytorch的transforms实现自己的数据增强:

# Fixed typo: "rom" -> "from" (the original line was a SyntaxError).
# Grouped: stdlib / third-party / project-local.
import random

import numpy as np
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import Dataset, DataLoader

from dataset import MeiZhuang_Dataset  # project-local dataset class


class AddPepperNoise(object):
    """Add salt-and-pepper noise to a PIL image.

    Args:
        snr (float): signal-to-noise ratio — fraction of pixels left untouched.
        p (float): probability of applying the transform at all.
    """

    def __init__(self, snr, p=0.9):
        # Fixed: the original asserted ``isinstance(snr, float) or
        # isinstance(p, float)``, which let a non-float snr slip through
        # whenever p was a float (always true with the default). Both
        # arguments must be floats.
        assert isinstance(snr, float) and isinstance(p, float)
        self.snr = snr
        self.p = p

    def __call__(self, img):
        """Apply the noise with probability ``self.p``.

        Args:
            img (PIL Image): input image.
        Returns:
            PIL Image: the noisy image, or the input unchanged.
        """
        if random.uniform(0, 1) < self.p:
            img_ = np.array(img).copy()
            h, w, c = img_.shape
            signal_pct = self.snr
            noise_pct = (1 - self.snr)
            # Per-pixel choice: 0 = keep, 1 = salt (white), 2 = pepper (black);
            # the noise probability is split evenly between salt and pepper.
            mask = np.random.choice((0, 1, 2), size=(h, w, 1), p=[signal_pct, noise_pct/2., noise_pct/2.])
            mask = np.repeat(mask, c, axis=2)  # same choice across all channels
            img_[mask == 1] = 255   # salt noise
            img_[mask == 2] = 0     # pepper noise
            return Image.fromarray(img_.astype('uint8')).convert('RGB')
        else:
            return img




def load_data(args):
    """Build the train/test DataLoaders for the MeiZhuang dataset.

    Training applies heavy augmentation (flips, color jitter, affine,
    perspective, custom pepper noise, random erasing); testing only
    resizes and normalizes.

    NOTE(review): RandomAffine's ``resample``/``fillcolor`` arguments were
    renamed ``interpolation``/``fill`` and later removed in newer
    torchvision — confirm the pinned torchvision version still accepts them.
    """
    train_transform = transforms.Compose([
        transforms.Resize(size =(224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0),
        transforms.RandomAffine(degrees=5, translate=(0.1, 0.1), scale=(0.8, 1.2),
                                shear=None, resample=False, fillcolor=(0, 0, 0)),
        transforms.RandomGrayscale(p=0.1),
        transforms.RandomPerspective(distortion_scale=0.5, p=0.5, interpolation=3),
        AddPepperNoise(0.9, p=0.5),  # custom PIL-level salt-and-pepper noise
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),  # ImageNet stats
        transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False),
    ])


    # Dataset args: image dirs, landmark-json dirs, attribute-json dirs,
    # transform, is_train flag — presumably; verify against MeiZhuang_Dataset.
    train_dataset = MeiZhuang_Dataset(
    ["./data/qq_20210721/images/"],
    ["./data/qq_20210721/jsons_landmark/"],
    ["./data/qq_20210721/jsons_attribute/"],
    train_transform, True)


    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)

    # Test-time: deterministic resize + normalize only.
    test_transform = transforms.Compose([
        transforms.Resize(size =(224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    test_dataset = MeiZhuang_Dataset(
    ["./data/val_20210722_plus/images/"], ["./data/val_20210722_plus/jsons_landmark/"] ,["./data/val_20210722_plus/jsons_attribute/"], 
    test_transform, False)

    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)

    return train_loader, test_loader

数据增强之albumentations

pip install albumentations
import os

import albumentations as A
import cv2
import torch
from albumentations.pytorch import ToTensorV2
from torch.nn import functional as F
from torch.utils import data

# Crop/resize geometry: train at image_size; random crops are taken from
# images rescaled to up to 1.5x (max_size) or 1.25x (middle_size) of it.
image_size = 256
max_size = int(image_size * 1.5)
middle_size = int(image_size * 1.25)

# Training pipeline: flips, then one color op (80%), one distortion (20%),
# one noise op (20%), one of three crop/resize strategies, then normalize.
# NOTE(review): A.IAAPerspective and A.IAASharpen were removed in
# albumentations >= 1.0 (renamed Perspective / Sharpen) — confirm the
# pinned albumentations version.
# NOTE(review): Normalize uses 4-channel mean/std, so inputs are presumably
# 4-channel images (cv2.imread with -1 keeps the alpha channel) — verify.
train_transform = A.Compose(
    [
        A.VerticalFlip(p=0.5),
        A.HorizontalFlip(p=0.5),
        # color transforms
        A.OneOf(
            [
                A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.2, p=1),
                A.RandomGamma(gamma_limit=(80, 120), p=1),
                A.ChannelShuffle(p=0.2),
            ],
            p=0.8,
        ),
        # distortion
        A.OneOf(
            [
                A.ElasticTransform(p=1),
                A.OpticalDistortion(p=1),
                A.GridDistortion(p=1),
                A.IAAPerspective(p=1),
            ],
            p=0.2,
        ),
        # noise transforms
        A.OneOf(
            [
                A.GaussNoise(p=1),
                A.IAASharpen(p=1),
                A.MultiplicativeNoise(p=1),
                A.GaussianBlur(p=1),
            ],
            p=0.2,
        ),
        # scale/crop strategy: large rescale + crop, plain resize, or
        # medium rescale + crop (chosen uniformly)
        A.OneOf([
        A.Compose([
            A.LongestMaxSize(max_size, p=1),
            A.RandomCrop(image_size, image_size, p=1)
        ]),
        A.Compose([
            A.Resize(image_size, image_size, p=1)
        ]),
        A.Compose([
            A.LongestMaxSize(middle_size, p=1),
            A.RandomCrop(image_size, image_size, p=1)
            ]),
        ]),
        A.Normalize(mean=(0.406, 0.456, 0.485, 0.5), std=(0.225, 0.224, 0.229, 0.25)),
        ToTensorV2(),
    ]
)

# Validation pipeline: normalize + tensor conversion only (resize disabled).
val_transform = A.Compose(
    [
        # A.Resize(max_size, max_size),
        A.Normalize(mean=(0.406, 0.456, 0.485, 0.5), std=(0.225, 0.224, 0.229, 0.25)),
        ToTensorV2(),
    ]
)


class TC(data.Dataset):
    """Segmentation dataset reading (image_path, label_path) pairs from a list file.

    Picks train/val transforms by looking for "train" in the list file name.

    NOTE(review): class_weights is hard-coded for 10 classes and moved to CUDA
    inside __init__, so constructing this dataset requires a GPU — confirm
    this is intended.
    """

    def __init__(self, root, list_path, num_samples=None, num_classes=10, **kwargs):
        # The split is inferred from the list file name, not passed explicitly.
        if "train" in list_path:
            self.transform = train_transform
        else:
            self.transform = val_transform
        self.root = root
        self.num_classes = num_classes
        self.list_path = list_path  # each line: "image_path label_path"
        self.class_weights = None

        # Whitespace-split each line into its path components.
        self.img_list = [line.strip().split() for line in open(root + list_path)]

        self.files = self.read_files()
        if num_samples:
            # Optionally truncate to the first num_samples entries.
            self.files = self.files[:num_samples]

        # Per-class loss weights (presumably inverse-frequency derived — verify
        # against how the training loss consumes them).
        self.class_weights = torch.FloatTensor(
            [
                4.2558026,
                1.630859,
                9.943599,
                9.044308,
                9.997374,
                8.934131,
                10.071862,
                10.176293,
                7.9508815,
                10.40919
            ]
        ).cuda()

    def read_files(self):
        """Turn the raw path list into dicts with img/label paths and a name stem."""
        files = []
        for item in self.img_list:
            image_path, label_path = item[:2]
            # Sample name = label filename without extension.
            name = os.path.splitext(os.path.basename(label_path))[0]
            sample = {
                "img": image_path,
                "label": label_path,
                "name": name,
            }
            files.append(sample)
        return files

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        item = self.files[index]
        name = item["name"]
        # -1 = cv2.IMREAD_UNCHANGED: keeps the original channel count/depth.
        image = cv2.imread(item["img"], -1)
        label = cv2.imread(
            item["label"],
            cv2.IMREAD_GRAYSCALE,
        )
        # Shift labels to 0-based; assumes stored labels are 1-indexed — on a
        # uint8 label image a stored 0 would wrap to 255. TODO confirm.
        label -= 1

        # Albumentations applies identical spatial transforms to image and mask.
        transformed = self.transform(image=image, mask=label)
        image = transformed["image"]
        mask = transformed["mask"]
        # Trailing zeros are presumably placeholders for extra targets — verify
        # against the training loop that unpacks this tuple.
        return image, mask.long(), 0, 0

    def inference(self, model, image):
        size = image.size()
        pred = model(image)
        # post-process: upsample predictions back to the input resolution
        pred = F.interpolate(
            input=pred, size=(size[-2], size[-1]), mode="bilinear", align_corners=True
        )
        # exp() suggests the model outputs log-probabilities — confirm.
        return pred.exp()

references:

https://github.com/zxdefying/pytorch_tricks

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值