[Dive into Deep Learning] 36 Image Augmentation — Code

%matplotlib inline
import torch
import torchvision
from torch import nn
from d2l import torch as d2l

d2l.set_figsize()
img = d2l.Image.open('./data/tmp2E5F.png')
d2l.plt.imshow(img)  


 

def apply(img, aug, num_rows=2, num_cols=4, scale=1.5):
    # Apply the augmentation `aug` several times and show the results in a grid.
    Y = [aug(img) for _ in range(num_rows * num_cols)]
    d2l.show_images(Y, num_rows, num_cols, scale=scale)

Flip the image horizontally (left-right)

apply(img,torchvision.transforms.RandomHorizontalFlip())

Flip the image vertically (up-down)

apply(img,torchvision.transforms.RandomVerticalFlip())

 

Random cropping: sample a region covering 10%–100% of the original area with an aspect ratio between 0.5 and 2, then resize it to 200×200 pixels

shape_aug = torchvision.transforms.RandomResizedCrop(
    (200, 200), scale=(0.1, 1), ratio=(0.5, 2))
apply(img, shape_aug)
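
As a quick sanity check (a minimal sketch): whichever region gets cropped, every sample drawn from shape_aug is resized to the target 200×200 size.

cropped = shape_aug(img)
print(cropped.size)  # a PIL image of size (200, 200)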

Randomly change the image brightness

apply(img,torchvision.transforms.ColorJitter(brightness=0.5,contrast=0,saturation=0,hue=0))

 

Randomly change the image hue

apply(img,torchvision.transforms.ColorJitter(brightness=0,contrast=0,saturation=0,hue=0.5))

Randomly change the image's brightness, contrast, saturation, and hue at the same time

color_aug = torchvision.transforms.ColorJitter(brightness=0.5,contrast=0.5,saturation=0.5,hue=0.5)
apply(img,color_aug)

 

Combine multiple image augmentation methods

augs = torchvision.transforms.Compose([
    torchvision.transforms.RandomHorizontalFlip(),
    color_aug,shape_aug
])
apply(img,augs)

Training with image augmentation

!mkdir ./data/cifar10

all_images = torchvision.datasets.CIFAR10(
            train=True,root='./data/cifar10',download=True)
d2l.show_images([all_images[i][0] for i in range(32)],4,8,scale=0.8)

Use only the simplest augmentation: a random horizontal flip

train_augs = torchvision.transforms.Compose([
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor()
])
test_augs = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()
])
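
A minimal sketch for clarity, reusing the `all_images` dataset loaded above: ToTensor converts a PIL image into a float32 tensor with shape (channels, height, width) and values scaled to [0, 1], which is the format the network expects.

sample = train_augs(all_images[0][0])
print(sample.shape, sample.dtype)  # expect torch.Size([3, 32, 32]) torch.float32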

Define a helper function to read the images and apply the augmentations

def load_cifar10(is_train, augs, batch_size):
    dataset = torchvision.datasets.CIFAR10(
        root='./data/cifar10', train=is_train,
        transform=augs, download=False)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=is_train, num_workers=4)
    return dataloader
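
A quick usage sketch to confirm the loader yields batches of the expected shape; the batch size of 256 here is just an example.

train_iter = load_cifar10(True, train_augs, 256)
X, y = next(iter(train_iter))
print(X.shape, y.shape)  # expect torch.Size([256, 3, 32, 32]) and torch.Size([256])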

Define functions to train and evaluate the model on multiple GPUs

def train_batch_ch13(net, X, y, loss, trainer, devices):
    # Move the minibatch to the first GPU; X may be a list of tensors.
    if isinstance(X, list):
        X = [x.to(devices[0]) for x in X]
    else:
        X = X.to(devices[0])
    y = y.to(devices[0])
    net.train()
    trainer.zero_grad()
    pred = net(X)
    l = loss(pred, y)
    l.sum().backward()
    trainer.step()
    train_loss_sum = l.sum()
    train_acc_sum = d2l.accuracy(pred, y)
    return train_loss_sum, train_acc_sum

def train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs,
               devices=d2l.try_all_gpus()):
    timer, num_batches = d2l.Timer(), len(train_iter)
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0, 1],
                            legend=['train loss', 'train acc', 'test acc'])
#     net = nn.DataParallel(net, device_ids=devices).to(devices[0])
    net = net.cuda()
    for epoch in range(num_epochs):
        # Accumulate four values: training loss, training accuracy,
        # number of examples, number of predictions.
        metric = d2l.Accumulator(4)
        for i, (features, labels) in enumerate(train_iter):
            timer.start()
            l, acc = train_batch_ch13(
                net, features, labels, loss, trainer, devices)
            metric.add(l, acc, labels.shape[0], labels.numel())
            timer.stop()
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (metric[0] / metric[2], metric[1] / metric[3], None))
        test_acc = d2l.evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
    print(f'loss {metric[0] / metric[2]:.3f}, train acc '
          f'{metric[1] / metric[3]:.3f}, test acc {test_acc:.3f}')
    print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
          f'on {str(devices)}')
batch_size, devices, net = 256, d2l.try_all_gpus(), d2l.resnet18(10, 3)

def init_weights(m):
    if type(m) in [nn.Linear,nn.Conv2d]:
        nn.init.xavier_uniform_(m.weight)
net.apply(init_weights)

def train_with_data_aug(train_augs,test_augs,net,lr=0.001):
    train_iter = load_cifar10(True,train_augs,batch_size)
    test_iter = load_cifar10(False,test_augs,batch_size)
    loss = nn.CrossEntropyLoss(reduction='none')
    trainer = torch.optim.Adam(net.parameters(),lr=lr)
    train_ch13(net,train_iter,test_iter,loss,trainer,10,devices)
    
train_with_data_aug(train_augs,test_augs,net)

loss 0.169, train acc 0.942, test acc 0.829
1654.1 examples/sec on [device(type='cuda', index=0), device(type='cuda', index=1)]

 

train_with_data_aug(test_augs, test_augs, net)  # Here the test-set augmentation (ToTensor only, no flipping) is applied to the training set as well; it does not mean training on the test set.

loss 0.033, train acc 0.989, test acc 0.857
1722.6 examples/sec on [device(type='cuda', index=0), device(type='cuda', index=1)]

The GPUs used were a TITAN Xp and a GeForce RTX 2080.

