《Explaining and Harnessing Adversarial Examples》即FGSM算法的PyTorch实现

参考了众多代码,但是效果不佳,可以勉强实现文中的实验,仅供参考,代码菜鸟,大神飘过……

#!/usr/bin/env python
# coding: utf-8

# In[1]:


import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch.nn as nn
import torchvision
import torch
from torchvision import transforms,utils
from torch.autograd import Variable
import os
import json
import demjson


# In[2]:


# 加载图片
# Load an image from disk and preprocess it into a normalized 4-D tensor.
def load_image(image_path):
    """Load the image at ``image_path`` and return a (1, 3, 224, 224) tensor.

    The image is forced to RGB first — grayscale or RGBA files would
    otherwise produce the wrong channel count for the 3-channel Normalize.
    Uses the standard ImageNet mean/std expected by pretrained torchvision
    models.
    """
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        # ImageNet statistics used by the pretrained VGG16 below.
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    original_img = Image.open(image_path).convert('RGB')  # fix: handle non-RGB inputs
    original_tensor = preprocess(original_img)
    original_tensor.unsqueeze_(dim=0)  # add batch dim: (C, H, W) -> (1, C, H, W)
    return original_tensor


# In[3]:


# 获取梯度的sign值
# Compute the sign of the loss gradient w.r.t. the input image (FGSM direction).
def get_gradient_signs(model, original_tensor):
    """Return ``sign(dL/dx)`` for ``original_tensor`` under ``model``.

    The loss target is the model's own top-1 prediction, so the resulting
    perturbation pushes the image *away* from its currently predicted class.
    Side effect: sets ``original_tensor.requires_grad = True`` and leaves the
    computed gradient on the tensor.
    """
    original_tensor.requires_grad = True
    if original_tensor.grad is not None:
        original_tensor.grad.zero_()  # clear stale gradients from a previous call
    output = model(original_tensor)
    # Use the current prediction as the target class (no ground-truth label needed).
    target_id = int(output.argmax(dim=1).item())
    target_tensor = torch.tensor([target_id], dtype=torch.long)
    loss = nn.CrossEntropyLoss()(output, target_tensor)
    loss.backward()
    # Only the gradient's sign matters for FGSM, not its magnitude.
    return torch.sign(original_tensor.grad.data)


# In[4]:


# 扰动图像生成
# Apply the FGSM perturbation to the original image tensor.
def pertubate_image(original_tensor, perturbation):
    """Return the adversarial image: the original plus the perturbation.

    No clamping is applied; the result stays in normalized (not pixel) space.
    """
    adversarial_tensor = original_tensor + perturbation
    return adversarial_tensor


# In[5]:


# 生成图片标题
# Build a plot title such as: "tiger cat" 87.12% confidence
def create_title(category, proba):
    """Format ``category`` (underscores shown as spaces) with its confidence."""
    readable = category.replace('_', ' ')
    percent = proba * 100
    return '"%s" %.2f%% confidence' % (readable, percent)


# In[6]:


# 获取图片分类类型
# Classify ``img_tensor`` with ``model`` and return a formatted title string.
def generate_title(model, img_tensor):
    """Return '"<class>" XX.XX% confidence' for the model's top-1 prediction.

    Class names come from ``./classes_1.txt``, a mapping of class-index
    strings to entries whose first element is the displayed name.  The file
    is parsed as strict JSON first, falling back to Python-literal syntax
    (single quotes etc.) — the lenient cases demjson used to cover — via the
    stdlib ``ast.literal_eval``.
    """
    import ast  # local import keeps this fix self-contained

    with torch.no_grad():  # inference only; no autograd graph needed here
        prediction = model(img_tensor)
    target_idx = int(prediction.argmax(dim=1).item())
    with open('./classes_1.txt', 'r') as fp:
        raw = fp.read()
    try:
        classes = json.loads(raw)
    except ValueError:
        # Not valid JSON (e.g. single-quoted Python dict); literal_eval is
        # stdlib and safe on untrusted text.
        classes = ast.literal_eval(raw)
    category = classes[str(target_idx)][0]
    # Softmax over logits -> per-class probabilities; report the max.
    probability = torch.nn.functional.softmax(prediction, dim=1)
    proba, _ = torch.max(probability, 1)
    return create_title(category, proba.detach().numpy()[0])


# In[7]:


# 获取所有图片分类类型
# Classify all three images (original, perturbation, adversarial) in one go.
def generate_titles(display_model, original_tensor, perturbation, modified_tensor):
    """Return the display titles for the three tensors, in the same order."""
    tensors = (original_tensor, perturbation, modified_tensor)
    title_original, title_perturbation, title_modified = (
        generate_title(display_model, t) for t in tensors
    )
    return title_original, title_perturbation, title_modified


# In[8]:


# 生成扰动
# FGSM: x_adv = x + epsilon * sign(grad_x L)
def generate_adversarial_example(pertubation_model, original_tensor, epsilon):
    """Return ``(adversarial_tensor, perturbation)`` for ``original_tensor``."""
    signs = get_gradient_signs(pertubation_model, original_tensor)
    perturbation = epsilon * signs
    modified_tensor = pertubate_image(original_tensor, perturbation)
    return modified_tensor, perturbation


# In[9]:


def postprocess(img_tensor):
    """Undo ImageNet normalization and return the image as a uint8 HxWxC array.

    Inverting ``(x - mean) / std`` is itself a Normalize transform with
    ``mean' = -mean / std`` and ``std' = 1 / std`` (trick borrowed from the
    style-transfer section, ch. 9.11, of "Dive into Deep Learning", PyTorch
    edition).  Only the first (and only) image in the batch is processed, so
    no resizing step is needed here.
    """
    mean = np.array([0.485, 0.456, 0.406])  # same statistics as load_image
    std = np.array([0.229, 0.224, 0.225])
    denormalize = torchvision.transforms.Normalize(mean=-mean / std, std=1 / std)
    image = denormalize(img_tensor[0].cpu()).clamp(0, 1)
    return np.array(transforms.ToPILImage()(image))


# In[16]:


# Run FGSM on every image in ``folder`` and plot, one row per input image,
# the original / perturbation / adversarial images with their predicted labels.
def generate_adversarial_examples(folder, title, perturbation_model, display_model = None, epsilon = 0.3):
    """Attack every image in ``folder`` and display a 3-column comparison grid.

    ``perturbation_model`` supplies the gradients for the attack;
    ``display_model`` (defaults to the same model) classifies the results.
    ``epsilon`` scales the sign-gradient perturbation.
    """
    if not display_model:
        display_model = perturbation_model

    filenames = os.listdir(folder)
    line_number = len(filenames)
    plt.figure(figsize = (15, 10 * line_number))

    for line, filename in enumerate(filenames):
        # fix: os.path.join instead of string concatenation, so the call
        # works whether or not ``folder`` has a trailing slash.
        original_tensor = load_image(os.path.join(folder, filename))
        modified_tensor, perturbation = generate_adversarial_example(perturbation_model, original_tensor, epsilon)
        orig_tit, pert_tit, modi_tit = generate_titles(display_model, original_tensor, perturbation, modified_tensor)
        original_img = postprocess(original_tensor)
        perturbation_img = postprocess(perturbation)
        modified_img = postprocess(modified_tensor)
        plt.subplot(line_number, 3, 3 * line + 1)
        plt.imshow(original_img)
        plt.title(orig_tit)
        plt.subplot(line_number, 3, 3 * line + 2)
        plt.imshow(perturbation_img)
        plt.title(pert_tit)
        plt.subplot(line_number, 3, 3 * line + 3)
        plt.imshow(modified_img)
        plt.title(modi_tit)

    plt.suptitle(title)
    plt.tight_layout(pad = 4)


# In[11]:


# Load a pretrained VGG-16 in eval mode and freeze every weight: FGSM only
# needs gradients w.r.t. the *input* image, never w.r.t. the parameters.
model_vgg16 = torchvision.models.vgg16(pretrained=True).eval()
for p in model_vgg16.parameters():
    p.requires_grad_(False)


# In[12]:


# Directory containing the test images to attack.
folder = './images/'


# In[13]:


# List the image files that will be attacked.
filenames = os.listdir(folder)


# In[14]:


# Bare expression: in the original notebook this cell just displays the list.
filenames


# In[17]:


# Perturb with VGG16 and also classify the results with VGG16.
generate_adversarial_examples(folder,'Perturbation using VGG16, classification using VGG16', perturbation_model = model_vgg16, display_model = model_vgg16)


# # 记得总结一下……
# 总结看vnote

运行结果如下图所示(原文此处为结果截图,文本导出时图片丢失):
测试图片以及ImageNet的分类txt文件,上传至csdn资源中,链接是:https://download.csdn.net/download/xiaokan_001/15352796

参考
[1] github代码(主要参考该代码的方法,其用keras实现FGSM算法)
[2] 《动手学深度学习》(Pytorch)版

评论 3
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值