# 大师姐在做的一个项目,需要用到,帮忙写的。参考了【对抗攻击代码实战】对抗样本的生成——FGSM_nanyidev的博客-CSDN博客_对抗样本生成这篇文章。
# 目前 device 可选 cpu 或者 gpu,model 选为 AlexNet(已训练),测试集(生成集)选为 SVHN 的官方测试集。
# 注释拉满,方便后续取用。(不要像我一样哼哧哼哧地撸代码)
# 下一步去写其他的对抗样本生成代码,后续回来贴链接。
# 另外,感谢 @翻滚的小@强 的系统学习 pytorch 笔记,适合我这种初学者学习。
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torch.utils.data as Data
import numpy as np
import matplotlib.pyplot as plt
from torchvision import models
from deephunter.models.alexnet import AlexNet
# Compute-device selection: the commented line auto-picks GPU when available;
# CPU is currently forced (the __main__ block moves the model onto `device`).
#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
# If the model has not been trained yet, train it first.
def train(model, trainloader, optimizer, loss_func, device, epochs):
    """Train ``model`` on ``trainloader`` for exactly ``epochs`` epochs.

    Args:
        model: network to train; moved onto ``device`` here.
        trainloader: DataLoader yielding (inputs, targets) batches.
        optimizer: optimizer updating ``model``'s parameters.
        loss_func: criterion, e.g. ``nn.CrossEntropyLoss()``.
        device: torch.device to run on (CPU/GPU).
        epochs: number of full passes over the training data.
    """
    model.train()
    model = model.to(device)
    train_loss = []  # per-batch loss history (for the optional plot below)
    # Fix: was `range(epochs + 1)`, which ran one extra epoch beyond the
    # requested count.
    for epoch in range(epochs):
        correct, total = 0, 0  # running accuracy counters for this epoch
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()            # clear stale gradients
            outputs = model(inputs)
            loss = loss_func(outputs, targets)
            loss.backward()                  # backpropagate
            optimizer.step()                 # parameter update
            train_loss.append(loss.item())
            _, predicted = outputs.max(1)    # class index of max score per row
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            if (batch_idx + 1) % 30 == 0:    # periodic progress report
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f},train acc:{:.2f}'.format(
                    epoch, batch_idx * len(inputs), len(trainloader.dataset),
                    100. * batch_idx / len(trainloader), loss.item(), 100.0 * correct / total))
    # Optional visualization of the loss curve:
    # plt.plot(range(len(train_loss)), train_loss)
    # plt.xlabel("Epoch")
    # plt.ylabel("loss")
    # plt.title("Train loss")
    # plt.legend()
    # plt.show()
def fgsm_attack(image, epsilon, data_grad):
    """Apply the FGSM perturbation to ``image``.

    Args:
        image: input tensor with pixel values assumed in [0, 1].
        epsilon: attack strength; 0 leaves the (clamped) input unchanged.
        data_grad: gradient of the loss w.r.t. ``image``.

    Returns:
        The adversarial image, clamped back into [0, 1].
    """
    # Step each pixel by epsilon in the direction of the loss gradient's sign.
    step = epsilon * data_grad.sign()
    # Clamp so the perturbed result stays a valid image.
    adversarial = torch.clamp(image + step, 0, 1)
    return adversarial
def test( model, device, test_loader, epsilon ):
    """Run an FGSM attack over ``test_loader`` and collect adversarial examples.

    Only samples the model already classifies correctly are attacked.  The loop
    stops early once more than 5 adversarial examples have been collected, so
    the reported accuracy covers only the samples actually processed.

    Args:
        model: trained classifier in eval mode.
        device: torch.device to run on.
        test_loader: DataLoader with batch_size=1 (``.item()`` calls rely on it).
        epsilon: FGSM attack strength.

    Returns:
        (final_acc, adv_examples): model accuracy on the perturbed samples that
        were processed, and a list of (initial prediction, adversarial
        prediction, image as ndarray) tuples for successful attacks.
    """
    correct = 0        # attacked samples the model still classifies correctly
    processed = 0      # samples actually attacked (loop may break early)
    adv_examples = []  # collected (init_pred, final_pred, image) triples
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        data.requires_grad = True  # need d(loss)/d(input) for FGSM
        output = model(data)
        init_pred = output.max(1, keepdim=True)[1]  # predicted class index
        if init_pred.item() != target.item():
            continue  # skip samples the model already gets wrong
        # NOTE(review): F.nll_loss expects log-probabilities; if AlexNet here
        # outputs raw logits, F.cross_entropy is the correct call — confirm
        # against the model definition before changing.
        loss = F.nll_loss(output, target)
        model.zero_grad()  # clear stale parameter/input gradients
        loss.backward()
        data_grad = data.grad.data  # gradient of loss w.r.t. the input image
        perturbed_data = fgsm_attack(data, epsilon, data_grad)
        output = model(perturbed_data)  # re-classify the perturbed image
        final_pred = output.max(1, keepdim=True)[1]
        processed += 1
        if final_pred.item() == target.item():
            correct += 1  # attack failed on this sample
        else:
            adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
            adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))
        if len(adv_examples) > 5:  # only generate a handful of examples
            break
    # Fix: divide by the number of samples actually processed — the early
    # break made the old denominator (len(test_loader)) meaningless.
    final_acc = correct / float(processed) if processed else 0.0
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, processed, final_acc))
    return final_acc, adv_examples
if __name__ =='__main__':
    epsilons = [0, .05, .1, .15, .2, .25, .3]  # candidate attack strengths
    # SVHN official test split; batch_size=1 because the attack needs
    # per-sample input gradients.
    test_loader = Data.DataLoader(
        datasets.SVHN('test', split='test', transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ]), download=True),
        batch_size=1, shuffle=True)
    # Load the pre-trained AlexNet checkpoint.
    model = AlexNet()
    model.load_state_dict(torch.load("D:\\DRL\\test\\data\\trained_models\\alexnet_lr0.0001_39.pkl"))
    # model = models.alexnet(pretrained=True) # sy
    # Fix: the unconditional `model = model.cuda()` crashed on CPU-only
    # machines and contradicted the explicit `device`; .to(device) alone
    # handles both CPU and GPU correctly.
    model = model.to(device)
    model.eval()  # attack a frozen, deterministic model
    # If the model is untrained, train it first:
    # epochs = 5
    # batch_size = 512
    # trainloader = Data.DataLoader(
    #     datasets.SVHN('data', split='train', download=True,
    #                   transform=transforms.Compose([transforms.ToTensor(),
    #                                                 transforms.Normalize((0.1307,), (0.3081,))])),
    #     batch_size=batch_size, shuffle=False)
    # optimizer = optim.Adam(model.parameters())  # note: parameters() must be called
    # loss_func = nn.CrossEntropyLoss()
    # train(model, trainloader, optimizer, loss_func, device, epochs)
    final_acc, adv_examples = test(model, device, test_loader, epsilons[1])  # pick epsilon manually