蓝桥杯:二分+前缀和
机器人跳跃问题
#include<iostream>
#include<cstdio>
using namespace std;
const int N = 100010;

int n;      // number of platforms
int h[N];   // h[1..n]: platform heights (each <= 1e5)

// Return true if a starting energy `energy` lets the robot clear all n
// platforms.  Each jump updates the energy as E' = E + (E - h[i]) = 2E - h[i].
// The predicate is monotone in `energy`, so the minimum feasible value can
// be found by binary search.
bool check(int energy)
{
    for (int i = 1; i <= n; i ++)
    {
        energy += energy - h[i];            // E' = 2E - h[i]
        if (energy < 0) return false;       // robot dies
        // Heights never exceed 1e5, so once the energy passes that bound it
        // can only grow; returning early also prevents int overflow.
        if (energy > 1e5) return true;
    }
    return true;
}
int main()
{
    // Read the n platform heights into h[1..n].
    scanf("%d", &n);
    for (int i = 1; i <= n; i ++) scanf("%d",&h[i]);

    // Binary-search the minimum starting energy; check() is monotone, so
    // shrink [lo, hi] to the smallest value that still passes.
    int lo = 0, hi = N;
    while (lo < hi)
    {
        int mid = (lo + hi) >> 1;
        if (check(mid)) hi = mid;       // mid suffices -> try smaller
        else            lo = mid + 1;   // mid fails -> need more energy
    }
    printf("%d\n", hi);
    return 0;
}
k倍区间
(空间换时间)
注意一下这块:
// Count subarrays whose sum is divisible by k, given that a[i] already
// holds prefix sums: sum(l..r) % k == 0  iff  a[r] % k == a[l-1] % k.
// c[r] = number of prefixes seen so far whose sum has remainder r.
// NOTE(review): for the full problem constraints both `res` and the prefix
// sums can exceed 32-bit int range and should be 64-bit -- verify.
c[0] = 1;
for (int i = 1; i <= n; i ++)
{
// Every earlier prefix with the same remainder ends one valid interval here.
res += c[a[i] % k];
c[a[i] % k]++;
}
完整如下
#include<iostream>
#include<cstdio>
using namespace std;

const int N = 100010;

int n, k;         // sequence length and divisor (1 <= n, k <= 1e5)
// BUG FIX: prefix sums reach n * max(A_i) ~ 1e5 * 1e8 = 1e13, which
// overflows 32-bit int -- store them in 64 bits.
long long a[N];   // a[i] = A[1] + ... + A[i] (prefix sums, built in place)
int c[N];         // c[r] = #prefixes seen so far with remainder r (<= n, fits int)

// "k-multiple intervals": count subarrays [l, r] whose sum is divisible
// by k.  sum(l..r) = a[r] - a[l-1], so an interval qualifies iff
// a[r] % k == a[l-1] % k.  Scan prefixes left to right and, for each one,
// add the number of earlier prefixes sharing its remainder.
int main()
{
    scanf("%d %d", &n, &k);
    for (int i = 1; i <= n; i ++)
    {
        scanf("%lld", &a[i]);
        a[i] += a[i-1];       // turn a[] into prefix sums
    }

    // BUG FIX: up to C(n+1, 2) ~ 5e9 intervals can qualify, which
    // overflows a 32-bit int -- accumulate and print in 64 bits.
    long long res = 0;
    c[0] = 1;                 // the empty prefix a[0] = 0 has remainder 0
    for (int i = 1; i <= n; i ++)
    {
        res += c[a[i] % k];
        c[a[i] % k]++;
    }
    printf("%lld\n", res);
    return 0;
}
实验
第三个代码已经跑通,不过还没跑完,估计要明天,希望有个好的结果
论文
今天看到一篇有代码的论文,于是就想想看这篇了
Enhancing Robust Representation in Adversarial Training: Alignment and Exclusion Criteria
摘要
对抗性训练 (AT) 已被证明是保护神经网络不被欺骗最有效的防御策略。然而,我们发现 AT 省略了学习鲁棒特征,导致对抗鲁棒性性能不佳。为了解决这个问题,我们强调了鲁棒表示的两个标准:(1)排除:示例的特征远离其他类; (2) 对齐:自然和对应的对抗样本的特征彼此接近。这些促使我们提出一个通用的 AT 框架,通过不对称的负对比度和反向注意力来获得稳健的表示。具体来说,我们设计了一个基于预测概率的非对称负对比度,以推开特征空间中不同类别的示例。此外,我们建议以线性分类器的参数作为反向注意力对特征进行加权,以获得类感知特征并拉近同一类的特征。在三个基准数据集上的实证评估表明,我们的方法大大提高了 AT 的鲁棒性并实现了最先进的性能。
2 criteria of robust representation:
- Exclusion: the feature of examples keeps away from that of other classes;
- Alignment: the feature of natural and corresponding adversarial examples is close to each other.
These motivate us to propose a generic framework of AT to gain robust representation, by the asymmetric negative contrast and reverse attention (ANCRA).
深度学习–使用pytorch搭建AlexNet
花分类
不过今天代码写了还没跑
model.py
import torch.nn as nn
import torch
class AlexNet(nn.Module):
    """Half-width AlexNet (channel counts halved vs. the paper) for 224x224 RGB input.

    Args:
        num_classes: size of the final classification layer.
        init_weights: when True, apply Kaiming-normal init to conv layers
            and N(0, 0.01) init to linear layers.
    """

    def __init__(self, num_classes=1000, init_weights=False):
        super(AlexNet, self).__init__()
        # Convolutional feature extractor: [3, 224, 224] -> [128, 6, 6].
        self.features = nn.Sequential(
            nn.Conv2d(3, 48, kernel_size=11, stride=4, padding=2),   # -> [48, 55, 55]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                   # -> [48, 27, 27]
            nn.Conv2d(48, 128, kernel_size=5, padding=2),            # -> [128, 27, 27]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                   # -> [128, 13, 13]
            nn.Conv2d(128, 192, kernel_size=3, padding=1),           # -> [192, 13, 13]
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=3, padding=1),           # -> [192, 13, 13]
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 128, kernel_size=3, padding=1),           # -> [128, 13, 13]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                   # -> [128, 6, 6]
        )
        # Fully connected classifier head with dropout regularisation.
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(128 * 6 * 6, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(2048, 2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        """Extract features, flatten to (N, 128*6*6), and classify."""
        feats = self.features(x)
        flat = torch.flatten(feats, start_dim=1)
        return self.classifier(flat)

    def _initialize_weights(self):
        """Kaiming-normal init for conv weights, N(0, 0.01) for linear weights, zero biases."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
train.py
import os
import sys
import json
import torch
import torch.nn as nn
from torchvision import transforms, datasets, utils
import matplotlib.pyplot as plt
import numpy as np
import torch.optim as optim
from tqdm import tqdm
from model import AlexNet
def main():
    """Train AlexNet on the flower dataset and keep the best checkpoint.

    Expects ../deep-learning-for-image-processing-master/data_set/flower_data
    to contain train/ and val/ subfolders laid out for ImageFolder.  Writes
    class_indices.json (index -> class name) and saves the weights with the
    best validation accuracy to ./AlexNet.pth.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    data_transform = {
        # BUG FIX: transform names were misspelled (RandoResizedCrop /
        # RandomHoriziontalFlip -> AttributeError), and Resize(224, 224)
        # passed 224 as the interpolation argument; the size must be a
        # (h, w) tuple.
        "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
        "val": transforms.Compose([transforms.Resize((224, 224)),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    }

    data_root = os.path.abspath(os.path.join(os.getcwd(), "../"))
    image_path = os.path.join(data_root, "deep-learning-for-image-processing-master", "data_set", "flower_data")
    assert os.path.exists(image_path), "{} path does not exist.".format(image_path)

    train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "train"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    # Persist the index -> class-name mapping for later inference.
    flower_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in flower_list.items())  # BUG FIX: was `dice(...)` (NameError)
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    batch_size = 32
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=nw)
    validate_dataset = datasets.ImageFolder(root=os.path.join(image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=4, shuffle=False,
                                                  num_workers=nw)
    print("using {} images for training, {} images for validation.".format(train_num,
                                                                            val_num))

    net = AlexNet(num_classes=5, init_weights=True)
    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0002)

    epochs = 10
    save_path = './AlexNet.pth'
    best_acc = 0.0
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # ---- training phase ----
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader, file=sys.stdout)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            outputs = net(images.to(device))
            loss = loss_function(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                     epochs,
                                                                     loss)

        # ---- validation phase ----
        net.eval()
        acc = 0.0  # number of correctly classified validation images
        with torch.no_grad():
            val_bar = tqdm(validate_loader, file=sys.stdout)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num
        print('[epoch %d] train_loss:%.3f val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))
        # Checkpoint only when validation accuracy improves.
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')


if __name__ == '__main__':
    main()