R-CNN
简介
利用Selective Search方法在输入图像上提取候选框(每张图片约2k个),之后将这些候选框resize到一定尺寸后,传入预训练好的模型进行特征提取,最后利用SVM进行图像分类;并单独训练一个BBox回归器,用于精修Bounding Box的位置。
性能
RCNN算法在VOC-07数据集上取得了非常显著的效果,平均精度由33.7%(DPM-V5, 传统检测的SOTA算法)提升到58.5%。相比于传统检测算法,基于深度学习的检测算法在精度上取得了质的飞跃。
核心代码复现
- 用 Selective Search 提取图像ROI
安装 opencv-contrib-python 后,利用其中的 cv2.ximgproc.segmentation.createSelectiveSearchSegmentation 方法完成选择性搜索
def cal_pro_region(self, img_path):
    """Compute selective-search proposal regions for one image.

    Args:
        img_path: path to the image file, loaded with ``cv2.imread``.

    Returns:
        np.ndarray: proposal-region coordinates with shape (num, 4);
        the 4 columns are [xmin, ymin, xmax, ymax].

    Raises:
        Exception: when the ``cv2.ximgproc`` module is missing, i.e.
        opencv-contrib-python is not installed.
    """
    try:
        # Only available in opencv-contrib-python (not plain opencv-python).
        ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
    except AttributeError as err:
        # Chain the original AttributeError so the real cause is not lost.
        raise Exception('需要安装opencv-contrib-python, 安装前请先删除原有的opencv-python') from err
    ss.setBaseImage(cv2.imread(img_path))
    # Fast mode trades some recall for speed.
    ss.switchToSelectiveSearchFast()
    rects = ss.process()
    # ss.process() returns boxes as [x, y, w, h];
    # convert in place to [xmin, ymin, xmax, ymax].
    rects[:, 2] += rects[:, 0]
    rects[:, 3] += rects[:, 1]
    return rects
- 用预训练好的模型在提取的候选框图像上微调
def train(train_dataloader,val_dataloader,net,epochs,lr,loss_f,device,mode='classify'):
"""Fine-tune a pretrained network on proposal-region crops.

Runs one SGD step per batch and records the per-batch loss (and, in
'classify' mode, the per-batch accuracy) into history lists.

NOTE(review): only the training half of the loop is visible in this
snippet — val_loss/val_acc and Best_Loss/Best_Acc are initialised but
never updated here, and val_dataloader is unused; the validation /
checkpointing phase appears to be truncated.
"""
# Checkpoints are expected to be saved under ./model (saving code not shown here).
os.makedirs('./model',exist_ok=True)
net = net.to(device)
# NOTE(review): Adam left commented out in favour of SGD with momentum.
# optimizer = optim.Adam(net.parameters(),lr=lr)
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
# Per-batch histories.
train_loss,train_acc,val_loss,val_acc = [], [], [], []
print(f'=====mode={mode}, 开始训练...======')
# Sentinels for best-model tracking (not updated in the visible part).
Best_Loss,Best_Acc = 10000000,0
for i in range(epochs):
# train
net.train()
for x,y in tqdm(train_dataloader):
optimizer.zero_grad()
x, y = x.to(device), y.to(device)
output = net(x)
loss = loss_f(output, y)
loss.backward()
optimizer.step()
train_loss.append(loss.item())
if mode == 'classify':
# Batch accuracy from argmax over class logits.
pred = torch.argmax(output,dim=1)
acc = sum((pred == y).data.cpu().numpy())/len(y)
train_acc.append(acc)
- 训练一个BBox回归器
def train(net, epochs, lr, loss_f, device, transformer):
    """Train a bounding-box regression head on top of frozen CNN features.

    Args:
        net: pretrained backbone; ``net.features`` is used as a frozen
            feature extractor for each proposal crop.
        epochs: number of training epochs.
        lr: learning rate.
        loss_f: regression loss between predicted and target box offsets.
        device: torch device used for feature extraction and training.
        transformer: preprocessing pipeline applied to each ROI image.
    """

    class BB_Dataset(Dataset):
        """Yields (backbone feature map, box offset) per selective-search proposal.

        The regression target is the element-wise offset gt_box - ss_box,
        with boxes given as [xmin, ymin, xmax, ymax].
        """

        def __init__(self, ss_dataset, gt_dataset):
            # ss csv rows: img_path, xmin, ymin, xmax, ymax (no header row).
            self.ss = pd.read_csv(ss_dataset, header=None)
            self.gt = pd.read_csv(gt_dataset)
            self.transformer = transformer
            self.net = net
            self.device = device

        def __getitem__(self, item):
            img_path, *ss_box = self.ss.iloc[item, :]
            # Proposal crop files are named '<index>_*.png'; map back to the
            # ground-truth image name '<index>.png'.
            index = img_path.split('/')[-1].split('_')[0] + '.png'
            gt_box = self.gt[self.gt.img_name == index].values[0][2:].tolist()
            # Regression target: offset of the ground-truth box from the proposal.
            label = torch.tensor(gt_box, dtype=torch.float32) - torch.tensor(ss_box, dtype=torch.float32)
            with open(img_path, 'rb') as f:
                img = Image.open(f).convert('RGB')
            img = self.transformer(img).to(device).unsqueeze(0)
            # The backbone is frozen here; no_grad avoids building an autograd
            # graph per sample (the regression head is trained separately and
            # never backpropagates into net.features).
            with torch.no_grad():
                feat = net.features(img).squeeze(0)
            return feat, label

        def __len__(self):
            return len(self.ss)

    ss_train_dataset = './data/ss_train_loc.csv'
    gt_train_dataset = './data/banana-detection/bananas_train/label.csv'
    ss_val_dataset = './data/ss_val_loc.csv'
    gt_val_dataset = './data/banana-detection/bananas_val/label.csv'
    train_dataset = BB_Dataset(ss_train_dataset, gt_train_dataset)
    val_dataset = BB_Dataset(ss_val_dataset, gt_val_dataset)
    train_dataloader = DataLoader(train_dataset, batch_size=128, shuffle=True)
    val_dataloader = DataLoader(val_dataset, batch_size=128, shuffle=False)
    # Regression head: pool to 6x6, flatten, then a single linear layer that
    # predicts 4 box offsets. The 256*6*6 input size assumes an AlexNet-style
    # feature map — TODO confirm against the backbone actually passed in.
    regression = nn.Sequential(nn.AdaptiveAvgPool2d((6, 6)), nn.Flatten(), nn.Linear(256 * 6 * 6, 4))
    nn.init.xavier_normal_(regression[-1].weight)
    test_utils.train(train_dataloader, val_dataloader, regression,
                     epochs, lr, loss_f, device, mode='regression')