保存 Faster R-CNN 的检测结果

为了分析 Faster R-CNN 的测试结果,需要先将测试结果保存起来,效果如下:

(图片名 类别 bbox坐标)

代码如下:

#!/usr/bin/env python
 
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
 
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
 
import _init_paths
#import matplotlib
#matplotlib.use('Agg')
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
 
 
#自己的类别名称
CLASSES = ('__background__',
           'one cell missing', 'half cell missing','two cells missing','four cell missing')
 
NETS = {'vgg16': ('VGG16',
                  'VGG16_faster_rcnn_final.caffemodel'),
        'zf': ('ZF',
                  'ZF_faster_rcnn_final.caffemodel')}
 
 
def vis_detections(image_name, class_name, dets, thresh=0.5):
    """Append all detections scoring >= `thresh` to ./result.txt.

    Each saved line has the form:
        <image_name> <class_name> <x1> <y1> <x2> <y2>

    Args:
        image_name: name of the image the detections belong to.
        class_name: class label shared by every row of `dets`.
        dets: (N, 5) array; columns are x1, y1, x2, y2, score.
        thresh: minimum confidence score for a detection to be saved.
    """
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    # BUG FIX: the original wrote outside the loop (so only the LAST
    # qualifying bbox was saved), and only for class names such as
    # 'xiansu5' that do not exist in CLASSES — so for the real classes
    # nothing was ever written. Write one line per detection, for any
    # class, and use a context manager so the file is always closed.
    with open('./result.txt', 'a') as fw:   # result file, appended per image/class
        for i in inds:
            bbox = dets[i, :4]
            fw.write('{} {} {} {} {} {}\n'.format(
                image_name, class_name,
                int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])))
 
 
def demo(net, image_name):
    """Detect objects in one test image and save per-class results.

    Loads the image from the VOC2007 JPEGImages directory, runs the
    network once, then applies per-class NMS and hands the surviving
    detections to vis_detections for saving.
    """
    # Directory holding the test images.
    im_path = os.path.join(cfg.DATA_DIR, 'VOCdevkit2007', 'VOC2007',
                           'JPEGImages', image_name)
    image = cv2.imread(im_path)

    # Single forward pass over all classes, timed for the console log.
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, image)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    CONF_THRESH = 0.9
    NMS_THRESH = 0.05
    # Start at index 1: class 0 is '__background__' and is skipped.
    for idx, cls in enumerate(CLASSES[1:], start=1):
        cls_boxes = boxes[:, 4 * idx:4 * (idx + 1)]
        cls_scores = scores[:, idx]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        dets = dets[nms(dets, NMS_THRESH), :]
        vis_detections(image_name, cls, dets, thresh=CONF_THRESH)
 
def parse_args():
    """Build and parse the command-line arguments for this demo.

    Returns:
        argparse.Namespace with fields gpu_id (int), cpu_mode (bool)
        and demo_net (key into NETS, default 'zf').
    """
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id',
                        help='GPU device id to use [0]',
                        type=int, default=0)
    parser.add_argument('--cpu', dest='cpu_mode',
                        action='store_true',
                        help='Use CPU mode (overrides --gpu)')
    parser.add_argument('--net', dest='demo_net',
                        help='Network to use [vgg16]',
                        default='zf', choices=NETS.keys())
    return parser.parse_args()
 
if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    args = parse_args()

    # Resolve the network definition (prototxt) and trained weights
    # (caffemodel) from the --net choice.
    prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
                            'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                              NETS[args.demo_net][1])

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    # Select compute mode before instantiating the net.
    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print '\n\nLoaded network {:s}'.format(caffemodel)

    # Warmup on a dummy image (the first forward passes are slower)
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in xrange(2):
        _, _= im_detect(net, im)
    fr = open('./data/demo/test/test1.txt','r') # File listing all test image names, one per line
    for im_name in fr:
        im_name = im_name.strip('\n')
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Demo for test/{}'.format(im_name)
        demo(net,im_name)
        
    plt.show()
    fr.close()
 

 

  • 1
    点赞
  • 10
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
在Keras中实现Faster R-CNN旋转目标检测算法可以按以下步骤进行: 1. 数据预处理:将训练数据转换为网络需要的格式,包括图片大小的调整、数据增强等等。 2. 构建模型:搭建Faster R-CNN网络模型,包括特征提取层、RPN层、ROI Pooling层、分类和回归层等。 3. 编译模型:设置模型的优化器、损失函数等参数。 4. 训练模型:对构建好的模型进行训练,并保存训练好的权重。 5. 模型评估:使用测试数据对训练好的模型进行评估,计算模型的精度、召回率等指标。 以下是一个基于Keras实现Faster R-CNN旋转目标检测算法的示例代码: ``` # 数据预处理 # TODO: 数据预处理代码 # 构建模型 input_shape = (None, None, 3) img_input = Input(shape=input_shape) shared_layers = nn.nn_base(img_input, trainable=True) # RPN网络 num_anchors = len(config.RPN_ANCHOR_RATIOS) * len(config.ANGLE_BINS) rpn = nn.rpn(shared_layers, num_anchors) # ROI Pooling层 roi_input = Input(shape=(config.TRAIN_ROIS_PER_IMAGE, 5)) roi_pooling = PyramidROIAlign([config.POOL_SIZE, config.POOL_SIZE], name="roi_align")([shared_layers, roi_input]) # 分类和回归层 x = TimeDistributed(Flatten(name='flatten'))(roi_pooling) x = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(x) x = TimeDistributed(Dropout(0.5))(x) x = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(x) x = TimeDistributed(Dropout(0.5))(x) # 分类和回归输出 cls_output = TimeDistributed(Dense(config.NUM_CLASSES, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(config.NUM_CLASSES))(x) angle_output = TimeDistributed(Dense(num_anchors * config.NUM_ANGLES, activation='linear', kernel_initializer='zero'), name='dense_angle_{}'.format(num_anchors * config.NUM_ANGLES))(x) bbox_output = TimeDistributed(Dense(num_anchors * 4, activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(4))(x) # 编译模型 model = Model([img_input, roi_input], [cls_output, angle_output, bbox_output]) model.compile(optimizer=Adam(lr=config.LEARNING_RATE), loss=[losses.class_loss(), losses.angle_loss(), losses.rpn_regress_loss(config.NUM_ANCHORS)]) # 训练模型 # TODO: 训练模型代码 # 模型评估 # TODO: 模型评估代码 ``` 需要注意的是,在实现旋转目标检测时,需要对RoI Pooling和NMS等部分进行修改,以支持旋转矩形的处理。具体实现可以参考上述项目中的代码和论文《R2CNN: Rotational Region CNN for Orientation Robust Scene Text Detection》。
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值