main

main


在这里插入图片描述


split_file:

在这里插入图片描述


extract_feature.py

# -*- coding: utf-8 -*-

import argparse
import os

import torch

from pyretri.config import get_defaults_cfg, setup_cfg
from pyretri.datasets import build_folder, build_loader
from pyretri.models import build_model
from pyretri.extract import build_extract_helper

from torchvision import models


def parse_args():
    """Read the command line for the feature-extraction script.

    Returns:
        argparse.Namespace: data_json, save_path, config_file and
        save_interval options, plus any trailing config overrides
        collected verbatim in ``opts``.
    """
    parser = argparse.ArgumentParser(description='A tool box for deep learning-based image retrieval')
    # Everything left after the known flags is kept as-is in `opts`.
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
    parser.add_argument('--data_json', '-dj', default=None, type=str,
                        help='json file for dataset to be extracted')
    parser.add_argument('--save_path', '-sp', default=None, type=str,
                        help='save path for features')
    parser.add_argument('--config_file', '-cfg', default=None, metavar='FILE', type=str,
                        help='path to config file')
    parser.add_argument('--save_interval', '-si', default=5000, type=int,
                        help='number of features saved in one part file')
    return parser.parse_args()


def main():
    """Extract features for every image listed in the dataset json.

    Features are written under --save_path, --save_interval features
    per part file.

    Raises:
        ValueError: when any required CLI option is missing.
    """
    # init args
    args = parse_args()
    # Raise instead of assert: `assert` statements are stripped when
    # Python runs with -O, which would silently skip this validation.
    if args.data_json is None:
        raise ValueError('the dataset json must be provided!')
    if args.save_path is None:
        raise ValueError('the save path must be provided!')
    if args.config_file is None:
        raise ValueError('a config file must be provided!')

    # init and load retrieval pipeline settings
    cfg = get_defaults_cfg()
    cfg = setup_cfg(cfg, args.config_file, args.opts)

    # build dataset and dataloader
    dataset = build_folder(args.data_json, cfg.datasets)
    dataloader = build_loader(dataset, cfg.datasets)

    # build model
    model = build_model(cfg.model)

    # build helper and extract features
    extract_helper = build_extract_helper(model, cfg.extract)
    extract_helper.do_extract(dataloader, args.save_path, args.save_interval)


if __name__ == '__main__':
    main()

index.py

# -*- coding: utf-8 -*-

import argparse
import os
import pickle
import numpy as np


from pyretri.config import get_defaults_cfg, setup_cfg
from pyretri.index import build_index_helper, feature_loader
from pyretri.evaluate import build_evaluate_helper


def parse_args():
    """Collect the command-line options for the indexing script.

    Returns:
        argparse.Namespace: the config file path and trailing config
        overrides in ``opts``.
    """
    arg_parser = argparse.ArgumentParser(description='A tool box for deep learning-based image retrieval')
    arg_parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
    arg_parser.add_argument('--config_file', '-cfg', default=None, metavar='FILE', type=str,
                            help='path to config file')
    return arg_parser.parse_args()


def main():
    """Load query/gallery features, run retrieval, and save the ranking.

    The index result is dumped to a hard-coded .npy path; the evaluation
    step is intentionally disabled (left commented out by the author).

    Raises:
        ValueError: when --config_file is missing.
    """
    # init args
    args = parse_args()
    # Raise instead of assert: asserts are stripped under `python -O`,
    # which would let a missing config file through.
    if args.config_file is None:
        raise ValueError('a config file must be provided!')

    # init and load retrieval pipeline settings
    cfg = get_defaults_cfg()
    cfg = setup_cfg(cfg, args.config_file, args.opts)

    # load features
    query_fea, query_info, _ = feature_loader.load(cfg.index.query_fea_dir, cfg.index.feature_names)
    gallery_fea, gallery_info, _ = feature_loader.load(cfg.index.gallery_fea_dir, cfg.index.feature_names)

    # build helper and index features
    index_helper = build_index_helper(cfg.index)
    index_result_info, query_fea, gallery_fea = index_helper.do_index(query_fea, query_info, gallery_fea)
    # NOTE(review): hard-coded output path -- consider promoting to a CLI flag.
    np.save('/data/nextcloud/dbc2017/files/jupyter/index_result_info.npy', np.array(index_result_info))

    # build helper and evaluate results
    # evaluate_helper = build_evaluate_helper(cfg.evaluate)
    # mAP, recall_at_k = evaluate_helper.do_eval(index_result_info, gallery_info)

    # show results
    # evaluate_helper.show_results(mAP, recall_at_k)
    print('Compute done!')


if __name__ == '__main__':
    main()

make_data_json.py

# -*- coding: utf-8 -*-

import argparse

from pyretri.extract import make_data_json


def parse_args():
    """Parse the command line for the data-json generation script.

    Returns:
        argparse.Namespace: dataset path, save path, dataset type and
        optional ground-truth file, plus trailing ``opts``.
    """
    cli = argparse.ArgumentParser(description='A tool box for deep learning-based image retrieval')
    cli.add_argument('opts', default=None, nargs=argparse.REMAINDER)
    cli.add_argument('--dataset', '-d', default=None, type=str,
                     help="path for the dataset that make the json file")
    cli.add_argument('--save_path', '-sp', default=None, type=str,
                     help="save path for the json file")
    cli.add_argument('--type', '-t', default=None, type=str,
                     help="mode of the dataset")
    cli.add_argument('--ground_truth', '-gt', default=None, type=str,
                     help="ground truth of the dataset")
    return cli.parse_args()


def main():
    """Generate the dataset-description json used by the extraction step.

    Raises:
        ValueError: when --dataset, --save_path or --type is missing.
    """
    # init args
    args = parse_args()
    # Raise instead of assert: assert statements vanish under `python -O`.
    if args.dataset is None:
        raise ValueError('the data must be provided!')
    if args.save_path is None:
        raise ValueError('the save path must be provided!')
    if args.type is None:
        raise ValueError('the type must be provided!')

    # make data json; ground_truth is optional (not validated above) and
    # may be None -- presumably only some dataset types need it. TODO confirm.
    make_data_json(args.dataset, args.save_path, args.type, args.ground_truth)

    print('make data json have done!')


if __name__ == '__main__':
    main()

single_index.py

# -*- coding: utf-8 -*-

import argparse
import os
from PIL import Image
import numpy as np

from pyretri.config import get_defaults_cfg, setup_cfg
from pyretri.datasets import build_transformers
from pyretri.models import build_model
from pyretri.extract import build_extract_helper
from pyretri.index import build_index_helper, feature_loader


def parse_args():
    """Build and run the argument parser for the single-image index script.

    Returns:
        argparse.Namespace: the config file path plus trailing ``opts``.
    """
    ap = argparse.ArgumentParser(description='A tool box for deep learning-based image retrieval')
    ap.add_argument('opts', default=None, nargs=argparse.REMAINDER)
    ap.add_argument('--config_file', '-cfg', default=None, metavar='FILE', type=str,
                    help='path to config file')
    parsed = ap.parse_args()
    return parsed


def main():
    """Extract the feature of one image and save its top-5 gallery matches.

    Raises:
        ValueError: when --config_file is missing or does not exist.
        KeyError: when a configured feature name is absent from the
            extracted feature dict.
    """
    # init args
    args = parse_args()
    # BUG FIX: the original guard was `args.config_file is not ""` -- an
    # identity comparison against a literal, which is always True here
    # (the default is None), so a missing config reached
    # os.path.exists(None) and crashed with a TypeError. Also raise
    # instead of assert, since asserts are stripped under `python -O`.
    if args.config_file is None:
        raise ValueError('a config file must be provided!')
    if not os.path.exists(args.config_file):
        raise ValueError('the config file must be existed!')

    # init and load retrieval pipeline settings
    cfg = get_defaults_cfg()
    cfg = setup_cfg(cfg, args.config_file, args.opts)

    # set path for single image
    # NOTE(review): hard-coded query image -- consider promoting to a CLI flag.
    path = '/data/caltech101/query/airplanes/image_0004.jpg'

    # build transformers
    transformers = build_transformers(cfg.datasets.transformers)

    # build model
    model = build_model(cfg.model)

    # read image and convert it to tensor
    img = Image.open(path).convert("RGB")
    img_tensor = transformers(img)

    # build helper and extract feature for single image
    extract_helper = build_extract_helper(model, cfg.extract)
    img_fea_info = extract_helper.do_single_extract(img_tensor)

    # concatenate the configured feature maps into one query feature
    stacked_feature = list()
    for name in cfg.index.feature_names:
        if name not in img_fea_info[0]:
            raise KeyError("invalid feature name: {} not in {}!".format(name, img_fea_info[0].keys()))
        stacked_feature.append(img_fea_info[0][name].cpu())
    img_fea = np.concatenate(stacked_feature, axis=1)

    # load gallery features
    gallery_fea, gallery_info, _ = feature_loader.load(cfg.index.gallery_fea_dir, cfg.index.feature_names)

    # build helper and single index feature
    index_helper = build_index_helper(cfg.index)
    index_result_info, query_fea, gallery_fea = index_helper.do_index(img_fea, img_fea_info, gallery_fea)

    # save the 5 best-matching gallery images for the single query
    index_helper.save_topk_retrieved_images('retrieved_images/', index_result_info[0], 5, gallery_info)

    print('single index have done!')


if __name__ == '__main__':
    main()

split_dataset.py

# -*- coding: utf-8 -*-
import argparse
import os

from pyretri.extract.utils import split_dataset


def parse_args():
    """Parse the command line for the dataset-splitting script.

    Returns:
        argparse.Namespace: dataset path and split-file path, plus
        trailing ``opts``.
    """
    cmdline = argparse.ArgumentParser(description='A tool box for deep learning-based image retrieval')
    cmdline.add_argument('opts', default=None, nargs=argparse.REMAINDER)
    cmdline.add_argument('--dataset', '-d', default=None, type=str,
                         help="path for the dataset.")
    cmdline.add_argument('--split_file', '-sf', default=None, type=str,
                         help="name for the dataset.")
    return cmdline.parse_args()


def main():
    """Split the dataset directory according to the given split file.

    Raises:
        ValueError: when --dataset or --split_file is missing.
    """
    # init args
    args = parse_args()
    # Raise instead of assert: asserts disappear under `python -O`.
    if args.dataset is None:
        raise ValueError('the dataset must be provided!')
    # FIX: the original message said "the save path must be provided!"
    # but this flag is the split file, not a save path.
    if args.split_file is None:
        raise ValueError('the split file must be provided!')

    # split dataset
    split_dataset(args.dataset, args.split_file)

    print('split dataset have done!')


if __name__ == '__main__':
    main()

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值