# -*- coding: utf-8 -*-
"""
Created on Thu Dec 6 09:35:58 2018
@author: M
"""
import re
import requests
import urllib.request
import os
import argparse
import socket
parser = argparse.ArgumentParser(
    description="Download CVPR 2018 papers whose titles contain a keyword.")
# Keyword used to filter paper titles (case-insensitive substring match).
parser.add_argument('--keyword', type=str, default='resolution',
                    help='case-insensitive keyword to match in paper titles')
args = parser.parse_args()

# Fetch the listing page for one conference day.
r = requests.get('https://openaccess.thecvf.com/CVPR2018?day=2018-06-21')
data = r.text

# Extract all PDF links and the corresponding paper titles from the HTML.
link_list = re.findall(r"(?<=href=\").+?pdf(?=\">pdf)", data)
name_list = re.findall(r"(?<=2018_paper.html\">).+(?=</a>)", data)
socket.setdefaulttimeout(30)

# Local directory to store the downloaded PDFs.
localDir = './CVPR2018/{}/'.format(args.keyword)
if not os.path.exists(localDir):
    os.makedirs(localDir)

# Compile once outside the loop; escape the keyword so regex
# metacharacters (e.g. '+', '?') are matched literally.
search_pattern = re.compile(re.escape(args.keyword), re.IGNORECASE)

# Guard against a scrape mismatch: iterate only over paired entries.
num = min(len(link_list), len(name_list))
for cnt in range(num):
    url = link_list[cnt]
    file_name = name_list[cnt]
    # Replace punctuation that is illegal/awkward in file names.
    for ch in (':', '"', '?', '/', ' '):
        file_name = file_name.replace(ch, '_')
    # Skip papers whose title does not contain the keyword.
    if not search_pattern.search(file_name):
        continue
    file_path = localDir + file_name + '.pdf'
    if os.path.exists(file_path):
        print('File 【{}.pdf】 exists, skip downloading.'.format(file_name))
        continue
    # Download the PDF; use https consistently with the listing page.
    print('[' + str(cnt) + '/' + str(num) + "] Downloading -> " + file_path)
    try:
        urllib.request.urlretrieve('https://openaccess.thecvf.com/' + url, file_path)
    except Exception as e:
        # Best-effort: log and move on to the next paper instead of
        # silently swallowing everything (the old bare `except:` also
        # ate KeyboardInterrupt/SystemExit).
        print('Download failed for {}: {}'.format(file_name, e))
print("all download finished")
# Download CVPR (and similar) papers with a web crawler.
# (Blog-page residue, commented out so the file stays valid Python;
# original CSDN note: "latest recommended article published 2023-10-26 23:16:32".)