今天要从国外的网站上下载一个学术会议的几百篇pdf文献,具体网址为https://www.onepetro.org/conferences/SPE/17ADIP/all?start=0&rows=700。这个网站需要登录后手动一篇一篇地下载,非常耗时。于是用Python+selenium写了个小程序,自动下载保存这些pdf文件。开始在Firefox浏览器中试验,试了好多次都没有成功。Firefox老是打开pdf文件而不是将文件保存到本地。后来改为Chrome浏览器,一切都很顺利。详情见代码:
import time
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
import random
'''
Automatically download all SPE papers of a conference, e.g.
https://www.onepetro.org/conferences/SPE/17ADIP/all?start=0&rows=700

The site requires a login. Chrome is configured so that navigating to a
paper's PDF URL saves the file to disk instead of rendering it in the
built-in viewer (Firefox kept opening the PDFs in-browser, hence Chrome).
Author: Carl Wu
'''
# Conference listing page showing all papers at once (rows=700).
start_url = "https://www.onepetro.org/conferences/SPE/17ADIP/all?start=0&rows=700"

# Chrome preferences: force PDFs to be downloaded, not displayed.
# "plugins.always_open_pdf_externally" is the pref current Chrome honors;
# the legacy "plugins.plugins_list" entry is kept for older versions.
options = webdriver.ChromeOptions()
profile = {
    "plugins.always_open_pdf_externally": True,
    "plugins.plugins_list": [{"enabled": False, "name": "Chrome PDF Viewer"}],
    "download.default_directory": "D:\\Carl\\dev\\test\\",
    "download.prompt_for_download": False,
}
options.add_experimental_option("prefs", profile)
browser = webdriver.Chrome(r"C:\Users\Carl\AppData\Local\Google\Chrome\Application\chromedriver.exe",
                           chrome_options=options)

# Credentials (placeholders -- fill in real values before running).
username = 'xxxxxxx'
password = 'xxxxxxx'

# Open the listing page, wait for it to load, then click the
# Login/Register link to reveal the username/password fields.
browser.get(start_url)
time.sleep(6)
browser.find_element_by_id('p13n-menu').click()
time.sleep(2)

# Fill in the username.
user_blank = browser.find_element_by_id('l-email')
user_blank.clear()
user_blank.send_keys(username)
# Fill in the password.
password_blank = browser.find_element_by_id('l-password')
password_blank.clear()
password_blank.send_keys(password)

# Clicking the site's login button has no effect (observed empirically),
# so submit the form by pressing ENTER in the password field instead.
time.sleep(1)
password_blank.send_keys(Keys.ENTER)
time.sleep(28)  # allow the post-login page to load fully

# Collect every paper URL *before* navigating away: once browser.get()
# leaves this page the "Get PDF" WebElements become stale, and reading
# href inside the download loop would raise StaleElementReferenceException.
paper_urls = [link.get_attribute('href')
              for link in browser.find_elements_by_link_text("Get PDF")]

for url in paper_urls:
    print(url)
    browser.get(url)  # triggers the download thanks to the prefs above
    # Randomized pause: let the download finish and avoid hitting the
    # server at a fixed, bot-like rate.
    time.sleep(22 + random.randint(1, 5))

browser.quit()