Using Selenium to automate login and batch downloads on 中国裁判文书网 (China Judgments Online)
This code is for learning purposes only. Please comply with the relevant laws and regulations, and do not sell data you are not authorized to distribute. Fellow scrapers, keep this in mind.
The code is below and fairly straightforward: it logs in, sets the results list to 15 items per page, then loops over the first three pages, selecting all documents on each page and clicking the batch-download button. It was written against the Selenium 3.x-style API (the find_element_by_* helpers) with ChromeDriver on Windows.
# -*- coding:utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys
import time
# initialize Selenium / configure Chrome
option = webdriver.ChromeOptions()
option.add_argument('--start-maximized')
# hide the "Chrome is being controlled by automated test software" banner
option.add_experimental_option('excludeSwitches', ['enable-automation'])
prefs = {
    'profile.default_content_settings.popups': 0,
    'download.default_directory': 'D:\\裁判文书网\\',
    'profile.default_content_setting_values.automatic_downloads': 1,
}
option.add_experimental_option('prefs', prefs)
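# Notes on the prefs above (my reading of these Chrome preference keys):
# - 'download.default_directory' sends downloads to D:\裁判文书网\; create the
#   folder beforehand, or Chrome may fall back to the default Downloads folder.
# - 'automatic_downloads': 1 lets the page trigger several downloads in a row
#   without Chrome prompting for permission each time.
# - 'popups': 0 suppresses the download pop-up prompt.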
driver = webdriver.Chrome('chromedriver_win32/chromedriver.exe', options=option)
driver.maximize_window()
driver.set_page_load_timeout(30)  # raise TimeoutException if a page takes longer than 30s
url = 'https://wenshu.court.gov.cn/website/wenshu/181029CR4M5A62CH/index.html?'
driver.get(url)
# ------ log in ------
# open the login page
driver.find_element_by_xpath('//*[@id="loginLi"]/a').click()
time.sleep(10)  # wait for the page to render
text = driver.page_source  # rendered HTML, kept around for debugging
# automatic login
# the login form lives in an embedded iframe, so switch into it first;
# the XPaths below are relative to that frame
iframe = driver.find_elements_by_tag_name('iframe')[0]
driver.switch_to.frame(iframe)
username = driver.find_element_by_xpath('/html/body/app-root/div/app-login/div/div/form/div/div[1]/app-mobile-input/div/input')
username.send_keys('your_username')  # fill in your account here
time.sleep(3)
password = driver.find_element_by_xpath('/html/body/app-root/div/app-login/div/div/form/div/div[2]/input')
password.send_keys('your_password')  # fill in your password here
time.sleep(2)
driver.find_element_by_xpath('/html/body/app-root/div/app-login/div/div/div[2]/button').click()  # submit
time.sleep(3)
# ------ after logging in ------
# this click opens the document list in a new browser tab
driver.find_element_by_xpath('//*[@id="_view_1540966819000"]/div/ul/li[2]/a').click()
time.sleep(10)
# testHtml(driver.page_source)
# switch to the newly opened tab
_lastWindow = driver.window_handles[-1]
driver.switch_to.window(_lastWindow)
# show 15 results per page
s1 = Select(driver.find_element_by_xpath('//*[@id="_view_1545184311000"]/div[8]/div/select'))
s1.select_by_visible_text('15')
# loop over the first three pages (3 pages x 15 documents = 45 downloads)
page = 1
while page < 4:
    time.sleep(1)
    # tick the select-all checkbox, then click the batch-download button
    driver.find_element_by_xpath('//*[@id="AllSelect"]').send_keys(Keys.SPACE)
    time.sleep(3)
    driver.find_element_by_xpath('//*[@id="_view_1545184311000"]/div[2]/div[4]/a[3]').click()
    time.sleep(3)  # see the download-polling sketch after this script for a sturdier wait
    # go to the next page
    driver.find_element_by_xpath('//*[@id="_view_1545184311000"]/div[18]/a[8]').click()
    page += 1
driver.quit()
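A note on the fixed time.sleep calls: they work, but they either waste time or break when the site is slow. Below is a minimal sketch of the first few waits rewritten with Selenium's explicit WebDriverWait; the XPaths are the same ones used above, and the 30-second timeout is my assumption, not something the original script specifies.

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

wait = WebDriverWait(driver, 30)  # assumed timeout: poll for up to 30 seconds

# click the login link as soon as it becomes clickable, instead of sleeping first
wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="loginLi"]/a'))).click()

# wait for the login iframe to load and switch into it in one step
wait.until(EC.frame_to_be_available_and_switch_to_it((By.TAG_NAME, 'iframe')))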
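Likewise, the time.sleep(3) after clicking batch download assumes every file finishes within three seconds. A more robust option is to poll the download folder until Chrome's in-progress *.crdownload files disappear. This is only a sketch: the folder path mirrors the download.default_directory pref set above, and the 120-second timeout is a hypothetical value.

import glob
import os
import time

def wait_for_downloads(folder='D:\\裁判文书网\\', timeout=120):
    """Block until no *.crdownload files remain in `folder`, or `timeout` elapses."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if not glob.glob(os.path.join(folder, '*.crdownload')):
            return True   # no partial downloads left
        time.sleep(1)
    return False  # timed out while downloads were still in progress

You would call wait_for_downloads() in place of the sleep that follows the batch-download click, before moving on to the next page.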
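One compatibility caveat: the find_element_by_* helpers used above were removed in Selenium 4.3, and newer releases expect the driver path to be passed via a Service object rather than positionally. If you run this script on a current Selenium, the equivalent calls look roughly like this (same ChromeDriver path and options as above):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service

driver = webdriver.Chrome(service=Service('chromedriver_win32/chromedriver.exe'), options=option)
driver.find_element(By.XPATH, '//*[@id="loginLi"]/a').click()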
- Updated 2021.02.02
Follow the official WeChat account and reply with 【文书网爬虫】 to download the latest version of the wenshu scraper.