import requests
import os
from bs4 import BeautifulSoup
from lxml import etree
import time
import pymysql
from selenium import webdriver
import sys
# Raise the recursion limit: cli() below recurses once per chapter, so a
# long book needs a very deep call stack.
sys.setrecursionlimit(100000)
# Shared request headers — spoof a desktop Chrome User-Agent.
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36',
}
# Mobile-site root URL; every href scraped below is relative to this.
res = 'https://m.qidian.com'
def pub(u, pa):
    """Fetch the page at *u* and return the nodes matching XPath *pa*.

    u:  absolute URL to download.
    pa: XPath expression evaluated against the parsed HTML.
    Returns the (possibly empty) list produced by lxml's ``xpath()``.
    """
    # BUG FIX: the original called requests.get(u, header), passing the
    # header dict as the second positional argument, which is `params`
    # (appended to the query string) — the User-Agent was never sent.
    cont = requests.get(u, headers=header)
    # Re-encode to bytes so lxml decodes using the document's own charset.
    home = etree.HTML(cont.text.encode('utf8'))
    ty = home.xpath(pa)
    return ty
# qd / qd1 / qd2 / qd3: cascade through category, sort, book and reader pages collecting links
def qd(res):
    """Start at the home page and descend into every top-nav category link."""
    for href in pub(res, '//nav[@class="home-nav"]/a/@href'):
        qd1(res + href)
def qd1(url):
    """Visit a category page and follow each sort-detail link to a listing."""
    for href in pub(url, '//div[@class="sort-li-detail"]/a/@href'):
        qd2(res + href)
def qd2(url):
    """Walk a book-listing page and open every book's detail page."""
    for href in pub(url, '//*[@id="books-"]/li/a/@href'):
        qd3(res + href)
def qd3(url):
    """From a book's detail page, follow the 'read book' button to the reader."""
    for href in pub(url, '//*[@id="btnReadBook"]/@href'):
        qd4(res + href)
# Simulate clicking through the chapters with a real browser
# Launch a Chrome session (Selenium 3 style: the driver executable path is
# the first positional argument).
# NOTE(review): this path is hard-coded to one machine's Anaconda install —
# move it to configuration or rely on chromedriver being on PATH.
driver = webdriver.Chrome(
    r"D:\Anaconda3\pkgs\selenium-3.141.0-py37he774522_0\Lib\site-packages\selenium\webdriver\chrome\chromedriver.exe")
def qd4(url):
    """Open the reader page in the browser if it has a 'load next chapter' button."""
    next_button = pub(url, '//*[@id="btnLoadNextChapter"]')
    if next_button:
        driver.get(url)
        cli(c=1)
def cli(c):
    """Scrape chapter sections from the open reader page, appending each
    section's title and paragraphs to '<book name>.txt', and keep clicking
    "load next chapter" until the button disappears.

    c: 1-based index of the first <section> to read on the current page.

    Fixes over the original:
    - the `//...` line was a C-style comment, a SyntaxError in Python;
    - recursion (one frame per chapter, requiring the huge recursion-limit
      hack) is replaced by a loop, so RecursionError is impossible;
    - the output file is opened once per section with an explicit UTF-8
      encoding (the platform default may not encode all characters);
    - a missing button ends the loop instead of raising
      NoSuchElementException on the last chapter.
    """
    while True:
        home = etree.HTML(driver.page_source)
        tit = home.xpath('//*[@id="chapterContent"]/section[{}]/h3/text()'.format(c))
        jz = home.xpath('//*[@id="chapterContent"]/section[{}]/p/text()'.format(c))
        name = home.xpath('//*[@id="readCover"]/div[1]/div[1]/h2/text()')
        # Write this section's title followed by its paragraphs.
        with open('{}.txt'.format(name[0]), 'a', encoding='utf-8') as f:
            for line in tit + jz:
                f.write(line + '\n')
        # find_elements (plural) returns [] instead of raising when the
        # button is gone — that is how we detect the final chapter.
        buttons = driver.find_elements_by_xpath("//*[@id='btnLoadNextChapter']")
        if not buttons:
            break
        driver.execute_script("arguments[0].click();", buttons[0])
        time.sleep(2)
        c += 1
# Entry point: start the crawl from the mobile-site root URL.
if __name__ == '__main__':
    qd(res)
# (Blog-page residue, kept as comments so the file parses:)
# python xpath matching + selenium simulated clicks, saved to txt
# Latest recommended article published 2024-04-19 16:07:14