# Scrape the novel at http://www.hengyan.com/dir/9495.aspx
from lxml.html import etree
import requests
import re
import os
import pdfkit
host = "http://www.hengyan.com"
url = "http://www.hengyan.com/dir/9495.aspx"
temp = ''  # accumulates the whole book as HTML for the final PDF
outPutDirName = 'D:/Book/'
config = pdfkit.configuration(wkhtmltopdf=r"D:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe")

# Create the output directory if it does not exist yet.
if not os.path.exists(outPutDirName):
    os.makedirs(outPutDirName)

response = requests.get(url)
# The site serves Chinese text; trust the detected encoding to avoid mojibake.
response.encoding = response.apparent_encoding
selector = etree.HTML(response.text)

# Walk the 42 chapter entries in the table of contents.
for i in range(1, 43):
    words = selector.xpath('//*[@id="left"]/div[3]/ul/li[' + str(i) + ']/a/text()')
    hrefs = selector.xpath('//*[@id="left"]/div[3]/ul/li[' + str(i) + ']/a/@href')
    address = host + ''.join(hrefs)
    print(words, address)

    html = requests.get(address)
    html.encoding = html.apparent_encoding
    content = etree.HTML(html.text)
    block = content.xpath('//*[@class="contentitem"]/div[3]')

    # Keep only CJK ideographs, ASCII alphanumerics, '-' and ideographic
    # space so the chapter title is a safe file name.
    sub_str = re.sub(u"([^\u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a-\u3000])", "", str(''.join(words)))
    filename = outPutDirName + sub_str + '.txt'

    # Collect the whole chapter first, then write the file exactly once.
    # The original opened (and truncated) the file on every iteration of the
    # inner loop and discarded the results of strip()/replace() — str is
    # immutable, so those calls were no-ops.
    chapter_parts = []
    for j in block:
        paragraphs = j.xpath('./p/text()')
        text = '\n'.join(paragraphs).strip().replace(' ', '')
        chapter_parts.append(text)
    chapter_text = '\n'.join(chapter_parts)

    # Explicit UTF-8 so Chinese text survives regardless of the OS codepage;
    # `with` guarantees the handle is closed even if write() raises.
    with open(filename, 'w', encoding='utf-8') as file:
        file.write(chapter_text)

    # '<br/>' is the valid void-element form; the original's '</br>' is not
    # an HTML tag.
    temp = temp + '\n'.join(words) + '<br/>' + chapter_text + '<br/>'

# str.replace returns a new string — the original discarded the result,
# so newlines never became line breaks in the rendered PDF.
temp = temp.replace('\n', '<br/>')

# Render the accumulated HTML of all chapters into a single PDF.
last_pdf = '<html><head><meta charset="UTF-8"></head>' \
           '<body><div align="center"><p>%s</p></div></body></html>' % temp
pdfkit.from_string(last_pdf, "./民间烧尸怪谈.pdf", configuration=config)