# Scrape the novel 斗破苍穹 (Battle Through the Heavens) from the web and save it to disk.
import requests
import time
import re
from lxml import etree
from bs4 import BeautifulSoup
# HTTP headers: present a desktop-browser User-Agent so the site serves normal pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36 SE 2.X MetaSr 1.0'
}
path = 'D:\\doupo.txt'  # destination file for the scraped novel
# Append mode so reruns continue the file. Explicit utf-8 is required: the
# scraped text is decoded as utf-8, and Windows' locale default encoding (gbk)
# would raise UnicodeEncodeError on characters it cannot represent.
f = open(path, 'a+', encoding='utf-8')
def get_info(url):
    """Fetch one chapter page and append its paragraph text to the output file.

    url: URL of a single chapter page. Writes each ``<p>...</p>`` paragraph
    to the module-level file object ``f``; pages that do not answer with
    HTTP 200 (e.g. gaps in the page numbering) are silently skipped.
    """
    res = requests.get(url, headers=headers)
    if res.status_code == 200:  # fetch succeeded
        # The site wraps every paragraph of chapter text in <p>...</p>;
        # re.S lets '.' match newlines inside a paragraph.
        contents = re.findall('<p>(.*?)</p>', res.content.decode('utf-8'), re.S)
        for content in contents:
            f.write(content + '\n')  # append the paragraph to the save file
    # Non-200 responses are deliberately ignored (best-effort scraping).
if __name__ == '__main__':
    # Chapter pages are numbered sequentially: /doupocangqiong/<n>.html
    urls = ['http://www.doupoxs.com/doupocangqiong/{}.html'.format(str(i)) for i in range(2, 1665)]
    # Download chapter 1 first, then the rest; a heading line is written
    # before each chapter's text.
    f.write('\n\n' + "第1章" + '\n\n')
    get_info("http://www.doupoxs.com/doupocangqiong/1.html")
    # enumerate(start=2) replaces the original hand-rolled i = 2 / i += 1 counter.
    for i, url in enumerate(urls, start=2):
        f.write('\n\n' + "第%d章" % (i) + '\n\n')
        get_info(url)
        time.sleep(1)  # throttle: pause 1s between requests to avoid hammering the site
        print("第%d章下载" % (i))
    f.close()