My home internet connection is poor, so reading novels on my phone at night meant pages that buffered forever and never loaded. I searched for free novel sites and wrote this scraper: it collects every chapter link from a book's index page, downloads the chapters in batches, and saves the full text to a .docx file, so at night I can just open it in WPS instead.
# -*- coding: utf-8 -*-
'''
Created on 2017-03-09
@author: susu
'''
# imports (this is a Python 2 script: urllib2 and urlparse were folded into urllib in Python 3)
import re
import time
import codecs
import urllib2
import urlparse
import requests
from bs4 import BeautifulSoup
# a fib()-style generator that yields endpoint pairs 10 apart: (0, 10), (10, 20), (20, 30), ...
# a step of 10, 20 or 30 would all work; batches of 10 chapters felt fastest to me
def fib(n):
    a, b = 0, 10
    while a < n:
        yield a, b
        a, b = b, b + 10
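# for example, list(fib(35)) -> [(0, 10), (10, 20), (20, 30), (30, 40)];
# the last window may run past the end of the chapter list, which is harmless
# because list slicing just returns whatever items remain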
# collect the url and title of every chapter from the novel's index page
def request_content(url, header):
    request = urllib2.Request(url, None, headers=header)
    response = urllib2.urlopen(request)
    data = response.read()
    content = []
    soup = BeautifulSoup(data, 'lxml')
    links = soup.find_all('a', href=re.compile("(.shtml)$"))  # chapter pages end in .shtml
    for s in links:
        full_url = urlparse.urljoin(url, s['href'])  # resolve the relative href into a full url
        url_dict = {}
        url_dict['url'] = full_url
        url_dict['title'] = s.get_text()
        content.append(url_dict)
    return content
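# request_content returns one dict per chapter, e.g. (url shown is hypothetical):
# [{'url': 'http://www.example.com/book/1.shtml', 'title': u'第一章'},
#  {'url': 'http://www.example.com/book/2.shtml', 'title': u'第二章'}, ...]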
# download every chapter and write it into the named file
def download_doc(content, leng, name):
    i = 0
    with codecs.open(name, 'w+', 'utf-8') as f:  # the with statement closes the file for us
        d = time.time()
        print "task started", d
        for start, end in fib(leng):
            for a in content[start:end]:
                i = i + 1  # running chapter count
                f.write(a['title'] + u'\n')
                r = requests.get(a['url'])
                # the site serves gb2312; transcode to utf-8, replacing undecodable bytes
                encode_content = r.content.decode('gb2312', 'replace').encode('utf-8', 'replace')
                soup = BeautifulSoup(encode_content, 'html.parser')
                # the chapter body lives in <div id="content"> (the same selector the
                # PhantomJS variant below targets), one text fragment per <br>-separated line
                txt = soup.find('div', id='content')
                if txt is not None:
                    for line in txt.stripped_strings:
                        f.write(line + u'\n')
        e = time.time()  # time to download the whole book
        print "ok,", i, "chapters, took", e - d, "s"  # compare i against the expected chapter count
# alternative kept for reference: render each chapter page with PhantomJS
# (needs selenium's webdriver) and read the text out of <div id="content">
# for s in content:
#     print s['title']
#     driver = webdriver.PhantomJS(executable_path="D:\\phantomjs.exe")
#     driver.get(s['url'])
#     driver.implicitly_wait(5)
#     page = driver.page_source
#     soup = BeautifulSoup(page, 'lxml')
#     txt = soup.find_all('div', id="content")
#     print txt
if __name__ == '__main__':
    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'
    }
    # one task per book: its chapter-index url plus the output file name
    urls = [
        {'url': '...', 'name': u"替嫁娇妻.docx"},  # '...' : put each book's index-page url here
        {'url': '...', 'name': u"花心总裁.docx"},
        {'url': '...', 'name': u"冷酷总裁.docx"}
    ]
    for task in urls:
        content = request_content(task['url'], header=header)
        print len(content)  # total chapter count, to compare with the number actually downloaded
        leng = len(content) + 30
        # stalled or interrupted downloads used to cost me the last ~16 chapters;
        # adding 30 to the loop bound guarantees every chapter falls inside some window
        download_doc(content, leng, task['name'])
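The +30 padding keeps the batch generator from stopping early, but a chapter can still go missing when a single request stalls. One possible fix is to retry each fetch a few times before giving up. Here is a minimal sketch (not part of the script above; get_with_retry and its retries/delay/timeout values are arbitrary choices of mine) that could stand in for the bare requests.get(a['url']) call inside download_doc:

def get_with_retry(url, retries=3, delay=2, timeout=10):
    # attempt the request up to `retries` times, pausing `delay` seconds between tries
    for attempt in range(retries):
        try:
            r = requests.get(url, timeout=timeout)
            r.raise_for_status()  # count HTTP errors (404, 503, ...) as failures too
            return r
        except requests.RequestException:
            if attempt == retries - 1:
                raise  # out of attempts; surface the error to the caller
            time.sleep(delay)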