目标:下载韩寒博客中的所有文章(文章列表有7页,共有315篇文章)
代码如下:
#<a title="" target="_blank" href="http://blog.sina.com.cn/s/blog_4701280b0102wrup.html">写给那个茶水妹的《乘风破浪》诞生…</a>
#coding:utf-8
import urllib.request as urllib
import time
url = ['']*350  # slots for up to 350 article URLs (7 list pages, ~315 articles)
# Walk the 7 article-list pages and harvest every article URL.
# BUG FIX: the original reset a per-page index `i` to 0 on each page, so
# every page overwrote the previous page's entries in `url` and only the
# last page's links survived. We now advance one running index (`link`)
# across all pages, bounded by len(url) instead of a magic `i < 50`.
page = 1
link = 1  # 1-based running article counter; url[link-1] is the next free slot
while page <= 7 :
    con = str(urllib.urlopen('http://blog.sina.com.cn/s/articlelist_1191258123_0_'+str(page)+'.html').read(),'utf-8')
    title = con.find(r'<a title=')
    href = con.find(r'href=',title)  # search for href after the title
    html = con.find(r'.html',href)   # search for .html after the href
    # capture the addresses of all articles on this page
    while title !=-1 and href != -1 and html != -1 and link <= len(url):
        url[link - 1] = con[href + 6:html + 5]  # slice between href=" and .html
        print(link,' ',url[link - 1])
        title = con.find(r'<a title=',html)  # continue scanning after this match
        href = con.find(r'href=',title)
        html = con.find(r'.html',href)
        link = link + 1
    else :
        print (page,'find end!')
    page = page + 1
else :
    print ('all find end!')
# Download every collected article into the '韩寒/' directory
# (NOTE(review): assumes the directory already exists — confirm or create it first).
# BUG FIXES vs original: skip unfilled '' slots instead of calling urlopen('')
# (which raises), close the output file via `with` instead of leaking the
# handle, and keep going when a single article fails instead of aborting
# the whole batch; also only sleep after a real request.
j = 0
while j < len(url):
    if url[j]:  # fewer than 350 articles exist, so trailing slots are empty
        try:
            content = str(urllib.urlopen(url[j]).read(),'utf-8')
            # file name = last 26 chars of the URL, i.e. the 'blog_....html' part
            with open(r'韩寒/'+url[j][-26:],'w+',1,'utf-8') as f:
                f.write(content)
            print('downloading',url[j])
        except OSError as e:
            # network or filesystem error on one article: report and continue
            print('failed', url[j], e)
        time.sleep(10)  # be polite to the server between requests
    j = j + 1
else:
    print('download article finished')