# Note: download every article on the first page of Han Han's blog index.
# 1. Online regex testing tool: http://tool.chinaz.com/regex/
# getBlogData.py
import urllib
import re
import threading
import os
# Create the folder the downloaded posts are saved into.
# Guard with isdir(): the original unconditional os.mkdir() raised
# OSError on every run after the first, once the folder existed.
if not os.path.isdir(r'D:\Blog'):
    os.mkdir(r'D:\Blog')
# Fetch one blog post and save it to disk.
def getBlog(url):
    """Download the page at *url* and write it into the Blog folder on D:.

    The filename is the last 26 characters of the URL, which for Sina
    blog post URLs is the trailing "blog_<id>.html" part.
    """
    page = urllib.urlopen(url).read()
    # 'with' guarantees the file handle is closed; the original
    # open(...).write(...) leaked the descriptor on every call.
    with open(r'D:\Blog/' + url[-26:], 'w+') as f:
        f.write(page)
# Subclass of threading.Thread whose run() downloads one post.
class downLoad(threading.Thread):
    """Worker thread that fetches and saves a single blog post URL."""

    def __init__(self, url):
        """Remember the target *url* and set up the Thread machinery."""
        super(downLoad, self).__init__()
        self.url = url

    def run(self):
        # Runs in the worker thread once start() is called.
        getBlog(self.url)
# Entry point: fetch the article-list page, extract post links, and
# download each post on its own thread.
url = 'http://blog.sina.com.cn/s/articlelist_1191258123_0_1.html'
# Fetch the raw HTML of the article-list page.
html = urllib.urlopen(url)
data = html.read()
# Match individual post links.  The original pattern
# r'http://blog.sina.com.cn/s/blog.*.html' was greedy with unescaped
# dots, so one match could swallow several links; the non-greedy '.*?'
# with escaped dots stops at each link's own '.html'.
reg = r'http://blog\.sina\.com\.cn/s/blog.*?\.html'
text = re.compile(reg)
# set() drops repeated URLs -- the old comment claimed de-duplication
# but the code never performed it.
threads = []
for link in set(text.findall(data)):
    worker = downLoad(link)
    worker.start()
    threads.append(worker)
# Wait for every download to finish before the script exits.
for worker in threads:
    worker.join()