# Purpose: crawl URLs that match a regular expression and download them locally.
#!/usr/bin/env python
# -*- coding: GBK -*-
import urllib
import re
start_url = "http://www.baidu.com"
# Collect all links found on the given page.
def get_url(url):
    """Fetch *url* and return every link in the page matching http://...com.

    Parameters:
        url: address of the page to scan.
    Returns:
        list of matched link strings; empty list when the page has no
        body or no matches.
    """
    # Raw string so the "\." reaches the regex engine intact.
    pattern = re.compile(r"http://.*?\.com", re.I)
    html = urllib.urlopen(url)
    try:
        # read() with no size argument returns the entire body in one
        # piece, so a single findall() cannot split a match across
        # chunk boundaries.
        data = html.read()
    finally:
        # Close the connection even if read() raises.
        html.close()
    # BUG FIX: the original left `urls` unassigned (NameError) whenever
    # the first read() returned an empty body.
    return pattern.findall(data)
# Download a linked page to a local file.
def download_url(url, filename):
    """Download *url* and write the response body to *filename*.

    Parameters:
        url: address to download.
        filename: local path to write the body to.
    Returns:
        1 on success (kept for backward compatibility with callers
        that check the return value).
    """
    html = urllib.urlopen(url)
    try:
        # BUG FIX: open in binary mode — text mode 'w' corrupts binary
        # payloads (and newline bytes on Windows).
        with open(filename, 'wb') as f:
            # Stream in fixed-size chunks instead of read()-ing the
            # whole body into memory at once.
            while True:
                data = html.read(8192)
                if not data:
                    break
                f.write(data)
    finally:
        # Release the connection even if a read or write raises.
        html.close()
    return 1
# Breadth-first crawl.
'''
start_url: the initial URL to crawl from
times: number of pages to visit
'''
def broad_traverse(start_url, times):
    """Breadth-first crawl starting at *start_url*.

    Downloads at most *times* pages, saving page number i as "<i>.htm"
    in the current directory.

    Parameters:
        start_url: the initial URL to crawl from.
        times: maximum number of pages to download.
    Returns:
        1 when the crawl finishes.
    """
    queue = [start_url]   # pending URLs, FIFO order
    seen = set(queue)     # BUG FIX: track every URL ever queued so pages
                          # already downloaded are not re-queued (the
                          # original only deduplicated against the
                          # still-pending queue).
    i = 0                 # number of pages downloaded so far
    # BUG FIX: the original condition `i > times` downloaded times+1 pages.
    while queue and i < times:
        # BUG FIX: pop from the FRONT — the original popped from the end,
        # which made this a depth-first crawl despite its name.
        url = queue.pop(0)
        print(url, len(queue))
        # BUG FIX: the original built "0htm" — the '.' was missing.
        download_url(url, str(i) + '.htm')
        i += 1
        # Only expand the frontier while the queue is smaller than the
        # remaining page budget; keeps the queue from growing unboundedly.
        if len(queue) < times:
            for link in get_url(url):
                if link not in seen:
                    seen.add(link)
                    queue.append(link)
    return 1
def main():
    """Entry point: crawl up to 25 pages starting from start_url."""
    broad_traverse(start_url, 25)


# BUG FIX: guard the call so importing this module does not immediately
# start a network crawl.
if __name__ == "__main__":
    main()