Python 爬虫示例：爬取百度百科 "Python" 词条
1. main.py（程序入口，直接运行）
# -*- coding: utf-8 -*-
from baiKe_spider import url_manager, html_downloader, html_parser,html_outputer
#初始化需要的对象
class SpiderMain(object):
def __init__(self):
    """Wire together the four collaborators the spider needs.

    All components come from the baiKe_spider package; this class
    only coordinates them.
    """
    self.outputer = html_outputer.HtmlOutputer()    # collects parsed data for output
    self.parser = html_parser.HtmlParser()          # extracts links/data from HTML
    self.downloader = html_downloader.HtmlDownloader()  # fetches raw page content
    self.urls = url_manager.UrlManager()            # tracks pending vs. visited URLs
#爬出的调度程序
def craw(self, root_url):
count = 1
self.urls.add_new_url(root_url);
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url() #获取新的url
print 'craw %d:%s' % (count,new_url)
html_cont = self.downloader.dowmload(new_url) #启动下载器下载页面
new_urls,new_data = self.parser.parse(new_url,html_cont) #解析器
self.urls.add_new_urls(new_urls)
self.outputer.cllect_data(new_data)
#获取1000个之后跳出循环