Developing a Simple Web Crawler in Python (with BeautifulSoup)

First, a small BeautifulSoup exercise on the standard sample document from the bs4 documentation:

# coding=utf-8
import re

from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""

soup = BeautifulSoup(html_doc, 'html.parser', from_encoding='utf-8')

print 'All links:'
links = soup.find_all('a')
for link in links:
    print link.name, link['href'], link.get_text()

print "Get lacie's link:"
link_node = soup.find('a', href='http://example.com/lacie')
print link_node.name, link_node['href'], link_node.get_text()

print 'Regex match:'
link_node = soup.find('a', href=re.compile(r"ill"))
print link_node.name, link_node['href'], link_node.get_text()

print 'Get the p paragraph text:'
p_node = soup.find('p', class_="title")
print p_node.name, p_node.get_text()

Hands-on: writing a crawler for Baidu Baike pages.

Project layout:

baike_spider/
    spider_main.py        # crawler scheduler
    url_manager.py        # URL manager
    html_downloader.py    # page downloader
    html_parser.py        # page parser
    html_outputer.py      # data output

Note: on Mac OS X, Alt+Enter can be used to generate the corresponding methods.

spider_main.py (crawler scheduler):

# coding=utf-8
from baike_spider import url_manager, html_downloader, html_parser, html_outputer


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()                 # URL manager
        self.downloader = html_downloader.HtmlDownloader()   # downloader
        self.parser = html_parser.HtmlParser()               # parser
        self.outputer = html_outputer.HtmlOutputer()         # output writer

    def craw(self, root_url):
        count = 1                                            # index of the URL currently being crawled
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():                       # loop over all related pages, guarding against errors
            try:
                new_url = self.urls.get_new_url()             # take one URL
                print 'craw %d : %s' % (count, new_url)       # report which URL this is
                html_cont = self.downloader.download(new_url) # download the page
                new_urls, new_data = self.parser.parse(new_url, html_cont)  # parse it into new URLs and data
                self.urls.add_new_urls(new_urls)              # queue the new URLs
                self.outputer.collect_data(new_data)          # collect the data

                if count == 10:   # 10 can be raised to 100 or more; it caps the number of pages crawled
                    break

                count = count + 1
            except:
                print 'craw failed'

        self.outputer.output_html()   # write out the collected data with the outputer


if __name__ == "__main__":
    root_url = "http://baike.baidu.com/view/21087.htm"
    obj_spider = SpiderMain()     # create the spider
    obj_spider.craw(root_url)     # start crawling with craw()
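Assuming baike_spider is importable as a package (it contains an __init__.py and you run from its parent directory), one way to start the crawler is:

python -m baike_spider.spider_main

and the console output looks like:

craw 1 : http://baike.baidu.com/view/21087.htm
craw 2 : http://baike.baidu.com/view/...
...
craw 10 : http://baike.baidu.com/view/...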

url_manager.py (URL manager):

# coding=utf-8


class UrlManager(object):
    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs already crawled

    def add_new_url(self, url):   # add a single new URL to the manager
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):   # add a batch of new URLs to the manager
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):   # does the manager still hold URLs waiting to be crawled?
        return len(self.new_urls) != 0

    def get_new_url(self):   # take one URL that is waiting to be crawled
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
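A quick interactive check of the deduplication behaviour (a throwaway snippet, not one of the project files):

manager = UrlManager()
manager.add_new_url('http://baike.baidu.com/view/21087.htm')
manager.add_new_url('http://baike.baidu.com/view/21087.htm')   # duplicate, silently ignored
print manager.has_new_url()    # True
url = manager.get_new_url()    # moves the URL from new_urls to old_urls
manager.add_new_url(url)       # already crawled, so it is not queued again
print manager.has_new_url()    # False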

html_downloader.py (downloader):

import urllib2


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None

        response = urllib2.urlopen(url)
        if response.getcode() != 200:
            return None

        return response.read()
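A minimal sanity check of the downloader, using the same root page the scheduler starts from (an ad-hoc snippet, not part of the project):

downloader = HtmlDownloader()
html_cont = downloader.download('http://baike.baidu.com/view/21087.htm')
if html_cont is None:
    print 'download failed'
else:
    print len(html_cont)   # size of the fetched page in bytes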

html_parser.py (parser):

import re
import urlparse

from bs4 import BeautifulSoup


class HtmlParser(object):
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return

        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # entry links look like /view/123.htm
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}

        # url
        res_data['url'] = page_url

        # title, e.g. <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()

        # summary, e.g. <div class="lemma-summary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()

        return res_data
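The urlparse.urljoin call is what turns the relative /view/... links found on a page into absolute URLs, for example:

import urlparse
print urlparse.urljoin('http://baike.baidu.com/view/21087.htm', '/view/123.htm')
# http://baike.baidu.com/view/123.htm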

html_outputer.py (data output):

# coding=utf-8


class HtmlOutputer(object):
    # initialisation
    def __init__(self):
        self.datas = []

    def collect_data(self, data):   # collect one page's parsed data
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):   # write the collected data out as an HTML table
        fout = open('output.html', 'w')

        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")

        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")

        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")

        fout.close()
