This is a course summary.
Course source: imooc (慕课网)
The goal is to crawl the pages and turn them into a PDF for easy printing.
The spider is split into five modules: the HTML downloader, the HTML parser, the HTML outputer, the URL manager, and the main scheduler.
Python 3.5+; four libraries are used: urllib.request, re, urllib.parse, and bs4 (urllib2 and urlparse are the Python 2 names of the first and third and do not exist in Python 3).
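For reference, these are exactly the imports the code below relies on, with their Python 2 counterparts noted in the comments:

from urllib import request          # Python 2: urllib2
from urllib.parse import urljoin    # Python 2: urlparse.urljoin
import re                           # link matching with regular expressions
from bs4 import BeautifulSoup       # third-party: pip install beautifulsoup4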
Main scheduler:
from baike_spider import url_manager, html_downloader, html_parser, html_outputer

class SpiderMain(object):
    def __init__(self):
        # wire up the four collaborating modules
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    # scheduling loop of the spider
    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('craw %d: %s' % (count, new_url))
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)      # queue newly found links
                self.outputer.collect_data(new_data)  # buffer page data for output
            except Exception as err:
                print(err)
                print('craw failed')
            if count == 1000:  # stop after 1000 pages
                break
            count += 1
        self.outputer.output_html()

if __name__ == '__main__':
    # the root URL to crawl
    root_url = 'https://baike.baidu.com/item/Google'
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
URL manager:
class UrlManager(object):
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    def add_new_url(self, url):
        if url is None:
            return
        # deduplicate against both the pending and the crawled sets
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        # pop an arbitrary pending URL and mark it as crawled
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
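Since the troubleshooting note at the end of this post points at the URL manager, here is a minimal sanity check of its deduplication logic (an illustrative snippet, not part of the course code; the URL is just an example):

manager = UrlManager()
manager.add_new_url('https://baike.baidu.com/item/Google')
manager.add_new_url('https://baike.baidu.com/item/Google')  # duplicate: silently ignored
url = manager.get_new_url()        # pops the URL and records it in old_urls
manager.add_new_url(url)           # already crawled: ignored as well
assert not manager.has_new_url()   # queue is empty, so deduplication works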
HTML downloader:
from urllib import request

class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = request.urlopen(url)
        if response.getcode() != 200:  # treat anything other than HTTP 200 as failure
            return None
        return response.read()  # raw bytes; decoded later by BeautifulSoup
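If the target site serves an error page to urllib's default user agent (automated clients are sometimes rejected), sending a browser-like header may help. This is a hedged variant of the downloader, not part of the course code, and the User-Agent string is just an example:

from urllib import request

class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        # NOTE: the browser-like User-Agent below is an assumption, not from the course
        req = request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        response = request.urlopen(req)
        if response.getcode() != 200:
            return None
        return response.read()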
HTML parser:
import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup

class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # entry links on Baidu Baike all look like /item/xxx
        links = soup.find_all('a', href=re.compile(r'/item/'))
        for link in links:
            new_url = link['href']
            # turn the relative href into an absolute URL
            new_full_url = urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        # url
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_='lemma-summary')
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return None, None  # keep the caller's tuple unpacking valid
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
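A self-contained smoke test of the parser on a hand-written snippet that mimics the two nodes it looks for (the HTML below is fabricated for illustration):

html = b'''
<dd class="lemmaWgt-lemmaTitle-title"><h1>Google</h1></dd>
<div class="lemma-summary">Google is a search engine company.</div>
<a href="/item/Python">Python</a>
'''
parser = HtmlParser()
new_urls, new_data = parser.parse('https://baike.baidu.com/item/Google', html)
print(new_urls)           # {'https://baike.baidu.com/item/Python'}
print(new_data['title'])  # Google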
HTML outputer:
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []  # one dict per crawled page

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w', encoding='utf-8')
        fout.write('<html>')
        fout.write("<head><meta http-equiv='content-type' content='text/html;charset=utf-8'></head>")
        fout.write('<table>')
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>' % data['url'])
            fout.write('<td>%s</td>' % data['title'])
            fout.write('<td>%s</td>' % data['summary'])
            fout.write('</tr>')
        fout.write('</table>')
        fout.write('</html>')
        fout.close()
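To get the printable PDF mentioned at the top, the generated output.html can be fed to any HTML-to-PDF tool. A minimal sketch, assuming the third-party pdfkit package and its wkhtmltopdf backend are installed (neither is part of the course code):

import pdfkit  # pip install pdfkit; also requires the wkhtmltopdf binary

pdfkit.from_file('output.html', 'output.pdf')  # render the crawl report to PDF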
Crawl results:
Notes:
1. If you only get a single result, the problem is usually in the URL manager module; check it first.
2. The "编辑锁定" (edit locked) text that appears in some results comes from the class selector picking up the wrong node; it has not been fixed here.