A Python crawler example for Baidu Baike

This example uses Python 3.6.
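
The entry script below imports the other four modules from a package named baike_spider, so the files are assumed to sit together in a layout like the following (a hypothetical layout, but consistent with the imports):

baike_spider/
    __init__.py
    spider_main.py
    url_manager.py
    html_downer.py
    html_parser.py
    html_outputer.py

With that layout, the crawler can be started from the project root with python -m baike_spider.spider_main.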

Crawler entry class: spider_main.py

from baike_spider import url_manager, html_downer, html_parser, html_outputer


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()            # tracks pending / crawled URLs
        self.downloader = html_downer.HtmlDownloader()  # fetches page content
        self.parser = html_parser.HtmlParser()          # extracts links and data
        self.outputer = html_outputer.HtmlOutputer()    # writes the HTML report

    def craw(self, begin_url):
        count = 1
        self.urls.add_new_url(begin_url)
        while self.urls.has_new_url():
            try:
                if count > 100:  # stop after 100 pages
                    break
                new_url = self.urls.get_new_url()
                print("craw %d : %s" % (count, new_url))
                html_content = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_content)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                count += 1
            except Exception as e:  # Exception, not BaseException, so Ctrl-C still interrupts
                print(e)
                print("craw failed")

        self.outputer.output_html()


if __name__ == "__main__":
    root_url = "https://baike.baidu.com/item/Python/407313"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
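
If everything is wired up, the script prints one "craw" line per page, followed by the downloader's response-code line. Only the first URL is deterministic, since UrlManager.get_new_url() pops from an unordered set:

craw 1 : https://baike.baidu.com/item/Python/407313
Response code: 200
craw 2 : ...

After at most 100 pages, or when no new URLs remain, the collected results are written to output.html.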

URL manager: url_manager.py

class UrlManager(object):
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url in self.new_urls or url in self.old_urls:
            return
        self.new_urls.add(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

    def add_new_urls(self, new_urls):
        if new_urls is None or len(new_urls) == 0:
            return
        for url in new_urls:
            self.add_new_url(url)
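
A quick standalone check of the de-duplication behaviour (hypothetical usage, not part of the original project):

if __name__ == "__main__":
    manager = UrlManager()
    manager.add_new_url("https://baike.baidu.com/item/Python/407313")
    manager.add_new_url("https://baike.baidu.com/item/Python/407313")  # duplicate: ignored
    print(manager.has_new_url())  # True: one URL is waiting
    url = manager.get_new_url()   # pops it and records it in old_urls
    manager.add_new_url(url)      # already crawled: ignored
    print(manager.has_new_url())  # False: nothing left to crawl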

HTML downloader: html_downer.py

import ssl
from urllib import request


class HtmlDownloader(object):

    def download(self, new_url):
        if new_url is None:
            return None
        # Disable certificate verification so https pages load without a local
        # CA bundle (acceptable for a throwaway crawler, not for production).
        ssl._create_default_https_context = ssl._create_unverified_context
        response = request.urlopen(new_url)
        print("Response code: %d" % response.getcode())
        if response.getcode() != 200:
            return None
        return response.read()
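
The bare request.urlopen(new_url) call can hang indefinitely and is sent with urllib's default User-Agent, which some sites reject. A slightly hardened drop-in replacement for HtmlDownloader.download is sketched below; the User-Agent string and the 10-second timeout are illustrative assumptions, not part of the original code:

    def download(self, new_url, timeout=10):
        if new_url is None:
            return None
        # A browser-like User-Agent and an explicit timeout make the
        # request more robust; both values here are assumptions.
        req = request.Request(new_url, headers={"User-Agent": "Mozilla/5.0"})
        with request.urlopen(req, timeout=timeout) as response:
            if response.getcode() != 200:
                return None
            return response.read()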

HTML parser: html_parser.py

import re
from bs4 import BeautifulSoup
from urllib import parse


class HtmlParser(object):

    def parse(self, new_url, html_content):
        if html_content is None or new_url is None:
            # Return a pair so the caller's tuple unpacking never fails.
            return None, None
        soup = BeautifulSoup(html_content, 'html.parser')
        new_urls = self._get_new_urls(new_url, soup)
        new_data = self._get_new_data(new_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, new_url, soup):
        full_urls = set()
        # Collect every encyclopedia link (href of the form /item/...)
        # and resolve it against the current page's URL.
        links = soup.find_all("a", href=re.compile(r"/item/"))
        for link in links:
            url_href = link["href"]
            full_url_href = parse.urljoin(new_url, url_href)
            full_urls.add(full_url_href)
        return full_urls

    def _get_new_data(self, new_url, soup):
        res_data = {"url": new_url}
        # Title: <dd class="lemmaWgt-lemmaTitle-title"><h1>...</h1></dd>
        title = soup.find("dd", class_="lemmaWgt-lemmaTitle-title").find("h1").get_text()
        res_data["title"] = title
        # Summary: <div class="lemma-summary">
        summary = soup.find("div", class_="lemma-summary").get_text()
        res_data["summary"] = summary
        return res_data
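
The parser can be exercised without the network on an inline snippet that mimics the page structure it expects (a minimal sketch; the real Baidu Baike markup is larger and may change over time, which would require updating the selectors above):

if __name__ == "__main__":
    parser = HtmlParser()
    html = ('<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>'
            '<div class="lemma-summary">An example summary.</div>'
            '<a href="/item/Guido">Guido</a>')
    urls, data = parser.parse("https://baike.baidu.com/item/Python/407313", html)
    print(urls)           # {'https://baike.baidu.com/item/Guido'}
    print(data["title"])  # Python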

HTML output writer: html_outputer.py

class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, new_data):
        if new_data is None:
            return
        self.datas.append(new_data)

    def output_html(self):
        # utf-8 is required here: the crawled pages contain Chinese text.
        fout = open("output.html", 'w', encoding="utf-8")

        fout.write("<html>")
        fout.write("<head>")
        fout.write("<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">")
        fout.write("</head>")
        fout.write("<body>")
        fout.write("<table border=\"1\">")
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'])
            fout.write("<td>%s</td>" % data['summary'])
            fout.write("</tr>")
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
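
The outputer can also be tested in isolation (hypothetical demo data):

if __name__ == "__main__":
    outputer = HtmlOutputer()
    outputer.collect_data({
        "url": "https://baike.baidu.com/item/Python/407313",
        "title": "Python",
        "summary": "An example summary.",
    })
    outputer.output_html()  # writes a one-row table to output.html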