【Python3】Web Crawler

"""
爬虫前奏:
明确目的
找到数据对应的网页
分析网页的结构找到数据所在的标签位置

模拟HTTP请求,向服务器发送这个请求,获取到服务器返回的HTML,用正则提取需要数据

库:BeautifulSoup
框架:Scrapy
反爬虫机制、效率、存储、提取、ip封(代理ip库)
"""
import re
from urllib import request
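
# --- Hedged sketch: the same extraction with BeautifulSoup ---
# A minimal alternative to the regex approach in Spider below. Assumptions:
# beautifulsoup4 is installed (pip install beautifulsoup4), and the page still
# uses the class names the regex patterns target ('video-info',
# 'video-number'); neither is verified against the live site.
def parse_with_bs4(html):
    from bs4 import BeautifulSoup  # local import so the main script runs without bs4
    soup = BeautifulSoup(html, 'html.parser')
    anchors = []
    for info in soup.find_all('div', class_='video-info'):
        name_tag = info.find('span')  # assumed: the name sits in the first span
        number_tag = info.find('span', class_='video-number')
        if name_tag and number_tag:
            anchors.append({'name': name_tag.get_text(strip=True),
                            'number': number_tag.get_text(strip=True)})
    return anchors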

# Breakpoint debugging: F5 = start the program / run to the next breakpoint, F10 = step over, F11 = step into a function
class Spider:
    url = 'https://www.douyu.com/g_LOL'
    root_pattern = '<div class="video-info">[.]</div>'  # never matches: inside [] the dot is a literal '.'
    root_pattern = r'<div class="video-info">([\s\S]*?)</div>'  # non-greedy, matches across newlines
    name_pattern = r'</i>([\s\S]*?)</span>'
    number_pattern = r'<span class="video-number">([\s\S]*?)</span>'
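    # Why the first pattern fails, in miniature:
    #   >>> re.findall('<div>[.]</div>', '<div>hi</div>')
    #   []
    #   >>> re.findall(r'<div>([\s\S]*?)</div>', '<div>hi</div>')
    #   ['hi']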


    def __fetch_content(self):
        r = request.urlopen(self.url)
        htmls = r.read()                      # response body as raw bytes
        htmls = str(htmls, encoding='utf-8')  # decode to text
        return htmls
        
    def __analysis(self, htmls):
        # Carve out one block per streamer, then pull the name and viewer count.
        root_html = re.findall(Spider.root_pattern, htmls)
        anchors = []
        for html in root_html:
            name = re.findall(Spider.name_pattern, html)
            number = re.findall(Spider.number_pattern, html)
            anchor = {'name': name, 'number': number}
            anchors.append(anchor)
        return anchors

    def __refine(self, anchors):
        # Keep only the first match from each list and strip whitespace.
        refine = lambda anchor: {
            'name': anchor['name'][0].strip(),
            'number': anchor['number'][0]
            }
        return map(refine, anchors)
    
    def __sort(self, anchors):
        # Sort by audience size, largest first.
        anchors = sorted(anchors, key=self.__sort_seed, reverse=True)
        return anchors
    
    def __sort_seed(self, anchor):
        # Match the digits, including any decimal part, at the front of the count.
        r = re.findall(r'\d+\.?\d*', anchor['number'])
        number = float(r[0]) if r else 0.0
        if '万' in anchor['number']:  # '万' = 10,000
            number *= 10000
        return number
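    # Worked example: '2.5万' -> 2.5 * 10000 = 25000.0; '980' -> 980.0.
    # (The original pattern '\d*' would truncate '2.5万' to 2.)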
    
    def __show(self, anchors):
        for rank in range(len(anchors)):
            print('rank' + str(rank + 1)
                  + '    :   ' + anchors[rank]['name']
                  + '    :   ' + anchors[rank]['number'])

    def go(self):
        # Entry point: fetch -> parse -> clean -> sort -> display.
        htmls = self.__fetch_content()
        anchors = self.__analysis(htmls)
        anchors = list(self.__refine(anchors))  # map() is lazy; materialize it
        anchors = self.__sort(anchors)
        self.__show(anchors)
        
        

spider = Spider()
spider.go()
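
# --- Hedged sketch: coping with anti-crawler checks ---
# The prelude mentions anti-crawler mechanisms and proxy IP pools. A minimal
# sketch with urllib: send a browser-like User-Agent and optionally route the
# request through a proxy. The proxy address in the comment is hypothetical;
# substitute one from a real proxy pool.
def fetch_with_headers(url, proxy=None):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
    req = request.Request(url, headers=headers)
    if proxy:  # e.g. proxy='http://127.0.0.1:8080' (hypothetical)
        opener = request.build_opener(
            request.ProxyHandler({'http': proxy, 'https': proxy}))
        return str(opener.open(req).read(), encoding='utf-8')
    return str(request.urlopen(req).read(), encoding='utf-8')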