Scraping streamer names and popularity from Huya and Douyu

Two small scrapers built on urllib and re, one per site: each fetches a category page, extracts every streamer's name and popularity count with regular expressions, and prints a ranking.

Huya

from urllib import request
import re

class Pachong():
    # Huya category page
    url = 'https://www.huya.com/g/2168'
    # Outer block: from the title span to the room card's closing </li>
    root = r' <span class="txt">[\s\S]*?</li>'
    name_pattern = r'<i class="nick" title="(.*)">.*</i>'
    number_pattern = r'<i class="js-num">(.*)</i>'
    # Fetch the page and return its HTML as a string
    def __request_html(self):
        r = request.urlopen(Pachong.url)
        htmls = r.read()
        htmls = str(htmls, encoding='utf-8')
        return htmls
    # Parse the HTML and pull out the raw name/number matches
    def __request_name(self, htmls):
        root_html = re.findall(Pachong.root, htmls)
        ans = []
        for html in root_html:
            name = re.findall(Pachong.name_pattern, html)
            number = re.findall(Pachong.number_pattern, html)
            an = {'name': name, 'number': number}
            ans.append(an)
        return ans

    # Collapse each one-element findall result into a plain string
    def __refine(self, anchors):
        l = lambda anchor: {
            'name': anchor['name'][0],
            'number': anchor['number'][0]
            }
        return map(l, anchors)

    def __sort(self, anchors):
        return sorted(anchors, key=self.__sort_seed, reverse=True)

    # Turn a popularity string such as '1.2万' into a comparable number.
    # The pattern captures the decimal part too; a bare '\d*' would read
    # '1.2万' as 1 and rank it as 10000 instead of 12000.
    def __sort_seed(self, anchor):
        r = re.findall(r'\d+\.?\d*', anchor['number'])
        number = float(r[0])
        if '万' in anchor['number']:
            number *= 10000  # 万 = 10,000
        return number

    def __show(self, anchors):
        for rank in range(len(anchors)):
            print('Rank ' + str(rank + 1)
                  + ': ' + anchors[rank]['name']
                  + '  ' + anchors[rank]['number'])

    def go(self):
        htmls = self.__request_html()
        anchors = self.__request_name(htmls)
        anchors = self.__refine(anchors)
        anchors = self.__sort(anchors)
        self.__show(anchors)

pachong = Pachong()
pachong.go()
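
The sort key is easy to test in isolation. A minimal standalone sketch of the same parsing logic, assuming popularity strings look like '896' or '1.2万' (parse_popularity is a hypothetical helper, not part of the script above):

import re

def parse_popularity(text):
    # One token for the integer and optional decimal part, e.g. '1.2'
    match = re.search(r'\d+(?:\.\d+)?', text)
    if match is None:
        return 0.0  # no digits at all: sort last
    value = float(match.group())
    if '万' in text:  # 万 = 10,000
        value *= 10000
    return value

assert parse_popularity('896') == 896
assert parse_popularity('1.2万') == 12000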


Douyu

from urllib import request
from io import BytesIO
import gzip
import re
# Debugging tip: step through with breakpoints
class Spider():
    # Douyu category page
    url = 'https://www.douyu.com/g_yz'
    # The trailing '?' makes '[\s\S]*' non-greedy; '[\s\S]' matches any
    # character, including newlines
    # .DyListCover-info
    root_pattern = r'<div class="DyListCover-info">([\s\S]*?)</div>'
    # .DyListCover-userName
    name_pattern = r'<div class="DyListCover-userName is-template">(.*)'
    # .DyListCover-hot
    number_pattern = r'<span class="DyListCover-hot is-template"><svg><use xlink:href="#icon-hot_8a57f0b"></use></svg>([\s\S]*?)</span>'
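    # Why the lazy quantifier matters: re.findall('<b>(.*)</b>', '<b>a</b><b>b</b>')
    # greedily returns ['a</b><b>b'], while the non-greedy '(.*?)' version
    # returns ['a', 'b'], one capture per tag pair; the patterns above rely
    # on that per-card behavior.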

    # Private method: fetch the Douyu page and return its HTML as a string
    def __fetch_content(self):
        r = request.urlopen(Spider.url)
        # r.read() returns raw bytes
        htmls = r.read()
        # The bytes start with b'\x1f\x8b\x08', the gzip magic number, so the
        # response is gzip-compressed; that is why decoding it directly as
        # UTF-8 fails. Decompress first, then decode.
        buff = BytesIO(htmls)
        f = gzip.GzipFile(fileobj=buff)
        htmls = f.read().decode('utf-8')
        return htmls

    def __analysis(self, htmls):
        # Grab the outer DyListCover-info blocks
        root_html = re.findall(Spider.root_pattern, htmls)
        anchors = []
        for i in range(len(root_html)):
            # Each room card produces two DyListCover-info matches; the
            # name and hot count live in the odd-indexed one
            if i % 2 == 1:
                html = root_html[i]
                # Extract the inner fields
                name = re.findall(Spider.name_pattern, html)
                number = re.findall(Spider.number_pattern, html)
                anchor = {'name': name, 'number': number}
                anchors.append(anchor)
        return anchors

    # Collapse each one-element findall result into a plain string
    def __refine(self, anchors):
        l = lambda anchor: {
            'name': anchor['name'][0].strip(),
            'number': anchor['number'][0]
            }
        return map(l, anchors)

    def __sort(self, anchors):
        return sorted(anchors, key=self.__sort_seed, reverse=True)

    # Same parsing as the Huya version: '1.2万' becomes 12000 for comparison
    def __sort_seed(self, anchor):
        r = re.findall(r'\d+\.?\d*', anchor['number'])
        number = float(r[0])
        if '万' in anchor['number']:
            number *= 10000  # 万 = 10,000
        return number

    def __show(self, anchors):
        for rank in range(len(anchors)):
            print('Rank ' + str(rank + 1)
                  + ': ' + anchors[rank]['name']
                  + '  ' + anchors[rank]['number'])

    def go(self):
        htmls = self.__fetch_content()
        anchors = self.__analysis(htmls)
        anchors = list(self.__refine(anchors))
        anchors = self.__sort(anchors)
        self.__show(anchors)

spider = Spider()
spider.go()
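
__fetch_content assumes the response is always gzip-compressed; if the server ever returns plain bytes, GzipFile fails with a decompression error. A more defensive sketch that checks the two-byte gzip magic number mentioned above before decompressing (fetch_html is a hypothetical helper, not part of the script):

from urllib import request
import gzip

def fetch_html(url):
    raw = request.urlopen(url).read()
    # gzip streams always begin with the magic bytes 0x1f 0x8b
    if raw[:2] == b'\x1f\x8b':
        raw = gzip.decompress(raw)
    return raw.decode('utf-8')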