# Huya (虎牙) scraper
from urllib import request
import re
class Pachong():
    """Scrape a Huya game-category page and print streamers ranked by viewer count."""

    # Huya category page to scrape.
    url = 'https://www.huya.com/g/2168'
    # Outer pattern: one match per streamer <li> entry (non-greedy so entries don't merge).
    root = r' <span class="txt">[\s\S]*?</li>'
    # Streamer nickname inside an entry.
    name_patter = r'<i class="nick" title="(.*)">.*</i>'
    # Viewer-count text inside an entry, e.g. '1.2万'.
    number_patter = r'<i class="js-num">(.*)</i>'

    def __request_html(self):
        """Fetch the category page and return its HTML decoded as UTF-8."""
        r = request.urlopen(Pachong.url)
        htmls = r.read()
        return str(htmls, encoding='utf-8')

    def __request_name(self, htmls):
        """Split the page into per-streamer chunks and extract raw
        name/number findall lists for each one."""
        root_html = re.findall(Pachong.root, htmls)
        ans = []
        for html in root_html:
            name = re.findall(Pachong.name_patter, html)
            number = re.findall(Pachong.number_patter, html)
            ans.append({'name': name, 'number': number})
        return ans

    def __refine(self, anchors):
        """Collapse each findall list to its first (single) string value."""
        l = lambda anchor: {
            'name': anchor['name'][0],
            'number': anchor['number'][0]
        }
        return map(l, anchors)

    def __sort(self, anchors):
        """Return streamers sorted by viewer count, highest first."""
        return sorted(anchors, key=self.__sort_seed, reverse=True)

    def __sort_seed(self, anchor):
        """Turn a viewer-count string like '1.2万' into a comparable float.

        BUG FIX: the original pattern '\\d*' captured only the leading
        integer digits, so '1.2万' sorted as 10000 instead of 12000, and
        a digit-less string crashed float(''). Match the full decimal
        number and fall back to 0 when there are no digits.
        """
        m = re.search(r'\d+(?:\.\d+)?', anchor['number'])
        number = float(m.group()) if m else 0.0
        if '万' in anchor['number']:
            number *= 10000  # '万' = ten thousand
        return number

    def __show(self, anchors):
        """Print the ranked list, one streamer per line."""
        for rank in range(0, len(anchors)):
            print('主播排名第' + str(rank + 1)
                  + ": " + anchors[rank]['name']
                  + ' ' + anchors[rank]['number'])

    def go(self):
        """Entry point: fetch, parse, rank, and display."""
        htmls = self.__request_html()
        anchors = self.__request_name(htmls)
        anchors = self.__refine(anchors)
        anchors = self.__sort(anchors)
        self.__show(anchors)
# Guard the script entry point so importing this module does not
# immediately open a network connection.
if __name__ == '__main__':
    pachong = Pachong()
    pachong.go()
# Douyu (斗鱼) scraper
from urllib import request
from io import BytesIO
import gzip
import re
# Breakpoint debugging
class Spider():
    """Scrape a Douyu category page and print streamers ranked by popularity."""

    # Douyu category page to scrape.
    url = 'https://www.douyu.com/g_yz'
    # Outer pattern: one info <div> per stream card; non-greedy so cards don't merge.
    root_pattern = r'<div class="DyListCover-info">([\s\S]*?)</div>'
    # Streamer name inside an info block (rest of the line after the opening tag).
    name_pattern = r'<div class="DyListCover-userName is-template">(.*)'
    # Popularity text (e.g. '1.2万') after the hot-icon SVG.
    number_pattern = r'<span class="DyListCover-hot is-template"><svg><use xlink:href="#icon-hot_8a57f0b"></use></svg>([\s\S]*?)</span>'

    def __fetch_content(self):
        """Fetch the page; gunzip when the server compressed it, decode UTF-8.

        The server historically returns gzip bytes (magic header 1f 8b);
        check the magic instead of assuming, so a plain response no longer
        crashes GzipFile.
        """
        r = request.urlopen(Spider.url)
        htmls = r.read()
        if htmls[:2] == b'\x1f\x8b':
            htmls = gzip.GzipFile(fileobj=BytesIO(htmls)).read()
        return htmls.decode('utf-8')

    def __analysis(self, htmls):
        """Extract raw name/number findall lists per streamer.

        NOTE(review): the page appears to emit two DyListCover-info divs
        per card and only the odd-indexed one carries the name/number —
        preserved as-is; confirm against the live markup.
        """
        root_html = re.findall(Spider.root_pattern, htmls)
        anchors = []
        for i in range(len(root_html)):
            if i % 2 == 1:
                html = root_html[i]
                name = re.findall(Spider.name_pattern, html)
                number = re.findall(Spider.number_pattern, html)
                anchors.append({'name': name, 'number': number})
        return anchors

    def __refine(self, anchors):
        """Collapse findall lists to single strings; strip whitespace off the name."""
        l = lambda anchor: {
            'name': anchor['name'][0].strip(),
            'number': anchor['number'][0]
        }
        return map(l, anchors)

    def __sort(self, anchors):
        """Return streamers sorted by popularity, highest first."""
        return sorted(anchors, key=self.__sort_seed, reverse=True)

    def __sort_seed(self, anchor):
        """Turn a popularity string like '1.2万' into a comparable float.

        BUG FIX: the original '\\d*' pattern captured only the integer
        digits, so '1.2万' sorted as 10000 instead of 12000, and a
        digit-less string crashed float(''). Match the full decimal
        number and fall back to 0 when there are no digits.
        """
        m = re.search(r'\d+(?:\.\d+)?', anchor['number'])
        number = float(m.group()) if m else 0.0
        if '万' in anchor['number']:
            number *= 10000  # '万' = ten thousand
        return number

    def __show(self, anchors):
        """Print the ranked list, one streamer per line."""
        for rank in range(0, len(anchors)):
            print('主播排名第' + str(rank + 1)
                  + ': ' + anchors[rank]['name']
                  + ' ' + anchors[rank]['number'])

    def go(self):
        """Entry point: fetch, parse, rank, and display."""
        htmls = self.__fetch_content()
        anchors = self.__analysis(htmls)
        anchors = list(self.__refine(anchors))
        anchors = self.__sort(anchors)
        self.__show(anchors)
# Guard the script entry point so importing this module does not
# immediately open a network connection.
if __name__ == '__main__':
    spider = Spider()
    spider.go()