# Scraper plan:
# 1. Goal: crawl panda.tv and rank the streamers of one game category by popularity.
# 2. Inspect the target page (F12 in Chrome shows the HTML) to find the tags
#    that hold the wanted data, and decide which pages/content to crawl.
# 3. How to crawl:
#    - Simulate an HTTP request and receive the HTML the server returns.
#    - Extract the wanted data (name, viewer count) with regular expressions:
#      1. Pick an anchor tag: as unique as possible, as close to the data as
#         possible, and preferably a properly closed tag.
#      2. Write the regex against that tag and extract the data.
#      3. Write functions to analyse the results.
'''
This is a module
'''
import re
from urllib import request
# NOTE: use breakpoint debugging to inspect intermediate values while developing.
class Spider():
    '''
    Scrape one game-category page of panda.tv and print the streamers
    ranked by viewer count, highest first.
    '''
    url = 'https://www.panda.tv/cate/lol?pdt=1.24.s1.3.4fibegt9f83'
    # Raw strings so regex escapes such as \s and \d are not interpreted
    # as (invalid) string escapes.
    root_pattern = r'<div class="video-info">([\s\S]*?)</div>'
    name_pattern = r'</i>([\s\S]*?)</span>'
    number_pattern = r'<i class="ricon ricon-eye">.*?</i>(.*?)</span>'

    def __fetch_content(self):
        '''
        Fetch the category page and return its HTML as a str.
        '''
        # Context manager guarantees the HTTP connection is closed
        # even if read() raises.
        with request.urlopen(Spider.url) as r:
            htmls = r.read()
        return str(htmls, encoding="utf-8").replace('\n ', '')

    def __analysis(self, htmls):
        '''
        Cut the page into per-streamer fragments and collect the raw
        regex matches for name and viewer count.

        Returns a list of dicts; each value is the full findall()
        result list (refined to a single value by __refine).
        '''
        root_html = re.findall(Spider.root_pattern, htmls)
        anchors = []
        for html in root_html:
            anchors.append({
                'name': re.findall(Spider.name_pattern, html),
                'number': re.findall(Spider.number_pattern, html),
            })
        return anchors

    def __refine(self, anchors):
        '''
        Keep only the first match for each field; strip() removes the
        newline and padding spaces around the name.
        '''
        l = lambda anchor: {
            'name': anchor['name'][0].strip(),
            'number': anchor['number'][0]
        }
        return map(l, anchors)

    def __sort(self, anchors):
        '''Sort anchors by parsed viewer count, descending.'''
        # reverse=True gives descending order.
        return sorted(anchors, key=self.__sort_seed, reverse=True)

    def __sort_seed(self, anchor):
        '''
        Convert a viewer-count string such as '1.5万' or '932' to a float.

        Fixes two bugs in the original pattern r'\d*':
        - the fractional part was dropped, so '1.5万' parsed as 10000
          instead of 15000;
        - float('') crashed when the string did not start with a digit.
        '''
        r = re.findall(r'\d+(?:\.\d+)?', anchor['number'])
        number = float(r[0]) if r else 0.0
        if '万' in anchor['number']:
            number *= 10000  # 万 means "ten thousand"
        return number

    def __show(self, anchors):
        '''Print one "rankN:name------number" line per streamer.'''
        for rank, anchor in enumerate(anchors, start=1):
            print('rank' + str(rank)
                  + ':' + anchor['name']
                  + "------" + anchor['number'])

    def go(self):
        '''Entry point: fetch, parse, refine, sort and display.'''
        htmls = self.__fetch_content()
        anchors = self.__analysis(htmls)
        anchors = list(self.__refine(anchors))
        anchors = self.__sort(anchors)
        self.__show(anchors)
if __name__ == '__main__':
    # Run the scraper only when executed as a script, not on import.
    spider = Spider()
    spider.go()