"""
爬虫前奏:
明确目的
找到数据对应的网页
分析网页的结构找到数据所在的标签位置
模拟HTTP请求,向服务器发送这个请求,获取到服务器返回的HTML,用正则提取需要数据
库:BeautifulSoup
框架:Scrapy
反爬虫机制、效率、存储、提取、ip封(代理ip库)
"""
import re
from urllib import request
# Breakpoint debugging: F5 starts the program, F10 steps over one line,
# F5 runs to the next breakpoint, F11 steps into a function.

class Spider():
    url = 'https://www.douyu.com/g_LOL'
    # A first attempt, '<div class="video-info">[.]</div>', matches nothing:
    # inside a character class '.' is just a literal dot, so it would only match
    # a single '.' between the tags. '[\s\S]*?' matches anything, including
    # newlines, non-greedily.
    root_pattern = r'<div class="video-info">([\s\S]*?)</div>'
    name_pattern = r'</i>([\s\S]*?)</span>'
    number_pattern = r'<span class="video-number">([\s\S]*?)</span>'
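
    # As an illustration (the fragment below is made up, not copied from the live
    # page), on HTML such as
    #   <div class="video-info"><span><i></i> SomeStreamer </span>
    #   <span class="video-number">2.5万</span></div>
    # root_pattern captures everything up to the first closing </div>,
    # name_pattern then captures ' SomeStreamer ' and number_pattern '2.5万'.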

    def __fetch_content(self):
        r = request.urlopen(self.url)
        htmls = r.read()                      # raw response body as bytes
        htmls = str(htmls, encoding='utf-8')  # decode to str
        return htmls
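
    # Hedged note: the header docstring lists anti-scraping mechanisms and proxy
    # IP pools as concerns. urllib sends a default Python User-Agent that some
    # servers reject, and a banned IP needs a proxy. A minimal sketch of both
    # (the header value and proxy address are illustrative, not from the original):
    #
    #   req = request.Request(self.url, headers={'User-Agent': 'Mozilla/5.0'})
    #   opener = request.build_opener(
    #       request.ProxyHandler({'https': 'http://127.0.0.1:8080'}))
    #   htmls = opener.open(req).read()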

    def __analysis(self, htmls):
        # Cut the page into one block per streamer, then pull the name and
        # viewer count out of each block.
        root_html = re.findall(Spider.root_pattern, htmls)
        anchors = []
        for html in root_html:
            name = re.findall(Spider.name_pattern, html)
            number = re.findall(Spider.number_pattern, html)
            anchor = {'name': name, 'number': number}
            anchors.append(anchor)
        return anchors

    def __refine(self, anchors):
        # Each field is still a one-element list from findall; keep the first
        # item and strip surrounding whitespace from the name.
        l = lambda anchor: {
            'name': anchor['name'][0].strip(),
            'number': anchor['number'][0]
        }
        return map(l, anchors)

    def __sort(self, anchors):
        anchors = sorted(anchors, key=self.__sort_seed, reverse=True)
        return anchors

    def __sort_seed(self, anchor):
        # Turn a viewer-count string such as '2.5万' into a comparable float.
        r = re.findall(r'\d+\.?\d*', anchor['number'])
        number = float(r[0])
        if '万' in anchor['number']:  # '万' means ten thousand
            number *= 10000
        return number
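
    # Worked example: for anchor['number'] == '2.5万', the pattern captures '2.5',
    # and float('2.5') * 10000 gives 25000.0; a plain '896' simply stays 896.0.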

    def __show(self, anchors):
        for rank in range(0, len(anchors)):
            print('rank' + str(rank + 1)
                  + ' : ' + anchors[rank]['name']
                  + ' : ' + anchors[rank]['number'])

    def go(self):
        htmls = self.__fetch_content()
        anchors = self.__analysis(htmls)
        anchors = list(self.__refine(anchors))
        anchors = self.__sort(anchors)
        self.__show(anchors)


spider = Spider()
spider.go()
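
# The header docstring names BeautifulSoup as the library option; below is a
# minimal, hedged sketch of the same extraction done with it instead of
# hand-written regexes. It assumes beautifulsoup4 is installed and that the page
# still serves the 'video-info' / 'video-number' markup as static HTML (a
# JavaScript-rendered page would return nothing here). The helper name
# parse_with_bs4 is made up for illustration.
#
#   from bs4 import BeautifulSoup
#
#   def parse_with_bs4(htmls):
#       soup = BeautifulSoup(htmls, 'html.parser')
#       anchors = []
#       for card in soup.find_all('div', class_='video-info'):
#           name = card.find('span')                            # assumed: name in the first <span>
#           number = card.find('span', class_='video-number')
#           if name and number:
#               anchors.append({'name': name.get_text(strip=True),
#                               'number': number.get_text(strip=True)})
#       return anchors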