[Python Web Scraping] 3 - Data Parsing (lxml / bs4 / Regex)

# I. XPath Syntax and the lxml Module
# 1) XPath syntax
# 1. Concept: XPath is a language for locating information in XML/HTML documents
# 2. Tools: XPath Helper for Chrome, XPath Checker for Firefox
# 3. Syntax: use // to select elements anywhere in the page, then the tag name, then a predicate to filter, e.g. //div[@class="abc"]
# 4. A few things to note (see the sketch after this list):
    # i. // selects descendants, / selects children, @ selects attributes
    # ii. contains: when an attribute holds several values, use the contains function, e.g. //div[contains(@class,"job_detail")]
    # iii. predicate indices start at 1
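# A minimal sketch of the notes above, run against an assumed inline snippet:
from lxml import etree
demo = etree.HTML('<div class="job_detail top"><p>first</p><p>second</p></div>')
print(demo.xpath('//div[contains(@class,"job_detail")]/p[1]/text()'))  # ['first'] - predicate index 1 is the first p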
    
# 2) The lxml module
# 1. Concept: lxml is a C-based XML/HTML parser for parsing and extracting XML/HTML data
# 2. Usage
# 2-1. Parse an HTML string with lxml.etree.HTML (str -> lxml.etree._Element -> bytes)
from lxml import etree
text = """
<!-- hello.html -->
<div>
    <ul>
         <li class="item-0"><a href="link1.html">first item</a></li>
         <li class="item-1"><a href="link2.html">second item</a></li>
         <li class="item-inactive"><a href="link3.html"><span class="bold">third item</span></a></li>
         <li class="item-1"><a href="link4.html">fourth item</a></li>
         <li class="item-0"><a href="link5.html">fifth item</a></li>
     </ul>
 </div>
"""
htmlElement = etree.HTML(text)  # etree.HTML parses the string into an HTML document
result = etree.tostring(htmlElement,encoding="utf-8")  # serialize the HTML document back to a byte string
print(result.decode("utf-8"))
# 2-2. Parse an HTML file with lxml.etree.parse
htmlElement = etree.parse("hello.html")
result = etree.tostring(htmlElement,encoding="utf-8")
print(result.decode("utf-8"))
# !!!VERY IMPORTANT!!! etree.parse uses an XML parser by default, so it fails on sloppy HTML; in that case pass an explicit HTML parser:
# parser = etree.HTMLParser(encoding="utf-8")
# htmlElement = etree.parse("tencent.html",parser=parser)
# result = etree.tostring(htmlElement,encoding="utf-8")
# print(result.decode("utf-8"))

# 3) Combining lxml with XPath
# tencent.html is listed in Appendix I
from lxml import etree
parser = etree.HTMLParser(encoding="utf-8")
html = etree.parse("tencent.html",parser=parser)
# print(html)
# 1. Get all the tr tags
# !!!IMPORTANT!!! the xpath function returns a list; to take a single element, index into the list with []
trs = html.xpath("//tr")
# for tr in trs:
    # print(etree.tostring(tr,encoding="utf-8").decode("utf-8"))
    # break
# 2. Get the 2nd tr tag
tr = html.xpath("//tr[2]")[0]
# print(etree.tostring(tr,encoding="utf-8").decode("utf-8"))
# 3. Get all tags whose class equals "even"
trs = html.xpath("//tr[@class='even']")
# for tr in trs:
#     print(etree.tostring(tr,encoding="utf-8").decode("utf-8"))
# 4. Get the href attribute of all a tags
# the a tag sits under the first td; its href holds a partial url that can be completed into the full one
hrefs = html.xpath("//td/a/@href") # hrefs of all the a tags
# note this is not html.xpath("//a[@href]"), which selects the a tags that HAVE an href, not the href values themselves
positions = []
for i in hrefs:
    url = "https://hr.tencent.com/" + i
    # print(url)
# 5. Get all the job postings (plain text)
# analysis: all the information sits in tr tags; drop the first one (the header) and the last one (unneeded footer)
trs = html.xpath("//tr[position()>1 and position()<11]")
for tr in trs:
    # !!!IMPORTANT!!! when calling xpath on an element to search its descendants, prefix // with "." so the search starts from the current element
    href = tr.xpath(".//a/@href")[0]
    url = "https://hr.tencent.com/" + href
    # text() fetches all the text under a tag
    # // is needed before text() here because the text is not directly inside td but inside its child a
    title = tr.xpath("./td[1]//text()")[0]
    category = tr.xpath("./td[2]//text()")[0]
    numbers = tr.xpath("./td[3]//text()")[0]
    area = tr.xpath("./td[4]//text()")[0]
    pubdate = tr.xpath("./td[5]//text()")[0]
    # pack into a dict and append to the list
    position = {"url":url,"title":title,"category":category,
                "numbers":numbers,"area":area,"pubdate":pubdate}
    positions.append(position)
print(positions)

# 4) Douban movies scraper
# 1. Fetch the target page
import requests
url = "https://movie.douban.com/cinema/nowplaying/hangzhou/"
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
           "Referer":"https://movie.douban.com/"}
response = requests.get(url=url,headers=headers)
text = response.text
# print(text)
# response.text: returns a decoded string, of type str (unicode)
# response.content: returns the raw bytes of the response, of type bytes
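# A minimal check of the difference (assuming the page really is served as UTF-8):
print(response.content.decode("utf-8") == response.text) # True when requests guessed the encoding right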
# 2. Extract the data from the fetched page according to fixed rules
from lxml import etree
html = etree.HTML(text)
ul = html.xpath("//ul[@class='lists']")[0]
# print(etree.tostring(ul,encoding="utf-8").decode("utf-8"))  # serialize ul to utf-8 bytes, then decode back to a string
lis = ul.xpath("./li")
movies = [] # empty list that will hold the movie dicts
for li in lis:
    title = li.xpath("@data-title")[0]
    score = li.xpath("@data-score")[0]
    duration = li.xpath("@data-duration")[0]
    region = li.xpath("@data-region")[0]
    director = li.xpath("@data-director")[0]
    actors = li.xpath("@data-actors")[0]
    poster = li.xpath(".//img/@src")[0]
    movie = {"title":title,"score":score,"duration":duration,
             "region":region,"director":director,"actors":actors,"poster":poster}
    movies.append(movie)
print(movies)

# 5) ygdy8.net (Yangguang movies) scraper
# 1. Libraries used
import requests
from lxml import etree
# 2. Global constants
HEADERS = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
          "Referer":"http://www.ygdy8.net/index.html"}
HEADER_URL = "http://www.ygdy8.net/"
# 3. Build the url of each listing page and feed it into get_detail_urls
def spider():
    base_url = "http://www.ygdy8.net/html/gndy/dyzz/list_23_{}.html" # template for the listing-page urls
    movies = [] # list collecting the parsed movies (each movie is a dict)
    for i in range(1,2): # only the first page here; widen the range for more pages
        page_urls = base_url.format(i) # url of one listing page
        detail_urls = get_detail_urls(page_urls) # feed each page url into get_detail_urls
        for detail_url in detail_urls: # walk the detail urls on the page and parse each one
            movie = parse_detail_urls(detail_url)
            movies.append(movie)
    return movies
# 4. From a listing-page url, collect the detail urls of all movies on that page
def get_detail_urls(page_urls):
    response = requests.get(url=page_urls,headers=HEADERS)
    text = response.content.decode("gbk","ignore")
    # note 1: .text won't do here: response.text decodes with a guessed codec, but the page source declares "charset=gb2312", which gbk covers, so decode by hand
    # note 2: "ignore" prevents the error ('gbk' codec can't decode byte 0xd0 in position 30352: illegal multibyte sequence)
    html = etree.HTML(text=text) # parse text into an element
    tail_urls = html.xpath("//table[@class='tbspan']//a/@href") # these are only the tails of the full urls; join them with map
    # note 3: the xpath reads: the href (@) of every a descendant (//) of the table tags carrying class="tbspan"
    detail_urls = map(lambda tail_url:HEADER_URL+tail_url,tail_urls) # map returns a lazy iterator in Python 3: http://www.runoob.com/python/python-func-map.html
    return detail_urls
# 5. Parse the details under each detail url
def parse_detail_urls(detail_url):
    movie = {} # dict that will hold the fields
    response = requests.get(url=detail_url,headers=HEADERS)
    text = response.content.decode("gbk","ignore")
    html = etree.HTML(text=text) # parse text into an element
    # 1. title
    title = html.xpath("//div[@class='title_all']//font[@color='#07519a']/text()")[0]
    movie["title"] = title
    # note 1: when a path matches several nodes, add one more level of conditions above it
    # note 2: text() collects all the text under the node
    # 2. poster: it sits in the Zoom layer; xpath returns a list
    zoom = html.xpath("//div[@id='Zoom']")[0]
    imgs = zoom.xpath(".//img/@src") # two images here, the poster and a screenshot
    poster = imgs[0] # some entries lack the screenshot, so always take the first image
    movie["poster"] = poster
    # 3. the remaining fields
    infos = zoom.xpath(".//text()") # each line of the page becomes one list item
    for index,info in enumerate(infos): # enumerate yields index and content; the index is needed later to slice out the actors
        if info.startswith("◎年  代"): # the line carrying the year
            info = info.replace("◎年  代", "").strip()  # drop the label and strip the surrounding whitespace
            movie["year"] = info
        elif info.startswith("◎产  地"):
            info = info.replace("◎产  地","").strip()
            movie["country"] = info
        elif info.startswith("◎类  别"):
            info = info.replace("◎类  别","").strip()
            movie["category"] = info
        elif info.startswith("◎豆瓣评分"):
            info = info.replace("◎豆瓣评分","").strip()
            movie["score"] = info
        elif info.startswith("◎片  长"):
            info = info.replace("◎片  长","").strip()
            movie["duration"] = info
        elif info.startswith("◎导  演"):
            info = info.replace("◎导  演","").strip()
            movie["director"] = info
        elif info.startswith("◎导  演"):
            info = info.replace("◎导  演","").strip()
            movie["director"] = info
        elif info.startswith("◎主  演"):
            info = info.replace("◎主  演","").strip()
            actors = [info] # 主演这一行的那一个
            for i in range(index+1,len(infos)):
                actor = infos[i].strip() # careful: infos[i], not info
                if actor.startswith("◎"): # stop at the next ◎ label
                    break
                actors.append(actor)
            movie["actor"] = actors
        elif info.startswith("◎简  介"):
            info = info.replace("◎简  介","").strip()
            profiles = [info]
            for j in range(index+1,len(infos)):
                profile = infos[j].strip()
                if profile.startswith("【"):
                    break
                profiles.append(profile)
            movie["profile"] = profiles
    download_urls = zoom.xpath(".//td[@bgcolor='#fdfddf']//a/@href")[0]
    movie["download"] = download_urls
    print(movie)
    return movie # spider() collects the returned dicts
if __name__ == '__main__':
    spider()

# II. The BeautifulSoup4 Library
# 1) Overview
# 1. Concept: like lxml, an HTML/XML parser; lxml only traverses locally, while BeautifulSoup is built on the HTML DOM and loads the whole document, which makes it easier to use than lxml
# 2. Official docs: https://www.crummy.com/software/BeautifulSoup/bs4/doc/index.zh.html

# 2) Basic usage
# 2-1. The find_all and find methods
# html is the page from Appendix I
from bs4 import BeautifulSoup
html = """..."""
soup = BeautifulSoup(html,"lxml") # the second argument selects the parser; lxml is the usual choice
# 1. Get all tr tags
trs = soup.find_all("tr")
# 2. Get the second tr tag; limit caps the number of matches, much like LIMIT in SQL; a list is returned
tr = soup.find_all("tr",limit=2)[1]
# 3. Get all tags with class="even"; class is a Python keyword, so bs4 uses class_ to disambiguate
trs = soup.find_all("tr",class_="even")
# or equivalently via attrs (short for attributes), written as a dict
trs = soup.find_all("tr",attrs={"class":"even"})
# 4. Extract all div tags with class="right pl9" AND id="topshares" - combining several conditions
divs = soup.find_all("div",class_="right pl9",id="topshares")
# the same thing written with attrs
divs = soup.find_all("div",attrs={"class":"right pl9","id":"topshares"})
# 5. Get the href attribute of all a tags - pulling an attribute out of a tag
aList = soup.find_all("a")
for a in aList:
    # 1. by subscript (the recommended way)
    href = a["href"]
    # 2. via the attrs property
    href = a.attrs["href"]
# 6. Get all the job postings; when only the string inside a tag is wanted, .string does it, like text() in XPath
# !!IMPORTANT!! .string only works on a tag with a single child node, as in <td>1</td>; otherwise it returns None - find those tags in the html and weed them out (see the sketch after this loop)
trs = soup.find_all("tr")[1:10]
works = []
for tr in trs:
    # 1.方法一
    work = {}
    tds = tr.find_all("td")
    work_name = tds[0].string
    work["work_name"] = work_name
    works.append(work)
    # 2.方法二(巨特么方便,用stripped_strings剔除掉其中的空格)
    infos = list(tr.stripped_strings)
    work_name = infos[0]
    work["work_name"] = work_name
    works.append(work)
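# Illustrating the .string caveat from note 6 above (a quick sketch):
demo_soup = BeautifulSoup("<p>1</p><p><b>x</b>y</p>","lxml")
ps = demo_soup.find_all("p")
print(ps[0].string) # 1 - a single child node, so .string works
print(ps[1].string) # None - several child nodes, so .string returns None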
    
# 2-2. CSS selectors in BeautifulSoup
# i. Common CSS syntax: 1. by tag a: a  2. by class b: .b  3. by id c: #c  4. descendant p under class d: #d p  5. direct child p of class d: #d>p
soup = BeautifulSoup(html,"lxml")
# 1. Get all tr tags; CSS selector: tr
trs = soup.select("tr")
# 2. Get the 2nd tr tag
tr = soup.select("tr")[1]
# 3. Get all tr tags with class="even"; CSS selector: tr.even
tr = soup.select("tr.even")
tr = soup.select("tr[class='even']")
# 4. Extract all div tags with class="right pl9" and id="topshares" - several conditions at once
# a compound selector such as div.right.pl9#topshares should cover this, though support depends on the bs4 version
# 5. Get the href attribute of all a tags
aList = soup.select("a")[57:67]
for a in aList:
     href = a["href"]
# 6. Extract all the job postings
works = []
trs = soup.select("tr")[1:11]
for tr in trs:
    work = {} # a fresh dict per row, otherwise every list entry points at the same dict
    infos = list(tr.stripped_strings)
    work["work_name"] = infos[0]
    works.append(work)

# 3) Common object types
# 1. Tag: every tag in BeautifulSoup is a Tag, and the BeautifulSoup object is essentially a Tag too, so methods such as find and find_all really belong to Tag rather than BeautifulSoup
# 2. BeautifulSoup: inherits from Tag and builds the soup tree; the lookup methods find, find_all and select still come from Tag
# 3. NavigableString: inherits from Python's str and is used just like str
# 4. Comment: inherits from NavigableString
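# A quick type check of the hierarchy above (a minimal sketch):
from bs4 import BeautifulSoup
from bs4.element import Tag, NavigableString
s = BeautifulSoup("<p>hi</p>","lxml")
p = s.find("p")
print(isinstance(s,Tag), isinstance(p,Tag), isinstance(p.string,NavigableString)) # True True True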

# 4) weather.com.cn scraper with data visualization
from bs4 import BeautifulSoup
import requests
ALL_DATA = []
def spider():
    # I. Batch-scrape: build each regional page url and fetch them in turn
    urls = ["http://www.weather.com.cn/textFC/hb.shtml",
            "http://www.weather.com.cn/textFC/hd.shtml",
            "http://www.weather.com.cn/textFC/hz.shtml",
            "http://www.weather.com.cn/textFC/hn.shtml",
            "http://www.weather.com.cn/textFC/xb.shtml",
            "http://www.weather.com.cn/textFC/xn.shtml",
            "http://www.weather.com.cn/textFC/gat.shtml"]
    for url in urls:
        get_weather_condition(url)
    # II. Visualization
    from pyecharts import Bar
    ALL_DATA.sort(key=lambda data:data["min_temp"])
    data = ALL_DATA[0:10]
    cities = list(map(lambda x:x["city"],data))
    min_temps = list(map(lambda x:x["min_temp"],data))
    chart = Bar("min_temp")
    chart.add("",cities,min_temps)
    chart.render("test.html")
def get_weather_condition(url): # scraper for a single page
    # request headers
    headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
               "Referer":"http://www.weather.com.cn/textFC/hd.shtml"}
    # fetch the page
    response = requests.get(url=url,headers=headers)
    text = response.content.decode("utf-8")
    # parse into a soup
    soup = BeautifulSoup(text,"html5lib") # html5lib because one of the pages has badly formed html and needs the extra fault tolerance
    conMidtab = soup.find_all("div",class_="conMidtab")[0] # there are 7 conMidtab divs; the last 6 are hidden and hold the following days, so keep only today's
    tables = conMidtab.find_all("table") # each province's weather sits in its own table; walk them
    for table in tables:
        trs = table.find_all("tr")[2:] # drop the two header rows of the table
        for index,tr in enumerate(trs): # the first row of each table also carries the province name, so the offsets differ
            infos = list(tr.stripped_strings)
            if index == 0:
                city = infos[1]
                max_temp = infos[5]
                min_temp = infos[-2]
            else:
                city = infos[0]
                max_temp = infos[4]
                min_temp = infos[-2]
            ALL_DATA.append({"city":city,"max_temp":int(max_temp),"min_temp":int(min_temp)})
if __name__ == '__main__':
    spider()

# III. Regular Expressions
# 1) Concept: matching the wanted data out of a string according to a set of rules; those rules are the regular expression
#   distinction: unlike XPath and bs4, regex scraping does not rely on the parent-child structure of the page; the examples below still follow that structure, but they don't have to
# 2) Commonly used functions in the re module
# 1. match: matches from the very start of the string; if the start doesn't match, it fails immediately
# 2. search: scans the string for the first substring satisfying the pattern and returns it (match and search are contrasted in a sketch after the group example below)
# 3. group: grouping; a regex can group the captured text using ()
import re
text = "apple price is $10, orange price is $20"
ret = re.search(".*(\$\d+),.*(\$\d+)",text)
print(ret.group(0)) # 等同于print(ret.group())
print(ret.group(1)) # 返回()内的内容,索引从1开始
print(ret.groups()) # 依次返回各()内的内容,tuple(元组类型),和list非常类似,但是tuple一旦初始化就不能修改,更为安全
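# A minimal contrast between match and search, on the same text:
print(re.match(r"\$\d+",text))          # None - match only tries the very start of the string
print(re.search(r"\$\d+",text).group()) # $10 - search scans the whole string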
# 4. findall: finds every match and returns them as a list
import re
text = "apple price is $10, orange price is $20"
ret = re.findall(r"\$\d+",text)
print(ret)
# 5. sub: replaces matched substrings; signature: sub(pattern, repl, string, count=0, flags=0) - count caps the number of replacements, flags are the usual regex flags
import re
text = "apple price is $10, orange price is $20"
ret = re.sub(r"\$\d+","0",text,count=1) # only the first match is replaced: $10 becomes 0, $20 stays
print(ret)
# 6. split: splits a string; maxsplit and flags may be given (see the sketch after this example)
import re
text = "hello wrold !"
ret = re.split(" ",text)
print(ret)
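# maxsplit caps the number of splits (a quick sketch):
print(re.split(r" ","a b c",maxsplit=1)) # ['a', 'b c']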
# 7. compile: pre-compiles a regex; with re.VERBOSE the pattern may carry comments - recommended!!! see example 7 below

# 3) Common matching rules:
import re
# 1. Matching a single character
# match a literal string
text = "hello"
ret = re.match("he",text)
print(ret.group())
# . : matches any single character
text = "hello"
ret = re.match(".",text)
print(ret.group())
# \d: matches any digit
text = "1234"
ret = re.match(r"\d",text)  # or ret = re.match(r"[0-9]",text)
print(ret.group())
# \D: matches any non-digit
text = "ASD213"
ret = re.match(r"\D",text)  # or ret = re.match(r"[^0-9]",text)
print(ret.group())
# \s: matches whitespace characters: \n, \t, \r and the space
text = "\n"
ret = re.match(r"\s",text)
print(ret.group())
# \w: matches a-z, A-Z, the digits and underscore
text = "ASDW"
ret = re.match(r"\w",text)  # or ret = re.match(r"[a-zA-Z0-9_]",text)
print(ret.group())
# \W: matches the complement of \w
text = "+"
ret = re.match(r"\W",text)  # or ret = re.match(r"[^a-zA-Z0-9_]",text)
print(ret.group())
# []: a character set; any character listed inside [] matches
text = "0571-888888"
ret = re.match(r"[\d-]+",text)
print(ret.group())
# 2. Matching multiple characters
# *: zero or more occurrences; whether that means digits, letters or something else depends on what stands before the *
text = "0571"
ret = re.match(r"\d*",text)
print(ret.group())
# +: one or more occurrences
text = "abcd"
ret = re.match(r"\w+",text)
print(ret.group())
# ?: zero or one occurrence - either absent or present exactly once
text = "abcd"
ret = re.match(r"\w?",text)
print(ret.group())
# {m}: exactly m characters
text = "abcd"
ret = re.match(r"\w{2}",text)
print(ret.group())
# {m,n}: from m to n characters, matching as many as possible (greedy)
text = "abcd"
ret = re.match(r"\w{1,3}",text)
print(ret.group())
# 3. Others (a small sketch of ^, $ and | follows the example below)
# ^: starts with...; inside a character class [] it negates the set
# $: ends with...
# |: alternation between expressions or strings, usually wrapped in ()
# \: escape character; some characters carry special meaning in a regex, so matching them literally requires a backslash escape
# r: raw string
text = "\\c" # Python strips one level of escaping, so the actual string is \c
ret = re.match(r"\\c",text) # the regex engine strips another level; r marks a Python raw string - equivalently ret = re.match("\\\\c",text)
print(ret.group())
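# A small sketch of ^, $ and | from the list above:
print(re.search(r"^hello","hello world").group()) # hello - anchored at the start
print(re.match(r"\w+$","hello").group())          # hello - $ forces the match to reach the end of the string
print(re.match(r"gr(a|e)y","grey").group())       # grey - | alternation wrapped in ()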
# greedy mode + vs non-greedy mode +?:
text = "<h1>abc<h2>" # goal: select <h1>
ret = re.match(r"<.+?>",text) # non-greedy grabs the first satisfying match, <h1>; "<.+>" would grab <h1>abc<h2>
print(ret.group())

# 4) Small exercises
# 1. Validate a phone number (rule: the first digit is 1, the second is one of 3, 4, 5, 7, 8)
text = "13303475216"
ret = re.match(r"1[34578]\d{9}",text)
print(ret.group())
# 2. Validate an email address (rule: digits, letters or underscores + @ + digits and lowercase letters + . + lowercase letters)
text = "a_test_@163.com"
ret = re.match(r"\w+@[0-9a-z]+\.[a-z]+",text) # note: the dot is escaped as \.
print(ret.group())
# 3. Validate a URL (rule: http/https/ftp + : + // + any run of non-whitespace characters)
text = "http://www.baidu.com"
ret = re.match(r"(http|https|ftp)://[^\s]+",text) # (|) expresses the alternatives
print(ret.group())
# 4. Validate an ID card number (rule: 18 characters, the first 17 are digits, the 18th is a digit, x or X)
text = "325621198507267315"
ret = re.match(r"[1-9]\d{16}[\dxX]",text)
print(ret.group())
# 5. Match the numbers 0-100 (rule: forms like 09 and 101 must not match)
# analysis: one digit: 0-9; two digits: 10-99; three digits: only 100
text = "100"
ret = re.match(r"([0-9]$|[1-9]\d$|100$)",text) # or ret = re.match(r"([1-9]\d?$|100$)",text)
print(ret.group())
# 6. A Lagou job-posting snippet: strip the html tags and keep the text
import re
html = """
<dd class="job_bt">
        <h3 class="description">职位描述:</h3>
        <div>
        <p>职位描述:<br>1、参与公司用户行为数据的收集和实时计算开发;<br>2、根据业务需求实现实时和离线数据ETL过程<br>3、对外应用系统、数据服务接口的开发<br>4、开发实时数据处理、统计功能,支撑上层业务,如:数据监控、统计分析、日报展现、业务方调用等<br><br>任职要求:<br>1、计算机/软件工程或相关专业出身,工作3年以上<br>2、扎实的代码基础;擅长java或scala。<br>3、熟悉大数据的生态圈和相关组件(hadoop、hive、spark、flink、kafka、hbase等),能够深了解集群和周边模块<br>4、对spark&nbsp;RDD模型有深刻的理解,能针对线上问题进行调优;<br>5、熟悉Mysql,Redis,能够快速理解业务模型和数据模型<br>6、熟悉Linux环境及脚本开发(Python/Perl/Shell等)</p>
        </div>
    </dd>
"""
ret = re.sub("<.+?>","",html)
print(ret)
# 7. Use compile to build a reusable pattern that extracts the amounts, with a comment on every part
import re
text = "apple price is $10.05, orange price is $20.11"
r = re.compile(r"""
    \$ # $符号,用\进行了转义
    \d+ # $后.前的数字
    \. # .本身
    \d+ # .后的数字部分
""",re.VERBOSE)
ret = re.findall(r,text)
print(ret)

# 5) Gushiwen (classical poetry site) scraper
import requests
import re
def main():
    base_url = "https://www.gushiwen.org/default_{}.aspx"
    for i in range(1,6):
        page_url = base_url.format(i)
        parse_page(page_url)
def parse_page(page_url):
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Referer": "https://www.gushiwen.org/default_1.aspx"}
    response = requests.get(url=page_url,headers=headers)
    text = response.text
    # approach: collect all titles into one list, do the same for every other field, then stitch them together by index
    # note 1: . does not match \n; pass re.DOTALL so that . matches anything
    # note 2: non-greedy matching is a must - the blocks share the same page structure, so a greedy pattern would swallow the same tag of the next block
    # note 3: with findall, wrap the part you want returned in ()
    titles = re.findall(r'<div\sclass="cont">.*?<b>(.*?)</b>',text,re.DOTALL) # the b tag under each cont div
    dynasties = re.findall(r'<div\sclass="cont">.*?<p\sclass="source">.*?<a.*?>(.*?)</a>',text,re.DOTALL) # the first a tag under cont
    authors = re.findall(r'<div\sclass="cont">.*?<p\sclass="source">.*?<a.*?>.*?<a.*?>(.*?)</a>',text,re.DOTALL) # the second a tag under cont
    contents_raw = re.findall(r'<div\sclass="cont">.*?<div\sclass="contson".*?>(.*?)</div>',text,re.DOTALL) # the contson div under cont
    contents = []
    for content in contents_raw:
        a = re.sub(r"<.*?>","",content) # strip the tags left inside
        a = a.strip() # strip the surrounding whitespace
        contents.append(a)
    # zip the parallel lists back together
    poems = []
    for values in zip(titles,dynasties,authors,contents):
        title,dynasty,author,content = values
        poem = {"title":title,"dynasty":dynasty,"author":author,"content":content}
        poems.append(poem)
    for poem in poems:
        print(poem)
        print("="*120)
if __name__ == '__main__':
    main()

# 6) Qiushibaike scraper (side note: I can never find the punchline)
import re
import requests
def url():
    base_url = "https://www.qiushibaike.com/text/page/{}/"
    for i in range(1,31):
        page_url = base_url.format(i)
        parse_url(page_url)
def parse_url(page_url):
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
              "Referer": "https://www.qiushibaike.com/text/page/1/"}
    response = requests.get(url=page_url,headers=headers)
    text = response.text
    contents_raw = re.findall(r'<div\sclass="content">.*?<span>(.*?)</span>',text,re.DOTALL)
    contents = []
    for content in contents_raw:
        a = re.sub("\n","",content)
        a = re.sub("<.*?>","",a)
        a = a.strip()
        contents.append(a)
    for content in contents:
        print(content)
        print("="*120)
if __name__ == '__main__':
    url()
Appendix I - tencent.html

<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
    <title>职位搜索 | 社会招聘 | Tencent 腾讯招聘</title>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
	<!-- Js Css -->
     		<link media="screen" href="//cdn.m.tencent.com/hr_static/css/all.css?max_age=86412" type="text/css" rel="stylesheet" />
	<script type="text/javascript" src="//cdn.m.tencent.com/hr_static/js/jquery-1.7.2.min.js"></script>
    <script type="text/javascript" src="//cdn.m.tencent.com/hr_static/js/jquery-ui-1.7.2.custom.min.js"></script>
    <script type="text/javascript" src="//cdn.m.tencent.com/hr_static/js/thickbox.js"></script>
    <link media="screen" href="//cdn.m.tencent.com/hr_static/css/thickbox.css" type="text/css" rel="stylesheet" />
    <script type="text/javascript" src="//cdn.m.tencent.com/hr_static/js/functions.js"></script>
    <script type="text/javascript" src="//cdn.m.tencent.com/hr_static/js/utils.js"></script>
    <script language="javascript" src="//vm.gtimg.cn/tencentvideo/txp/js/txplayer.js" charset="utf-8"></script>
    <script type="text/javascript" src="//cdn.m.tencent.com/hr_static/js/all.js?max_age=86412"></script>	<!-- Js Css -->
	<script>
		var keywords_json = ["python"];
	</script>
</head>

<body>
    	<div id="header">
    	<div class="maxwidth">
    		<a href="index.php" class="left" id="logo"><img src="//cdn.m.tencent.com/hr_static/img/logo.png"/></a>
    		<div class="right" id="headertr">
    			<div class="right pl9" id="topshares">
    				<div class="shares">
    					<span class="left">分享到:</span>
		    			<!--<a href="javascript:;" onclick="shareto('qqt','top');" id="qqt" title="分享到腾讯微博">分享到腾讯微博</a>-->
		    			<a href="javascript:;" onclick="shareto('qzone','top');" id="qzone" title="分享到QQ空间">分享到QQ空间</a>
		    			<!--<a href="javascript:;" onclick="shareto('pengyou','top');" id="pengyou" title="分享到腾讯朋友">分享到腾讯朋友</a>-->
		    			<a href="javascript:;"  onclick="shareto('sinat','top');"id="sinat" title="分享到新浪微博">分享到新浪微博</a>
		    			<!--<a href="javascript:;"  onclick="shareto('renren','top');"id="renren" title="分享到人人网">分享到人人网</a>-->
		    			<!--<a href="javascript:;"  onclick="shareto('kaixin001','top');"id="kaixin" title="分享到开心网">分享到开心网</a>-->
		    			<div class="clr"></div>
    				</div>
    				<!--<a href="javascript:;">分享</a>-->
    			</div>
    			<!--<div class="right pl9">-->
    				<!--<a href="http://t.qq.com/QQjobs" id="tqq" target="_blank">收听腾讯招聘</a>-->
    			<!--</div>-->
    			<div class="right pr9">
    				    				    					<a href="login.php" id="header_login_anchor">登录</a><span class="plr9">|</span><a href="reg.php">注册</a>
    				    				<span class="plr9">|</span><a href="question.php">反馈建议</a>
    				<span class="plr9">|</span><a href="http://careers.tencent.com/global" target="_blank">Tencent Global Talent</a>
    				<script>
    					var User_Account = "";
    				</script>
    				    			</div>
    			<div class="clr"></div>
    		</div>
    		<div class="clr"></div>
    	</div>
    	<div id="menus">
    		<div class="maxwidth">
	    		<ul id="menu" class="left">
	    			<li id="nav1" ><a href="index.php">&nbsp;</a></li>
	    			<li id="nav2" class="active" ><a href="social.php">&nbsp;</a></li>
	    			<li id="nav3"><a href="about.php">&nbsp;</a></li>
	    			<li id="nav4"><a href="workInTencent.php">&nbsp;</a></li>
	    		</ul>
	    		<a class="right texti9" target="_blank" id="navxy" href="http://join.qq.com">校园招聘</a>
	    		<div class="clr"></div>
	    	</div>
    	</div>
    </div>    <div id="sociaheader">
			</div>
    <div id="position" class="maxwidth">
    	<a name="a" id="a"></a>
    	<div class="left wcont_b box">
		    <div class="blueline"><div class="butzwss"></div></div>
		    <form id="searchform" class="buts1">
		    	<div id="searchrow1">
		    		<div id="search1"><input id="search2" name="keywords" t="请输入关键词" value="python" class="left"/><input class="left" id="search3" type="submit" value=""/><div class="clr"></div></div>
		    		<input type="hidden" name="lid" value="0"/>
		    		<input type="hidden" name="tid" value="0"/>
		    	</div>
		    	<div id="searchrow2">
		    		<div class="srow2l left"></div>
		    		<div class="left items pl9 itemnone" id="additems">
		    			<a href="position.php?keywords=python&tid=0" class="item active"><span><font>全部</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&tid=0&lid=2218"><span><font>深圳</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&tid=0&lid=2156"><span><font>北京</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&tid=0&lid=2175"><span><font>上海</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&tid=0&lid=2196"><span><font>广州</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&tid=0&lid=2268"><span><font>成都</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&tid=0&lid=2252"><span><font>杭州</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&tid=0&lid=2426"><span><font>昆明</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&tid=0&lid=33"><span><font>美国</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&tid=0&lid=2459"><span><font>中国香港</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2418"><span><font>长春</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=62"><span><font>欧洲</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2226"><span><font>重庆</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2355"><span><font>武汉</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=90"><span><font>荷兰</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2393"><span><font>太原</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2406"><span><font>沈阳</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2381"><span><font>西安</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2436"><span><font>贵阳</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2442"><span><font>呼和浩特</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2448"><span><font>银川</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2225"><span><font>天津</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2228"><span><font>南京</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2346"><span><font>郑州</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=59"><span><font>日本</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2314"><span><font>南宁</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2439"><span><font>兰州</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2336"><span><font>石家庄</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2283"><span><font>福州</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=81"><span><font>新加坡</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2407"><span><font>大连</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2453"><span><font>乌鲁木齐</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=60"><span><font>马来西亚</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=95"><span><font>雄安新区</font></span></a>
		    					    				<a class="item itemhide" href="position.php?keywords=python&tid=0&lid=2280"><span><font>海口</font></span></a>
		    					    		</div>
							    		<div class="left"><a href="javascript:;" class="more2">更多</a></div>
							    		<div class="clr"></div>
		    	</div>
		    	<div id="searchrow3">
		    		<div class="srow2l left"></div>
		    		<div class="left items pl9">
		    			<a href="position.php?keywords=python&lid=0" class="item active"><span><font>全部</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&lid=0&tid=87"><span><font>技术类</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&lid=0&tid=82"><span><font>产品/项目类</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&lid=0&tid=83"><span><font>市场类</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&lid=0&tid=81"><span><font>设计类</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&lid=0&tid=84"><span><font>职能类</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&lid=0&tid=85"><span><font>内容编辑类</font></span></a>
		    					    				<a class="item" href="position.php?keywords=python&lid=0&tid=86"><span><font>客户服务类</font></span></a>
		    					    		</div>
		    		<div class="clr"></div>
		    	</div>
		    </form>
		    <table class="tablelist" cellpadding="0" cellspacing="0">
		    	<tr class="h">
		    		<td class="l" width="374">职位名称</td>
		    		<td>职位类别</td>
		    		<td>人数</td>
		    		<td>地点</td>
		    		<td>发布时间</td>
		    	</tr>
		    			    	<tr class="even">
		    		<td class="l square"><a target="_blank" href="position_detail.php?id=44592&keywords=python&tid=0&lid=0">OMG097-数据平台运维(北京)</a></td>
					<td>技术类</td>
					<td>1</td>
					<td>北京</td>
					<td>2018-09-30</td>
		    	</tr>
		    			    	<tr class="odd">
		    		<td class="l square"><a target="_blank" href="position_detail.php?id=44578&keywords=python&tid=0&lid=0">MIG16-基础架构工程师(北京)</a><span class="hot">&nbsp;</span></td>
					<td>技术类</td>
					<td>2</td>
					<td>北京</td>
					<td>2018-09-30</td>
		    	</tr>
		    			    	<tr class="even">
		    		<td class="l square"><a target="_blank" href="position_detail.php?id=44559&keywords=python&tid=0&lid=0">18796-专项技术测试(深圳)</a><span class="hot">&nbsp;</span></td>
					<td>技术类</td>
					<td>2</td>
					<td>深圳</td>
					<td>2018-09-30</td>
		    	</tr>
		    			    	<tr class="odd">
		    		<td class="l square"><a target="_blank" href="position_detail.php?id=44546&keywords=python&tid=0&lid=0">SNG17-QQ钱包后台开发工程师(深圳)</a></td>
					<td>技术类</td>
					<td>1</td>
					<td>深圳</td>
					<td>2018-09-30</td>
		    	</tr>
		    			    	<tr class="even">
		    		<td class="l square"><a target="_blank" href="position_detail.php?id=44547&keywords=python&tid=0&lid=0">MIG09-NLP算法工程师</a></td>
					<td>技术类</td>
					<td>1</td>
					<td>北京</td>
					<td>2018-09-30</td>
		    	</tr>
		    			    	<tr class="odd">
		    		<td class="l square"><a target="_blank" href="position_detail.php?id=44543&keywords=python&tid=0&lid=0">SNG07-测试开发高级工程师</a></td>
					<td>技术类</td>
					<td>1</td>
					<td>深圳</td>
					<td>2018-09-30</td>
		    	</tr>
		    			    	<tr class="even">
		    		<td class="l square"><a target="_blank" href="position_detail.php?id=44539&keywords=python&tid=0&lid=0">SNG11-人工智能研究员(深圳)</a><span class="hot">&nbsp;</span></td>
					<td>技术类</td>
					<td>1</td>
					<td>深圳</td>
					<td>2018-09-30</td>
		    	</tr>
		    			    	<tr class="odd">
		    		<td class="l square"><a target="_blank" href="position_detail.php?id=44521&keywords=python&tid=0&lid=0">18435-反洗钱建模工程师</a><span class="hot">&nbsp;</span></td>
					<td>技术类</td>
					<td>2</td>
					<td>深圳</td>
					<td>2018-09-30</td>
		    	</tr>
		    			    	<tr class="even">
		    		<td class="l square"><a target="_blank" href="position_detail.php?id=44526&keywords=python&tid=0&lid=0">18796-后台专项测试工程师(深圳)</a></td>
					<td>技术类</td>
					<td>1</td>
					<td>深圳</td>
					<td>2018-09-30</td>
		    	</tr>
		    			    	<tr class="odd">
		    		<td class="l square"><a target="_blank" href="position_detail.php?id=44514&keywords=python&tid=0&lid=0">22989-专有云中间件运维工程师(深圳/北京/上海)</a></td>
					<td>技术类</td>
					<td>2</td>
					<td>深圳</td>
					<td>2018-09-30</td>
		    	</tr>
		    			    	<tr class="f">
		    		<td colspan="5">
		    			<div class="left">共<span class="lightblue total">544</span>个职位</div>
		    			<div class="right"><div class="pagenav"><a href="javascript:;" class="noactive" id="prev">上一页</a><a class="active" href="javascript:;">1</a><a href="position.php?lid=&tid=&keywords=python&start=10#a">2</a><a href="position.php?lid=&tid=&keywords=python&start=20#a">3</a><a href="position.php?lid=&tid=&keywords=python&start=30#a">4</a><a href="position.php?lid=&tid=&keywords=python&start=40#a">5</a><a href="position.php?lid=&tid=&keywords=python&start=50#a">6</a><a href="position.php?lid=&tid=&keywords=python&start=60#a">7</a><a href="position.php?lid=&tid=&keywords=python&start=70#a">...</a><a href="position.php?lid=&tid=&keywords=python&start=540#a">55</a><a href="position.php?lid=&tid=&keywords=python&start=10#a" id="next">下一页</a><div class="clr"></div></div></div>
		    			<div class="clr"></div>
		    		</td>
		    	</tr>
		    </table>
		</div>
		<div class="right wcont_s box">
		    <div class="blueline"><div class="butcjwt"></div></div>
		    <div class="module_faqs square"><a href="faq.php?id=5" title="如何应聘腾讯公司的职位?">如何应聘腾讯公司的职位?</a><a href="faq.php?id=3" title="应届生如何应聘?">应届生如何应聘?</a><a href="faq.php?id=19" title="腾讯应聘流程是什么?">腾讯应聘流程是什么?</a><a href="faq.php?id=20" title="我注册了简历,但为什么没有人联系我?">我注册了简历,但为什么没...</a><a href="faq.php?id=22" title="我忘记密码了,怎么办?">我忘记密码了,怎么办?</a><a href="faq.php?id=23" title="如何进行简历修改?">如何进行简历修改?</a></div>		</div>
		<div class="clr"></div>
	</div>
   	<div id="homeDep"><table id="homeads"><tr><td align="center"><a href="http://tencent.avature.net/career" target="blank">全球招聘</a></td><td align="center"><a href="http://game.qq.com/hr/" target="blank">互动娱乐事业群招聘</a></td><td align="center"><a href="http://hr.tencent.com/position.php?lid=&tid=&keywords=WXG" target="blank">微信事业群招聘</a></td><td align="center"><a href="http://hr.qq.com/" target="blank">技术工程事业群招聘</a></td></tr></table></div>    	<div id="footer">
		<div>
			<a href="http://www.tencent.com/" target="_blank">关于腾讯</a><span>|</span><a href="http://www.qq.com/contract.shtml" target="_blank">服务条款</a><span>|</span><a href="http://hr.tencent.com/" target="_blank">腾讯招聘</a><span>|</span><a href="http://careers.tencent.com/global" target="_blank">Tencent Global Talent</a><span>|</span><a href="http://gongyi.qq.com/" target="_blank">腾讯公益</a><span>|</span><a href="http://service.qq.com/" target="_blank">客服中心</a>
	    </div>
		<p>Copyright &copy; 1998 - 2018 Tencent. All Rights Reserved.</p>
	</div>
	<script type="text/javascript" src="//tajs.qq.com/stats?sId=64934792" charset="UTF-8"></script>
</body>
</html>

 
