# 正则表达式 (regular expressions)
"""
四种解析方式
re解析
"""
"""
正则表达式:使用表达式的方式对字符串进行匹配的语法规则
抓取的网页源代码本质就是超长字符串
正则表达式测试网站:https://tool.oschina.net/regex
"""
"""
普通字符: "你好,李焕英" 直接匹配"李焕英"
元字符
. 匹配除换行符以外的任意字符
\w 匹配数字字母下划线
\d 匹配数字
^ 匹配开始后的内容
$ 匹配结束的内容
\W 非数字字母下划线
\D非数字
| 或者
() 分组
[]字符组
[^xxxx] 除了字符组的内容进行匹配
"""
"""
量词
* 0或更多次
+ 一次或更多次
? 0次或一次
{n} n次
{n,}n次或更多次
{n,m} n次到m次
"""
"""
贪婪匹配 惰性匹配
.*
.*? 回溯 从远到近
"""
# re模块 (the re module)
"""
re模块
"""
import re

# findall: returns every non-overlapping match as a list of strings.
result = re.findall("a", "我是abcda")
print(result)

# finditer: returns a lazy iterator of Match objects.
result_1 = re.finditer(r"\d+", "aa12 333")
print(result_1)
for m in result_1:
    print(m.group())

# search: first match anywhere in the string (None if nothing matches).
result_2 = re.search(r"\d+", "aa12 333")
print(result_2)
print(result_2.group())

# match: anchored at the start of the string, so this one is None here.
result_3 = re.match(r"\d+", "aa12 333")
print(result_3)

# Pre-compiling ("preloading") a pattern lets it be reused efficiently.
obj = re.compile(r"\d+")
result = obj.findall("wjjj123kjkjk12222")
print(result)

# To extract data the pattern must be parenthesized as a group;
# a group can be given its own name with (?P<name>pattern).
"""
获取豆瓣电影top250的所有电影名,导演,年份
"""
import re
import requests
# Scrape the Douban Movie Top 250: for every film print its title,
# director and year, and append them as one CSV row to demo.csv.
num = 0
# Hoisted out of the loop: the headers and the compiled pattern are
# loop-invariant, so build them once instead of once per page.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/114.0"}
# re.S lets "." cross newlines so one pattern spans a whole list item.
obj = re.compile(r'<div class="item">.*?<span class="title">(?P<title>.*?)</span>.*?导演:(?P<director>.*?)&nbsp;.*?<br>(?P<year>.*?)&nbsp;', re.S)
with open("demo.csv", "w", encoding="utf-8") as f:
    # 10 pages of 25 movies each: start=0, 25, ..., 225.
    while num <= 225:
        url = f"https://movie.douban.com/top250?start={num}&filter="
        resp = requests.get(url, headers=headers)
        result = obj.finditer(resp.text)
        for i in result:
            title = i.group("title").strip()
            director = i.group("director").strip()
            year = i.group("year").strip()
            print(title)
            print(director)
            print(year)
            f.write(f"{title},{director},{year}\n")
        num += 25
"""
爬取电影天堂的链接和电影名
"""
import re
import requests
url = "https://www.dy2018.com/"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/114.0"}
resp = requests.get(url, headers=headers)
resp.encoding = "gbk"
obj_1 = re.compile(r"2023必看热片.*?<ul>(?P<html>.*?)</ul>", re.S)
obj_2 = re.compile(r"href='(?P<url>.*?)'", re.S)
obj_3 = re.compile(r'◎片 名 (?P<name>.*?)<br.*?bgcolor="#fdfddf"><a href="(?P<url_2>.*?)"', re.S)
# obj_3 = re.compile(r'◎片 名 (?P<name>.*?)<br.*?bgcolor="#fdfddf"><a hraf="(?P<url_2>.*?)"', re.S)
value = obj_1.search(resp.text)
value_1 = value.group("html")
value_2 = obj_2.finditer(value_1)
with open("demo.csv", "w", encoding="utf-8") as f:
for i in value_2:
url_1 = url.strip("/") + i.group("url").strip()
resp_1 = requests.get(url_1, headers=headers)
resp_1.encoding = "gbk"
value_3 = obj_3.search(resp_1.text)
f.write(f'{value_3.group("name").strip()},{value_3.group("url_2").strip()}\n')