核心代码:
import re
import requests
from bs4 import BeautifulSoup
def work(dic, url, headers, shop_name):
    """Scrape one Dianping listing page, print each shop's info, and append
    every shop name to ``shop_name`` (mutated in place).

    Args:
        dic: legacy placeholder kept for interface compatibility; the function
            rebinds it internally and never reads the passed-in value.
        url (str): listing-page URL to fetch.
        headers (dict): request headers; must include a logged-in cookie,
            otherwise Dianping serves a verification page.
        shop_name (list): accumulator list of shop names (appended to).
    """
    # Fetch the page.
    response = requests.get(url, headers=headers)
    # Parse the HTML with the lxml backend.
    soup = BeautifulSoup(response.text, 'lxml')
    # Locate the shop-list container. It is missing when the cookie has
    # expired or anti-scraping kicked in; fail loudly instead of raising
    # an opaque AttributeError on None.
    shop_list = soup.find('div', id='shop-all-list')
    if shop_list is None:
        print(f'未找到商户列表,可能需要更新 cookie 或页面结构已变化: {url}')
        return
    li_elements = shop_list.find('ul').find_all('li')
    for li in li_elements:
        # Shop name.
        name = li.find('h4').get_text(strip=True)
        # Cuisine and address: the first two span.tag elements under
        # div.tag-addr. The original code looped over the spans with
        # enumerate() while always indexing spans[0]/spans[1], which
        # printed each shop len(spans) times; read them once instead,
        # and guard against pages with fewer than two tags.
        spans = li.find('div', class_='tag-addr').find_all('span', class_='tag')
        cuisine = spans[0].text if len(spans) > 0 else ''
        address = spans[1].text if len(spans) > 1 else ''
        # Assemble and report the record for this shop.
        dic = {
            '店名': name,
            '菜系': cuisine,
            '地址': address
        }
        print(dic)
        shop_name.append(name)
def write_list_to_file(lst, filename):
    """Write each element of a list to a text file, one item per line.

    Args:
        lst (list): items to write; each is formatted with str().
        filename (str): output file path (overwritten if it exists).

    Note:
        The file is opened with an explicit UTF-8 encoding. The original
        code relied on the platform default, which on Windows is typically
        GBK/cp1252 and raises UnicodeEncodeError for Chinese shop names.
    """
    with open(filename, 'w', encoding='utf-8') as file:
        for item in lst:
            file.write(f"{item}\n")
def main():
    """Entry point: crawl listing pages 1-50 and save all shop names to disk."""
    # Placeholder dict handed through to work() (interface compatibility).
    dic = {}
    # Accumulator for every shop name found across all pages.
    shop_name = []
    # Forged request headers: a logged-in cookie plus a browser user-agent,
    # required for Dianping to serve the real listing pages.
    headers = {
        'cookie': '_lxsdk_cuid=1911cc498eac8-0a9bb0ce26d609-26001e51-1fa400-1911cc498eac8; _lxsdk=1911cc498eac8-0a9bb0ce26d609-26001e51-1fa400-1911cc498eac8; _hc.v=a2438707-fd8b-2eb1-8201-db141f67078b.1722764532; fspop=test; s_ViewType=10; WEBDFPID=4u9z73uwu0xv5uu3073961156ux948x18088xx35z4y97958x3vz2080-2038124643497-1722764642744QQEGWKOfd79fef3d01d5e9aadc18ccd4d0c95078843; Hm_lvt_1099875750d85512c80065e2d3365ff4=1722764653; cy=1368; cye=enshi; logan_session_token=rx2feyfkqxnl4ya7fkmz; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; qruuid=2a0cdf84-2462-4f0e-b7bc-d65a03f0af19; dplet=751140961a88908057bdd6b5e1d57eed; dper=02029f21c63a9f697bfa9f0dc27d1e5e7061ba00c91d078774d4bcec06fea8d66ff915f6ce449cac5705667bfcb3a59839997cf1aeef6c8cd6b10000000001220000c39dd56319ee64d03a9bbf82be7768125a7a59640826d1c03f3c83e3323b185c53049d32498b4661e305b8ca6e33d386; ll=7fd06e815b796be3df069dec7836c3df; ua=%E7%82%B9%E5%B0%8F%E8%AF%849579146909; ctu=091da7d386a1953d055f05da823b041d3a55d58bde248d60d47bff7b7bf284e8; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1722764661,1722838891; HMACCOUNT=46EFA8661D7DADEA; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1722842928; _lxsdk_s=19121135bb7-405-89b-78b%7C%7C292',
        'host': 'www.dianping.com',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
    }
    # Crawl pages 1 through 50 inclusive.
    for num in range(1, 51):
        url = f'https://www.dianping.com/enshi/ch10/p{num}'
        work(dic, url=url, headers=headers, shop_name=shop_name)
    # Persist the collected names, one per line.
    write_list_to_file(shop_name, r'C:\Users\pc\Desktop\商户名称.txt')


if __name__ == '__main__':
    main()
使用时注意事项:
使用前需根据目标页面调整标签解析逻辑和请求头。在谷歌浏览器中登录大众点评,选好要爬取的内容后,按 F12 打开开发者工具,在 Network 面板中查看自己的请求头(headers)和对应的 URL,替换代码中的相应内容。
效果演示:
原页面如下:
获取的信息:
有帮助请点赞,有问题提评论。