Dianping (大众点评) Restaurant Scraper Example

This article shows how to scrape restaurant listings from Dianping with Python, using the requests and Parsel libraries together with rotating proxy IPs and spoofed User-Agent headers. For each shop it collects the name, review count, average price, ratings, address, and phone number, and writes the results to a CSV file.

# coding=utf-8
import requests
import parsel
import pandas as pd
import random
import time
import os
 
url = 'https://www.dianping.com/shanghai/ch10'   # Shanghai restaurant listing on Dianping
path = os.getcwd()                               # output directory for the CSV
# 1. Proxy IP rotation: free proxies are not guaranteed to stay alive; replace any that stop working
proxy_list = [
    {'http': 'http://183.95.80.102:8080'},
    {'http': 'http://123.160.31.71:8080'},
    {'http': 'http://115.231.128.79:8080'},
    {'http': 'http://166.111.77.32:80'},
    {'http': 'http://43.240.138.31:8080'},
    {'http': 'http://218.201.98.196:3128'},
    {'http': 'http://112.115.57.20:3128'},
    {'http': 'http://121.41.171.223:3128'}
]
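
# Optional helper (not part of the original flow): free proxies die quickly, so a
# minimal sketch like the one below can filter out dead entries before crawling.
# It assumes an echo service such as https://httpbin.org/ip is reachable from your network.
def check_proxies(candidates, test_url='https://httpbin.org/ip', timeout=5):
    """Return only the proxies that complete a test request successfully."""
    alive = []
    for candidate in candidates:
        try:
            requests.get(test_url, proxies=candidate, timeout=timeout)
            alive.append(candidate)
        except requests.RequestException:
            pass
    return alive

# proxy_list = check_proxies(proxy_list)  # uncomment to drop unreachable proxies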

user_list = [
    # Maxthon
    {"User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)"},
    # Firefox
    {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"},
    # Chrome
    {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"},
    # From the author's own machine (Edge)
    {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.57'},
    {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'}
]

headers = {
    # Session cookie: Dianping uses it to check whether an account is logged in
    'Cookie': '_lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_cuid=18d40167073c8-01d299756c99f8-4c657b58-144000-18d40167073c8; _lxsdk=18d40167073c8-01d299756c99f8-4c657b58-144000-18d40167073c8; _hc.v=55780356-8cc5-4648-d436-90606a4d35ca.1706177229; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1706177230; WEBDFPID=11yuu771z2z6501y1353u6z53v8xu72781w598325x597958zz8wzy5v-2021537235767-1706177235767WSSOIYEfd79fef3d01d5e9aadc18ccd4d0c95074153; qruuid=d78311ea-1dc6-4e50-8b6f-e2b2b81f6f50; dper=02021220a687abc4a9fc96b481adb09152ddc3c4127543c27a0efc720bac7c1465eafa2984234b4cf5a917896501409aa7146926aacae35d9eaa00000000ab1d00005cba2a051651b4fe3919cd293e6a2870dc3efba9d5074e517c92a21c3260c3ec57694ea39d11503362e68c9aedb16f65; ll=7fd06e815b796be3df069dec7836c3df; s_ViewType=10; _lxsdk_s=18d40167074-60d-837-c83%7C%7C62; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1706177321',
    # Target host
    'Host': 'www.dianping.com',
    # 2. Referer spoofing: tells the server which page the request "came from"
    'Referer': 'https://www.dianping.com/',
}

# Randomly pick a User-Agent and a proxy IP
user_agent = random.choice(user_list)
headers.update(user_agent)
proxy = random.choice(proxy_list)

# 3. Basic browser impersonation for the first listing page
response = requests.get(url=url, headers=headers, proxies=proxy, timeout=5)

# Parse the first listing page
selector = parsel.Selector(response.text)

# CSS selector: detail-page URL of every shop on this page
href = selector.css('.shop-list ul li .pic a::attr(href)').getall()
print(href)
# Pagination bar, e.g. ['2', '3', '4', '5', '6', '7', '8', '9', '50', '下一页']
page = selector.css('.page ::attr(title)').getall()
print(page)
# The largest page number is the second-to-last entry ('下一页' / "next page" is last)
max_page = int(page[-2])
# Walk through the remaining listing pages and collect every shop URL
for i in range(2, max_page + 1):
    url_1 = url + "/p" + str(i)
    # Rotate the User-Agent and proxy for each listing page as well
    user_agent = random.choice(user_list)
    headers.update(user_agent)
    proxy = random.choice(proxy_list)
    response_1 = requests.get(url=url_1, headers=headers, proxies=proxy, timeout=5)
    selector = parsel.Selector(response_1.text)
    # Detail-page URL of every shop on this page
    href_1 = selector.css('.shop-list ul li .pic a::attr(href)').getall()
    href.extend(href_1)

# Accumulate one record per shop; URLs that fail to parse go into data_lose
data_rows = []
data_lose = []
max_count = len(href)
# Column order of the final table
columns = ['店名', '评论', '人均消费', '口味', '环境', '服务', '地址', '电话', '详情页']
for i, index in enumerate(href):
    try:
        # Pause 0.05 s between requests to lower the blocking risk
        t = 0.05
        time.sleep(t)
        # Rotate the User-Agent and proxy for every detail page
        user_agent = random.choice(user_list)
        headers.update(user_agent)
        proxy = random.choice(proxy_list)
        html_data = requests.get(url=index, headers=headers, proxies=proxy, timeout=5).text
        selector_1 = parsel.Selector(html_data)
        title = selector_1.css('.shop-name::text').get()           # shop name
        count = selector_1.css('#reviewCount::text').get()         # review count
        price = selector_1.css('#avgPriceTitle::text').get()       # average price per person
        item_list = selector_1.css('#comment_score .item::text').getall()  # rating items
        taste = item_list[0].split(': ')[-1]         # taste score
        environment = item_list[1].split(': ')[-1]   # environment score
        service = item_list[-1].split(': ')[-1]      # service score
        address = selector_1.css('#address::text').get()           # address
        tel = selector_1.css('.tel ::text').getall()[-1]           # phone number
        data_rows.append({
            "店名": title, "评论": count, "人均消费": price, "口味": taste,
            "环境": environment, "服务": service, "地址": address, "电话": tel, "详情页": index})
    except Exception:
        data_lose.append(index)
    # Progress report roughly every 10 % of the shops
    if (i + 1) % max(max_count // 10, 1) == 0:
        print('Scraped {} of {} shops ({}%)'.format(i + 1, max_count, int((i + 1) / max_count * 100)))
abs_path = "C:\\pythonDM\\sfDM\\output.csv"
table_data.to_csv(abs_path)
print(data_lose)
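
When the run finishes, output.csv sits in the working directory and data_lose holds every detail page that could not be parsed, typically because the proxy died or Dianping served a verification page. A minimal sketch, assuming you want to retry those pages in a later run, is to persist the failed URLs (the filename data_lose.txt is just an illustrative choice):

# Save the failed detail-page URLs so a later run can retry them
with open(os.path.join(path, 'data_lose.txt'), 'w', encoding='utf-8') as f:
    f.write('\n'.join(data_lose))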