# -*- coding:utf-8 -*-
# Scrapes Meituan's city sales leaderboards. The cityId and boardType query
# parameters in the URL below select the city and the ranking category; to
# target other cities, change the ids listed in CITY_NAMES below.
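# For example, self.url.format(20, 1) produces:
#   https://mobilenext-web.meituan.com/api/rankingList/getSaleBoardList?cityId=20&boardType=1&cateId=10000&limit=10
# and, per the sample response kept in the original source, the API replies
# with JSON shaped like:
#   {"totalSize":50,"saleBoardPoiList":[{"id":0,"name":"","weekSaleCount":"周销量 0",
#    "score":0,"avgPrice":0,...}],"boardDigest":null}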
import csv
import json
import random
import time

import requests
class MeituanSpider(object):
    # cityId -> city name for the ids crawled in main(). Other ids noted in
    # the original source: 118 香港, 165 白山, 170 鹤岗.
    CITY_NAMES = {20: "广州", 57: "武汉", 105: "哈尔滨", 66: "沈阳", 59: "成都",
                  55: "南京", 42: "西安", 116: "长春", 96: "济南", 50: "杭州"}
    # boardType -> leaderboard category name.
    BOARD_NAMES = {1: "火锅", 2: "自助餐", 3: "烧烤龙虾", 4: "地方菜",
                   5: "异国料理", 6: "小吃快餐", 7: "甜点饮品", 8: "蛋糕"}

    def __init__(self):
        self.url = "https://mobilenext-web.meituan.com/api/rankingList/getSaleBoardList?cityId={}&boardType={}&cateId=10000&limit=10"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1"}
    def get_page(self, url, i, j):
        # Fetch one leaderboard; the API returns a JSON document of the form
        # {"totalSize":N,"saleBoardPoiList":[...],"boardDigest":null}.
        res = requests.get(url=url, headers=self.headers)
        res.encoding = "utf-8"
        self.parse_page(res.text, i, j)
    def parse_page(self, html, i, j):
        # Parse the JSON body directly instead of slicing the raw string at
        # fixed offsets (html[35:-20]), which broke on empty responses.
        data = json.loads(html)
        for item in data.get("saleBoardPoiList") or []:
            poi_id = item["id"]
            name = item["name"]
            weekSaleCount = item["weekSaleCount"]
            score = item["score"]
            avgPrice = item["avgPrice"]
            cateName = item["cateName"]
            areaName = item["areaName"]
            distance = item["distance"]
            rank = item["rank"]
            frontImg = item["frontImg"]
            oneSentence = item["oneSentence"]
            # Map the numeric ids back to human-readable labels.
            city = self.CITY_NAMES.get(i, str(i))
            board = self.BOARD_NAMES.get(j, str(j))
            # Append one row per shop; the file is opened in append mode, so
            # repeated runs accumulate rows.
            with open('meituan.csv', 'a', newline='', encoding='utf-8') as f:
                csv.writer(f).writerow(
                    [city, board, poi_id, name, weekSaleCount, score, avgPrice,
                     cateName, areaName, distance, rank, frontImg, oneSentence])
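
    # A minimal sketch, not part of the original script: optionally call
    # spider.ensure_header() once before main() to give the CSV a labeled
    # header row (the column names here are assumptions matching the row
    # order written above). f.tell() is 0 only while the file is still
    # empty, so re-runs will not duplicate the header.
    def ensure_header(self):
        with open('meituan.csv', 'a', newline='', encoding='utf-8') as f:
            if f.tell() == 0:
                csv.writer(f).writerow(
                    ["city", "board", "id", "name", "weekSaleCount", "score",
                     "avgPrice", "cateName", "areaName", "distance", "rank",
                     "frontImg", "oneSentence"])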
    def main(self):
        for i in self.CITY_NAMES:  # one crawl per city id
            for j in self.BOARD_NAMES:  # one crawl per leaderboard category
                time.sleep(random.randint(3, 5))  # throttle between requests
                url = self.url.format(i, j)
                print(url)
                self.get_page(url, i, j)
if __name__ == '__main__':
    start = time.time()
    spider = MeituanSpider()
    spider.main()
    end = time.time()
    print("Elapsed time: %.2fs" % (end - start))
Meituan popularity leaderboard ("美团人气榜")

The script above implements a MeituanSpider class that crawls Meituan's sales
leaderboards for a set of cities. It swaps the cityId and boardType parameters
in the API URL, fetches each page with the requests library, parses the JSON
response, and appends fields such as id, name, and weekSaleCount to a CSV file.
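To sanity-check the output, a minimal read-back sketch (assuming the column
order written by parse_page above; only the file name meituan.csv comes from
the script itself):

    import csv
    with open('meituan.csv', newline='', encoding='utf-8') as f:
        for row_number, row in enumerate(csv.reader(f)):
            if row_number >= 5:
                break
            print(row)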