# Reference notes for crawling DZDP (Dianping) city mall/shop names and addresses
# -*- coding:utf-8 -*-
import csv
import json
import re
import time
import requests
from lxml import etree
from fontTools.ttLib import TTFont
from parsel import Selector
from selenium import webdriver
# Total number of listing pages to crawl
page = 3
# Template for the listing-page URLs; {page} is filled in per page
index_urls = 'http://www.dianping.com/shijiazhuang/ch20/g119p{page}'
# Dict holding the record (name/address) currently being scraped
item = {}
# Glyph lookup string: character at position i corresponds to glyph i in the woff font's glyph order
word_string = ' .1234567890店中美家馆小车大市公酒行国品发电金心业商司超生装园场食有新限天面工服海华水房饰城乐汽香部利子老艺花专东肉菜学福饭人百餐茶务通味所山区门药银农龙停尚安广鑫一容动南具源兴鲜记时机烤文康信果阳理锅宝达地儿衣特产西批坊州牛佳化五米修爱北养卖建材三会鸡室红站德王光名丽油院堂烧江社合星货型村自科快便日民营和活童明器烟育宾精屋经居庄石顺林尔县手厅销用好客火雅盛体旅之鞋辣作粉包楼校鱼平彩上吧保永万物教吃设医正造丰健点汤网庆技斯洗料配汇木缘加麻联卫川泰色世方寓风幼羊烫来高厂兰阿贝皮全女拉成云维贸道术运都口博河瑞宏京际路祥青镇厨培力惠连马鸿钢训影甲助窗布富牌头四多妆吉苑沙恒隆春干饼氏里二管诚制售嘉长轩杂副清计黄讯太鸭号街交与叉附近层旁对巷栋环省桥湖段乡厦府铺内侧元购前幢滨处向座下臬凤港开关景泉塘放昌线湾政步宁解白田町溪十八古双胜本单同九迎第台玉锦底后七斜期武岭松角纪朝峰六振珠局岗洲横边济井办汉代临弄团外塔杨铁浦字年岛陵原梅进荣友虹央桂沿事津凯莲丁秀柳集紫旗张谷的是不了很还个也这我就在以可到错没去过感次要比觉看得说常真们但最喜哈么别位能较境非为欢然他挺着价那意种想出员两推做排实分间甜度起满给热完格荐喝等其再几只现朋候样直而买于般豆量选奶打每评少算又因情找些份置适什蛋师气你姐棒试总定啊足级整带虾如态且尝主话强当更板知己无酸让入啦式笑赞片酱差像提队走嫩才刚午接重串回晚微周值费性桌拍跟块调糕'
# CSS file that references the downloadable woff font files
css_url = 'http://s3plus.meituan.net/v1/mss_0a06a471f9514fc79c981b5466f56b91/svgtextcss/f0fc7d7e8a364a97fad4a4a07a41b376.css'
# Login page (QR-code scan) that the selenium browser is pointed at
login_url = 'https://account.dianping.com/login?redir=http%3A%2F%2Fwww.dianping.com%2Fshijiazhuang%2Fch20%2Fg119'
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36'
    # 'cookie': '_lxsdk_cuid=17ee3d5db55c8-02aaa46d8e08ed-576153e-100200-17ee3d5db56c8; _lxsdk=17ee3d5db55c8-02aaa46d8e08ed-576153e-100200-17ee3d5db56c8; _hc.v=ec50cc35-322b-00db-9dcb-a9b45e1c69c3.1644499951; cy=24; cye=shijiazhuang; s_ViewType=10; ctu=7c22490a62ab72d170b39e4650c5b5776777eaaf4f8361313c0fb028f8ec5b3f; ua=dpuser_8950845605; fspop=test; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1645183586,1645456020,1645627547,1646209259; dplet=16f5285e739fab1ed48b70c0b4732c23; dper=90abba06c6ab80d3bb13255bb130e10a36a12b7743996235db369e1dce0dc088e395900b06ddd3a09e7d0630fb657efe150f93b1b2e5b311beedede1c5fb748bdd9503c04e07b3b6e8ebf79b9bfd40be0804080ba991ca2d95912baf7d85e3fb; ll=7fd06e815b796be3df069dec7836c3df; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1646209729; _lxsdk_s=17f49b7d35b-f17-077-3e3%7C%7C144'
}
# 1. Crawl every listing page and dispatch each shop link to the detail parser.
def parse_index(page):
    """Iterate listing pages 1..page, record each shop's name, and parse its detail URL."""
    global item
    for page_no in range(1, page + 1):
        # Fill the page number into the listing-url template.
        list_url = index_urls.format(page=page_no)
        # requests hands back raw bytes; the site serves UTF-8, so decode
        # explicitly into a unicode string before parsing.
        list_html = session.get(list_url).content.decode('utf-8')
        page_sel = Selector(text=list_html)
        # One matched node per shop: the title <a> under #shop-all-list.
        for anchor in page_sel.css('#shop-all-list .tit a'):
            # Shop name comes from the title attribute ...
            item['shop_name'] = anchor.css('a::attr(title)').getall()[0]
            # ... and the detail-page link from href.
            shop_link = anchor.css('a::attr(href)').getall()[0]
            # Hand the detail page over for address extraction.
            parse_detail(shop_link)
# 2. Parse one shop detail page and extract its (font-obfuscated) address.
def parse_detail(url):
    """Fetch a shop detail page, decode the custom-font address, store it in item, and save."""
    global item
    address = ''
    # 2.1 Fetch the shop page.
    response_detail = session.get(url)
    # 2.2 Decode explicitly: requests yields raw bytes, the page is UTF-8,
    # and Python strings are unicode, so convert once at the boundary.
    html = response_detail.content.decode('utf-8')
    # 2.3 Before parsing, rewrite numeric character references.
    # Parsing would otherwise turn an entity like '&#xd773;' into the raw
    # unicode char '\ud773'; rewriting it to the literal text 'unid773'
    # keeps the glyph name visible for lookup in the font's glyph order.
    # +? : one or more repetitions, matching as few as possible.
    html = re.sub(r'&#x(\w+?);', r'uni\1', html)
    # 2.4 Parse the rewritten document.
    selector = Selector(text=html)
    # 2.5 Build a mapping from each address fragment (direct child text of
    # the #address children) to that node's CSS class; the class later tells
    # get_word() which woff font to decode with.
    # /text() takes only direct child text nodes (unlike //text()).
    node_text = selector.xpath('//*[@id="address"]/*/text()').getall()
    class_name = selector.xpath('//*[@id="address"]/*/@class').getall()
    node_dict = dict(zip(node_text, class_name))
    # 2.6 Collect every text fragment under the address element.
    address_content = selector.css('#address *::text').getall()
    # 2.6.2 Walk the fragments and decode the obfuscated ones.
    for add_char in address_content:
        # 2.6.3 Fragments starting with 'uni' are glyph names produced by the
        # rewrite in step 2.3 and need font decoding ...
        if add_char.startswith('uni'):
            # 2.6.4 ... pass the node's CSS class and the glyph name along.
            address += str(get_word(node_dict[add_char], add_char))
        else:
            # 2.6.5 ... everything else is plain text, appended as-is.
            address += str(add_char)
    # 2.6.6 Store the assembled address on the shared record.
    item['shop_address'] = address
    # 2.7 Persist the finished record.
    save(item)
# 2.6.4.2 Map an obfuscated glyph name to its real character via the woff font.
def get_word(name, key):
    """Decode one obfuscated character.

    name: CSS class of the node the glyph came from (selects which woff to use).
    key:  glyph name such as 'unid773' produced by parse_detail's entity rewrite.
    Returns the plain character looked up in the module-level word_string.
    Raises ValueError for an unknown CSS class (the original code hit an
    UnboundLocalError there because woff_name was never assigned).
    """
    # Both classes currently share one font file; the branch is kept so the
    # names can diverge again when Dianping rotates its fonts.
    if name == 'address':
        woff_name = '74ba1cb2.woff'
    elif name == 'num':
        woff_name = '74ba1cb2.woff'
    else:
        raise ValueError('unknown font class: %r' % (name,))
    # Parse the woff with fontTools (pip install fonttools).
    font = TTFont(woff_name)
    # Glyph order: the i-th glyph corresponds to the i-th char of word_string.
    glyph_names = font.getGlyphOrder()
    word_index = glyph_names.index(key)
    return word_string[word_index]
# 2.8 Persist one record.
def save(item):
    """Append one scraped record's values as a CSV row to dzdp.csv.

    item: dict whose values (shop name, shop address) become the row cells.
    """
    # newline='' stops the csv module from emitting blank interleaved lines on
    # Windows: csv.writer performs its own \r\n line-ending handling.
    with open('dzdp.csv', 'a', encoding='utf-8', newline='') as file:
        # Initialize the writer on the open file handle.
        writer = csv.writer(file)
        writer.writerow(item.values())
# Download the font files; only needs to run once per font rotation.
def save_woff(url):
    """Download every .woff font referenced by the given CSS file into the CWD.

    url: address of the svgtextcss stylesheet that embeds the font URLs.
    Side effects: writes each font to a file named after its URL basename.
    """
    response_woff = requests.get(url)
    content_woff = response_woff.content.decode('utf-8')
    # Match any .woff URL on the s3plus CDN instead of one hard-coded bucket
    # hash (the old pattern broke whenever Dianping rotated the bucket path);
    # dots are escaped so '.' no longer matches arbitrary characters.
    woff_list = re.findall(r'url\("(//s3plus\.meituan\.net/[^"]+?\.woff)"\)',
                           content_woff)
    # The CSS uses protocol-relative URLs; prepend the scheme before fetching.
    for woff_url in ('https:' + path for path in woff_list):
        woff_content = requests.get(woff_url).content
        # Save under the font's own basename, e.g. 74ba1cb2.woff
        filepath = woff_url.split('/')[-1]
        with open(filepath, 'wb') as f:
            f.write(woff_content)
# Use selenium to log in interactively and hand the resulting cookies to requests.
def get_cookies(url):
    """Open Chrome on the login page, wait for a manual QR-code login,
    and return the session cookies as a {name: value} dict.

    url: login page to open.
    Side effects: dumps the raw cookie list to cookies.txt.
    """
    # Options intended to keep Dianping from detecting selenium automation.
    options = webdriver.ChromeOptions()
    options.add_experimental_option("excludeSwitches", ["enable-automation"])
    options.add_experimental_option('useAutomationExtension', False)
    browser = webdriver.Chrome(options=options)
    try:
        # Hide navigator.webdriver before any page script runs (anti-bot measure;
        # effectiveness against Dianping unverified).
        browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """
            Object.defineProperty(navigator, 'webdriver', {
            get: () => undefined
            })
            """
        })
        browser.maximize_window()
        browser.get(url)
        # Give the user time to scan the QR code and finish logging in.
        time.sleep(12)
        with open('cookies.txt', 'w') as f:
            f.write(json.dumps(browser.get_cookies()))
        # Re-add the cookies without their 'expiry' field — another
        # anti-detection step carried over from the original recipe.
        with open('cookies.txt', 'r') as f:
            cookie_list = json.load(f)
        for cookie in cookie_list:
            if 'expiry' in cookie:
                del cookie['expiry']
            browser.add_cookie(cookie)
        cookies = browser.get_cookies()
    finally:
        # quit() (not close()) also shuts down the chromedriver process, so a
        # failure anywhere above no longer leaks a browser instance.
        browser.quit()
    return {c['name']: c['value'] for c in cookies}
if __name__ == '__main__':
    # Download the font files once; this call can be commented out afterwards.
    save_woff(css_url)
    # Create the session shared (as a module global) by parse_index/parse_detail.
    session = requests.session()
    session.headers.clear()
    session.headers.update(headers)
    # Log in via the browser (QR-code scan) and reuse those cookies for requests.
    session.cookies.update(get_cookies(login_url))
    parse_index(page)
# Query results: