Web scraping: collecting Dianping (大众点评) merchant info and beating its font-based text obfuscation

Scraping the data

import requests
from lxml import etree
import time
import json
import pandas as pd

# Step 1: collect shop names and IDs from the category listing pages
result = []
for page in range(1, 51):
    print(page)
    url = r'http://www.dianping.com/haikou/ch10/p{page}'.format(page=page)
    headers = {
    "Cache-Control": "max-age=0",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cookie": "navCtgScroll=1; _lxsdk_cuid=17a1dc4c18e1d-0a7ec7a9c3737a-45410429-240000-17a1dc4c18fc8; _hc.v=028ffd92-97cc-7fa9-e64f-74408f8c9421.1623997072; s_ViewType=10; aburl=1; _dp.ac.v=771d9bb8-a7f2-4d76-8bf3-a13639ae2217; ctu=6f9ef9a624f2bb02349e3412ec41e048b9d1e204d4b5ace5828e7fef8adc319b; uuid=FABC533C9DFB7697EA69101F0E9BA3433B4C21FA22195EF9294F2EB256C50DBD; iuuid=FABC533C9DFB7697EA69101F0E9BA3433B4C21FA22195EF9294F2EB256C50DBD; _lxsdk=FABC533C9DFB7697EA69101F0E9BA3433B4C21FA22195EF9294F2EB256C50DBD; _ga=GA1.2.2029444719.1624263718; ua=dpuser_8723703376; fspop=test; cy=23; cye=haikou; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1638601245; dper=92b45344e065dccd73f49a5f35c3199caf0700028fb482052cbdc6dd828e7788da6496adb22783938714abee41ab3b737cb23486307598f8c7321ca1afabdc689d54c6084be55dd55ade40bc9d3270e87f91e3e9a0e3df9456baff3b84e57454; ll=7fd06e815b796be3df069dec7836c3df; uamo=18508921021; dplet=51f7f8eb52ef4c18661bd3a1955e648f; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1638603180; _lxsdk_s=17d843ebc0f-d5-044-030%7C%7C193"}
    response = requests.get(url=url,headers=headers)
    html = etree.HTML(response.text)
    shop_id = html.xpath('//*[@id="shop-all-list"]//ul//li//a[@data-click-name="shop_title_click"]/@data-shopid')
    shop_name = html.xpath('//*[@id="shop-all-list"]//ul//li//a[@data-click-name="shop_title_click"]/@title')
    for sid, name in zip(shop_id, shop_name):
        info = {}
        info['name'] = name
        info['id'] = sid
        result.append(info)
    time.sleep(2)

shop_list = []

# Approach 1: the API endpoint (kept commented out; it returns cleaner data but is heavily rate-limited)
# def str_replace(x):
#     x = x.replace('<e class="address">',' ')
#     x = x.replace('</e>',' ')
#     x = x.replace('<d class="num">',' ')
#     x = x.replace('</d>',' ')
#     x = x.replace('&#x',' ')
#     x = x.replace(';',' ')
#     return x

# for i in result:
#     print(i)
#     url = "http://www.dianping.com/ajax/json/shopDynamic/basicHideInfo?shopId={shopid}&_token=eJx1j09rg0AQxb%2FLnBd3x7%2BrN0MgmMZiqw2lIQer4i5pXHFtbSn97p1AcuihMPB7%2B%2BbxmP2GKWshQSGEjww%2BugkSQEc4ITCYLW1CT4bCC%2BKAwKD544URhgxep%2F0akgP6gWSxxOPFeSTj4ApfMsTAPbKbRtKuT3NJZRQCNc9jwvmyLE6r62HUQ%2B805sytMiPf1F6RP3TyxZStaaOJjvovr2p9Mu%2B8USiAys8VlRNPV9ZXzrd3Th%2BlOqv7gVS3%2FazubJTt%2BCq3z%2FviPo2W9dMX6twUlX0r8jIu035l51Tt4OcXMJBX6g%3D%3D&tcv=u2zfioyo9j&uuid=028ffd92-97cc-7fa9-e64f-74408f8c9421.1623997072&platform=1&partner=150&optimusCode=10&originUrl=http%3A%2F%2Fwww.dianping.com%2Fshop%2F{shopid}".format(shopid=i.get('id')) 
#     headers = {
#         "Accept": "application/json, text/javascript, */*; q=0.01",
#         "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36",
#         "X-Requested-With": "XMLHttpRequest",
#         "Referer": "http://www.dianping.com/shop/Ga3PMQe8ZoSdod7r",
#         "Accept-Language": "zh-CN,zh;q=0.9",
#         "Cookie": "fspop=test; cy=23; cye=haikou; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_cuid=17d85390949c8-060fddc5b4ecfa-5d11371e-240000-17d8539094ac8; _lxsdk=17d85390949c8-060fddc5b4ecfa-5d11371e-240000-17d8539094ac8; _hc.v=6f6ee73a-db19-6427-de36-f4710afec8b8.1638617648; s_ViewType=10; ctu=88d747e0aa8639b8b6451d577574db552ced28f278254e1a47e8525faff77616; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1638617648,1638620196,1638787904,1638842852; dper=92b45344e065dccd73f49a5f35c3199c3d907477f6b3e9fadd830d19d2904701a3018335efdd719657fc51c0b35a95e16feb7a78856989abfd30e64925a98770bdc6d2a4853ea19f7a87d9d0c877b9efe52b4a17c9b91206d718e25a8be391b4; ll=7fd06e815b796be3df069dec7836c3df; ua=dpuser_8723703376; uamo=18508921021; dplet=87f469cd05e56b09fb2552b4f14df7f5; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1638842889; _lxsdk_s=17d92a5b4f6-954-2fd-bcf%7C%7C66"
#     }
#     # The request occasionally raises an odd exception mid-crawl, so it is wrapped in a try/except
#     try:
#         response = json.loads(requests.get(url=url,headers=headers).text).get('msg').get('shopInfo')
#     except json.JSONDecodeError:
#         print('failed')
#     else:
#         if response:
#             print('success')
#             info={}
#             info['shopname'] = response.get('shopName')
#             info['address'] = str_replace(response.get('address'))
#             info['phone'] = str_replace(response.get('phoneNo'))
#             info['id'] = response.get('shopId')
#             print(info)
#             shop_list.append(info)
#         else:
#             print('failed')
#     time.sleep(5)

# Approach 2: the detail pages (barely rate-limited)
for i in result:
    url = r'https://www.dianping.com/shop/{id}'.format(id=i.get('id'))
    headers = {
			    "Cache-Control": "max-age=0",
			    "Upgrade-Insecure-Requests": "1",
			    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
			    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
			    "Accept-Language": "zh-CN,zh;q=0.9",
			    "Cookie": "fspop=test; cy=23; cye=haikou; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_cuid=17d85390949c8-060fddc5b4ecfa-5d11371e-240000-17d8539094ac8; _lxsdk=17d85390949c8-060fddc5b4ecfa-5d11371e-240000-17d8539094ac8; _hc.v=6f6ee73a-db19-6427-de36-f4710afec8b8.1638617648; s_ViewType=10; ctu=88d747e0aa8639b8b6451d577574db552ced28f278254e1a47e8525faff77616; dper=92b45344e065dccd73f49a5f35c3199c3d907477f6b3e9fadd830d19d2904701a3018335efdd719657fc51c0b35a95e16feb7a78856989abfd30e64925a98770bdc6d2a4853ea19f7a87d9d0c877b9efe52b4a17c9b91206d718e25a8be391b4; ua=dpuser_8723703376; uamo=18508921021; ll=7fd06e815b796be3df069dec7836c3df; dplet=c6b2f0bba2d39dfffe3dbbdded1c9693; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1638787904,1638842852,1638846334,1638855073; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1638855073; _lxsdk_s=17d935fd8bf-a00-3db-efa%7C%7C20"}
    response = requests.get(url=url,headers=headers).text
    html = etree.HTML(response)
    # Join the text nodes into a single string so the value survives the round trip through Excel;
    # the obfuscated characters are private-use code points that get decoded later via the font
    i['address'] = ''.join(html.xpath('//span[@id="address"]//text()')).strip()
    i['phone'] = ''.join(html.xpath('//p[@class="expand-info tel"]//text()')).strip()
    print(i)
    shop_list.append(i)
    time.sleep(3)

file = pd.DataFrame(shop_list)
file.to_excel(r'C:\Users\Administrator\Desktop\店铺.xlsx',index=False)
  • Two approaches are shown here. The API endpoint returns cleaner data, but its anti-scraping limits are severe: the crawl gets blocked after only a short run. I couldn't be bothered to rotate UAs and IPs, and later discovered the detail pages, which are basically unrestricted.
  • The data returned by the API contains CSS markup. The cleanup here does not extract with regular expressions; instead a function simply replaces everything unwanted with spaces (a regex-based alternative is sketched below).
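For reference, the same cleanup can be done with a short regex pass. This is a minimal sketch assuming the API wraps obfuscated characters in <e class="address">/<d class="num"> tags and encodes them as &#x....; entities, which is what the replacements in str_replace suggest:

import re

def str_clean(x):
    # Strip the <e class="address">...</e> and <d class="num">...</d> wrappers
    x = re.sub(r'</?[ed][^>]*>', ' ', x)
    # Turn entities like &#xebc2; into bare hex codes surrounded by spaces,
    # the same token format str_replace produces for the decoders below
    x = re.sub(r'&#x([0-9a-fA-F]{4});', r' \1 ', x)
    return x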

Decoding the obfuscated text

from fontTools.ttLib import TTFont
import pandas as pd

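# Character table matching the font's glyph order (copied from another user's post; see the notes at the end of this section)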
word = '1234567890店中美家馆小车大市公酒行国品发电金心业商司超生装园场食有新限天面工服海华水房饰城乐汽香部利子老艺花专东肉菜学福饭人百餐茶务通味所山区门药银农龙停尚安广鑫一容动南具源兴鲜记时机烤文康信果阳理锅宝达地儿衣特产西批坊州牛佳化五米修爱北养卖建材三会鸡室红站德王光名丽油院堂烧江社合星货型村自科快便日民营和活童明器烟育宾精屋经居庄石顺林尔县手厅销用好客火雅盛体旅之鞋辣作粉包楼校鱼平彩上吧保永万物教吃设医正造丰健点汤网庆技斯洗料配汇木缘加麻联卫川泰色世方寓风幼羊烫来高厂兰阿贝皮全女拉成云维贸道术运都口博河瑞宏京际路祥青镇厨培力惠连马鸿钢训影甲助窗布富牌头四多妆吉苑沙恒隆春干饼氏里二管诚制售嘉长轩杂副清计黄讯太鸭号街交与叉附近层旁对巷栋环省桥湖段乡厦府铺内侧元购前幢滨处向座下臬凤港开关景泉塘放昌线湾政步宁解白田町溪十八古双胜本单同九迎第台玉锦底后七斜期武岭松角纪朝峰六振珠局岗洲横边济井办汉代临弄团外塔杨铁浦字年岛陵原梅进荣友虹央桂沿事津凯莲丁秀柳集紫旗张谷的是不了很还个也这我就在以可到错没去过感次要比觉看得说常真们但最喜哈么别位能较境非为欢然他挺着价那意种想出员两推做排实分间甜度起满给热完格荐喝等其再几只现朋候样直而买于般豆量选奶打每评少算又因情找些份置适什蛋师气你姐棒试总定啊足级整带虾如态且尝主话强当更板知己无酸让入啦式笑赞片酱差像提队走嫩才刚午接重串回晚微周值费性桌拍跟块调糕'

font = TTFont(r'C:\Users\Administrator\Desktop\地址.woff')
phid = font.getGlyphOrder()[2:]  # skip the two leading placeholder glyphs
address_dict = {}
for name, char in zip(phid, word):
    # Glyph names look like 'uniebc2' (possibly with uppercase hex); drop the
    # 'uni' prefix and lowercase the rest so lookups are case-insensitive
    address_dict[name[3:].lower()] = char

font1 = TTFont(r'C:\Users\Administrator\Desktop\数字.woff')
phid1 = font1.getGlyphOrder()[2:]
num_dict = {}
for name, char in zip(phid1, word):
    num_dict[name[3:].lower()] = char
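A quick sanity check of the mapping is to print a few pairs; each glyph name should line up with one character from word:

# Peek at the first few glyph-name -> character pairs
for name, char in list(zip(phid, word))[:5]:
    print(name, '->', char)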

# Decoders for the API data (commented out to match the API scraper above)
# def func1(x):
#     address = x.split(' ')
#     info = []
#     for i in address:
#         if address_dict.get(i):
#             info.append(address_dict.get(i))
#         else:
#             if num_dict.get(i):
#                 info.append(num_dict.get(i))
#             else:
#                 info.append(i)
#     return ''.join(info).strip()

# def func2(x):
#     num = x.split(' ')
#     info = []
#     for i in num:
#         if num_dict.get(i):
#             info.append(num_dict.get(i))
#         else:
#             info.append(i)
#     return ''.join(info).strip()


# Decoders for the detail-page data
def func1(x):
    info = []
    for ch in x:
        # repr('\uebc2') gives "'\\uebc2'", so slicing [3:7] recovers the hex code 'ebc2'
        k = repr(ch)[3:7]
        if address_dict.get(k):
            info.append(address_dict.get(k))
        elif num_dict.get(k):
            info.append(num_dict.get(k))
        else:
            info.append(ch)
    return ''.join(info).strip()

def func2(x):
    info = []
    for ch in x:
        # same repr() trick: the scraped string contains private-use characters
        # like \uebc2 that cannot be matched against the glyph names directly
        k = repr(ch)[3:7]
        if num_dict.get(k):
            info.append(num_dict.get(k))
        else:
            info.append(ch)
    return ''.join(info).strip()

data = pd.read_excel(r'C:\Users\Administrator\Desktop\店铺.xlsx',engine='openpyxl')
data['address'] = data['address'].map(func1)
data['phone'] = data['phone'].map(func2)
data.to_excel(r'C:\Users\Administrator\Desktop\大众点评.xlsx',index=False)
  • The key to decoding the text is building a mapping dict that pairs the glyph names from getGlyphOrder with their corresponding characters. The character table here was copied from another user online; typing it out by hand would take far too long.
  • Note that the strings extracted from the detail pages contain escape sequences like \uebc2. Python cannot look these characters up directly, hence the extra repr() handling in func1/func2 (a cleaner ord()-based alternative is sketched below).
  • The font files change over time, so every crawl requires downloading the current files first; a sketch for automating that follows the next snippet.
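The repr() slicing can also be avoided entirely: ord() returns the character's code point, and formatting it as four lowercase hex digits yields the same lookup key. A minimal sketch against the dicts built above:

def decode_char(ch):
    # '\uebc2' -> 60354 -> 'ebc2'; plain characters fall through unchanged
    k = format(ord(ch), '04x')
    return address_dict.get(k) or num_dict.get(k) or ch

''.join(decode_char(c) for c in text).strip() then does the work of func1 and func2 in a single pass.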
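Fetching the fonts can in principle be automated too. The sketch below is only an outline built on assumptions rather than Dianping's confirmed page structure: it assumes the detail page links a stylesheet whose rules reference the .woff files through protocol-relative url(...) entries.

import re
import requests

def download_fonts(css_url, headers):
    # Fetch the anti-scraping stylesheet and save every .woff file it references
    css = requests.get(css_url, headers=headers).text
    for path in set(re.findall(r'url\("?(//[^")]+\.woff)"?\)', css)):
        font_url = 'https:' + path
        filename = font_url.rsplit('/', 1)[-1]
        with open(filename, 'wb') as f:
            f.write(requests.get(font_url, headers=headers).content)
        print('saved', filename)

The css_url would come from a <link> tag in the detail-page HTML, and working out which downloaded file is 地址.woff and which is 数字.woff still means inspecting the class names in the CSS.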