整体思路就是:找到对应的url,url里page处理,发送请求,获取返回的数据,将返回数据存入列表,再写入excel
1.要爬取的商品页面,F12找到获取评论的url,如下图所示

2.可以观察到每个评论页面的url只有Page的值不同,且page=实际页面-1。所以每次请求的url的page需要处理一下


# Build the request URL for a given page index.
def get_url(n):
    """Return the JD product-comment API URL for zero-based page *n*.

    The site's visible page number is n + 1; the API's ``page`` query
    parameter is zero-based, hence the off-by-one in the log message.
    """
    print('爬取第' + str(n+1) + '页')
    # Only the page parameter changes between requests.
    page = '&page=' + str(n)
    url = 'https://api.m.jd.com/?appid=item-v3&functionId=pc_club_productPageComments&client=pc&clientVersion=1.0.0&t=1704506228099&loginType=3&uuid=181111935.1768046396.1704416494.1704470440.1704500661.9&productId=100005856331&score=0&sortType=6'+page+'&pageSize=10&isShadowSku=0&rid=0&fold=1&bbtf=&shield='
    print('京东搜索页面链接:' + url)
    return url
!!如果要爬取别的商品,只需要将这里的url替换掉,page处理一样
3.完整代码如下:
import requests
import pandas as pd
# One row per scraped comment, accumulated across all pages.
items = []

# Build the request URL for a given page index.
def get_url(n):
    """Return the JD product-comment API URL for zero-based page *n*.

    The site's visible page number is n + 1; the API's ``page`` query
    parameter is zero-based, hence the off-by-one in the log message.
    """
    print('爬取第' + str(n+1) + '页')
    # Only the page parameter changes between requests.
    page = '&page=' + str(n)
    url = 'https://api.m.jd.com/?appid=item-v3&functionId=pc_club_productPageComments&client=pc&clientVersion=1.0.0&t=1704506228099&loginType=3&uuid=181111935.1768046396.1704416494.1704470440.1704500661.9&productId=100005856331&score=0&sortType=6'+page+'&pageSize=10&isShadowSku=0&rid=0&fold=1&bbtf=&shield='
    print('京东搜索页面链接:' + url)
    return url
if __name__ == '__main__':
    # Scrape each comment page in turn; widen the range to fetch more pages.
    for page in range(0, 1):
        # Browser-like User-Agent so the API does not reject the request.
        header = {'User-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 SLBrowser/8.0.1.4031 SLBChan/105'}
        url = get_url(page)
        # timeout keeps the script from hanging forever on a stalled connection
        response = requests.get(url=url, headers=header, timeout=10)
        # Named `payload` (not `json`) to avoid shadowing the stdlib module name.
        payload = response.json()
        data = payload['comments']
        for t in data:
            content1 = t['content']
            time = t['creationTime']
            # Optional fields fall back to the placeholder string '空'.
            location = t.get('location', '空')
            user = t['nickname']
            color = t['productColor']
            score = t['score']
            buyCount = t.get('extMap', {}).get('buyCount', '空')
            # First seller reply, if any; otherwise a fixed placeholder.
            replies = t.get('replies') or []
            if replies and 'content' in replies[0]:
                content2 = replies[0]['content']
            else:
                content2 = '商家没有回复'
            item = [user, color, buyCount, score, content1, time, location, content2]
            items.append(item)
    # Write all collected rows to Excel once, after every page is scraped.
    df = pd.DataFrame(items, columns=['用户名','购买颜色','buyCount','评价等级','评论内容', '发布时间','地址','商家回复'])
    df.to_excel(r'D:\mycode\information_jd.xlsx', index=False)
4.运行结果:

1万+

被折叠的 条评论
为什么被折叠?



