# NOTE: product price and review counts are loaded dynamically via JavaScript,
# so they cannot be read from the page HTML; fetch these JSON endpoints instead:
#   price:   https://p.3.cn/prices/mgets?skuIds=J_<product id>
#   reviews: https://club.jd.com/comment/productCommentSummaries.action?referenceIds=<product id>
import csv
import json

import requests
from lxml import etree
#ctrl + F 局部查找
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
}
def Get_goods_url(url, headers):
    """Fetch one JD search-result page and return absolute product URLs.

    Parameters
    ----------
    url : str
        URL of a search-result page (https://search.jd.com/Search?...).
    headers : dict
        HTTP headers to send (must include a browser-like User-Agent).

    Returns
    -------
    list[str]
        Absolute product-page URLs found in the ``#J_goodsList`` grid.
    """
    res = requests.get(url, headers=headers)
    html = etree.HTML(res.content.decode('utf-8'))
    # The hrefs in the result grid are protocol-relative ("//item.jd.com/..."),
    # so prefix the scheme.  NB: do not rebind the `url` parameter here (the
    # original loop shadowed it).
    hrefs = html.xpath('//*[@id="J_goodsList"]/ul/li/div/div[@class="p-img"]/a/@href')
    return ['https:' + href for href in hrefs]
#双肩包
def Get_goods_text(url):
    """Scrape one JD product page plus its price/review JSON endpoints.

    Parameters
    ----------
    url : str
        Product-page URL of the form ``https://item.jd.com/<id>.html``.

    Returns
    -------
    dict
        Keys: ``Brand``, ``Price``, ``CommentCountStr``, ``GoodCountStr``,
        ``GoodRate``.

    Raises
    ------
    ValueError
        If the page contains no non-blank product title (the original code
        crashed with an UnboundLocalError in that case).
    """
    # Extract the numeric sku id from ".../<id>.html" — more robust than the
    # original fixed-offset slice url[20:-5], and avoids shadowing builtin id().
    sku_id = url.rstrip('/').rsplit('/', 1)[-1].split('.', 1)[0]

    res = requests.get(url, headers=headers)
    html = etree.HTML(res.content.decode('utf-8'))
    brands = html.xpath('//div/div/div/div[@class="sku-name"]/text()')

    # Price and review counts are rendered client-side; query the JSON APIs.
    price_url = 'https://p.3.cn/prices/mgets?skuIds=J_' + sku_id
    price = json.loads(requests.get(price_url, headers=headers).text)
    comments_url = 'https://club.jd.com/comment/productCommentSummaries.action?referenceIds=' + sku_id
    comments = json.loads(requests.get(comments_url, headers=headers).text)
    summary = comments['CommentsCount'][0]  # hoisted: used for three fields below

    # The first non-blank text node under .sku-name is the product title.
    brand = next((b.strip() for b in brands if b.strip()), None)
    if brand is None:
        raise ValueError('no product title found at ' + url)

    goods = {
        'Brand': brand,
        'Price': price[0]['p'],
        'CommentCountStr': summary['CommentCountStr'],
        'GoodCountStr': summary['GoodCountStr'],
        'GoodRate': summary['GoodRate'],
    }
    print("品牌:", goods['Brand'])
    print("价格:", goods['Price'])
    print("累计评价:", goods['CommentCountStr'])
    print("好评:", goods['GoodCountStr'])
    print("好评率:", goods['GoodRate'])
    print()
    return goods
def save_csv(goods, f):
    """Append one product record as a CSV row to an open text file handle.

    Parameters
    ----------
    goods : dict
        Record with keys ``Brand``, ``Price``, ``CommentCountStr``,
        ``GoodCountStr``, ``GoodRate``.
    f : file-like
        Open writable text stream (e.g. ``open('goods.csv', 'a', ...)``).

    Uses the csv module so a comma inside a field (e.g. in the product
    title) is quoted instead of silently corrupting the row, which the
    original hand-built format string would do.  ``lineterminator='\\n'``
    keeps the output byte-identical to the original for plain fields.
    """
    writer = csv.writer(f, lineterminator='\n')
    writer.writerow([goods['Brand'], goods['Price'], goods['CommentCountStr'],
                     goods['GoodCountStr'], goods['GoodRate']])
def main():
    """Prompt for a search keyword and scrape the first result pages to CSV.

    JD's search paginates with odd ``page`` values (1, 3, 5, ...), so
    ``range(1, 10, 2)`` covers the first five visible pages.  Every scraped
    product is appended to ``goods.csv``.
    """
    # Do not reuse one name for both the keyword (str) and the scraped
    # record (dict) as the original did.
    keyword = input("商品:")
    page_urls = [
        'https://search.jd.com/Search?keyword={0}&page={1}'.format(keyword, i)
        for i in range(1, 10, 2)
    ]
    # Open the output file once, instead of re-opening it for every product.
    with open('goods.csv', 'a', encoding='utf-8') as f:
        for page_url in page_urls:
            for goods_url in Get_goods_url(page_url, headers):
                save_csv(Get_goods_text(goods_url), f)
if __name__ == '__main__':
main()