import json
import os
import random
import re
import time
from urllib.parse import urlencode

import requests
from lxml import etree

from agency import PROXIES, USER_AGENTS
def get_page(url):
    """Fetch the search/list page at *url* and return its body text.

    A random User-Agent and a random proxy are used for every call to make
    the scraper harder to fingerprint.

    :param url: search-result page URL to request.
    :return: the response body as text.
    :raises requests.RequestException: on connection failure or timeout.
    """
    headers = {
        "User-Agent": random.choice(USER_AGENTS),
        "x-requested-with": "XMLHttpRequest"}
    # Use ONE random proxy for both schemes — the original chose independent
    # proxies for http and https, so a redirect could hop between exit IPs.
    proxy = random.choice(PROXIES)
    proxies = {'http': proxy, 'https': proxy}
    # timeout added: without it a dead proxy hangs the whole crawl forever.
    response = requests.get(url, headers=headers, proxies=proxies, timeout=30)
    return response.text
# Parse a search-result (list) page and scrape every item found on it.
def list_page(data):
    """Parse a search-result page and scrape each item's detail page.

    ``data`` is the raw text of a Taobao search-result page. For every item id
    embedded in it, the item's detail page is fetched; Tmall items are parsed
    for title, attributes and product images, and up to 50 pages of buyer
    reviews (text + photos) are downloaded under a directory named after the
    module-level ``keyword``.

    Side effects: network requests, directory/file creation, console prints.
    Returns None.
    """
    good_ids = re.findall('"nid":"(.*?)"', data)  # item ids embedded in the page JSON
    seller_ids = re.findall('"user_id":"(.*?)"', data)  # seller ids — NOTE(review): never used below
    headers = {
        "User-Agent": random.choice(USER_AGENTS),
        "referer": "https://item.taobao.com/item.htm", }
    for good_id in good_ids:
        # Build the detail-page URL for this item.
        url = "https://item.taobao.com/item.htm?&id=" + str(good_id) + "&ns=1&abbucket=7"
        proxies = {'http': random.choice(PROXIES), 'https': random.choice(PROXIES)}
        response = requests.get(url, headers=headers, proxies=proxies)  # fetch the detail page
        resp = etree.HTML(response.text)
        links = response.url  # final URL after redirects — Tmall items redirect to detail.tmall.com
        # Taobao and Tmall detail pages use different markup; only the Tmall
        # branch is implemented here.
        if 'detail.tmall.com' in links:
            print(links)
            # NOTE(review): non-raw '\d' in these patterns is a DeprecationWarning
            # on modern Python — prefer r'"itemId":(\d+)'.
            tmId = re.findall('"itemId":(\d+)', response.text)[0]
            buyId = re.findall('"sellerId":(\d+)', response.text)[0]
            name = resp.xpath('//div[contains(@class,"tb-detail-hd")]/h1/text()')[0].strip()  # product title
            print(name)
            good_detail = resp.xpath('//div[@id="J_AttrList"]/ul[@id="J_AttrUL"]/li/text()')  # attribute list
            good_detaildict = deal_detail(good_detail)
            pictures_list = resp.xpath('//ul[@class="tm-clear J_TSaleProp tb-img "]/li/a/@style')  # style attrs carrying image URLs
            img_urls = deal_url(pictures_list)
            color = resp.xpath('//ul[@class="tm-clear J_TSaleProp tb-img "]/li/a/span/text()')  # colour label per image
            color = colorstyle(color)
            # Download the product images shown on the detail page.
            try:
                for i in range(len(img_urls)):
                    # NOTE(review): this clobbers the outer `headers` dict with a fixed UA.
                    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36"}
                    proxies = {'http': random.choice(PROXIES), 'https': random.choice(PROXIES)}
                    response = requests.get("https:" + img_urls[i], headers=headers, proxies=proxies)  # fetch image bytes
                    global keyword  # output root directory is the module-level search keyword
                    if not os.path.exists(keyword):
                        os.mkdir(keyword)
                    url_path1 = '{0}/{1}'.format(keyword, name)
                    if not os.path.exists(url_path1):
                        os.mkdir(url_path1)
                    product_detail = '{0}/{1}.{2}'.format(url_path1, '商品描述', 'json')
                    if not os.path.exists(product_detail):
                        with open(product_detail, 'a') as f:
                            text = json.dumps(good_detaildict, ensure_ascii=False) + "\n"
                            f.write(text)  # write the product attributes as one JSON line
                            print("商品描述写入完成")
                    url_path2 = '{0}/{1}'.format(url_path1, '商品图片')
                    if not os.path.exists(url_path2):
                        os.mkdir(url_path2)
                    # file_path = keyword/name/'商品图片'
                    url_path = '{0}/{1}.{2}'.format(url_path2, color[i], 'jpg')  # target path for this image
                    if not os.path.exists(url_path):
                        with open(url_path, 'wb') as f:
                            f.write(response.content)  # save image bytes
                            print('商品图片写入OK')
                    else:
                        print('picture has exist')
            except:
                # NOTE(review): bare except silently swallows everything, even
                # KeyboardInterrupt — narrow and log instead; verify.
                continue
            # Scrape the review pages: Tmall serves them via an XHR endpoint.
            for page in range(1, 51):
                # NOTE(review): "¤tPage" looks like mojibake for "&currentPage"
                # (HTML entity &curren; decoded) — verify the query string.
                conment_url = "https://rate.tmall.com/list_detail_rate.htm?itemId=" + str(tmId) + "&sellerId=" + str(buyId) + "&order=3¤tPage=" \
                              + str(page) + "&append=0&content=1&tagId=&posi=&picture=1"
                headers = {
                    "User-Agent": random.choice(USER_AGENTS)}
                proxies = {'http': random.choice(PROXIES), 'https': random.choice(PROXIES)}
                content = requests.get(conment_url, headers=headers, proxies=proxies).text
                comments = re.findall(r'"rateContent":"(.*?)"', content)  # review texts on this page
                pics_lists = re.findall(r'"pics":\[(.*?)\]', content)  # photo URL groups, one per review
                pics_list = del_str(pics_lists)
                color_lists = re.findall('"auctionSku":"(.*?)"', content)  # SKU each reviewer bought
                color_list1 = deal_color(color_lists)
                color_list = colorstyle(color_list1)
                print(color_list)
                # Drop reviews without photos so colours and photo groups stay aligned.
                # NOTE(review): popping while indexing over range(len(...)) skips
                # elements and can raise IndexError after removals — iterate a
                # copy or filter into new lists instead; verify.
                for i in range(len(color_list)):
                    if pics_list[i] == []:
                        pics_list.pop(i)
                        color_list.pop(i)
                # Save each review's text and its photos.
                for img_url in range(len(color_list)):
                    try:
                        if not os.path.exists(keyword):
                            os.mkdir(keyword)
                        url_path1 = '{0}/{1}'.format(keyword, name)
                        if not os.path.exists(url_path1):
                            os.mkdir(url_path1)
                        buyerImg_path = '{0}/{1}'.format(url_path1, '买家晒图')  # buyer-photo directory
                        if not os.path.exists(buyerImg_path):
                            os.mkdir(buyerImg_path)
                        # print(color_list[img_url]+'%s%s'%(page, img_url))
                        comment_path = '{0}/{1}.{2}'.format(buyerImg_path, '%s.%s' % (page, img_url + 1), 'txt')
                        with open(comment_path, 'a') as f:
                            f.write(comments[img_url])  # write the review text
                        for i in range(len(pics_list[img_url])):
                            if pics_list[img_url][i]:
                                realUrl = "https:" + pics_list[img_url][i]
                                # print(realUrl)
                                response = requests.get(realUrl, headers=headers, proxies=proxies)
                                buyerurl_path = '{0}/{1}.{2}'.format(buyerImg_path, color_list[img_url] + '%s.%s.%s' % (page, img_url + 1, i + 1), 'jpg')  # photo path
                                if not os.path.exists(buyerurl_path):
                                    with open(buyerurl_path, 'wb') as f:
                                        f.write(response.content)  # save the buyer photo
                                else:
                                    print("已经存在")
                        print("用户晒图下载完毕")
                    except:
                        # NOTE(review): bare except hides errors — narrow it; verify.
                        continue
# Normalize the product-attribute strings into a dict for JSON storage.
def deal_detail(lst):
    """Normalize product-attribute strings into a dict suitable for JSON.

    Each entry in *lst* looks like ``"名称: 值"``, usually padded with spaces
    and ``\\xa0``. All whitespace is stripped, then the entry is split on the
    FIRST colon only, so values that themselves contain ':' are kept intact
    (the original split on every colon and truncated such values). Entries
    without any colon are skipped instead of raising IndexError.

    :param lst: iterable of raw attribute strings.
    :return: dict mapping attribute name to attribute value.
    """
    good_detaildict = {}
    for content in lst:
        cleaned = ''.join(content.split())  # drop spaces and \xa0 padding
        key, sep, value = cleaned.partition(':')
        if sep:  # skip malformed entries that have no colon at all
            good_detaildict[key] = value
    return good_detaildict
def del_str(lst):
    """Split each comma-joined, quote-wrapped string into a list of clean parts.

    Every entry such as ``'"a","b"'`` becomes ``['a', 'b']``; one inner list
    is returned per input entry, preserving order.

    :param lst: iterable of comma-separated, double-quoted strings.
    :return: list of lists of unquoted fragments.
    """
    return [[piece.strip('"') for piece in entry.split(',')] for entry in lst]
# Extract image URLs from @style attributes and upscale the thumbnail size.
def deal_url(lst):
    """Extract product-image URLs from CSS @style values and upscale them.

    Each entry looks like ``'background:url(//host/path_40x40.jpg)'``. The
    protocol-relative URL is extracted and the 40x40 thumbnail size replaced
    with 430x430. Fixes over the original: the pattern is a raw string with
    the dot before ``jpg`` escaped (``.jpg`` previously matched any char +
    ``jpg``), and entries without a match are skipped instead of raising
    IndexError. The two append loops are merged into one pass.

    :param lst: iterable of style-attribute strings.
    :return: list of upscaled, protocol-relative image URLs.
    """
    img_urls = []
    for style in lst:
        match = re.search(r'//.*\.jpg', style)
        if match:
            img_urls.append(match.group(0).replace('40x40', '430x430'))
    return img_urls
# Extract the colour value from each "auctionSku" string.
def deal_color(lst):
    """Extract the colour value from each "auctionSku" string.

    An auctionSku looks like ``'颜色分类:藏青色;尺码:XL'``: the first
    ``';'``-separated field is taken, any ``'#...'`` suffix dropped, and the
    label prefix removed — 5 chars for ``'颜色分类:'``, otherwise 3 chars, as
    in the original. Entries containing ``'上衣'`` (tops mixed into the
    shorts results) are skipped entirely; the original called
    ``lst.remove(i)`` while iterating, which skips the following element and
    still appended the removed item's colour.

    :param lst: iterable of auctionSku strings.
    :return: list of colour values.
    """
    colors = []
    for sku in lst:
        if '上衣' in sku:
            continue  # drop unwanted "top" SKUs instead of mutating mid-loop
        field = sku.split(';')[0].split('#')[0]
        if '颜色分类' in field:
            colors.append(field[5:])  # strip the '颜色分类:' label (5 chars)
        else:
            colors.append(field[3:])  # strip the shorter 3-char label
    return colors
# Trim colour names down to the '…色' part and drop stray items (e.g. tops mixed into sports-shorts results).
def colorstyle(lst):
    """Trim colour names to end at '色' and drop '上衣' entries.

    Rules (matching the original's non-buggy path):
      - entries containing ``'上衣'`` are skipped entirely — the original
        removed them with ``lst.remove(i)`` while iterating, which skips the
        next element and still appended the removed entry;
      - any ``'#...'`` suffix is dropped;
      - combo colours (containing '+') keep everything up to the first '(';
      - otherwise the name is cut just after the FIRST '色'
        (``str.index`` replaces the original manual character loop);
      - names without '色' are kept as-is.

    :param lst: iterable of colour-name strings.
    :return: list of trimmed colour names.
    """
    trimmed = []
    for item in lst:
        if '上衣' in item:
            continue
        item = item.split('#')[0]
        if '色' in item:
            if '+' in item:
                trimmed.append(item.split('(')[0])
            else:
                trimmed.append(item[:item.index('色') + 1])
        else:
            trimmed.append(item)
    return trimmed
if __name__ == '__main__':
    # BUG FIX: urlencode was referenced but never imported, so the script died
    # with NameError on startup; it is now imported from urllib.parse at the
    # top of the file.
    keyword = '运动短裤'  # search term; list_page() also reads it (via `global`) as the output directory name
    kw = urlencode({"q": keyword})
    for page_index in range(1, 100):
        # Taobao search paginates with s = page * 44 (44 results per page).
        url = "https://s.taobao.com/search?q=" + kw + "&s=" + str(page_index * 44)
        list_page(get_page(url))