import requests
import re
# Build the 50 review-page URLs (one per page of 20 reviews).
# NOTE: the original pasted code contained '¤tPage' — HTML-entity mojibake
# where '&curren' in '&currentPage' was rendered as the ¤ character.
urls = [
    'https://rate.tmall.com/list_detail_rate.htm?itemId=523840172328&spuId=457482192&sellerId=2260385300&order=1&currentPage=%s' % page
    for page in range(1, 51)
]
# Accumulators for each scraped review field.
username, auctionSku, rateContent, rateDate, goldUser = [], [], [], [], []

# Headers mimicking a desktop Chrome browser so Tmall serves the normal
# review payload instead of rejecting the request outright.
headers = {
    'user-agent': ('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                   '(KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'),
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'zh-CN,zh;q=0.8',
}
# Field-extraction patterns for the response body, compiled once (raw
# strings) instead of being rebuilt on every loop iteration.
_nick_re = re.compile(r'"displayUserNick":"(.*?)"')
_sku_re = re.compile(r'"auctionSku":"(.*?)","auctionTitle"')
_content_re = re.compile(r'"rateContent":"(.*?)","rateDate"')
_date_re = re.compile(r'"rateDate":"(.*?)","reply"')
_gold_re = re.compile(r'"goldUser":(\w+),"id":')

for url in urls:
    # Fetch one page and scrape each field from the raw response text.
    # NOTE(review): Tmall sometimes returns a page without the expected
    # review JSON (rate limiting / anti-scraping), which is why the field
    # counts differ between runs — consider validating `content` (or adding
    # a delay / cookies) before extending the accumulators.
    content = requests.get(url, headers=headers).text
    username.extend(_nick_re.findall(content))
    auctionSku.extend(_sku_re.findall(content))
    rateContent.extend(_content_re.findall(content))
    rateDate.extend(_date_re.findall(content))
    goldUser.extend(_gold_re.findall(content))
print(username)
# Print the number of items captured for each field; with 50 pages of 20
# reviews each, every length should be 1000 — a shorter list means some
# pages failed to yield matches.
a = [username, rateContent, rateDate, auctionSku, goldUser]
for field in a:
    print(len(field))
# Author's note (translated from Chinese): "I ran the scraper above several
# times, and the printed field lengths differ on every run. Normally each
# should equal pages * 20 reviews per page, but a few pages' worth are always
# missing, and the shortfall varies. Is this a bug in my code, or Tmall's
# anti-scraping? Advice from anyone experienced would be appreciated!"