# -*- coding: utf-8 -*-
# Targeted crawler for Taobao product search result pages
import requests
import re
import pymongo
client = pymongo.MongoClient('localhost')
db = client['taobao']
# Request headers; Taobao search normally requires a Cookie copied from a
# logged-in browser session (the value below is only a placeholder).
headers = {
    'Cookie': 'thw=c9B567C18',
    'Host': 's.taobao.com',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
}
def getHTMLText(url):
    """Fetch the page and return its HTML text (empty string on failure)."""
    try:
        r = requests.get(url, timeout=30, headers=headers)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        print("failed to fetch page")
        return ""
def parsePage(infoList, html):
    """Extract shop/title/price/sales/location/id from the JSON embedded in the
    search page with regexes, collect the records in infoList and save each one
    to MongoDB."""
    try:
        # The search page embeds item data as JSON; pull out the fields we need.
        shop = re.findall(r'\"nick\":\".+?\"', html)
        price = re.findall(r'\"view_price\":\"\d+\.\d+\"', html)
        title = re.findall(r'\"raw_title\":\".+?\"', html)
        sales = re.findall(r'\"view_sales\":\".+?\"', html)
        loc = re.findall(r'\"item_loc\":\".+?\"', html)
        nid = re.findall(r'\"nid\":\".+?\"', html)
        for i in range(len(price)):
            dic = {}
            # Split on the first ':' only, so values that contain ':' stay intact.
            dic['shop'] = eval(shop[i].split(':', 1)[1])
            dic['title'] = eval(title[i].split(':', 1)[1])
            dic['price'] = eval(price[i].split(':', 1)[1])
            dic['sales'] = eval(sales[i].split(':', 1)[1])
            dic['loc'] = eval(loc[i].split(':', 1)[1])
            dic['nid'] = 'https://detail.tmall.com/item.htm?id=' + eval(nid[i].split(':', 1)[1])
            infoList.append(dic)
            save_mongo(dic)
    except Exception:
        print('failed to parse page')
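# A minimal self-check of the regex extraction above (a sketch; the sample string
# is hand-written for illustration, not real Taobao page data):
def _regex_selftest():
    sample = '"nick":"某旗舰店","raw_title":"徐福记 沙琪玛 160g","view_price":"29.90"'
    assert re.findall(r'\"view_price\":\"\d+\.\d+\"', sample) == ['"view_price":"29.90"']
    assert eval(re.findall(r'\"raw_title\":\".+?\"', sample)[0].split(':', 1)[1]) == '徐福记 沙琪玛 160g'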
def save_mongo(data):
    """Upsert one record into the '徐福记' collection, keyed by the item URL."""
    result = db['徐福记'].update_one({'nid': data['nid']}, {'$set': data}, upsert=True)
    if result.acknowledged:
        print('saved to mongo', data['nid'])
    else:
        print('failed', data['title'])
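# A small spot-check helper (a sketch, assuming MongoDB is running locally and the
# crawler has been run at least once; show_saved is not part of the original script):
def show_saved(limit=5):
    """Print a few saved records from the '徐福记' collection."""
    for doc in db['徐福记'].find().limit(limit):
        print(doc['title'], doc['price'], doc['loc'])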
def main():
    keyword = '徐福记'
    deep = 1  # number of result pages to crawl
    url = 'https://s.taobao.com/search?q=' + keyword + '&sort=sale-desc'
    infoList = []
    for i in range(deep):
        _url = url + '&s=' + str(i * 44)  # Taobao paginates in steps of 44 items
        text = getHTMLText(_url)
        parsePage(infoList, text)

if __name__ == '__main__':
    main()