Scraping Taobao products by keyword: a simple Python implementation

# -*- coding: utf-8 -*-
# author : yesehngbao
# time : 2018/3/20

import os
import re
import json
import hashlib

import requests
import pymongo

from multiprocessing import Pool
from lxml import etree

from taobao_re_xpath_setting import *
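The script reads its configuration from a companion taobao_re_xpath_setting.py that the post does not show. A minimal sketch, with every name inferred from how it is used below and every value a placeholder:

# taobao_re_xpath_setting.py -- hypothetical reconstruction, values are placeholders
GOODS = '连衣裙'            # keyword to search for
NUM = 3                     # number of list pages to crawl
DIRNAME = 'taobao'          # root output directory
DIRNAME1 = 'images'         # main (gallery) images
DIRNAME2 = 'color_images'   # colour (variant) images
DIRNAME3 = 'videos'         # item videos
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_DB = 'taobao'
MONGO_COLL = 'goods'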

# Create the output directory tree (names come from the settings module).
dirname = DIRNAME
if not os.path.exists(dirname):
    os.mkdir(dirname)

dirname1 = DIRNAME1
if not os.path.exists(dirname + '/' + dirname1):
    os.mkdir(dirname + '/' + dirname1)

dirname2 = DIRNAME2
if not os.path.exists(dirname + '/' + dirname2):
    os.mkdir(dirname + '/' + dirname2)

dirname3 = DIRNAME3
if not os.path.exists(dirname + '/' + dirname3):
    os.mkdir(dirname + '/' + dirname3)
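As an aside, the four mkdir blocks can be collapsed: os.makedirs creates intermediate directories and, since Python 3.2, skips ones that already exist:

for sub in (DIRNAME1, DIRNAME2, DIRNAME3):
    os.makedirs(os.path.join(DIRNAME, sub), exist_ok=True)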

url = 'https://s.taobao.com/search'
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/64.0.3282.186 Safari/537.36"
}


def md5(strs):
    """Hash a string (plus a fixed salt) to build safe, unique file names."""
    strs = strs + '12sdwz..'
    strs = hashlib.md5(strs.encode('utf-8'))
    key = strs.hexdigest()
    return key


def get_html(page):
    """Fetch one search-results (list) page.

    :param page: result offset to fetch; GOODS is the keyword to search for
    :return: page source, or None on a non-200 response
    """
    params = {
        'q': '%s' % GOODS,
        's': page,
    }
    response = requests.get(url, headers=headers, params=params)
    if response.status_code == 200:
        return response.text
    else:
        return None
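The salt ('12sdwz..') just makes collisions with files from other sources unlikely; any input maps to a 32-character hex name. A quick sanity check (the file name is a hypothetical example):

name = md5('img-bao-uploaded-i4-123.jpg')  # hypothetical thumbnail name
assert len(name) == 32
assert all(c in '0123456789abcdef' for c in name)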

def get_ajax_html():
    """Parse the AJAX endpoint that backs part of the result list."""
    ajax_url = 'https://s.taobao.com/api'
    params = {
        '_ksTS': '1521612082036_312',  # must be a string; as a bare literal it would parse as one int
        'callback': 'jsonp267',
        'ajax': 'true',
        'm': 'customized',
        'q': '%s' % GOODS,
        's': 36,
        'bcoffset': 0,
        'rn': '4e1dc906143376f8d2e735536fd3ee0c'
    }
    response = requests.get(ajax_url, headers=headers, params=params).text
    comp = re.compile(r'jsonp\d+\((.*?)\)', re.S)
    strs = re.findall(comp, response)
    if strs:
        strs = json.loads(strs[0])
        commdity_list = strs.get('API.CustomizedApi').get('itemlist').get('auctions')
        if commdity_list:
            for commdity in commdity_list:
                addr = commdity.get('item_loc')
                nick = commdity.get('nick')
                sales = commdity.get('view_sales')
                detail = 'http:' + commdity.get('detail_url')
                yield {
                    'addr': addr,
                    'nick': nick,
                    'sales': sales,
                    'detail': detail,
                }
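The API wraps its JSON in the jsonp callback named by the callback parameter; the regex strips the wrapper down to the payload. With a toy response:

>>> re.findall(r'jsonp\d+\((.*?)\)', 'jsonp267({"itemlist": []})', re.S)
['{"itemlist": []}']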

def analysis(html):
    """Parse the data on a list page.

    html: list-page source
    content: items from the embedded JS config plus the AJAX data
    """
    content = []
    comp = re.compile('g_page_config = (.*?)g_srp_loadCss', re.S)
    strs = re.findall(comp, html)
    if strs:
        # Trim the trailing "};" down to valid JSON. (A blanket replace(';', '')
        # would also delete semicolons inside string values.)
        strs = strs[0].strip().rstrip(';')
        strs = json.loads(strs)
        data = strs.get('mods').get('itemlist').get('data').get('auctions')
        if data:
            for i in data:
                detail = i.get('detail_url')
                if not re.match('http', detail):
                    detail = 'http:' + detail
                addr = i.get('item_loc')
                nick = i.get('nick')
                sales = i.get('view_sales')
                item = {
                    'addr': addr,
                    'nick': nick,
                    'sales': sales,
                    'detail': detail,
                }
                content.append(item)
            # A full list page holds 44 items; fewer means the rest is served via AJAX.
            if len(data) < 44:
                for i in get_ajax_html():
                    content.append(i)
            else:
                print(len(data))
            return content
        else:
            return None
    else:
        return None
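analysis() depends on the search page embedding its data as a JavaScript assignment to g_page_config; a toy version of the extraction the function performs:

html = 'g_page_config = {"mods": {}};\n    g_srp_loadCss();'  # toy page fragment
raw = re.findall('g_page_config = (.*?)g_srp_loadCss', html, re.S)[0]
data = json.loads(raw.strip().rstrip(';'))  # {'mods': {}}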

def save_img(img_new, page):
    """Download a main (gallery) image.

    img_new: main-image URL
    """
    if img_new:
        img_name = img_new[30:].replace('/', '-')
        response = requests.get(img_new, headers=headers).content
        with open(dirname + '/' + dirname1 + '/' + md5(img_name) + '.jpg', 'wb') as fp:
            fp.write(response)
        print('page %s ---- main image saved:' % page, img_name)


def save_color_img(color_url, page):
    """Download a colour (variant) image.

    color_url: colour-image URL
    """
    if color_url:
        img_name = color_url[30:].replace('/', '-')
        response = requests.get(color_url, headers=headers).content
        with open(dirname + '/' + dirname2 + '/' + md5(img_name) + '.jpg', 'wb') as fp:
            fp.write(response)
        print('page %s ---- colour image saved:' % page, img_name)


def save_video(detail_url, title, page):
    """Download an item video.

    detail_url: video URL
    title: video name
    """
    if detail_url:
        response = requests.get(detail_url, headers=headers).content
        with open(dirname + '/' + dirname3 + '/' + md5(title) + '.mp4', 'wb') as fp:
            fp.write(response)
        print('page %s ---- video saved: (%s)' % (page, title))
        return 'download_ok'
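All three savers read the whole response into memory before writing, and item videos can be large. A streamed variant of save_video using the standard requests streaming API (a sketch, same file-naming scheme as above):

def save_video_streamed(video_url, title, page):
    """Like save_video, but writes the body in 1 MB chunks instead of all at once."""
    with requests.get(video_url, headers=headers, stream=True) as response:
        with open(dirname + '/' + dirname3 + '/' + md5(title) + '.mp4', 'wb') as fp:
            for chunk in response.iter_content(chunk_size=1024 * 1024):
                fp.write(chunk)
    print('page %s ---- video saved: (%s)' % (page, title))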

def alaysis_detail(respons, page):
    """Extract the main images, colour (variant) images and video from a detail page.

    respons: detail-page source
    doc: lxml parse tree
    img_new: main-image URL
    color_url: colour-image URL
    video_new: video URL
    """
    # Turn the result offset back into a 1-based page number (44 items per page).
    if page == 0:
        page = 1
    else:
        page = page // 44 + 1

    # Main images: rewrite the 60px thumbnail URLs to the 400px renditions.
    doc = etree.HTML(respons)
    li_list = doc.xpath('.//ul[@class="tb-clearfix" or @id="J_UlThumb"]/li')
    for li in li_list:
        img_old = li.xpath('./a/img/@src')
        if img_old:
            img_old = img_old[0]
            img_new = img_old[-15:].replace('60', '400')
            img_new = 'http:' + img_old[:-15] + img_new
            save_img(img_new, page)

    # Colour (variant) images: pulled from inline background-image styles.
    compi = re.compile(r'style="background:url\((.*?)\)')
    color_img = re.findall(compi, respons)
    for color in color_img:
        if color:
            color_url = color[-15:].replace('40', '400').replace('30', '400')
            color_url = 'http:' + color[:-15] + color_url
            save_color_img(color_url, page)

    # Video: the TShop.Setup() call embeds a JSON config holding the video URL.
    comp = re.compile(r'TShop\.Setup\(\s(.*?)\s\)', re.S)
    strs = re.findall(comp, respons)
    if strs:
        strs = json.loads(strs[0])
        video_lod = strs.get('itemDO').get('imgVedioUrl')
        if video_lod:
            video_new = video_lod.replace('e/1', 'e/6').replace('t/8', 't/1')
            title = strs.get('itemDO').get('title')
            save_video('http:' + video_new, title, page)
        else:
            return 'no video'
    else:
        return 'no video (or obfuscated)'
    return 'download finished'
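The main-image step relies on the thumbnail size living in the last 15 characters of the URL, where swapping 60 for 400 requests the larger rendition. With a hypothetical thumbnail URL of that shape:

img_old = '//img.example.com/bao/item_abc_60x60q90.jpg'  # hypothetical URL shape
img_new = 'http:' + img_old[:-15] + img_old[-15:].replace('60', '400')
# img_new == 'http://img.example.com/bao/item_abc_400x400q90.jpg'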

def get_detail(content, page):
    """Fetch each item's detail page.

    content: list-page data
    detail_url: detail-page URL
    """
    if content:
        for cont in content:
            detail_url = cont.get('detail')
            response = requests.get(detail_url, headers=headers)
            if response.status_code == 200:
                alaysis_detail(response.text, page)
            else:
                print(response.status_code)
    return 'detail pages done'

def save_mongo(content):
    """Save the list-page data to MongoDB.

    content: list-page data
    """
    mongo_client = pymongo.MongoClient(host=MONGO_HOST, port=MONGO_PORT)
    db = mongo_client[MONGO_DB]
    coll = db[MONGO_COLL]
    coll.insert(content)
    print('data saved:', content, len(content))
    return None
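One caveat: Collection.insert() was deprecated in PyMongo 3.0. On a current PyMongo the same step would use insert_many() (a sketch, connection names as above):

def save_mongo(content):
    """Save the list-page data to MongoDB, PyMongo 3.x style."""
    mongo_client = pymongo.MongoClient(host=MONGO_HOST, port=MONGO_PORT)
    coll = mongo_client[MONGO_DB][MONGO_COLL]
    result = coll.insert_many(content)  # content is a list of dicts
    print('data saved:', len(result.inserted_ids))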

def main(page):
    """Crawl Taobao items for a keyword: images, shop, location, details, video, etc.

    page: result offset of the list page to crawl
    html: list-page source
    content: detailed info and URLs for the items on the list page
    """
    html = get_html(page)
    content = analysis(html)
    save_mongo(content)
    get_detail(content, page)


if __name__ == '__main__':
    pool = Pool()
    pool.map(main, [page * 44 for page in range(NUM)])
    print('finished')
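Note that Pool.map is fed result offsets, not page numbers: a Taobao list page holds 44 items, so (with NUM = 3 as a placeholder) the workers receive:

>>> [page * 44 for page in range(3)]
[0, 44, 88]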
