# 针对于搜索某一关键字,我这里是(狗狗),爬取了20页数据.保存图片数据到img文件夹中
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# file:京东商城.py
# author:ytytyt
# datetime:2021/7/15 14:15
# software: PyCharm
'''
url:https://search.jd.com/Search?keyword=%E7%8B%97%E7%8B%97&page=1
page每翻一页加2
'''
# import module your need
import os
import random
import time
from contextlib import closing

import requests
from requests.exceptions import RequestException
from pyquery import PyQuery as pq
PIC = 0
#获取页面
def getpage(url):
    """Fetch one search-results page and return its HTML text.

    Args:
        url: full JD search URL including the ``page`` query parameter.

    Returns:
        The UTF-8 decoded page body on HTTP 200, otherwise ``None``
        (non-200 status or any requests-level failure is printed).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'
    }
    try:
        # Without a timeout, requests can block forever on a stalled
        # connection and freeze the whole crawl.
        res = requests.get(url, headers=headers, timeout=10)
        if res.status_code == 200:
            res.encoding = "utf-8"
            return res.text
        print(res.status_code)
        return None
    except RequestException as e:
        print(e)
        return None
#页面解析
def parsePage(html):
    """Extract image URLs from a JD search-results page.

    Args:
        html: raw HTML of the page, as returned by :func:`getpage`.

    Yields:
        Protocol-relative image URLs taken from the lazy-load attribute
        (``data-lazy-img``), e.g. ``//img10.360buyimg.com/...``.
        Items without that attribute are skipped so callers never see
        ``None``.
    """
    doc = pq(html)
    goods = doc('#J_goodsList .gl-warp.clearfix .gl-item')
    for item in goods.items():
        # `item` is already a PyQuery object — no need to re-wrap with pq().
        for img in item('.gl-i-wrap .p-img a img').items():
            src = img.attr('data-lazy-img')
            if src:  # guard: attr() returns None when the attribute is missing
                yield src
#写入文件或者数据库
def datasave(url):
    """Download one image and save it as ``./img/<n>.jpg``.

    Args:
        url: protocol-relative image URL (``//host/path``) as yielded by
            :func:`parsePage`; ``http:`` is prepended before downloading.

    Side effects:
        Increments the module-level counter ``PIC`` (used as the file name)
        and writes the image to the ``img`` directory, creating it if needed.
    """
    global PIC
    PIC += 1
    full_url = 'http:' + url
    path = './img/' + str(PIC) + '.jpg'
    # Create the output directory up front; the original crashed with
    # FileNotFoundError when ./img did not exist.
    os.makedirs('./img', exist_ok=True)
    # BUG FIX: the original issued requests.get() twice — the first response
    # was never read or closed (a leaked connection and a wasted download).
    # A single streamed request inside closing() releases the connection.
    with closing(requests.get(full_url, stream=True, timeout=10)) as response:
        with open(path, 'wb') as file:
            # Stream in 128-byte chunks so large images never sit fully in memory.
            for data in response.iter_content(128):
                file.write(data)
#主爬虫调度
def main(start):
    """Crawl a single results page: build the URL, fetch, parse, save images.

    Args:
        start: value for the ``page`` query parameter. JD advances it by 2
            per visible page, so callers pass 1, 3, 5, ...
    """
    url = 'https://search.jd.com/Search?keyword=%E7%8B%97%E7%8B%97&page=' + str(start)
    html = getpage(url)
    if html is None:
        # Fetch failed (non-200 or network error); skip this page instead of
        # crashing inside the parser with a None argument.
        return
    for data in parsePage(html):
        datasave(data)
if __name__ == '__main__':
    # JD's `page` parameter advances by 2 per visible results page,
    # so pages 1..20 map to page=1, 3, 5, ..., 39.
    for page_index in range(20):
        main(page_index * 2 + 1)
        print("正在爬取第{}页".format(page_index + 1))
        # Random pause (0–3 s) between pages to be polite to the server.
        time.sleep(random.random() * 3)