百度图片基本没什么反爬虫措施。我们爬取图片时,在浏览器中直接鼠标右键 -> 检查 -> Network -> XHR,向下滚动页面以加载更多结果,然后在 Headers 下的 General 中查看实际的请求 Request URL,提取其中的关键参数即可。
话不多说,直接上代码;
spider文件:
class BaidupictureSpider(scrapy.Spider):
    """Scrape Baidu image-search thumbnail URLs for a list of keywords.

    For each keyword in ``search_lists``, requests the corresponding number of
    result pages from ``search_pages`` (30 results per page) against Baidu's
    ``acjson`` image-search endpoint, then extracts the ``thumbURL`` links.
    """
    name = 'baidupicture'
    # BUG FIX: allowed_domains entries must be bare domain names (no scheme);
    # with 'http://' included, Scrapy's OffsiteMiddleware filters every request.
    allowed_domains = ['image.baidu.com']
    # Keywords to search for.
    # BUG FIX: the original used fullwidth curly quotes (‘…’), which is a
    # Python SyntaxError; the string contents are unchanged.
    search_lists = ['臭豆腐', '鸡肉', '美女']
    # Number of result pages to fetch per keyword (parallel to search_lists).
    search_pages = [20, 10, 100]

    def start_requests(self):
        """Yield one Request per result page per keyword."""
        # BUG FIX: the original inner loop reused the outer index variable `i`
        # (`for i in range(self.search_pages[i])`), clobbering the keyword
        # index and breaking both the page offset and the meta keyword.
        for idx, keyword in enumerate(self.search_lists):
            queryWord = urlencode({'queryWord': keyword})
            word = urlencode({'word': keyword})
            for page in range(self.search_pages[idx]):
                # pn is the result offset; Baidu returns 30 results per page.
                url = ("https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&"
                       + queryWord
                       + "&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&"
                       + word
                       + "&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&pn="
                       + str(page * 30) + "&rn=30")
                # dont_filter=True: the endpoint URLs differ only by pn and
                # must not be collapsed by the duplicate filter.
                yield scrapy.Request(url, callback=self.parse,
                                     meta={'search': keyword}, dont_filter=True)

    def parse(self, response):
        """Extract thumbnail URLs from the JSON-ish response body."""
        item = FacepictureItem()
        item['recode'] = response.meta['search']  # keyword; pipeline uses it as the folder name
        # BUG FIX: escape the dot before "jpg" so it matches a literal '.'
        # rather than any character.
        item['imgs_url'] = re.findall(r'"thumbURL":"(https://.*?\.jpg)"', response.text)
        yield item
settings:
在 settings.py 中关闭 robots.txt 检查(否则请求会被过滤):
ROBOTSTXT_OBEY = False
pipeline:
from hashlib import md5
from urllib.request import urlretrieve
import os
class FacepicturePipeline(object):
    """Download every thumbnail URL in an item into a folder named after the
    search keyword (``item['recode']``).

    Each image is saved as ``<keyword>/<md5-of-url>.jpg``; already-downloaded
    files are skipped, and failed downloads are ignored (best-effort).
    """

    def process_item(self, item, spider):
        """Download item['imgs_url'] into the item['recode'] directory.

        Returns the item unchanged so later pipelines still receive it.
        """
        # BUG FIX: exist_ok avoids the check-then-create race of the original
        # `if not os.path.exists(...): os.mkdir(...)` pair.
        os.makedirs(item['recode'], exist_ok=True)
        for url in item['imgs_url']:
            print('正在写的url是:', url)
            # md5 of the URL gives a stable, unique, filesystem-safe filename.
            img_path = '{0}/{1}.{2}'.format(
                item['recode'], md5(url.encode("utf-8")).hexdigest(), 'jpg')
            if os.path.exists(img_path):
                continue  # already downloaded on a previous run
            try:
                urlretrieve(url, filename=img_path)
            except (OSError, ValueError):
                # BUG FIX: narrowed from a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit. urllib raises URLError/HTTPError
                # (OSError subclasses) on network failure and ValueError on
                # malformed URLs; downloads stay best-effort.
                continue
        return item
完毕