import os
import requests
from urllib.parse import urlencode
from requests import codes
from hashlib import md5
from multiprocessing.pool import Pool
def get_page(offset):
    """Fetch one page of Toutiao search results for the '街拍' keyword.

    Args:
        offset: pagination offset passed to the API (multiples of 20).

    Returns:
        The parsed JSON response as a dict on HTTP 200, otherwise None
        (non-200 status, connection error, or timeout).
    """
    params = {
        'offset': offset,
        'format': 'json',
        'keyword': '街拍',
        'autoload': 'true',
        'count': '20',
        'cur_tab': '1',
        'from': 'search_tab'
    }
    url = 'https://www.toutiao.com/search_content/?' + urlencode(params)
    try:
        # timeout prevents the worker from hanging forever on a dead connection
        response = requests.get(url, timeout=10)
        if response.status_code == codes.ok:
            return response.json()
    except (requests.ConnectionError, requests.Timeout):
        return None
    return None  # explicit: non-200 responses yield None as well
def get_images(json):
    """Extract image entries from one search-result page.

    Args:
        json: parsed response dict from ``get_page``, or None on fetch failure.

    Yields:
        dicts with keys 'image' (protocol-relative image URL) and 'title'.
    """
    # get_page() returns None on error; treat that (and a missing/empty
    # 'data' key) as an empty page instead of crashing.
    if not json:
        return
    for item in json.get('data') or []:
        # Entries carrying a cell_type are non-image cells (ads, related
        # searches) — skip them.
        if item.get('cell_type') is not None:
            continue
        title = item.get('title')
        # image_list may be absent or explicitly None for some entries
        for image in item.get('image_list') or []:
            yield {
                'image': image.get('url'),
                'title': title
            }
def save_image(item):
    """Download one image and save it as image/<title>/<md5>.jpg.

    Args:
        item: dict with 'image' (protocol-relative URL) and 'title' keys,
            as produced by ``get_images``.

    The file name is the MD5 of the image bytes, so identical images
    fetched from different pages are stored only once.
    """
    # Guard against a missing title so os.path.join never receives None.
    folder = os.path.join('image', item.get('title') or 'untitled')
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists()/os.makedirs() pair.
    os.makedirs(folder, exist_ok=True)
    try:
        # The API returns protocol-relative URLs ('//...'): prepend a scheme.
        image_url = 'http:' + item.get('image')
        response = requests.get(image_url, timeout=10)
        if response.status_code == codes.ok:
            image_path = os.path.join(
                folder, '{}.{}'.format(md5(response.content).hexdigest(), 'jpg'))
            if not os.path.exists(image_path):
                with open(image_path, 'wb') as f:
                    f.write(response.content)
                print('当前下载路径:%s' % image_path)
            else:
                # Fixed message: the original printed "target path not found"
                # here, but this branch means the file already exists.
                print('图片已存在,跳过:%s' % image_path)
    except (requests.ConnectionError, requests.Timeout):
        print('下载失败')
def main(offset):
    """Crawl one result page: fetch it, extract image entries, download each.

    Args:
        offset: pagination offset forwarded to ``get_page``.
    """
    data = get_page(offset)
    # get_page returns None on network failure or non-200; the original
    # passed that None straight into get_images and crashed.
    if not data:
        return
    for item in get_images(data):
        print(item)
        save_image(item)
# Pages GROUP_START..GROUP_END are crawled; each page holds 20 entries,
# so offsets are multiples of 20.
GROUP_START = 1
GROUP_END = 10

if __name__ == '__main__':
    pool = Pool()  # defaults to os.cpu_count() worker processes
    offsets = [x * 20 for x in range(GROUP_START, GROUP_END + 1)]
    pool.map(main, offsets)
    # Bug fix: the original wrote `pool.close` / `pool.join` without
    # parentheses — bare attribute accesses that never ran, so the pool
    # was never shut down cleanly.
    pool.close()
    pool.join()
# Source article: "Crawling Toutiao street-snap images via Ajax"
# (blog metadata: last recommended update 2024-04-07)