import json
import re
import requests
import datetime
from bs4 import BeautifulSoup
import os
def down_save_pic(name, pic_urls, base_dir='/Desktop/'):
    """Download every image in *pic_urls* into a folder named *name*.

    Files are saved as ``1.jpg``, ``2.jpg``, ... in download order.  A
    failed download is reported and skipped; it never aborts the run.

    Parameters
    ----------
    name : str
        Name of the folder the images are saved under.
    pic_urls : list[str]
        Image URLs to download.
    base_dir : str, optional
        Parent directory for the folder.  Defaults to the original
        hard-coded ``'/Desktop/'`` for backward compatibility.
    """
    path = os.path.join(base_dir, name)
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(path): os.makedirs(path)` pattern.
    os.makedirs(path, exist_ok=True)
    for i, pic_url in enumerate(pic_urls):
        try:
            pic = requests.get(pic_url, timeout=15)
            filename = str(i + 1) + '.jpg'
            with open(os.path.join(path, filename), 'wb') as f:
                f.write(pic.content)
            print('成功下载第%s张图片: %s' % (str(i + 1), str(pic_url)))
        except Exception as e:
            # Best-effort: report the failure and continue with the next URL.
            print('下载第%s张图片时失败: %s' % (str(i + 1), str(pic_url)))
            print(e)
            continue
def crawl_wiki_data():
"""
爬取百度百科中《乘风破浪的姐姐》中嘉宾信息,返回html
"""
headers &
爬取《乘风破浪的姐姐》的选手图片信息
最新推荐文章于 2021-05-06 16:51:39 发布