Today we'll crawl the photo galleries on Umei (优美图库, umei.cc).
import os
import time
from concurrent.futures import ThreadPoolExecutor

import requests
from bs4 import BeautifulSoup
rootrurl = 'https://umei.cc'
save_dir = 'D:/estimages/'
max_pages = 1  # crawl only one list page per tag

# Request headers that disguise the crawler as a regular browser.
headers = {
    "Referer": rootrurl,
    'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
    'Accept-Language': 'en-US,en;q=0.8',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive'
}
def del_file(path):
    # Recursively empty a directory without deleting the directory itself.
    ls = os.listdir(path)
    for i in ls:
        c_path = os.path.join(path, i)
        if os.path.isdir(c_path):
            del_file(c_path)
        else:
            os.remove(c_path)
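For what it's worth, the standard library can do this cleanup in two calls. A minimal alternative sketch (reset_dir is a hypothetical name; note it removes the root directory too and then recreates it empty, unlike del_file above, which keeps the root):

import shutil

def reset_dir(path):
    # Drop the whole tree, then recreate it as an empty directory.
    shutil.rmtree(path, ignore_errors=True)
    os.makedirs(path, exist_ok=True)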
def saveOneImg(dir, img_name, img_url):
    # Build fresh headers for every image, with the Referer pointing at the
    # image itself. Swapping in a new header each time avoids HTTP 403
    # responses from the site's anti-hotlinking check.
    new_headers = {
        "Referer": img_url,
        'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
        'Accept-Language': 'en-US,en;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive'
    }
    try:
        img = requests.get(img_url, headers=new_headers, timeout=(10, 30))  # fetch the actual image URL
        if img.status_code == 200:
            # Write the image bytes to a local file.
            with open('{}/{}.jpg'.format(dir, img_name), 'wb') as jpg:
                jpg.write(img.content)
            print(img_url + ' is downloaded...')
            return True
        else:
            return False
    except Exception as e:
        print('exception occurs: ' + img_url)
        print(e)
        return False
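The per-image Referer is the key trick here. If you also want connection reuse across downloads, a requests.Session applies the same idea; a minimal sketch (fetch_image is a hypothetical helper, not part of the script above):

def fetch_image(session, img_url):
    # Same anti-hotlink idea: send the image's own URL as the Referer.
    resp = session.get(img_url, headers={'Referer': img_url}, timeout=(10, 30))
    resp.raise_for_status()  # raise on 403/404 instead of returning False
    return resp.content

Call it with a session created once and reused, e.g. fetch_image(requests.Session(), some_url).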
def processPages(dir, url):
    print('crawling page: ' + url)
    html = BeautifulSoup(requests.get(url, headers=headers, timeout=(10, 30)).text, features="html.parser")
    bigpic = html.find('div', {'class': 'TypeList'}).find_all('a', {'class': 'TypeBigPics'})
    for a in bigpic:
        # Follow the link to the big-image page.
        r = requests.get(rootrurl + a.get('href'), headers=headers, timeout=(10, 30))
        r.encoding = 'utf-8'
        html = BeautifulSoup(r.text, features="html.parser")
        img_name = html.find('div', {'class': 'ArticleTitle'}).find('strong').text
        img_url = html.find('div', {'class': 'ImageBody'}).find('img').get('src')
        saveOneImg(dir, img_name, img_url)
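One thing to watch: find() returns None whenever a page lacks the expected markup, so the chained calls above raise AttributeError on any off-template page. A hedged defensive variant for the image-URL lookup (sketch only; safe_img_url is a made-up helper name):

def safe_img_url(html):
    # Return the big-image URL, or None if the page doesn't match the template.
    body = html.find('div', {'class': 'ImageBody'})
    img = body.find('img') if body else None
    return img.get('src') if img else None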
def tagSpiders(dir, url):
    # Create the target directory if it does not exist yet.
    if not os.path.exists(dir):
        os.makedirs(dir)
    idx = 0
    next_url = url
    while idx < max_pages:
        # Process the current list page.
        processPages(dir, next_url)
        next_url = '{}index_{}.htm'.format(url, idx + 2)  # assemble the next page's URL
        time.sleep(1)
        idx = idx + 1
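The pagination scheme assumed here: page 1 is the bare tag URL, and page N (N ≥ 2) lives at index_N.htm under it. A quick way to eyeball the URLs tagSpiders would visit (the tag URL below is the one from the test line at the bottom of the script):

url = rootrurl + '/bizhitupian/diannaobizhi/'
for idx in range(3):
    print(url if idx == 0 else '{}index_{}.htm'.format(url, idx + 1))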
def getAllTags():
    tags = {}
    r = requests.get(rootrurl, headers=headers, timeout=(10, 30))
    r.encoding = 'utf-8'
    html = BeautifulSoup(r.text, features="html.parser")
    lis = html.find('ul', {'class': 'Nav'}).find_all('li', {'class': 'NavLi'})
    for li in lis[1:]:  # skip the first entry, which is not needed
        maindir = li.find('a', {'class': 'MainNav'}).get('title')
        print(maindir)
        subas = li.find('div', {'class': 'ShowNav'}).find_all('a')
        for a in subas:
            href = a.get('href')
            subdir = a.text
            tags['{}/{}'.format(maindir, subdir)] = href
    return tags
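For reference, getAllTags() returns a dict mapping "category/subcategory" display names to relative hrefs, roughly like this (a single illustrative entry taken from the test line at the bottom, not live output):

taglist = {'壁纸图片/电脑壁纸': '/bizhitupian/diannaobizhi/'}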
if __name__ == '__main__':
    # Delete files left over from previous runs.
    if os.path.exists(save_dir):
        del_file(save_dir)

    # Collect all tags from the site's navigation bar.
    taglist = getAllTags()
    print(taglist)

    # Give each tag its own thread: a pool that runs at most 30 workers at once.
    with ThreadPoolExecutor(max_workers=30) as t:
        for tag, url in taglist.items():
            t.submit(tagSpiders, save_dir + tag, rootrurl + url)

    # Single-tag test run:
    # tagSpiders(save_dir + '壁纸图片/电脑壁纸', rootrurl + '/bizhitupian/diannaobizhi/')
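One caveat with the t.submit loop above: any exception raised inside tagSpiders is captured in the returned Future and silently dropped if nobody inspects it. A minimal variant that surfaces crawl failures (a sketch that would replace the loop above, reusing the same taglist):

with ThreadPoolExecutor(max_workers=30) as t:
    futures = {t.submit(tagSpiders, save_dir + tag, rootrurl + url): tag
               for tag, url in taglist.items()}
for future, tag in futures.items():
    try:
        future.result()  # re-raises whatever the worker thread raised
    except Exception as e:
        print('tag {} failed: {}'.format(tag, e))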
The result looks like this: