# 废话少说,上代码。
# 多线程爬虫 Python 学习文件及 demo 请移步 gitee:
# https://gitee.com/xuanyuan9528/yande_pider.git
# 运行前需先安装依赖包(requests),详情可自行搜索。
import re
import requests
import os
import time
##创建目录文件夹
def mkdir(path):
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
os.makedirs(path)
print('目录文件夹创建成功!')
return True
else:
print('目录已存在,无需再创建!')
return False
##下载保存图片
def down_load(url_list, name):
    """Download every image in `url_list` into a folder named after `name`.

    Dots in `name` are replaced with spaces so the album title stays a valid
    Windows folder name. Files are saved as <name>1.jpg, <name>2.jpg, ...

    Args:
        url_list: iterable of direct image URLs.
        name: album title used for both the folder and the file prefix.
    """
    if '.' in name:
        name = name.replace('.', ' ')
    mkpath = "C:\\Users\\MSI\\Desktop\\测试图片\\tu\\" + name
    mkdir(mkpath)
    for i, url in enumerate(url_list, start=1):
        # timeout so one dead link cannot hang the whole crawl forever
        response = requests.get(url, timeout=30)
        with open(os.path.join(mkpath, name + str(i) + '.jpg'), "wb") as f:
            f.write(response.content)
        print("正在下载:", name, '\n', url)
    # BUGFIX: the original message claimed D:\妹子图, but files are actually
    # written under `mkpath` — report the real location.
    print("图片已储存到", mkpath, "请理性赏图,身体为重!")
##从子页面获取图片链接
def jpg_url(url):
    """Walk all pages of album `url` and collect its image links.

    Keeps fetching successive page numbers while the page still shows a
    "下一页" (next page) jump button.

    NOTE(review): the original regex literals were destroyed by an HTML
    copy-paste (the tag text inside the patterns was stripped, which also
    fused two statements into one line). The patterns below are
    reconstructions — confirm them against the site's actual HTML.

    Returns:
        (list_jpg, name): the accumulated image URLs and the album title
        (empty string if no page could be parsed).
    """
    page = 1
    jump_buttons = ['下一页']
    list_jpg = []
    name = ''  # avoid NameError at return if every request fails
    while '下一页' in jump_buttons:
        jump_buttons = []
        try:
            response = requests.get(
                "http://xiumeitu.herokuapp.com/record/" + url + "/" + str(page))
            # Album title — TODO confirm the enclosing tag on the real page.
            name = re.findall(r'<h1.*?>(.*?)</h1>', response.text, re.S)[0]
            # Image links — TODO confirm tag/attribute on the real page.
            url_list = re.findall(r'<img.*?src="(.*?)"', response.text, re.S)
            # Original behavior: drop the first match (a non-content image);
            # guard against an empty result so pop() cannot raise.
            if url_list:
                url_list.pop(0)
            jump_buttons = re.findall(r'class="jump-button">(.*?)<',
                                      response.text, re.S)
            list_jpg += url_list
        except Exception as e:
            print(e)
        page += 1
    ##    print('测试01:', list_jpg, name)
    return list_jpg, name
##获取子页面链接
def meizi_url(url, headers=None):
    """Fetch index page `url` and yield each unique /record/ sub-page id.

    The original read a global `headers` defined only under __main__, so the
    function crashed when used standalone; `headers` is now a parameter that
    defaults to the same User-Agent the script always used (backward
    compatible for the existing one-argument call sites).

    Args:
        url: index page URL.
        headers: optional HTTP headers dict; a default UA is used if None.

    Yields:
        Sub-page id strings extracted from href="/record/..." links.
    """
    if headers is None:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'}
    try:
        response = requests.get(url, headers=headers)
        # set() deduplicates repeated links to the same album
        for sub_id in set(re.findall('href="/record/(.*?)"', response.text, re.S)):
            yield sub_id
    except Exception as e:
        print(e)
##批量下载
if __name__ == '__main__':
    print('hello,妹子图会下载到 D:\\妹子图 哦!')
    # Loop-invariant: build the headers dict once instead of per iteration.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'}
    # Index pages 5..491 — TODO confirm this range against the site.
    for n in range(5, 492):
        try:
            index_url = "http://xiumeitu.herokuapp.com/" + str(n)
            for m_url in meizi_url(index_url):
                ##    print('url测试', m_url)
                url_list, name = jpg_url(m_url)
                ##    print('测试:', url_list)
                down_load(url_list, name)
                time.sleep(3)  # be polite: pause between albums
        except Exception as e:
            print(e)
    print('所有图片下载完成!')
    input('图片已储存到D:\\妹子图,请理性观赏,身体为重!')
# 效果图:(见博客原文截图)