Crawling images and saving them by category

The script below walks every list page of www.mzitu.com, creates one directory per photo set under ./pic/, and downloads all of that set's images into it.

import requests
from bs4 import BeautifulSoup
import os
import sys
#http://blog.csdn.net/baidu_35085676/article/details/68958267
# Needed on the Android (Python 2) side to reset the default encoding to
# UTF-8; Python 3 already defaults to UTF-8, so the block is skipped there.
if sys.version_info[0] == 2:
    reload(sys)
    sys.setdefaultencoding('utf-8')


if os.name == 'nt':
    print(u'You are running on Windows')
else:
    print(u'You are running on Linux')

# HTTP request headers
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 UBrowser/6.1.2107.204 Safari/537.36',
          'Referer': 'http://www.mzitu.com/'}
all_url = 'http://www.mzitu.com'
start_html = requests.get(all_url, headers=header)

character = ['"', '/', '?', '<', '>', '*', '|', '\\', ':']  # characters that are not allowed in file names
# save location
# path = 'D:/mzitu/'
path = os.getcwd() + '/pic/'

# find the largest page number: the last pagination link is "next",
# so the second-to-last one carries the highest number
soup = BeautifulSoup(start_html.text, "html.parser")
page = soup.find_all('a', class_='page-numbers')
max_page = page[-2].text


same_url = 'http://www.mzitu.com/page/'
for n in range(1, int(max_page) + 1):
    ul = same_url + str(n)
    start_html = requests.get(ul, headers=header)
    soup = BeautifulSoup(start_html.text, "html.parser")
    all_a = soup.find('div', class_='postlist').find_all('a', target='_blank')
    for a in all_a:
        title = a.get_text()  # extract the link text
        old_title = title  # keep the raw title: the <img> alt attribute on detail pages still uses it
        for i in character:
            if i in title:
                title = title.replace(i, '')
        # print(title)
        # print(old_title)
        if title != '':
            print("About to scrape: " + title)

            # Windows cannot create a directory whose name contains '?'
            if os.path.exists(path + title.strip()):
                # print('directory already exists')
                flag = 1
            else:
                os.makedirs(path + title.strip())
                flag = 0
            os.chdir(path + title.strip())
            href = a['href']
            html = requests.get(href, headers=header)
            mess = BeautifulSoup(html.text, "html.parser")
            pic_max = mess.find_all('span')
            pic_max = pic_max[10].text  # page count sits in the 11th <span> of the detail page (site-specific)
            # if the directory already holds at least that many files, the set is complete
            if flag == 1 and len(os.listdir(path + title.strip())) >= int(pic_max):
                print('Already saved in full, skipping')
                continue
            for num in range(1, int(pic_max) + 1):
                pic = href + '/' + str(num)
                html = requests.get(pic, headers=header)
                mess = BeautifulSoup(html.text, "html.parser")
                pic_url = mess.find('img', alt=old_title)
                # print(pic_url)
                html = requests.get(pic_url['src'], headers=header)
                file_name = pic_url['src'].split('/')[-1]
                with open(file_name, 'wb') as f:  # close the file promptly after writing
                    f.write(html.content)
            print('Done')
    print('Page', n, 'done')
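
One caveat with the listing above: every requests.get call runs without a timeout or status check, and the final download writes back whatever bytes arrive, so a single flaky page can abort a long run. Below is a minimal hardened sketch of just the image-download step; the helper name save_image and the retries/timeout defaults are illustrative assumptions of mine, not part of the original script.

import os
import time

import requests


def save_image(url, dest_dir, headers, retries=3, timeout=10):
    """Download url into dest_dir, retrying transient failures.

    A sketch only: the function name and the retries/timeout defaults
    are illustrative, not from the original post.
    """
    file_name = url.split('/')[-1]
    dest = os.path.join(dest_dir, file_name)
    if os.path.exists(dest):  # resume-friendly: skip files already on disk
        return dest
    for attempt in range(retries):
        try:
            resp = requests.get(url, headers=headers, timeout=timeout, stream=True)
            resp.raise_for_status()  # surface 403/404/5xx instead of saving an error page
            with open(dest, 'wb') as f:
                for chunk in resp.iter_content(chunk_size=8192):
                    f.write(chunk)
            return dest
        except requests.RequestException:
            time.sleep(2 ** attempt)  # simple exponential backoff before retrying
    return None

With such a helper, the inner loop would first check that pic_url is not None and then call save_image(pic_url['src'], path + title.strip(), header) instead of the bare requests.get/open pair; passing the destination directory explicitly also removes the need for os.chdir.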
