# 爬取zol壁纸网站上所有的壁纸

# _*_ coding:utf-8 _*_
"""爬取zol壁纸网站上所有的壁纸"""

import time
from bs4 import BeautifulSoup
import requests
class ZolSpider(object):
    """Base spider for the ZOL wallpaper site (http://desk.zol.com.cn).

    Holds the URL of the page this instance fetches plus the site base
    URL; subclasses implement the actual category/list/photo scraping.
    """

    def __init__(self, url=''):
        # Absolute URL of the page this spider instance will fetch.
        self.url = url
        self.base_url = 'http://desk.zol.com.cn'

    def get_html(self):
        """Fetch self.url and return it parsed as a BeautifulSoup tree.

        An explicit timeout is set because requests has no default
        timeout — without it a stalled server hangs the crawl forever.
        """
        response = requests.get(self.url, timeout=30)
        return BeautifulSoup(response.content, 'html.parser')

    def spider(self):
        """Entry point: crawl every wallpaper category on the site."""
        CategorySpider(self.base_url).category_spider()
# 类别
class CategorySpider(ZolSpider):
    """Scrapes the category index page and walks each wallpaper category."""

    def get_category_url(self):
        """Return the absolute URLs of every wallpaper category link."""
        soup = self.get_html()
        filter_block = soup.find('dl', class_='filter-item first clearfix')
        return [self.base_url + anchor['href']
                for anchor in filter_block.find_all('a')]

    def category_spider(self):
        """Crawl the photo lists of each category, pausing between them."""
        for category_url in self.get_category_url():
            print('Current_Category:' + category_url.split('.cn')[-1])
            PhotoListSpider(category_url).list_spider()
            # Be polite to the server between categories.
            time.sleep(10)
# 图片列表
class PhotoListSpider(ZolSpider):
    """Scrapes one paginated photo-list page within a category."""

    def get_photo_list_url(self):
        """Return the absolute URLs of all photo sets on this list page."""
        soup_html = self.get_html()
        photo_list_url = []
        photo_list = soup_html.find_all('li', class_='photo-list-padding')
        for child in photo_list:
            photo_list_url.append(self.base_url + child.a['href'])
        return photo_list_url

    def get_next_page_url(self):
        """Return the absolute URL of the next list page, or None on the last.

        Guarding is required: on the last page find() returns None and the
        original unconditional ``next_page_tag['href']`` raised TypeError,
        so list_spider's ``if next_page:`` check could never terminate the
        recursion cleanly.
        """
        soup_html = self.get_html()
        next_page_tag = soup_html.find('a', id='pageNext')
        if next_page_tag is None:
            return None
        href = next_page_tag.get('href', '')
        # ZOL marks a dead "next" link with a javascript: placeholder.
        if not href or 'javascript' in href:
            return None
        return self.base_url + href

    def list_spider(self):
        """Crawl this list page, then recurse into the next page if any."""
        print(u'Current_List:' + self.url.split('.cn')[-1])
        next_page = self.get_next_page_url()
        self.photo_spider()
        if next_page:
            PhotoListSpider(next_page).list_spider()
            time.sleep(5)

    def photo_spider(self):
        """Crawl every photo set listed on this page, pausing between sets."""
        for url in self.get_photo_list_url():
            print('Current_Set:' + url)
            PhotoSpider(url).photo_spider()
            time.sleep(2)
# 图片展示
class PhotoSpider(ZolSpider):
    """Scrapes a single photo page and walks forward through its set."""

    def get_photo_url(self):
        """Return the source URL of the full-size image on this page."""
        soup_html = self.get_html()
        img_tag = soup_html.find('img', id='bigImg')
        return img_tag['src']

    def get_next_page_url(self):
        """Return the absolute URL of the next photo, or None if absent.

        Guarding is required: when the ``photo-next`` div or its anchor
        is missing, the original unconditional ``next_page_tag.a['href']``
        raised AttributeError instead of ending the traversal.
        """
        soup_html = self.get_html()
        next_page_tag = soup_html.find('div', id='photo-next')
        if next_page_tag is None or next_page_tag.a is None:
            return None
        return self.base_url + next_page_tag.a['href']

    def photo_spider(self):
        """Print this photo's URL, then recurse to the next photo if any."""
        print('Photo_Url: ' + self.get_photo_url())
        next_page = self.get_next_page_url()
        # A javascript: placeholder href marks the end of the set.
        if next_page and 'javascript' not in next_page:
            PhotoSpider(next_page).photo_spider()
if __name__ == '__main__':
    # Entry point: start crawling from the category index of the site.
    root_spider = ZolSpider()
    root_spider.spider()
  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值