Python Web Scraping: Batch Downloading Images with urllib

The script below fetches listing pages from qiushibaike.com with urllib.request, extracts the image URLs with regular expressions, and saves every image into a local `qiubai` directory next to the script.

```python
import urllib.request
import re
import os

def handle_content(request):
    response = urllib.request.urlopen(request)
    html = response.read().decode('utf-8')
    # Each thumbnail sits inside a <div class="thumb"> block
    thumb_pattern = re.compile(r'<div class="thumb">.*?</div>', re.S)
    # Pull the src attribute out of each block
    src_pattern = re.compile(r'img src="(.*?)" alt', re.S)
    # Save images into a 'qiubai' directory next to this script
    save_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'qiubai')
    os.makedirs(save_dir, exist_ok=True)
    for block in thumb_pattern.findall(html):
        for src in src_pattern.findall(block):
            # The page uses protocol-relative URLs ("//pic..."), so add a scheme
            img_url = 'http:' + src
            save_path = os.path.join(save_dir, os.path.basename(img_url))
            with urllib.request.urlopen(img_url) as img:
                with open(save_path, 'wb') as f:
                    f.write(img.read())
            print(os.path.basename(img_url) + ' downloaded')

# Build a request object carrying a browser User-Agent, and return it
def handle_url(url, page):
    url = url + str(page)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
    }
    request = urllib.request.Request(url, headers=headers)
    return request

def main():
    url = 'https://www.qiushibaike.com/pic/page/'
    start_page = int(input('Enter the first page to crawl: '))
    end_page = int(input('Enter the last page to crawl: '))
    for page in range(start_page, end_page + 1):
        # Join the page number onto the base URL and build the request
        request = handle_url(url, page)
        # Fetch the page, extract the image URLs, download each image
        handle_content(request)

if __name__ == '__main__':
    main()
```
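
One fragility worth noting: the script aborts the moment any single request fails (a deleted image, a timeout). A minimal hardening sketch, assuming only the standard library; the helper name `download_one` is mine, not part of the original script:

```python
import urllib.request
import urllib.error

def download_one(img_url, save_path):
    # Hypothetical helper: returns True on success, False on a failed
    # request, so the caller's loop can simply move on to the next image.
    try:
        with urllib.request.urlopen(img_url, timeout=10) as resp:
            data = resp.read()
    except (urllib.error.HTTPError, urllib.error.URLError) as e:
        print(img_url + ' failed: ' + str(e))
        return False
    with open(save_path, 'wb') as f:
        f.write(data)
    return True
```

`handle_content` could then call `download_one(img_url, save_path)` in place of the bare urlopen/write pair and simply skip failures.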

Downloading images across multiple pages is usually done with a request library such as requests plus a parser such as BeautifulSoup: fetch each page, locate the image links with CSS selectors or regular expressions, then stream each image to disk. A brief outline:

1. Import the necessary libraries:

```python
import os
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
```

2. Define a function that downloads one image, streaming the response body to a file in chunks:

```python
def download_image(url, save_path):
    response = requests.get(url, stream=True)
    if response.status_code == 200:
        with open(save_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
```

3. Fetch a page and parse out its image links:

```python
def parse_page(url):
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Resolve relative src attributes against the page URL
    return [urljoin(url, img['src']) for img in soup.find_all('img', src=True)]
```

4. Loop over the pages and download every image found on each:

```python
def download_all_pages(base_url, save_dir, start_page, end_page):
    os.makedirs(save_dir, exist_ok=True)
    for page in range(start_page, end_page + 1):
        # Adjust the query string to the site's actual pagination scheme
        page_url = f"{base_url}?page={page}"
        for img_url in parse_page(page_url):
            filename = f"image_{page}_{os.path.basename(img_url)}"
            download_image(img_url, os.path.join(save_dir, filename))
```
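
To tie the pieces together, a minimal entry point; `example.com` and the `?page=` query string are placeholders, not a real gallery:

```python
if __name__ == '__main__':
    # Hypothetical target; substitute the real site and page range
    download_all_pages('http://example.com/gallery', 'downloaded_images', 1, 3)
```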