Crawling and Downloading Data with Python Coroutine Multitasking

This article shows how to fetch web pages with Python's requests library, parse the content with XPath (lxml), download the images asynchronously, and store them in local files: a complete flow from crawling to persistence.

1. First, implement the basic crawling functionality

import requests
import os
from lxml import etree
# pretend to be a regular browser so the site does not reject the request
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36'
}
for page in range(2, 4):
    url = 'https://pic.netbian.com/4kqiche/index_%d.html'%page
    resp = requests.get(url=url, headers=headers)
    page_text = resp.text
    print(page_text)
Print it and check whether the page content has encoding errors.

There is a small problem with garbled characters, so set the encoding:

for page in range(2, 4):
    url = 'https://pic.netbian.com/4kqiche/index_%d.html'%page
    resp = requests.get(url=url, headers=headers)
    resp.encoding = 'gbk' # set the response encoding
    page_text = resp.text
    print(page_text)
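
If you would rather not hard-code 'gbk', requests can also guess the encoding from the body of the response. A minimal sketch (the detection is heuristic, so hard-coding the known value above is just as valid):

resp = requests.get(url=url, headers=headers)
resp.encoding = resp.apparent_encoding  # heuristic charset detection built into requests
page_text = resp.text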

2. XPath data parsing

Print the result to check whether the list elements were captured:
    tree = etree.HTML(page_text)
    li_list = tree.xpath('//*[@id="main"]/div[3]/ul/li')
    print(li_list)

Parse the file name and the download URL:

    for li in li_list:
        title = li.xpath('./a/b/text()')[0]
        print(title)
        pic_url = 'https://pic.netbian.com'+li.xpath('./a/img/@src')[0]
        print(pic_url)
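
To see how the relative './' XPath expressions above behave, here is a tiny self-contained demo; the HTML snippet is invented for illustration and is not the real page markup:

from lxml import etree

html = '<ul><li><a href="/p/1"><img src="/img/1.jpg"><b>Car A</b></a></li></ul>'
tree = etree.HTML(html)
for li in tree.xpath('//ul/li'):
    print(li.xpath('./a/b/text()')[0])   # -> Car A
    print(li.xpath('./a/img/@src')[0])   # -> /img/1.jpg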

3. Wrap it in the first function

Store title and pic_url together in a dictionary, and give the function a return value so the result is easy to consume:
def get_car():
    car_msg = []  # holds the info for every car image
    for page in range(2, 4):
        url = 'https://xxx.xxxxxxx.com/4kqiche/index_%d.html'%page
        resp = requests.get(url=url, headers=headers)
        resp.encoding = 'gbk' # set the response encoding
        page_text = resp.text
        #print(page_text)
        # XPath data parsing
        tree = etree.HTML(page_text)
        li_list = tree.xpath('//*[@id="main"]/div[3]/ul/li')
        #print(li_list)
        for li in li_list:
            title = li.xpath('./a/b/text()')[0]
            #print(title)
            pic_url = 'https://xxx.xxxxxxx.com'+li.xpath('./a/img/@src')[0]
            #print(pic_url)
            # store the info in a dict
            dic = {
                'title': title,
                'pic_url': pic_url
            }
            print(dic)
            car_msg.append(dic)
    return car_msg  # return at function level, after all pages are collected
car_msg_list = get_car()

Run it and check the output.

4. Handle blocking operations (long waits such as downloads) with a special (async) function

Write the binary data into the dict, and return the dict for later use:
import aiohttp

async def get_request(dic):
    async with aiohttp.ClientSession() as sess:
        # 'async with sess.get(...)' is the idiomatic form; the extra await is not needed
        async with sess.get(url=dic['pic_url'], headers=headers) as resp:
            car_data = await resp.read()  # read the image bytes
            dic['car_data'] = car_data
            return dic
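
If the image list grows large, it can be worth capping how many downloads run concurrently so the site is not flooded. A sketch using asyncio.Semaphore; the wrapper name and the limit of 5 are my own choices, not part of the original code:

import asyncio

sem = asyncio.Semaphore(5)  # at most 5 downloads in flight at once

async def get_request_limited(dic):
    async with sem:  # wait for a free slot before downloading
        return await get_request(dic)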

5. The storage function

First create the folder:
dirName = '汽车'  # output folder ('汽车' = 'cars')
if not os.path.exists(dirName):
    os.mkdir(dirName)
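
On Python 3 an equivalent one-liner is os.makedirs with exist_ok, which also creates any missing parent directories:

os.makedirs(dirName, exist_ok=True)  # no-op if the folder already exists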

Persist to disk:

def save_car(t):
    dic = t.result()  # the dict returned by get_request
    car_data = dic['car_data']
    title = dic['title']
    dir_path = dirName+'/'+title+'.jpg'  # add the file extension
    with open(dir_path,'wb') as fp:
        fp.write(car_data)
    print(title, 'downloaded successfully')
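
One caveat: a scraped title can contain characters that are illegal in file names (such as '/' or '?'), which would make open() fail. An optional helper to sanitize titles before writing; this is an addition, not part of the original code:

import re

def safe_name(title):
    # replace characters that are invalid in common file systems
    return re.sub(r'[\\/:*?"<>|]', '_', title)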

6. Running the coroutine tasks

# build a task for each dict and run the coroutines on the event loop
tasks=[]
for dic in car_msg_list:
    c = get_request(dic)
    task = asyncio.ensure_future(c)
    task.add_done_callback(save_car)
    tasks.append(task)
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
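
Note that calling asyncio.get_event_loop() outside a running loop is deprecated on recent Python versions (3.10+). A sketch of an equivalent using asyncio.run and asyncio.gather, with the saving inlined instead of done in a callback:

async def main():
    results = await asyncio.gather(*(get_request(dic) for dic in car_msg_list))
    for dic in results:
        with open(dirName + '/' + dic['title'] + '.jpg', 'wb') as fp:
            fp.write(dic['car_data'])

asyncio.run(main())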

7. Successful execution

The whole run takes only a few seconds. The complete script:

import asyncio
import aiohttp
import requests
import os
from lxml import etree
import time
start = time.time()
dirName = '汽车'  # output folder ('汽车' = 'cars')
if not os.path.exists(dirName):
    os.mkdir(dirName)
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36'
}
def get_car():
    car_msg = []  # holds the info for every car image
    for page in range(2, 3):
        url = 'https://xxx.xxxxxx.com/4kqiche/index_%d.html'%page
        resp = requests.get(url=url, headers=headers)
        resp.encoding = 'gbk' # set the response encoding
        page_text = resp.text
        #print(page_text)
        # XPath data parsing
        tree = etree.HTML(page_text)
        li_list = tree.xpath('//*[@id="main"]/div[3]/ul/li')
        #print(li_list)
        for li in li_list:
            title = li.xpath('./a/b/text()')[0]
            #print(title)
            pic_url = 'https://xxx.xxxxxxx.com'+li.xpath('./a/img/@src')[0]
            #print(pic_url)
            # store the info in a dict
            dic = {
                'title': title,
                'pic_url': pic_url
            }
            #print(dic)
            car_msg.append(dic)
    return car_msg
car_msg_list = get_car()
print(car_msg_list)

# special (async) function that performs the blocking download
async def get_request(dic):
    async with aiohttp.ClientSession() as sess:
        async with sess.get(url=dic['pic_url'], headers=headers) as resp:
            car_data = await resp.read()
            dic['car_data'] = car_data
            return dic
# storage function, used as the task's done-callback
def save_car(t):
    dic = t.result()  # the dict returned by get_request
    car_data = dic['car_data']
    title = dic['title']
    dir_path = dirName+'/'+title+'.jpg'
    with open(dir_path,'wb') as fp:
        fp.write(car_data)
    print(title, 'downloaded successfully')
# build a task for each dict and run the coroutines on the event loop
tasks=[]
for dic in car_msg_list:
    c = get_request(dic)
    task = asyncio.ensure_future(c)
    task.add_done_callback(save_car)
    tasks.append(task)
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
print(time.time()-start)
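
For a rough sense of where the speedup comes from: a synchronous loop downloads the images one after another, so the total time is the sum of all request latencies, while the coroutine version overlaps those waits. A minimal synchronous equivalent for comparison (a sketch reusing the same car_msg_list):

for dic in car_msg_list:
    data = requests.get(dic['pic_url'], headers=headers).content
    with open(dirName + '/' + dic['title'] + '.jpg', 'wb') as fp:
        fp.write(data)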
