Batch-crawling PPT templates

The script below pages through the template listing on 1ppt.com (20 templates per listing page), follows each template's detail page to its download page, and saves the archives into a local PPT/ directory until the requested number of downloads is reached.

import math
import os

import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36 Edg/89.0.774.77'
}
num = 0    # templates downloaded so far
in_se = 0  # number of templates the user asked for

# Walk the listing pages and collect each template's detail-page URL.
def get_url():
    global in_se
    in_se = int(input("How many templates do you want to crawl: ").strip())
    pages = math.ceil(in_se / 20)  # the site lists 20 templates per page
    url_list = [f"http://www.1ppt.com/moban/ppt_moban_{i + 1}.html" for i in range(pages)]
    for url in url_list:
        page_text = requests.get(url=url, headers=headers)
        page_text.encoding = 'gb2312'  # the site is served as GB2312
        soup = BeautifulSoup(page_text.text, 'lxml')
        li_url = soup.select(".tplist>li>a")
        li_name = soup.select(".tplist>li>h2>a")
        for j in range(len(li_url)):
            detail_url = "http://www.1ppt.com" + li_url[j]['href']
            name = li_name[j].text
            get_down(detail_url, name)
# On the detail page, find the link to the download page.
def get_down(url, name):
    page_text = requests.get(url=url, headers=headers)
    page_text.encoding = 'gb2312'
    soup = BeautifulSoup(page_text.text, 'lxml')
    li_down = soup.select(".downurllist>li>a")
    down_url = "http://www.1ppt.com" + li_down[0]['href']  # href is site-absolute, so no extra slash
    down(down_url, name)
# Fetch the download page, follow the real file link, and save the archive.
def down(down_page, name):
    global num
    if num == in_se:  # check before issuing another request once the quota is met
        print(f"Done: downloaded {num} PPT templates!")
        exit()
    page_text = requests.get(url=down_page, headers=headers).text
    soup = BeautifulSoup(page_text, 'lxml')
    down_url = soup.select(".downloadlist>li>a")[0]['href']
    data = requests.get(url=down_url, headers=headers).content
    if not os.path.exists('./PPT'):
        os.mkdir('./PPT')
    path = "./PPT/" + name + ".zip"
    with open(path, 'wb') as f:
        f.write(data)
    num += 1
    print(f"{name} downloaded successfully!")

if __name__=="__main__":
    get_url()
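One caveat: the template name scraped from the page is used as the file name verbatim, so characters such as `/`, `?`, or `*` would make `open()` fail, notably on Windows. A minimal sketch of a sanitizing helper you could call when building `path` (the `sanitize` name and the character set are my own, not part of the original script):

import re

def sanitize(name: str) -> str:
    # Replace characters that are illegal in Windows file names,
    # then strip leading/trailing whitespace and dots.
    return re.sub(r'[\\/:*?"<>|]', '_', name).strip(' .')

# Usage inside down():
#     path = "./PPT/" + sanitize(name) + ".zip"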

Crawling by category (keyword search)

This variant searches 1ppt.com through Baidu's in-site search (zhannei.baidu.com), takes the first hit for the keyword as the listing page, and then reuses the same detail-page/download-page pipeline as above.

import os

import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36 Edg/89.0.774.77'
}
num = 0  # templates downloaded so far

# Search the site for a keyword via Baidu's in-site search.
def Select():
    data = input("Search keyword: ").strip()
    params = {
        'q': data,
        'click': 1,
        'cc': '1ppt.com',
        's': '',
        'nsid': ''
    }
    url = "http://zhannei.baidu.com/cse/site?"
    page_text = requests.get(url=url, params=params, headers=headers)
    page_text.encoding = 'utf-8'
    soup = BeautifulSoup(page_text.text, 'lxml')
    try:
        url_li = soup.select('.c-title>a')[0]['href']  # first search hit
        get_url(url_li)
    except IndexError:  # no results for this keyword
        print("No results, please try another keyword")
        Select()
# Parse the listing page and collect each template's detail-page URL.
def get_url(url):
    page_text = requests.get(url=url, headers=headers)
    page_text.encoding = 'gb2312'
    soup = BeautifulSoup(page_text.text, 'lxml')
    li_url = soup.select(".tplist>li>a")
    li_name = soup.select(".tplist>li>h2>a")
    for i in range(len(li_url)):
        detail_url = "http://www.1ppt.com" + li_url[i]['href']
        name = li_name[i].text
        get_down(detail_url, name)
# On the detail page, find the link to the download page.
def get_down(url, name):
    page_text = requests.get(url=url, headers=headers)
    page_text.encoding = 'gb2312'
    soup = BeautifulSoup(page_text.text, 'lxml')
    li_down = soup.select(".downurllist>li>a")
    down_url = "http://www.1ppt.com" + li_down[0]['href']
    down(down_url, name)
# Fetch the download page, follow the real file link, and save the archive.
def down(down_page, name):
    global num
    page_text = requests.get(url=down_page, headers=headers).text
    soup = BeautifulSoup(page_text, 'lxml')
    down_url = soup.select(".downloadlist>li>a")[0]['href']
    data = requests.get(url=down_url, headers=headers).content
    if not os.path.exists('./PPT'):
        os.mkdir('./PPT')
    path = "./PPT/" + name + ".zip"
    with open(path, 'wb') as f:
        f.write(data)
    num += 1
    print(f"{name}\033[0;35m... downloaded, {num} so far\033[0m")

if __name__=="__main__":
    Select()
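Both scripts issue every request with no timeout or retry, so a single dropped connection kills the whole run. Below is a hedged sketch of a shared session with retries that either script could use in place of the bare requests.get calls; the retry parameters are illustrative, not tuned for this site.

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_session(user_agent: str) -> requests.Session:
    # Retry transient failures (connection errors and 5xx responses)
    # with exponential backoff before giving up.
    retry = Retry(total=3, backoff_factor=0.5,
                  status_forcelist=[500, 502, 503, 504])
    session = requests.Session()
    session.mount('http://', HTTPAdapter(max_retries=retry))
    session.mount('https://', HTTPAdapter(max_retries=retry))
    session.headers['User-Agent'] = user_agent
    return session

# Usage: build one session up front and reuse it for every request,
# passing a timeout so a stalled connection cannot hang the run:
#     session = make_session(headers['User-Agent'])
#     page_text = session.get(url, timeout=10)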
