Scraping videos from the 麻辣烫 show homepage with Python and saving them locally

This script shows how to use Python's requests and BeautifulSoup libraries to scrape video links from a web page and download them with the you-get tool. It first fetches the video listing page, resolves each video's detail-page URL, extracts the video title from that page, and finally invokes you-get to perform the download.
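For reference, the download step shells out to the you-get command-line tool (installable with pip install you-get; it must be on the PATH for os.system to find it). The command the script builds has roughly this shape, where the title and URL below are placeholders:

you-get -o F:/麻辣烫耙耳朵 -O <video title> "<video page URL>"

The -o flag sets the output directory and -O the output filename.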

code:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# date: 2021/3/10 22:23
# author: Sabo
import requests
from bs4 import BeautifulSoup
import os


videoPath = 'F:/麻辣烫耙耳朵'  # local folder the downloads are saved to
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36',
    'Connection': 'close'
}

def getUrls(origin):
    # Collect the detail-page URL of every video listed on the index page.
    dstLinks = []
    response = requests.get(url=origin, headers=header)
    if response.status_code == 200:
        response.encoding = 'utf-8'
        mainpage = BeautifulSoup(response.text, 'html.parser')
        # Each video entry on the listing page sits in a div with class "item-cell"
        urlLinks = mainpage.find_all('div', attrs={"class": "item-cell"})
        for cell in urlLinks:
            aTag = cell.find('a')
            if aTag is not None and aTag.get('href'):
                # The listing links are relative; splice them into the origin URL
                dstLinks.append(origin.replace('index.shtml', aTag.get('href')))
    else:
        print('Error!')
    return dstLinks
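# The same extraction can be written more compactly with a CSS selector, a
# sketch assuming the listing keeps the div.item-cell structure:
#   for a in mainpage.select('div.item-cell a[href]'):
#       dstLinks.append(origin.replace('index.shtml', a['href']))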

def getTitle(title):
    # Extract the text between the tag's opening '>' and the next '<',
    # e.g. '<div ...>节目标题</div>' -> '节目标题'
    title = str(title).strip()
    indexBegin = title.find('>')
    lastBegin = title.find('<', indexBegin)
    return title[indexBegin + 1: lastBegin].strip()

def getUrl(origin):
    # Resolve the actual player-page URL behind a listing entry's thumbnail.
    # (Defined for completeness; __main__ below feeds the listing links to
    # you-get directly.)
    newUrl = ''
    response = requests.get(url=origin, headers=header)
    if response.status_code == 200:
        response.encoding = 'utf-8'
        mainPage = BeautifulSoup(response.text, 'html.parser')
        first = mainPage.find("div", attrs={"class": "img"}).find('a')
        web = first.get("href")
        dstStr = web[2:]  # drop the leading './' of the relative link
        newUrl = origin.replace('index.shtml', dstStr)  # the target video page URL
    else:
        print('Error!')
    return newUrl

def parseVideo(videoAddress):
    # 'you-get -i' only prints the streams/formats available at the URL;
    # it does not download anything.
    command = 'you-get -i "{}"'.format(videoAddress)
    os.system(command)

def download(videoName, videoAddress):
    # -o sets the output directory, -O the output filename; quote both the
    # title and the URL in case they contain spaces.
    command = 'you-get -o {0} -O "{1}" "{2}"'.format(videoPath, videoName, videoAddress)
    print(command)
    os.system(command)

def getVideoName(website):
    videoName = ''
    response = requests.get(url=website, headers=header, timeout=10)
    if response.status_code == 200:
        response.encoding = 'utf-8'
        childrenPage = BeautifulSoup(response.text, 'html.parser')
        # The title lives in a div with class "videoTitle container turn-off"
        title = childrenPage.find("div", attrs={'class': "videoTitle container turn-off"})
        if title is not None:
            videoName = getTitle(title)
    else:
        print('Error!')
    return videoName
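# The status check above could also use requests' built-in helper, which
# raises requests.HTTPError on any non-2xx response:
#   response.raise_for_status()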

if __name__ == '__main__':
    origin = 'http://show.sctv.com/mlt/index.shtml'
    dstUrls = getUrls(origin)

    for link in dstUrls:
        videoAddress = link
        videoName = getVideoName(link)

        if videoName != '':
            parseVideo(videoAddress)
            download(videoName, videoAddress)
        else:
            print("No video name, can't download the video!")
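A note on robustness: building shell command strings for os.system breaks as soon as a title contains quotes or other shell metacharacters. A minimal sketch of the same download step using subprocess instead (assuming, as above, that you-get is installed and on the PATH):

import subprocess

def download(videoName, videoAddress):
    # Arguments are passed as a list, so no shell quoting is needed.
    subprocess.run(['you-get', '-o', videoPath, '-O', videoName, videoAddress])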