Python爬取麻辣烫所有视频

# !/usr/bin/env python
# -*-coding:utf-8-*-
# date :2021/4/16 12:57
# author:Sabo
import os
import subprocess

import requests
from bs4 import BeautifulSoup

savePath = 'F:/麻辣烫耙耳朵'
root = 'http://show.sctv.com/mlt/index'
urlTail = '.shtml'
headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36',
        'Connection': 'close'
    }

def getRootUrls(printFlag):
    """Build the list of paginated index-page URLs for the show.

    Page 1 is ``<root><urlTail>``; pages 2-10 are ``<root>_<i><urlTail>``
    for i in 1..9, matching the site's pagination scheme.

    :param printFlag: when True, print the resulting list.
    :return: list of 10 index-page URL strings.
    """
    rootUrls = []
    for i in range(10):
        # Bug fix: the original tested ``i is not 0`` — identity comparison
        # on ints is an implementation detail (SyntaxWarning on 3.8+).
        if i != 0:
            rootUrls.append('{0}_{1}{2}'.format(root, i, urlTail))
        else:
            rootUrls.append(root + urlTail)
    if printFlag:
        print(rootUrls)
    return rootUrls

def getLinksPerRootUrl(rootUrl, printFlag):
    """Fetch one index page and extract the per-episode hrefs.

    Looks for every ``<div class="txt">`` and takes the href of the first
    ``<a>`` that follows it.

    :param rootUrl: URL of one paginated index page.
    :param printFlag: when True, print the extracted links.
    :return: list of (relative) href strings, or '' when the request fails.
    """
    # Bug fix: requests.get() takes ``headers=``, not ``header=`` — the
    # original raised TypeError on every call.
    response = requests.get(url=rootUrl, headers=headers)
    if response.status_code != 200:
        print('Get response error!')
        return ''
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')
    childLinks = []
    for div in soup.find_all('div', attrs={"class": "txt"}):
        childLinks.append(div.find_next('a').get('href'))
    if printFlag:
        print(childLinks)
    return childLinks

def catUrl(catFlag, signalLinks):
    """Join relative episode links onto the show's base URL.

    Each link is expected to start with '.' (e.g. './xxx.shtml'); the
    leading dot is stripped before concatenation.

    :param catFlag: 0 for the plain 'mlt' section; any other value is
        appended to the section name (e.g. 'mlt2').
    :param signalLinks: list of relative href strings.
    :return: list of absolute URL strings, same order as the input.
    """
    base = 'http://show.sctv.com/mlt'
    # Hoist the catFlag suffix out of the loop — it is the same for
    # every link.
    if catFlag != 0:
        base += str(catFlag)
    return [base + link[1:] for link in signalLinks]

def urlTitles(rootUrl):
    """Fetch one index page and extract the episode titles.

    Takes the text of every ``<div class="name">`` on the page; titles
    are expected to line up one-to-one with the links from
    getLinksPerRootUrl().

    :param rootUrl: URL of one paginated index page.
    :return: list of title strings, or '' when the request fails.
    """
    # Bug fix: requests.get() takes ``headers=``, not ``header=`` — the
    # original raised TypeError on every call.
    response = requests.get(url=rootUrl, headers=headers)
    if response.status_code != 200:
        print('Get titles error!')
        return ''
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')
    return [div.text for div in soup.find_all('div', attrs={'class': 'name'})]

def download(savePath, titles, links):
    """Invoke the you-get CLI once per (title, link) pair.

    :param savePath: output directory passed to ``you-get -o``.
    :param titles: per-video output file names (``you-get -O``).
    :param links: per-video page URLs, same order as titles.
    """
    for title, link in zip(titles, links):
        # Security fix: build an argument list and skip the shell, so a
        # quote or shell metacharacter in a scraped title cannot inject
        # commands (the original interpolated it into an os.system()
        # command string).
        cmd = ['you-get', '-o', savePath, '-O', title, link]
        print(' '.join(cmd))
        subprocess.run(cmd, shell=False)

def main():
    """Walk every paginated index page and download each video it lists."""
    catFlag = 0
    for pageUrl in getRootUrls(printFlag=False):
        episodeLinks = getLinksPerRootUrl(rootUrl=pageUrl, printFlag=False)
        episodeTitles = urlTitles(rootUrl=pageUrl)
        absoluteUrls = catUrl(catFlag=catFlag, signalLinks=episodeLinks)
        download(savePath=savePath, titles=episodeTitles, links=absoluteUrls)

# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值