Automatically downloading xiangsheng (crosstalk) and pingshu (storytelling) with Python (Part 5)

After finishing the previous parts, I felt that running two separate scripts was too much hassle, and every run also meant editing the csv file name inside the download script, which was inefficient. So I merged all the code into one script for a pseudo-fully-automatic download: given just the link to a work's first page, it can download the whole xiangsheng or pingshu series.

While writing this post I hit a problem: scraping the page source with

link = r.html.absolute_links

started returning an empty set(), and I haven't figured out why yet.
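
If absolute_links keeps coming back empty, one workaround is to drop down to plain requests and pull the hrefs out of the raw source with a regex. A minimal sketch (get_links_fallback is a hypothetical helper I haven't verified against the site, not part of the script below):

import re
import requests
from urllib.parse import urljoin

def get_links_fallback(url):
    # Fallback for when r.html.absolute_links comes back as an empty set():
    # fetch the raw source with plain requests and extract the hrefs ourselves.
    resp = requests.get(url)
    resp.encoding = resp.apparent_encoding   # let requests guess the page encoding
    hrefs = re.findall(r'href="([^"]+)"', resp.text)
    return {urljoin(url, h) for h in hrefs}  # absolutize relative paths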

There is actually a route to making it fully automatic too: open any performer's works page, scrape the "first page" link of every work, then use the getAllLinks() function to open each work's first page; every step after that is the same. Maybe I'll write it up when I have time. A rough sketch of the idea follows.
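
This sketch reuses session, running() and the re module from the script below; auto_all_works is hypothetical, and the first-page URL pattern is an assumption based on the targetUrl used here, so it would need checking against a real performer's page:

def auto_all_works(artist_url):
    # Hypothetical helper: collect the first-page link of every work on a
    # performer's works page, then run the normal per-work pipeline on each.
    global targetUrl
    r = session.get(artist_url)
    first_pages = [l for l in r.html.absolute_links
                   if re.search(r'/MusicList/mmc_\d+_\d+_1\.Htm', l, re.I)]
    for page1 in first_pages:
        targetUrl = page1   # jump_link() reads the global targetUrl
        running(page1)      # writes <title>.csv, same as the manual flow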

from selenium import webdriver
from requests_html import HTMLSession
import requests
import time
import re
import pandas as pd

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

from selenium.webdriver.chrome.options import Options

'''
try:
   element = WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.ID, "myDynamicElement")))
finally:
   browser.quit()
'''

'''
chrome_options = Options()
chrome_options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
chrome_driver = "C:\Program Files\Google\Chrome\Application\chromedriver.exe"
driver = webdriver.Chrome(chrome_driver, chrome_options=chrome_options)
print(driver.title)
'''

driver = webdriver.Chrome(r'E:\bbb\chromedriver.exe')   # local chromedriver path; adjust for your machine


targetUrl = 'https://www.pingshu8.com/MusicList/mmc_235_4615_1.Htm'
headUrl = 'https://www.pingshu8.com'
mp3name = ''

session = HTMLSession()
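
# Two fetchers side by side: requests_html's HTMLSession scrapes the static
# link lists, while the selenium driver above handles the pages, titles and
# download clicks that need a real browser.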


def openDriver(url):
    driver.get(url)   # open the url in the browser


def getAllSession(url):    # all the links on the current page
    print('It\'s coming getAllSession()!')
    downLink = []

    r = session.get(url)
    link = r.html.absolute_links   # a set of absolute URLs
    downLink.append(link)  # store the whole set as the list's single element

    return downLink


def jump_link():    # build the jump links for every page of the work
    print('It\'s coming jump_link()!')

    for i in getAllSession(targetUrl):  # the set holding the page-jump links
        pattern = re.compile(r'\/Musiclist\/mmc_\w+\.htm')  # matches every pager link
        m = pattern.findall(str(i))  # find out how many pages there are

        '''
        Split the page number out of each pager link.
        '''
        pagenum_list = []
        for n in m:
            pagename = re.split(r'\/Musiclist\/mmc_\d{2,3}_\d{0,}_', n)  # strip the prefix before the page number
            pagenum = pagename[1].split('.htm')  # strip the suffix after it
            pagenum_list.append(pagenum)

    def end_num():  # the last page number
        num1 = int(pagenum_list[0][0])
        try:
            num2 = int(pagenum_list[1][0])
        except IndexError:   # only one pager link was found
            num2 = 0
        return max(num1, num2)

    '''
    Knowing the total page count, assemble the jump link for each page.
    '''
    pagejump = []
    pages = 2
    num = end_num()
    result1 = re.match(r'(\/Musiclist\/mmc_\d{2,3}_\d{0,}_)', n)  # n still holds the last pager link
    t = result1.group(1)
    result2 = '.htm'
    while pages <= num:
        result = headUrl + t + str(pages) + result2
        pages += 1
        pagejump.append(result)
    return pagejump


def down_link(url):    # the download links on one page
    print('It\'s coming down_link()!')

    save_down_link = []

    for i in getAllSession(url):  # the set of all links on the page
        pattern = re.compile(r'\/down_\d{1,6}\.html')    # matches the down_ detail pages
        re_link = pattern.findall(str(i))  # pull out every down_ address

    for j in re_link:
        links = headUrl + j   # assemble the full download address
        save_down_link.append(links)

    return save_down_link


def mp3Name(url):  # all the show titles on the current page
    print('It\'s coming mp3Name()!')

    num = 5
    pstitle = []
    numlist = []
    while num < 33:   # title <li> elements sit at indexes 5, 8, 11, ... 32
        numlist.append(num)
        num += 3

    maxfor = len(down_link(url))   # only as many titles as there are download links

    for nums in numlist[0:maxfor]:
        sel = driver.find_element_by_xpath(
            f'/html/body/div[2]/div[13]/div[1]/ul[2]/div[2]/ul/form/li[{nums}]/a').text
        pstitle.append(sel)

    return pstitle


def getAllLinks(url):  # open every page and gather download links plus titles
    print('It\'s coming getAllLinks()!')

    ps_link_list = []
    ps_txt_list = []
    mylist = []

    ps_link_list.append(down_link(targetUrl))   # the first page is the global targetUrl
    ps_txt_list.append(mp3Name(targetUrl))

    for i in jump_link():
        openDriver(i)  # open the next page in the browser
        try:
            driver.find_element_by_link_text('首页')  # sanity check that the page rendered
        except Exception:
            time.sleep(50)   # give a slow page time to load before scraping
        ps_link_list.append(down_link(i))
        ps_txt_list.append(mp3Name(i))

    for links in ps_link_list:
        links.sort()   # keep each page's links in a stable order
    print(ps_link_list)

    x = 10  # normally ten entries per page
    y = len(ps_link_list)
    z = len(ps_link_list[-1])
    m = 0

    while m < y-1:    # walk the full pages
        for n in range(x):
            mytext = ps_txt_list[m][n]
            mylink = ps_link_list[m][n]
            mylist.append((mytext, mylink))
        m += 1

    # the last page may hold fewer than ten entries; walk only what it has
    for l in range(z):
        mytext = ps_txt_list[-1][l]
        mylink = ps_link_list[-1][l]
        mylist.append((mytext, mylink))

    return mylist
    

def openXss(url, title):  # main download routine
    '''
    progress counter 1/3 2/3

    global a
    a += 1
    print('\n',a,'/',lenxs)
    '''

    openDriver(url)
    time.sleep(1)
    handle = driver.current_window_handle  # remember the current tab's handle

    '''
    Work out the file name. The commented-out xpath scrape below was the old
    approach; the title passed in from the csv is used directly now.
    '''
#     txt = driver.find_element_by_xpath('/html/body/div[6]/div[1]/div[3]/font').text
#     filename = txt.split('- 下载')[0]
#     print('filename:',filename)

    driver.find_element_by_id('clickina').click()  # the download button opens a new tab
    handles = driver.window_handles  # handles of every open tab

    for newHand in handles:  # walk the tabs
        if newHand != handle:  # pick out the newly opened one
            driver.switch_to.window(newHand)  # switch to it

            link = driver.current_url  # the real mp3 address
            print(link)

            driver.close()
            driver.switch_to.window(handles[0])

            myfile = requests.get(link)
            with open(f'e:\\pydownload\\{title}.mp3', 'wb') as f:  # save under the csv title
                f.write(myfile.content)


def Name():
    name = driver.find_element_by_xpath('/html/body/div[2]/div[12]/div/h1').text
    return name





def csv(records):
    print('It\'s coming csv()!')

    global mp3name
    mp3name = Name()   # the work's title, used as the csv file name

    df = pd.DataFrame(records)
    df.columns = ['title', 'link']
    df.to_csv(f'{mp3name}.csv', encoding='gbk', index=False)
    
    
def running(targetUrl):

    mylist = []
    ps_txt_list = []
    ps_link_list = []

    openDriver(targetUrl)

    def ex():  # is there a pager, i.e. more than one page?
        try:
            driver.find_element_by_xpath('/html/body/div[2]/div[13]/div[1]/ul[2]/div[3]/div/a[1]')
            return True
        except:
            return False

    if ex():   # multi-page work: walk every page
        return csv(getAllLinks(targetUrl))
    else:      # single-page work: gather this page directly
        ps_link_list.append(down_link(targetUrl))
        ps_txt_list.append(mp3Name(targetUrl))

        z = len(ps_link_list[-1])   # however many entries this page actually has
        for l in range(z):
            mylist.append((ps_txt_list[-1][l], ps_link_list[-1][l]))

        return csv(mylist)
        
    
        

running(targetUrl)
#mp3name = Name()
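
# running() wrote <title>.csv above; read it back and download row by row.
# GB18030 is a superset of the GBK used when writing, so the titles decode fine.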
    
data = pd.read_csv(f'{mp3name}.csv', encoding='GB18030')
content = data.values
lenxs = len(content)
a = 0

for i in content:   # preview every row before downloading
    title = i[0]
    link = i[1]
    print(title,link)
    
def jishu():
    '''
    progress counter: 1/3, 2/3, ...
    '''
    global a
    a += 1
    print('\n', a, '/', lenxs)
    

def down():
    try:
        for i in content:
            jishu()
            title = i[0]
            link = i[1]
            print(title, link)
            openXss(link, title)
    except:
        # a download blew up mid-run: resume from the row that failed
        for i in content[a-1:]:
            jishu()
            title = i[0]
            link = i[1]
            print(title, link)
            openXss(link, title)
    finally:
        if a == lenxs:
            return 'OK!'
        else:
            # still rows left: resume once more from where we stopped
            for i in content[a-1:]:
                jishu()
                title = i[0]
                link = i[1]
                print(title, link)
                openXss(link, title)

down()

