使用步骤
1.引入库
代码如下(示例):
from selenium import webdriver
from time import sleep
import os
import requests
2.下载视频
代码如下(示例):
def get_urls(index1, index2):
    """Build the .mp4 download URL for chapter *index1*, section *index2*.

    Both arguments are numeric strings (e.g. "1", "12"). Each is
    zero-padded to a fixed width of 3 digits and appended to the base
    URL, producing e.g. ...052001002.mp4 for ("1", "2").

    Returns:
        The full download URL as a string.
    """
    base_url = "https://new-bxgstorge.boxuegu.com/bxg/textbook/052/afterClassVideo/052"
    # str.zfill(3) reproduces the original "00"+x / "0"+x padding for
    # 1- and 2-digit indices, and also handles 3-digit indices correctly
    # (the original would have emitted a 4-character field there).
    return base_url + index1.zfill(3) + index2.zfill(3) + '.mp4'
def get_dir(url):
    """Crawl the course-directory page at *url* and download every
    after-class video into F:\\video\\<chapter name>\\.

    Side effects: launches a Chrome browser via selenium, creates
    directories, writes .mp4 files, and prints progress to stdout.
    Skips files that already exist on disk.
    """
    option = webdriver.ChromeOptions()
    # option.add_argument('headless')
    # BUG FIX: the original line was truncated/garbled
    # ("webdriver.Chrome(chrome_options=optionyuy") -- missing ')' and
    # stray characters; restored the intended constructor call.
    browser = webdriver.Chrome(chrome_options=option)
    try:
        browser.get(url)
        sleep(3)  # wait for the JS-rendered directory tree to appear
        chapters = browser.find_elements_by_xpath("//div[@class='video-directory-content']"
                                                  "/div[@id='chapterPointsBox']/div[@class='el-scrollbar']"
                                                  "/div[@class='el-scrollbar__wrap']/div[@class='el-scrollbar__view']"
                                                  "/div[@class='chapter-item-box expand']")
        for index1, c in enumerate(chapters):
            chapt_name = c.find_element_by_xpath("div[@class='chapter-text ellipsis']/span[2]").text
            path = "F:\\video\\" + chapt_name
            if not os.path.exists(path):
                os.makedirs(path)
            dchapt_names = c.find_elements_by_xpath("div[@class='points-box']/div[contains(@class,'point-item-box')]/"
                                                    "span[@class='point-text-box']/span[@class='point-text ellipsis']")
            for index2, dchapt_name in enumerate(dchapt_names):
                # '<' and '>' are illegal in Windows filenames; replace them.
                mv_path = path + "\\" + str(index2 + 1) + '、' + dchapt_name.text.replace('<', 'p').replace('>', 'p') + '.mp4'
                print(mv_path)
                # The video URLs follow a fixed numbering scheme, so they
                # can be constructed from the 1-based chapter/section index.
                mp4_url = get_urls(str(index1 + 1), str(index2 + 1))
                print(mp4_url)
                if not os.path.exists(mv_path):
                    resp = requests.get(mp4_url)
                    sleep(3)  # crude rate limiting between downloads
                    # 'with' closes the file automatically; the original's
                    # explicit f.close() inside the block was redundant.
                    with open(mv_path, 'wb') as f:
                        f.write(resp.content)
                    print("success")
                else:
                    print('文件已经存在')
    finally:
        # BUG FIX: the original never closed the browser, leaking the
        # Chrome process on every run (and on any crash mid-crawl).
        browser.quit()
# Example of a generated download URL (what get_urls produces):
# https://new-bxgstorge.boxuegu.com/bxg/textbook/052/afterClassVideo/052001001.mp4
# Entry point: the course-directory page whose chapter tree is crawled.
url = "http://tch.ityxb.com/video/6aa88b8193be49c3b8bef9910058b0ca/c1333acbfda346489af0f7488d9a0e2b/adcbc40cb73e4d229ed199da1742bd8f"
get_dir(url)
该处使用的 url 即为发起网络请求所访问的课程目录页面地址。
总结
以上就是今天要讲的内容。本文简单介绍了如何利用 selenium 打开并解析课程目录页面、提取章节结构,再结合 requests 按固定规律构造的 URL 批量下载对应的视频文件并按章节保存到本地。