import requests
from collections import OrderedDict
import random
# Paste the URLs copied from the website here.
# Copying also drags along stray duration strings such as "15"; they are filtered out below.
urls = ["https://ucampus.cdn.unipus.cn/uexercise/quesres/hearmp3/b_320/c_3/t_110/201510/2928081e-c012-44c5-b09c-51f41c313f6b.mp3","15","https://ucampus.cdn.unipus.cn/uexercise/quesres/hearmp3/b_320/c_3/t_110/201510/2928081e-c012-44c5-b09c-51f41c313f6b.mp3","15","https://ucampus.cdn.unipus.cn/uexercise/quesres/hearmp3/b_320/c_3/t_110/201510/2928081e-c012-44c5-b09c-51f41c313f6b.mp3","15"]

# Keep only the items that are actual URLs
url_list = []
for item in urls:
    if item.startswith("https"):
        url_list.append(item)

# Deduplicate while preserving the original order
unique_urls = OrderedDict.fromkeys(url_list)

# Random number used in the output filename so repeated runs don't collide
i = random.randint(1, 10000)

for url in unique_urls:
    r = requests.get(url)
    # Change the first part of this path to your own download directory.
    # The file is opened in append mode ('ab'), so every clip is written
    # into the same mp3 file, one after another.
    with open(r"C:\Users\free\Downloads\\" + str(i) + ".mp3", 'ab') as f:
        f.write(r.content)
This is only a very crude crawler. A more advanced one should be at least a bit more automated and extract the urls list on its own instead of relying on copy-paste (a rough sketch of that step follows below), but U校园's anti-scraping is too strong and I simply could not get in.
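One small step toward that kind of automation, which does not require getting past the anti-scraping at all, is to paste whatever text you copied from the page into a string and pull the .mp3 links out with a regular expression. The sketch below is just an illustration under that assumption; the sample text and the variable names (copied_text, mp3_urls) are made up for the example and are not part of the original script.

import re
from collections import OrderedDict

# Hypothetical example of text copied straight from the exercise page
copied_text = """
https://ucampus.cdn.unipus.cn/uexercise/quesres/hearmp3/b_320/c_3/t_110/201510/2928081e-c012-44c5-b09c-51f41c313f6b.mp3 15
(other page text copied along with the link)
"""

# Grab everything that looks like an https link ending in .mp3,
# then deduplicate while keeping the original order
mp3_urls = re.findall(r'https://\S+?\.mp3', copied_text)
unique_mp3_urls = list(OrderedDict.fromkeys(mp3_urls))
print(unique_mp3_urls)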
For details on how to use the script, I posted a tutorial on Douyin: 【YFree的作品】如何速通u校园听力测试! (How to speedrun the U校园 listening test) https://v.douyin.com/i8PREc15/