import requests
url = "https://passport.17k.com/ck/user/login"
data = {
    "loginName": "13228603221",
    "password": "maihe520"
}
session = requests.session()
# res=requests.post(url,data=data)
# print(res.cookies)
session.post(url, data=data)
res = session.get("https://user.17k.com/ck/author2/shelf?page=1&appKey=2406394919")
res.encoding = "utf-8"
# print(res.text)
print(res.json())
# ****** Method 2: paste the Cookie header by hand (method 1 above uses a session)
resp = requests.get("https://user.17k.com/ck/author/shelf?page=1&appKey=2406394919", headers={
"Cookie":'GUID=12c5a845-051d-45d3-9780-e85fcc506bf0; sajssdk_2015_cross_new_user=1; c_channel=0; c_csc=web; Hm_lvt_9793f42b498361373512340937deb2a0=1703394634,1703416222; accessToken=avatarUrl=https%3A%2F%2Fcdn.static.17k.com%2Fuser%2Favatar%2F14%2F74%2F05%2F102750574.jpg-88x88%3Fv%3D1703416674000&id=102750574&nickname=maihehe&e=1718971444&s=0c5fad1cb4262495; sensorsdata2015jssdkcross={"distinct_id":"102750574","$device_id":"18c9a3b687ba3f-084fbb734c7765-26021051-2073600-18c9a3b687ceb3","props":{"$latest_traffic_source_type":"直接流量","$latest_referrer":"","$latest_referrer_host":"","$latest_search_keyword":"未取到值_直接打开"},"first_id":"12c5a845-051d-45d3-9780-e85fcc506bf0"}; Hm_lpvt_9793f42b498361373512340937deb2a0=1703419680'
})
print(resp.text)
# Finding: passing the cookie through headers (method 2) no longer logs in
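# ****** Sketch (mine, not from the lesson): instead of hand-copying the Cookie
# header, inspect and reuse the jar the session captured during login above.
jar = session.cookies  # cookies the server set during session.post()
print(jar.get_dict())
# an equivalent one-off request that passes the jar explicitly:
res2 = requests.get(
    "https://user.17k.com/ck/author2/shelf?page=1&appKey=2406394919",
    cookies=jar,
)
print(res2.json())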
# ************** My version: swap in any video link and it will download!!
import requests
url = "https://pearvideo.com/video_1743011"
contID = url.split("_")[1]
ur = f"https://pearvideo.com/videoStatus.jsp?contId={contID}"
# ur = "https://pearvideo.com/videoStatus.jsp?contId=1743011&mrd=0.05285239952004561" also works
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
    "Referer": url
}
resp = requests.get(ur, headers=headers)
# print(resp.text)
dic = resp.json()
# print(dic)
tihuan = dic["systemTime"]  # "tihuan" = "replacement": the timestamp to swap out of srcUrl
src = dic["videoInfo"]["videos"]["srcUrl"]
src = src.replace(tihuan, f"cont-{contID}")
print(src)
with open("下载的视频.mp4",mode="wb") as f:
f.write(requests.get(src).content)
print("完毕")
# ********* Teacher's code
# 1. Get the contId
# 2. Get the JSON returned by videoStatus -> srcUrl
# 3. Fix up the contents of srcUrl
# 4. Download the video
import requests
# URL of the video page to pull from
url = "https://www.pearvideo.com/video_1725199"
contId = url.split("_")[1]
videoStatusUrl = f"https://www.pearvideo.com/videoStatus.jsp?contId={contId}"
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36",
    # Anti-hotlinking: Referer tells the server which page this request came from
    "Referer": url
}
resp = requests.get(videoStatusUrl, headers=headers)
dic = resp.json()
srcUrl = dic['videoInfo']['videos']['srcUrl']
systemTime = dic['systemTime']
srcUrl = srcUrl.replace(systemTime, f"cont-{contId}")
# Download the video
with open("a.mp4", mode="wb") as f:
    f.write(requests.get(srcUrl).content)
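# ****** Worked example (mine; the path and numbers are made up for illustration):
# srcUrl embeds systemTime where the playable URL expects cont-<contId>, so one
# str.replace() repairs it:
demo_src = "https://video.pearvideo.com/mp4/third/20210208/1612771927985-12345678-hd.mp4"
demo_time = "1612771927985"
print(demo_src.replace(demo_time, "cont-1725199"))
# -> https://video.pearvideo.com/mp4/third/20210208/cont-1725199-12345678-hd.mp4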
# ************************** Someone else's code; I couldn't follow it, so I asked about it over QQ
import requests
headers = {
    "Accept": "*/*",
    "Accept-Language": "zh",
    "CONTENT-TYPE": "application/x-www-form-urlencoded; charset=UTF-8",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Origin": "https://passport.17k.com",
    "Pragma": "no-cache",
    "Referer": "https://passport.17k.com/login/",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    # the sec-ch-ua values below were caret-mangled in the copied snippet; these are
    # plausible Chrome 120 client hints, not the captured originals
    "sec-ch-ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"Windows"'
}
url = "https://passport.17k.com/ck/user/login"
data = {
    "loginName": "13228603221",
    "password": "maihe520"
}
response = requests.post(url, headers=headers, data=data)
# print(response.text)
print(response.cookies.get_dict())
print(response.cookies)
# print(response)
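# ****** Sketch (mine): carry the login cookies into a follow-up request instead
# of pasting a Cookie header; the shelf URL is the one used earlier in these notes.
shelf = requests.get(
    "https://user.17k.com/ck/author2/shelf?page=1&appKey=2406394919",
    cookies=response.cookies,  # jar returned by the login POST above
)
print(shelf.json())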
# *************** Proxies
# A proxy routes your request through a third-party machine
# Downsides of proxies:
# 1. Slow.
# 2. Good proxy IPs are hard to find.
import requests
# https://www.kuaidaili.com/free/intr/1/  (free proxy listing site)
url = "https://www.baidu.com"
# Prepare the proxy info
proxy = {
    # most free proxies are plain HTTP proxies, so both keys usually take an
    # http:// address; the original https:// scheme tends to fail the handshake
    "http": "http://182.84.144.66:3256/",
    "https": "http://182.84.144.66:3256/"
}
# proxies= is the requests keyword that accepts this mapping
resp = requests.get(url, proxies=proxy)
resp.encoding = 'utf-8'
print(resp.text)
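# ****** Sketch (mine): confirm the proxy is actually in use by asking an echo
# service for the egress IP; httpbin.org/ip returns {"origin": "<your ip>"}.
try:
    direct = requests.get("https://httpbin.org/ip", timeout=10).json()["origin"]
    proxied = requests.get("https://httpbin.org/ip", proxies=proxy, timeout=10).json()["origin"]
    print("direct:", direct, "| via proxy:", proxied)  # they differ when the proxy works
except requests.RequestException as e:
    print("proxy check failed:", e)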
# ******* Third-party proxy pool
import requests
def get_ip():
    while 1:  # keep pulling proxy IPs in a loop
        # Could be improved: what should happen when the proxy IPs are all used up?
        url = "http://dev.kdlapi.com/api/getproxy/?orderid=902718903050420&num=100&protocol=2&method=1&an_an=1&an_ha=1&quality=1&format=json&sep=1"
        resp = requests.get(url)
        ips = resp.json()
        if ips['code'] == 0:
            for ip in ips['data']['proxy_list']:  # take each IP in turn
                print("About to yield IP", ip)
                yield ip  # hand proxy IPs back one at a time
            print("All IPs used up, fetching a fresh batch!")  # for loop finished; pull new IPs
        else:
            print("Error while fetching proxy IPs. Retrying!")
def spider():
    url = "https://www.dy2018.com/"
    while 1:
        try:
            proxy_ip = next(gen)  # pull the next proxy IP from the generator
            proxy = {
                "http": "http://" + proxy_ip,
                "https": "http://" + proxy_ip,  # plain HTTP proxy, so http:// for both keys
            }
            print(proxy)
            resp = requests.get(url, proxies=proxy, timeout=20)
            resp.encoding = "utf-8"
            return resp.text
        except Exception:
            print("Request failed; rotating to the next proxy.")
if __name__ == '__main__':
    gen = get_ip()  # gen is the generator that yields proxy IPs
    for i in range(10):
        print(spider())
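# ****** Sketch (mine, answering the "what if the IPs run out" comment above):
# cap the retries instead of looping forever when every proxy in the pool fails.
def spider_with_limit(max_tries=5):
    for attempt in range(1, max_tries + 1):
        proxy_ip = next(gen)
        proxy = {"http": "http://" + proxy_ip, "https": "http://" + proxy_ip}
        try:
            resp = requests.get("https://www.dy2018.com/", proxies=proxy, timeout=20)
            resp.encoding = "utf-8"
            return resp.text
        except requests.RequestException as e:
            print(f"attempt {attempt} via {proxy_ip} failed: {e}")
    return None  # give up after max_tries bad proxies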