要看懂这一篇,需要先搞懂之前的两篇:《点赞(豆瓣篇)》和《proxies(代理篇)》。
把这两篇结合起来,就可以实现自动点赞了。
import requests
import time
# Local helper module (get_IP.py, below) that scrapes and validates proxy IPs.
import get_IP

# Douban "react" (like) endpoint for one specific note.
# The original URL had a stray leading space; requests happens to strip it,
# but it was clearly unintended, so it is removed here.
post_url = 'https://m.douban.com/rexxar/api/v2/note/795061634/react'
post_data = {
    # NOTE(review): the trailing space in '1 ' looks like a typo, but the
    # server has been accepting this exact payload — verify before changing.
    'reaction_type': '1 ',
    'ck': 'DkpI'
}
# Fetch the list of working proxies (calls main() in get_IP.py).
ips = get_IP.main()
# Session cookie copied from a logged-in browser; the 'ck' value in post_data
# must match the 'ck' inside this cookie string.
cookie = 'bid=VghJOH4JtUY; douban-fav-remind=1; ll="108297"; push_doumail_num=0; push_noty_num=0; __utmv=30149280.23331; UM_distinctid=177b030f2912d1-0e882d9b96217e-70266a36-8dcc0-177b030f292216; Hm_lvt_19fc7b106453f97b6a84d64302f21a04=1613568734; _ga=GA1.2.1885368446.1589424246; douban-profile-remind=1; __utma=30149280.1885368446.1589424246.1613738974.1613907657.5; __utmz=30149280.1613907657.5.2.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmc=30149280; dbcl2="233318987:xcL/bLvssFI"; ck=DkpI; ap_v=0,6.0; __gads=ID=9ae741e6508ee1df-22a188e919c60084:T=1613907779:RT=1613907779:S=ALNI_MZ74XqJZ4n1XOHYBWmQlLU00wZifQ; frodotk="95148763b5a9238dbf3d962b20fb6aa7"; talionusr="eyJpZCI6ICIyMzMzMTg5ODciLCAibmFtZSI6ICJcdTY2MmZcdTg0M2RcdTg0M2RcdTU0NjIifQ=="; _gid=GA1.2.405336772.1613908310; Hm_lvt_6d4a8cfea88fa457c3127e14fb5fabc2=1613740394,1613740860,1613908309,1613908324; Hm_lpvt_6d4a8cfea88fa457c3127e14fb5fabc2=1613908324; __utmt=1; __utmb=30149280.55.10.1613907657'
# Headers for the like request itself (needs the cookie + referer).
post_header = {
    'cookie': cookie,
    'Referer': 'https://www.douban.com/note/795061634/',
    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Mobile Safari/537.36',
}
# Plain headers (no cookie), e.g. for the optional login request below.
header = {
    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Mobile Safari/537.36',
}
# Optional: log in with a session instead of a copied cookie.
# url = 'https://accounts.douban.com/j/mobile/login/basic'
# data = {
#     'remember': 'true',
#     'name': <account>,
#     'password': <password>,
# }
# session = requests.Session()
# response = session.post(url, headers=header, data=data)
for ip in ips:
    try:
        # Route the like through the proxy. Map both schemes, matching how
        # get_IP.filter_ip validates its proxies; timeout so a dead proxy
        # cannot hang the loop forever.
        new_response = requests.post(
            post_url,
            headers=post_header,
            proxies={'https': 'https://' + str(ip), 'http': 'http://' + str(ip)},
            data=post_data,
            timeout=10,
        )
        print('1个点赞完成')
    except requests.RequestException:
        # Only network/HTTP failures are expected here; anything else should surface.
        print('1个失败')
    finally:
        # Be polite: throttle the request rate.
        time.sleep(2)
# ----- get_IP.py (the helper module imported above) -----
import requests
from bs4 import BeautifulSoup
import time
# Probe target used to test whether a scraped proxy actually works.
baidu_url = 'https://www.baidu.com/'
# All scraped proxy addresses, as 'host:port' strings.
ip_lists = []
# Subset of ip_lists that answered the probe successfully.
useful_ip = []
# Free proxy-list site to scrape.
url = 'https://www.89ip.cn/'
# Browser User-Agent so the site serves a normal HTML page.
header = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Mobile Safari/537.36',
}
def get_ip(url):
    """Scrape one listing page of 89ip.cn and append 'host:port' entries to ip_lists.

    Args:
        url: full URL of one index page of the proxy-list site.

    Side effects:
        Appends to the module-level ip_lists; prints an error message on a
        non-200 response or an unexpected page layout.
    """
    # Timeout so a slow/blocked site cannot hang the scrape.
    response = requests.get(url, headers=header, timeout=10)
    if response.status_code != 200:
        print('网页请求错误')
        return
    soup = BeautifulSoup(response.text, 'html.parser')
    tbody = soup.find('tbody')
    if tbody is None:
        # Layout changed or we were served a block page — nothing to parse
        # (the original would crash with AttributeError here).
        print('网页请求错误')
        return
    for row in tbody.find_all('tr'):
        cells = row.find_all('td')
        if len(cells) < 2:
            continue
        # Columns 0 and 1 are IP and port; strip the table's layout whitespace.
        ip_text = (cells[0].text + ':' + cells[1].text).replace('\t', '').replace('\n', '')
        ip_lists.append(ip_text)
def filter_ip():
    """Probe every scraped proxy against Baidu and keep the ones that answer 200.

    Side effects:
        Fills the module-level useful_ip, prints per-proxy progress, and
        sleeps 2 seconds between probes.
    """
    for ip in ip_lists:
        try:
            # timeout is essential here: a dead proxy would otherwise hang
            # this probe (and the whole run) indefinitely.
            probe = requests.get(
                baidu_url,
                headers=header,
                proxies={'https': 'https://' + ip, 'http': 'http://' + ip},
                timeout=10,
            )
            if probe.status_code == 200:
                useful_ip.append(ip)
                print(ip + '可用')
        except requests.RequestException:
            # Narrowed from a bare except: only network failures mean "bad proxy".
            print(ip + '这个ip地址不可用')
        finally:
            # Throttle to stay polite to the probe target.
            time.sleep(2)
    print(useful_ip)
def main():
    """Scrape pages 1-10 of the proxy list, validate them, and return the usable IPs.

    Returns:
        The module-level useful_ip list of working 'host:port' strings.
    """
    for page in range(1, 11):
        # `url` already ends with '/', so join without introducing '//'
        # (the original built 'https://www.89ip.cn//index_1.html').
        page_url = url + 'index_' + str(page) + '.html'
        get_ip(page_url)
    filter_ip()
    print('共'+str(len(ip_lists))+'个ip,'+str(len(useful_ip))+'个可用,'+str(len(ip_lists)-len(useful_ip))+'个不可用')
    return useful_ip