爬虫之红薯网首页的爬取(成功进行验证码识别并模拟登录成功)
核心
session = requests.Session()
代码:
#!/usr/bin/env python
# coding:utf-8
import requests
from lxml import etree
from hashlib import md5
import time
class Chaojiying_Client(object):
    """Client for the Chaojiying (super-eagle) captcha-recognition API.

    Credentials are captured once at construction; the account password is
    stored and transmitted as its MD5 hex digest, never in plain text.
    """

    def __init__(self, username, password, soft_id):
        """
        username: Chaojiying account name
        password: plain-text account password (MD5-hashed before storage)
        soft_id:  software ID generated in the user center
        """
        self.username = username
        # The API expects the MD5 hex digest of the UTF-8 encoded password.
        self.password = md5(password.encode('utf8')).hexdigest()
        self.soft_id = soft_id
        # Parameters shared by every API call.
        self.base_params = {
            'user': self.username,
            'pass2': self.password,
            'softid': self.soft_id,
        }
        self.headers = {
            'Connection': 'Keep-Alive',
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
        }

    def PostPic(self, im, codetype):
        """Submit an image for recognition; return the decoded JSON reply.

        im:       raw image bytes
        codetype: captcha type code, see http://www.chaojiying.com/price.html
        """
        params = {
            'codetype': codetype,
        }
        params.update(self.base_params)
        files = {'userfile': ('ccc.jpg', im)}
        # timeout added so a stalled upload cannot hang the caller forever
        r = requests.post('http://upload.chaojiying.net/Upload/Processing.php',
                          data=params, files=files, headers=self.headers,
                          timeout=30)
        return r.json()

    def ReportError(self, im_id):
        """Report a mis-recognized image so the fee can be refunded.

        im_id: picture ID (pic_id) of the wrongly answered captcha
        """
        params = {
            'id': im_id,
        }
        params.update(self.base_params)
        r = requests.post('http://upload.chaojiying.net/Upload/ReportError.php',
                          data=params, headers=self.headers, timeout=30)
        return r.json()
if __name__ == "__main__":
#准备一个UA伪装
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'
}
#对需要识别的图片进行请求
url = 'https://i.hongshu.com/userverify.html?1624113370734'
#得到二进制文本的图片
img_data_text = requests.get(url = url,headers = headers).content
#对图片进行存储
with open('./a.jpg','wb') as fp:
fp.write(img_data_text)
#利用超级鹰进行识别
chaojiying = Chaojiying_Client('user-name', 'passwd', '918582') # 用户中心>>软件ID 生成一个替换 96001
im = open('a.jpg', 'rb').read() # 本地图片文件路径 来替换 a.jpg 有时WIN系统须要//
print(chaojiying.PostPic(im, 1902)) # 1902 验证码类型 官方网站>>价格体系 3.4+版 print 后要加()
#准备需要登录的url
url = 'https://i.hongshu.com/login.html'
#读取我们刚刚识别到的结果
result = chaojiying.PostPic(im, 1902)['pic_str']
print(result)
post_data = {
'username': '',
'password': '',
'validcode': result,
'url': 'https://i.hongshu.com/login.html?url=https%3A%2F%2Fi.hongshu.com%2Fprofile%2Faccount.do',
'sign': '1',
'logining': '1'
}
#得到网页的源码数据
#使用保持页面的方法进行post请求
session = requests.Session()
response = session.post(url=url,data=post_data,headers=headers)
print(response.status_code)
#由于进行了重定向,所以我们需要重新去get一下这个网页
url = 'https://i.hongshu.com/profile/center.do'
res = session.get(url=url,headers=headers)
print(res.status_code)
text_data = res.text
print(text_data)
with open('./hongshu.html','w',encoding='utf-8') as fp:
fp.write(text_data)
print("爬取完成!")