众所周知,HTTP 是无状态协议:每一次请求都是独立的,服务器无法凭请求本身识别出我们是谁,因此引入了 cookie 和 session 机制。
那么爬虫如何解决这个问题?
需求:实现人人网获取“我的状态”
# 第一种方法:
# 直接手动登录,在浏览器开发者工具中找到登录后的 cookies 值,
# 把 cookies 值加入请求头后再发起请求,例如:
#   headers = {'Cookie': '<从浏览器复制的 cookies 值>'}
#   response = requests.get(url, headers=headers)
import requests
from fake_useragent import UserAgent

# Build a randomized User-Agent header so requests look like a real browser.
ua = UserAgent()
headers = {'User-Agent': ua.random}
# Step 1: log in first.
def renren_login(user, pwd):
    """POST the credentials to Renren's login endpoint and save the
    returned page to renren_login.html so the result can be inspected."""
    login_url = 'http://www.renren.com/PLogin.do'
    payload = {'email': user, 'password': pwd}
    ua_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
    }
    reply = requests.post(login_url, data=payload, headers=ua_headers)
    # Dump the response HTML to check whether the login succeeded.
    with open('renren_login.html', 'w', encoding='utf-8') as fp:
        fp.write(reply.text)
# Step 2: fetch the profile/status page using cookies copied from the browser.
def ren_index(url):
    """Fetch *url* with a hand-copied Cookie header and save the page.

    The Cookie value must be pasted in manually from a logged-in
    browser session; 'zi ji bu quan' ("fill in yourself") is a placeholder.
    """
    headers = {
        # TODO: paste the real cookie string from your browser here.
        'Cookie': 'zi ji bu quan',
    }
    # Fixed: retrieving a page is a read operation, so use GET
    # (the original requests.post contradicts the cookie-in-header
    # approach described above and is rejected by most page endpoints).
    respond = requests.get(url, headers=headers)
    con = respond.content.decode('utf-8')
    with open('ren_index.html', 'w', encoding='utf-8') as f:
        f.write(con)
    print(respond.status_code)
if __name__ == '__main__':
    # Step 1: log in (fill in real credentials first).
    # renren_login(user='', pwd='')
    # Step 2: fetch the "my status" page.
    url = 'http://status.renren.com/status/v7/972262212'
    # Fixed: the original called renren_spider(url), a name that is never
    # defined anywhere in this file and would raise NameError at runtime.
    ren_index(url)
第二种方法:
创建 session 对象,先用该 session 对象发起登录请求,之后使用同一个 session 对象发起后续访问请求(登录产生的 cookies 会被 session 自动保存并随请求携带)。
import requests
from fake_useragent import UserAgent

ua = UserAgent()
# Fixed: the headers= argument of requests must be a mapping of header
# names to values. The original `headers = ua.random` assigned a bare
# User-Agent *string*, which requests rejects when building the request.
headers = {'User-Agent': ua.random}
# Step 1: log in through the shared session so its cookies are retained.
def ren_login(form_data):
    """Log in via the module-level ``session`` and save the result page."""
    login_url = 'http://www.renren.com/PLogin.do'
    # Posting through the session lets it record the cookies the
    # server sends back, so later requests are authenticated.
    reply = session.post(login_url, data=form_data, headers=headers)
    with open('ren_login.html', 'w', encoding='utf-8') as f:
        f.write(reply.text)
    print(reply.status_code)
# Step 2: fetch the profile/status page with the same session.
def ren_index(url):
    """Fetch *url* through the shared, logged-in session and save the page."""
    headers = {
        # NOTE(review): with a logged-in session this manual Cookie header
        # should be unnecessary — the session carries the cookies itself.
        'Cookie': 'zi ji bu quan',
    }
    # Reuse the same session so login cookies are sent automatically.
    # Fixed: retrieving a page is a read operation, so use GET
    # (the original session.post would be rejected by a page endpoint).
    respond = session.get(url, headers=headers)
    con = respond.content.decode('utf-8')
    with open('ren_index.html', 'w', encoding='utf-8') as f:
        f.write(con)
    print(respond.status_code)
# 1. Create the single session object shared by both steps above.
session = requests.Session()
if __name__ == '__main__':
    form_data = {}
    form_data['email'] = input("请输入用户名")
    form_data['password'] = input("请输入密码")
    # Fixed: the original collected the credentials but never called
    # ren_login, so the session held no login cookies and the status
    # page was fetched unauthenticated.
    ren_login(form_data)
    # Fetch the "my status" page with the now-authenticated session.
    url = 'http://status.renren.com/status/v7/972262212'
    ren_index(url)