方法一:selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time

# Drive a real Chrome browser so the user can solve the interactive
# captcha by hand, then continue scraping from the logged-in session.
driver = webdriver.Chrome('./chromedriver')  # path to your chromedriver binary
driver.get("https://www.zhihu.com/#signin")

# Locate the account input box and type the account name.
elem = driver.find_element_by_name("account")
elem.clear()
elem.send_keys("xxx@gmail.com")

# Locate the password input box and type the password.
password = driver.find_element_by_name('password')
password.clear()
password.send_keys("12345678")

# FIX: the original line started the prompt with a full-width quote (’),
# which is a SyntaxError; use a normal ASCII quote.
input('请在网页上点击倒立的文字,完成以后回到这里按任意键继续。')

elem.send_keys(Keys.RETURN)  # simulate pressing the Enter key to submit

# A plain sleep is enough for a demo; an explicit wait condition
# (WebDriverWait) is the more robust alternative.
time.sleep(10)
print(driver.page_source)
driver.quit()
方法二:使用cookie
通过已经登录的Cookies,可以让爬虫绕过登录过程,直接进入登录以后的页面
在已经登录知乎的情况下,打开Chrome的开发者工具,定位到“Network”选项卡,然后刷新网页,在加载的内容中随便选择一项,然后看右侧的数据,从Request Headers中可以找到Cookie
只要把这个Request Headers的内容通过requests提交,就能直接进入登录以后的知乎页面了
import requests

# Headers copied from a logged-in browser session.  Submitting the same
# Cookie header lets requests skip the login flow and land directly on
# the logged-in page.
headers = {
    'Cookie': 'BIDUPSID=C4F812C6E50CAAE9EF42E0CEDC62B283; PSTM=1653552985; BD_UPN=12314753; BAIDUID=C4F812C6E50CAAE9EC7763C2A6343229:SL=0:NR=10:FG=1; newlogin=1; BAAAAAAAAAAAAAAAAAAGQ2VjeGxvNmNIRE9wSUUwdTl-ZjB6VmRqRVFBQUFBJCQAAAAAAAAAAAEAAACaQxViMTExNzA0MDNZTEwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPRAMGP0QDBjc; H_PS_PSSID=37147_36542_37117_37300_36885_37405_36786_37261_26350_37478_37195; BDRCVFR[feWj1Vr5u3D]=mk3SLVN4HKm; sug=3; sugstore=0; ORIGIN=0; bdime=0; H_PS_645EC=eb189T8lWcIzxcu+rPiz7t1HhQBJk2En3uNOGNeKZM6+shIs31125cNTa7aq9X+8GU8k',
    "user-agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
}

# A Session keeps cookies returned by the server across requests.
session = requests.Session()
# NOTE(review): verify=False disables TLS certificate checking — fine for
# a tutorial, unsafe in production.
response = session.get('https://www.baidu.com/', headers=headers, verify=False)
source = response.content.decode('utf-8')
print(source)
使用Cookie来登录网页,不仅可以绕过登录步骤,还可以绕过网站的验证码。
代码中,使用了requests的Session模块。所谓Session,是指一段会话。网站会把每一个会话的ID(Session ID)保存在浏览器的Cookies中用来标识用户的身份。requests的Session模块可以自动保存网站返回的一些信息
方法三:表单登录
# -*- coding: utf-8 -*-
import requests
import re
from bs4 import BeautifulSoup

# Log into douban by POSTing the login form; if the server answers with a
# captcha, ask the user to solve it and re-submit the form.
s = requests.Session()
url_login = 'https://accounts.douban.com/login'
formdata = {
    'redir': 'https://www.douban.com',
    'form_email': '账号',
    'form_password': '密码',
    'login': u'登陆'
}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/55.0.2883.87 Safari/537.36'}

r = s.post(url_login, data=formdata, headers=headers)
content = r.text
soup = BeautifulSoup(content, 'html.parser')

# A captcha image in the response means the login needs a second round.
captcha = soup.find('img', id='captcha_image')
if captcha:
    captcha_url = captcha['src']
    re_captcha_id = r'<input type="hidden" name="captcha-id" value="(.*?)"/'
    captcha_ids = re.findall(re_captcha_id, content)
    # FIX: re.findall returns a list; the original submitted the whole
    # list as the 'captcha-id' field instead of the id string itself.
    captcha_id = captcha_ids[0] if captcha_ids else ''
    print(captcha_id)
    print(captcha_url)
    captcha_text = input('Please input the captcha:')
    formdata['captcha-solution'] = captcha_text
    formdata['captcha-id'] = captcha_id
    r = s.post(url_login, data=formdata, headers=headers)

# Save the final page so the result can be inspected in a browser.
with open('contacts.html', 'w+', encoding='utf-8') as f:
    f.write(r.text)
以下是一个使用 cookie 模拟登录请求页面的例子
# -*- coding: UTF-8 -*-
import requests
import sys
import io


def parse_cookies(cookie_str):
    """Turn a browser cookie string like 'k1=v1;k2=v2' into a dict.

    Segments without an '=' are skipped; whitespace around keys and
    values is stripped so strings copied as 'k1=v1; k2=v2' also work.
    """
    cookies = {}
    for item in cookie_str.split(';'):
        if '=' not in item:
            continue
        key, value = item.split('=', 1)
        cookies[key.strip()] = value.strip()
    return cookies


if __name__ == "__main__":
    # A page that can only be viewed after logging in.
    url = 'http://www.csdn.net'
    # Cookie string copied from the browser after logging in.
    cookie_str = r'xxx=yyy;zzz=mmm'
    # FIX: the original left `cookies` undefined (the TODO); build the
    # dict from the cookie string here.
    cookies = parse_cookies(cookie_str)
    # Request headers.
    headers = {
        'User-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
    }
    # Send the GET request with both the headers and the cookies.
    resp = requests.get(
        url,
        headers=headers,
        cookies=cookies
    )
    print(resp.content.decode('utf-8'))
把cookie字符串处理成字典
# Convert a browser cookie string into a dict usable by requests.
cookie_str = 'XXXX'  # paste the real Cookie header value here
cookies = {}
for segment in cookie_str.split(';'):
    # FIX: skip segments with no '=' (e.g. the 'XXXX' placeholder) —
    # the original crashed unpacking them into (key, value).
    if '=' not in segment:
        continue
    key, value = segment.split('=', 1)
    # Strip whitespace so '; '-separated browser strings parse cleanly.
    cookies[key.strip()] = value.strip()
print(cookies)