---恢复内容开始---
'''
今日内容:
1 requests之POST请求
session
cookie
token
2 requests高级用法
3 selenium模块
'''
'''
# 1 requests之POST请求
请求url:
请求方式: POST
请求头:
referer:(上一次请求)
user-agent:
请求体:
只有post请求才会有请求体
commit: Sign in
utf8:✓
login:
password:
webauthn-support: unsupported
'''
import requests
import re
# --- Step 1: GET the login page to obtain the CSRF token ---
# Request url:  https://github.com/login
# Method:       GET
# Response hdr: Set-Cookie (session cookies required by the POST below)
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safar'
                  'i/537.36'
}
# BUG FIX: the original fetched http://home.cnblogs.com/u/feiyufei/, a page
# that contains no authenticity_token, so findall(...)[0] raised IndexError.
# The token must come from the GitHub login page itself.
response = requests.get(url='https://github.com/login', headers=headers)
# print(response.text)

# Convert the cookies returned by the login page into a plain dict.
login_cookies = response.cookies.get_dict()

# Extract the hidden CSRF token embedded in the login form.
tokens = re.findall(
    '<input type="hidden" name="authenticity_token" value="(.*?)" />',
    response.text, re.S)
if not tokens:
    raise RuntimeError('authenticity_token not found on the login page')
authenticity_token = tokens[0]
print(authenticity_token)

# --- Step 2: POST the credentials to /session ---
# Request url:  https://github.com/session
# Method:       POST
# Headers:      referer / cookie / user-agent
# Body (only POST requests carry one):
#   commit, utf8, authenticity_token, login, password, webauthn-support

# Assemble the request headers.
headers2 = {
    'referer': 'http://github.com/login',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36'
}

# Assemble the form body.
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file before publishing this script.
form_data = {
    "commit": "Sign in",
    "utf8": "✓",
    "authenticity_token": authenticity_token,
    "login": "tankjam",
    "password": "kermit46709394",
    "webauthn-support": "unsupported",
}

# Send the POST to the session endpoint, carrying the headers, the form
# body, and the cookies obtained from the login page.
response2 = requests.post(url='https://github.com/session',
                          data=form_data,
                          headers=headers2,
                          cookies=login_cookies)
print(response2.status_code)
# print(response2.text)

with open('github.html', 'w', encoding='utf-8') as f:
    f.write(response2.text)
# The requests response object: common attributes and streaming download
import requests

response = requests.get('https://baidu.com')
# Common attributes of the response object.
print(response.status_code)        # HTTP status code
print(response.url)                # final URL after redirects
print(response.encoding)           # detected character encoding
response.encoding = 'utf-8'        # override before decoding .text
print(response.text)               # body decoded as text
print(response.content)            # raw body bytes
print(response.headers)            # response headers
print(response.history)            # redirect history (previous responses)
# Cookies: object form, dict form, and (name, value) pairs.
print(response.cookies)
print(response.cookies.get_dict())
print(response.cookies.items())
print(response.encoding)
print(response.elapsed)            # time taken by the request


import requests
# Send a GET request for a media file and stream it to disk.
url = 'https://vd2.bdstatic.com/mda-ifjegte8t7bxuzbb/hd/mda-ifjegte8t7bxuzbb.mp4'
# stream=True keeps the body as an iterator instead of loading it at once.
response = requests.get(url, stream=True)
# BUG FIX: the original printed response.content here, which forced the
# whole video into memory and defeated stream=True — removed.  Also,
# iter_content() without a chunk_size yields one byte per iteration, so
# pass an explicit chunk size.
with open('像我这样的人.mp4', 'wb') as f:
    for content in response.iter_content(chunk_size=1024 * 64):
        f.write(content)
# 2 requests advanced usage
'''
# 1 https = http + ssl (certificate)
# Certificate verification (most sites are https nowadays)
'''
import requests
import urllib3

# For an https request, requests first validates the server certificate;
# an invalid one raises SSLError.
# FIX: the original called this bare, so demo 1 crashed the whole script —
# catch the error so the following demos still run.
try:
    response = requests.get('https://www.xiaohuar.com')
    print(response.status_code)
except requests.exceptions.SSLError as e:
    print('certificate verification failed:', e)

# Improvement 1: skip verification (a warning is still printed).
response = requests.get('https://www.xiaohuar.com', verify=False)
print(response.status_code)  # no verification -> warning, returns 200

# Improvement 2: skip verification AND silence the warning.
urllib3.disable_warnings()  # silence InsecureRequestWarning
response = requests.get('https://www.xiaohuar.com', verify=False)
print(response.status_code)

# Improvement 3: supply a client certificate.
# Many https sites are reachable without one (Zhihu, Baidu, ...); some
# restricted sites hand certificates to authorised users only.
# FIX: this is pseudo-code — the cert paths do not exist, so it stays
# commented out instead of raising IOError at run time.
# response = requests.get(
#     'https://www.xiaohuar.com',
#     # verify=False,
#     cert=('/path/server.crt', '/path/key'))
# print(response.status_code)


'''
# 2 Timeouts
# Two forms: float or tuple
# timeout=0.1        -> read timeout
# timeout=(0.1, 0.2) -> 0.1 connect timeout, 0.2 read timeout
'''
# FIX: a 0.0001s timeout is guaranteed to fire; catch it so the script can
# continue (the original crashed here).
try:
    response = requests.get('https://www.baidu.com', timeout=0.0001)
except requests.exceptions.Timeout as e:
    print('request timed out:', e)


'''
# 3 Proxies
# Docs: http://docs.python-requests.org/en/master/user/advanced/#proxies
# The request goes to the proxy first, which forwards it on our behalf
# (useful because IP bans are common).
'''
# FIX: the original dict listed the 'http' key twice, so the entry with
# credentials was silently discarded — keep exactly one entry per scheme.
proxies = {
    # A proxy with credentials puts user:password before the @ sign, e.g.
    # 'http': 'http://tank:123@localhost:9527',
    'http': 'http://localhost:9527',
    'https': 'https://localhost:9527',
}
response = requests.get('https://www.12306.cn', proxies=proxies)
print(response.status_code)


'''
Scraping the xici free-proxy list:
1. fetch a xici listing page
2. extract every proxy with the re module
3. test each proxy against an ip-echo site
4. test_ip raising an exception means the proxy is dead; otherwise usable
5. use the working proxies for a real request

Each listing row looks like
  <tr class="odd"> ... <td>112.85.131.99</td> <td>9999</td> ...
hence the regex:
  <tr class="odd">(.*?)</td>.*?<td>(.*?)</td>
'''
# import requests
# import re
# import time
#
# HEADERS = {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
# }
#
#
# def get_index(url):
#     """GET one xici listing page (sleep 1s to stay polite)."""
#     time.sleep(1)
#     response = requests.get(url, headers=HEADERS)
#     return response
#
#
# def parse_index(text):
#     """Yield 'ip:port' strings parsed out of one listing page."""
#     ip_list = re.findall('<tr class="odd">.*?<td>(.*?)</td>.*?<td>(.*?)</td>', text, re.S)
#     for ip_port in ip_list:
#         ip = ':'.join(ip_port)
#         yield ip
#
#
# def test_ip(ip):
#     """Return ip if it works as an https proxy, else None."""
#     print('testing ip: %s' % ip)
#     try:
#         proxies = {
#             'https': ip
#         }
#         # ip-echo test site
#         ip_url = 'https://www.ipip.net/'
#         # A 200 response through the proxy means the proxy is alive.
#         response = requests.get(ip_url, headers=HEADERS, proxies=proxies, timeout=1)
#         if response.status_code == 200:
#             print(f'working ip: {ip}')
#             return ip
#     # A dead proxy raises; report and fall through (returns None).
#     except Exception as e:
#         print(e)
#
#
# def spider_nba(good_ip):
#     """Fetch the NBA China homepage through a verified proxy."""
#     url = 'https://china.nba.com/'
#     proxies = {
#         'https': good_ip
#     }
#     response = requests.get(url, headers=HEADERS, proxies=proxies)
#     print(response.status_code)
#     print(response.text)
#
#
# if __name__ == '__main__':
#     base_url = 'https://www.xicidaili.com/nn/{}'
#     for line in range(1, 3677):
#         ip_url = base_url.format(line)
#         response = get_index(ip_url)
#         # Parse every ip on this listing page.
#         ip_list = parse_index(response.text)
#         for ip in ip_list:
#             # Probe each scraped proxy.
#             good_ip = test_ip(ip)
#             if good_ip:
#                 # Proxy verified — use it for a real request.
#                 spider_nba(good_ip)


'''
5 Authentication
'''
# Tested against the GitHub API.
url = 'https://api.github.com/user'
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
}

# Test 1: no credentials -> 401
# response = requests.get(url, headers=HEADERS)
# print(response.status_code)  # 401
# print(response.text)
'''
Printed result:
{
  "message": "Requires authentication",
  "documentation_url": "https://developer.github.com/v3/users/#get-the-authenticated-user"
}
'''
# Test 2: authenticate via requests.auth.HTTPBasicAuth; on success the
# user profile is returned.
# from requests.auth import HTTPBasicAuth
# response = requests.get(url, headers=HEADERS, auth=HTTPBasicAuth('tankjam', 'kermit46709394'))
# print(response.text)

# Test 3: a plain (user, password) tuple passed to auth= defaults to
# HTTPBasicAuth.
# response = requests.get(url, headers=HEADERS, auth=('tankjam', 'kermit46709394'))
# print(response.text)


'''
# 6 Uploading files
'''
# import requests
#
# # Upload a text file.
# files1 = {'file': open('user.txt', 'rb')}
# response = requests.post('http://httpbin.org/post', files=files1)
# print(response.status_code)  # 200
# print(response.text)

# # Upload an image.
# files2 = {'jpg': open('小狗.jpg', 'rb')}
# response = requests.post('http://httpbin.org/post', files=files2)
# print(response.status_code)  # 200
# print(response.text)
#
# # Upload a video.
# files3 = {'movie': open('love_for_GD.mp4', 'rb')}
# response = requests.post('http://httpbin.org/post', files=files3)
# print(response.status_code)  # 200
# print(response.text)
# 3 The selenium module
'''
1 什么是selenium?
最初是一个自动化测试工具,可以使用它帮我们驱动浏览器
自动去执行某些自定义好的操作。例如在页面中执行JS代码、跳过登录验证。
2 为什么要使用selenium
1)优点:使用requests模块登录需要分析大量的复杂通信流程,使用selenium可以轻松跳过登录验证
2)缺点:浏览器会加载css、js、图片、视频...数据,爬虫效率相比requests模块要低
3 如何使用selenium?
下载selenium模块: pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple selenium
    下载浏览器驱动: http://npm.taobao.org/mirrors/chromedriver/2.38/
'''
# selenium — first contact
from selenium import webdriver  # drives the browser
# ActionChains: action-chain object, used e.g. to drag the slider-captcha image
from selenium.webdriver import ActionChains
# By: element lookup strategies — By.ID, By.CSS_SELECTOR, By.CLASS_NAME, ...
from selenium.webdriver.common.by import By
# BUG FIX: the module name is lowercase 'keys'; 'common.Keys' raised ImportError.
from selenium.webdriver.common.keys import Keys  # keyboard keys
# BUG FIX: the code below uses the EC alias, so the import needs 'as EC'
# (the original import lacked it and every EC.* call raised NameError).
from selenium.webdriver.support import expected_conditions as EC
# WebDriverWait: wait for elements on the page to load
from selenium.webdriver.support.wait import WebDriverWait
import time

# Open Chrome through its driver.
# webdriver.Chrome(<absolute path to chromedriver.exe>); when chromedriver.exe
# sits in the interpreter's Scripts folder the argument can be omitted:
# chrome = webdriver.Chrome()
chrome = webdriver.Chrome(r'D:\chromedriver_win32\chromedriver.exe')

# Demo 0: open a page, wait, and close the browser no matter what happens.
try:
    chrome.get('https://cnblogs.com/feiyufei/')
    time.sleep(3)
finally:
    # Always close the browser.
    chrome.close()

# Demo 1: Baidu search.
# BUG FIX: the driver above is already closed, so each demo opens its own
# driver (the original reused the closed driver and crashed).
chrome = webdriver.Chrome(r'D:\chromedriver_win32\chromedriver.exe')
try:
    # arg 1: driver object, arg 2: timeout in seconds
    wait = WebDriverWait(chrome, 10)
    # 1 open Baidu
    chrome.get('https://www.baidu.com/')
    # 2 find the input box; the tuple is (lookup strategy, attribute value)
    input_tag = wait.until(EC.presence_of_element_located((By.ID, "kw")))
    # 3 search for One-Punch Man
    input_tag.send_keys('一拳超人')
    # 4 press the Enter key
    input_tag.send_keys(Keys.ENTER)
    time.sleep(3)
finally:
    # Always close the browser.
    chrome.close()

# Demo 2: JD search.
chrome = webdriver.Chrome(r'D:\chromedriver_win32\chromedriver.exe')
try:
    wait = WebDriverWait(chrome, 10)
    # 1 open the JD home page
    chrome.get('https://www.jd.com/')
    # 2 find the input box
    input_tag = wait.until(EC.presence_of_element_located((By.ID, "key")))
    # 3 search for the Three Hundred Tang Poems
    input_tag.send_keys('唐诗三百首')
    # 4 find the search button by its class attribute
    search_button = wait.until(
        EC.presence_of_element_located((By.CLASS_NAME, 'button')))
    # 5 click the search button
    search_button.click()
    time.sleep(3)
finally:
    # Always close the browser.
    chrome.close()
# selenium之基本选择器
from selenium import webdriver  # drives the browser
import time

'''
Implicit wait
'''
# Build the driver object.
driver = webdriver.Chrome()
try:
    # Explicit wait would target one specific element:
    # arg 1: driver object, arg 2: timeout
    # wait = WebDriverWait(chrome, 10)
    driver.get('https://china.nba.com/')
    # Implicit wait: applies to every element lookup on the page.
    driver.implicitly_wait(10)
    news_tag = driver.find_element_by_class_name('nav-news')
    # The element object itself, then its tag name.
    print(news_tag)
    print(news_tag.tag_name)
    time.sleep(10)
finally:
    driver.close()


from selenium import webdriver  # drives the browser
import time

'''
=============== all lookup methods ===================
element  finds one matching tag
elements finds every matching tag
1、find_element_by_link_text          by full link text
2、find_element_by_id                 by id
3、find_element_by_class_name
4、find_element_by_partial_link_text
5、find_element_by_name
6、find_element_by_css_selector
7、find_element_by_tag_name
'''
# Build the driver object.
driver = webdriver.Chrome()
try:
    # Open the Baidu home page.
    driver.get('https://www.baidu.com/')
    driver.implicitly_wait(10)

    # 1 find_element_by_link_text: match the full link text
    # send_tag = driver.find_element_by_link_text('登录')
    # send_tag.click()

    # 2 find_element_by_partial_link_text: match part of an <a> tag's text
    login_button = driver.find_element_by_partial_link_text('登')
    login_button.click()
    time.sleep(1)

    # 3 find_element_by_class_name: match the class attribute
    login_tag = driver.find_element_by_class_name('tang-pass-footerBarULogin')
    login_tag.click()
    time.sleep(1)

    # 4 find_element_by_name: match the name attribute
    username = driver.find_element_by_name('userName')
    username.send_keys('15622792660')
    time.sleep(1)

    # 5 find_element_by_id: match the id attribute
    password = driver.find_element_by_id('TANGRAM__PSP_10__password')
    password.send_keys('*******')
    time.sleep(1)

    # 6 find_element_by_css_selector: match a CSS selector (here an id)
    login_submit = driver.find_element_by_css_selector('#TANGRAM__PSP_10__submit')
    # driver.find_element_by_css_selector('.pass-button-submit')
    login_submit.click()

    # 7 find_element_by_tag_name: match the tag name
    div = driver.find_element_by_tag_name('div')
    print(div.tag_name)
    time.sleep(10)
finally:
    driver.close()
'''
作业: 爬取快代理(参考爬取西刺代理代码) https://www.kuaidaili.com/free/
熟悉selenium模块,敲课上例子
自动登录抽屉新热榜
'''
from selenium import webdriver
import time

driver = webdriver.Chrome(r'D:\chromedriver_win32\chromedriver.exe')
# Maximise the browser window.
driver.maximize_window()
try:
    driver.get('https://dig.chouti.com/')
    driver.implicitly_wait(10)
    time.sleep(5)

    # 1 click the login button
    login_btn = driver.find_element_by_id('login_btn')
    login_btn.click()
    time.sleep(2)

    # 2 type the phone number
    phone = driver.find_element_by_class_name('login-phone')
    phone.send_keys('15622792660')

    # 3 type the password
    pwd = driver.find_element_by_class_name('pwd-password-input')
    pwd.send_keys('kermit46709394')

    # 4 confirm the login
    login_submit = driver.find_element_by_class_name('btn-large')
    login_submit.click()
    time.sleep(20)

# Catch and print any exception instead of letting it propagate.
except Exception as e:
    print(e)
finally:
    driver.close()
---恢复内容结束---