1.搭建环境
- 安装pycharm或者其他编辑器
- 安装 Python,本次安装的版本是 3.7
- 创建虚拟环境
- 安装requests库
2.requests的基础使用-post请求
import requests
import json
# POST a plain dict: requests encodes it as application/x-www-form-urlencoded,
# so httpbin echoes the pairs back under "form".
def send_requests1():
    payload = {'key1': 'value1', 'key2': 'value2'}
    return requests.post('http://httpbin.org/post', data=payload)
# POST a JSON string: the dict is serialized with json.dumps first, so the
# body arrives as raw data (httpbin echoes it under "data"/"json", not "form").
def send_requests2():
    body = json.dumps({'key1': 'value1', 'key2': 'value2'})
    return requests.post('http://httpbin.org/post', data=body)
# POST a file upload. The `files` mapping must follow the field names and
# format the target site requires.
def send_requests3():
    url = 'http://httpbin.org/post'
    # Use a context manager so the file handle is always closed
    # (the original opened 'report.txt' and never closed it).
    with open('report.txt', 'rb') as fh:
        files = {'file': fh}
        r = requests.post(url, files=files)
    return r
if __name__ == "__main__":
    # Python 3 requires print() as a function call; the original used
    # Python 2 "print '...'" statements, which are a SyntaxError on the
    # Python 3.7 environment this tutorial sets up.
    response1 = send_requests1()
    print('------ response1.text = ', response1.text)

    # response2 = send_requests2()
    # print('------ response2.text = ', response2.text)

    # response3 = send_requests3()
    # print('------ response3.text = ', response3.text)
输出:
------ response1.text = {
"args": {},
"data": "",
"files": {},
"form": {
"key1": "value1",
"key2": "value2"
},
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Content-Length": "23",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "httpbin.org",
"User-Agent": "python-requests/2.18.1"
},
"json": null,
"origin": "220.194.45.154, 220.194.45.154",
"url": "https://httpbin.org/post"
}
------ response2.text = {
"args": {},
"data": "{\"key2\": \"value2\", \"key1\": \"value1\"}",
"files": {},
"form": {},
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Content-Length": "36",
"Host": "httpbin.org",
"User-Agent": "python-requests/2.18.1"
},
"json": {
"key1": "value1",
"key2": "value2"
},
"origin": "220.194.45.154, 220.194.45.154",
"url": "https://httpbin.org/post"
}
------ response3.text = {
"args": {},
"data": "",
"files": {
"file": "hello word!\n"
},
"form": {},
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Content-Length": "158",
"Content-Type": "multipart/form-data; boundary=0562a8553c254ba298a69aa9e7fb477e",
"Host": "httpbin.org",
"User-Agent": "python-requests/2.18.1"
},
"json": null,
"origin": "220.194.45.154, 220.194.45.154",
"url": "https://httpbin.org/post"
}
复制代码
3.requests的基础使用-get请求
# Send a plain GET request
import requests

url = "http://httpbin.org/get"
r = requests.get(url)
# Attach custom headers and cookies to a GET request
import requests

req_headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36"
}
req_cookies = {
    "PSTM": "553180542",
    "HMACCOUNT": "BA4C08D999D27E4E"
}
r = requests.get(url="http://httpbin.org/get", headers=req_headers, cookies=req_cookies)
# The echoed response shows our Cookie and User-Agent values under "headers"
print(r.text)
# GET request carrying query-string parameters
import requests

query = {
    'name': 'sitven',
    'age': 25,
    'time': "2019-03-08",
}
req = requests.get(url="http://httpbin.org/get", params=query)
# The parameters are echoed back under "args" in the response body
print(req.text)
# Tour of the attributes on a requests Response object
import requests

resp = requests.get("http://httpbin.org/get")
print(resp.text)         # body as str, decoded per the charset in the response headers
print(resp.status_code)  # HTTP status code
print(resp.json())       # body parsed with the built-in JSON decoder
print(resp.headers)      # case-insensitive dict-like view of the response headers

resp = requests.get("https://www.baidu.com")
# Body as bytes; gzip/deflate transfer encodings are decoded automatically
print(resp.content)
# Final URL of the request
print(resp.url)
# Encoding used to decode resp.text
print(resp.encoding)
# Cookies set by the server
print(resp.cookies)
# Underlying urllib3 response object (pass stream=True to actually read from it)
print(resp.raw)
# Redirect history, if any
print(resp.history)
复制代码
3.上传文件
# 上传百度图片,需要特定的格式
import requests
url = "https://graph.baidu.com/upload"
files = {
"tn":"pc",
"image":("123.jpg",open('123.jpg','rb'),"image/jpeg"),
"from":"pc",
"image_source":"PC_UPLOAD_SEARCH_FILE",
"range":'{"page_from": "searchIndex"}'
}
r = requests.post(url,files=files)
print(r.json())
复制代码
4.下载文件
import requests

# Download URL (NOTE(review): this is a pre-signed Baidu link with an
# 8-hour expiry — it will almost certainly be dead by the time you run this;
# substitute any direct-download URL)
download_url = 'https://nj02cm01.baidupcs.com/file/da941ce26b392a4ea0b010b6e021a695?bkt=p3-1400da941ce26b392a4ea0b010b6e021a6956171262a00000003bca9&fid=3310494135-250528-127659779854873&time=1533574416&sign=FDTAXGERLQBHSK-DCb740ccc5511e5e8fedcff06b081203-KqPVE0es2sUR30U1G%2Fvps9I3VY4%3D&to=88&size=244905&sta_dx=244905&sta_cs=0&sta_ft=jpg&sta_ct=0&sta_mt=0&fm2=MH%2CQingdao%2CAnywhere%2C%2Cchongqing%2Ccmnet&resv0=cdnback&resv1=0&vuk=282335&iv=-2&newver=1&newfm=1&secfm=1&flow_ver=3&pkey=1400da941ce26b392a4ea0b010b6e021a6956171262a00000003bca9&sl=82640974&expires=8h&rt=sh&r=220567738&mlogid=445212826855757932&vbdid=1883780403&fin=1533574308687.jpg&fn=1533574308687.jpg&rtype=1&dp-logid=445212826855757932&dp-callid=0.1.1&hps=1&tsl=50&csl=78&csign=0vnYzTYv2VV%2Ff%2FRkrbacf8q2JPs%3D&so=0&ut=8&uter=4&serv=0&uc=1400105996&ic=321428139&ti=86348c5ac45f19b1da511678c3490bd3448fbb7a71823ad8&by=themis'
# Hand the URL to requests
resp = requests.get(download_url)
# Save the payload to disk (works for any file type, not just images).
# Fixed the original filename typo: "12.ipg" -> "12.jpg".
with open("12.jpg", "wb") as out_file:
    out_file.write(resp.content)
复制代码
5.requests中header介绍及使用
发送 GET 或 POST 请求时出现"非法参数"提示,通常是网站的反爬机制在起作用。解决方法:添加 header。当然还有其它应对反爬机制的手段,这里讲解的只是最简单的一种。
复制请求头字段
import requests

# First request WITHOUT custom headers: Baidu detects the default
# python-requests User-Agent and returns a "page unavailable" notice.
# (The original passed headers=headers here before `headers` was ever
# defined, which raises NameError instead of demonstrating the block.)
res = requests.get("http://www.baidu.com/")
print(res.text)
# Output contains "抱歉!页面无法访问..." — the site is blocking the crawler.
# Fix: pass a browser-like dict via requests.get(..., headers=headers),
# as shown in the next snippet.
# Build browser-like headers. NOTE: the commas between dict entries are
# required — the original dict omitted them, which is a SyntaxError.
import requests

headers = {
    "Host": "www.baidu.com",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
}
res = requests.get("http://www.baidu.com/", headers=headers)
print(res.text)
复制代码
6.请求头加密串处理
即使设置了请求头,仍然出现"无效参数",这是因为某些网站对请求头进行了加密。做测试需要了解本公司是如何加密的,一般是进行 MD5 双重或多重加密。
import requests
import hashlib  # MD5 hashing
import json

# You must learn from the backend developers what the raw token material is
# and how it is encrypted; here it is a single MD5 pass over a seed string
# (real projects may chain two or more passes).
secret_seed = "tian wang gai di hu"
md5 = hashlib.md5()
# hashlib only accepts bytes, so encode the str first
md5.update(secret_seed.encode('utf-8'))
# Apply further hashing rounds here if the project requires them.
# (Renamed from `res` so the digest is not clobbered by the response below.)
token = md5.hexdigest()
headers = {
    # Commas between entries are required — the original dict omitted
    # them, which is a SyntaxError.
    "Host": "www.baidu.com",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
    "token": token,  # place the derived token in the request headers
}
res = requests.get("http://www.baidu.com/", headers=headers)
print(res.text)
复制代码
好了各位,以上就是这篇文章的全部内容了,能看到这里人啊,都是人才。
如果这个文章写得还不错,觉得我有点东西的话 求点赞👍求关注❤️求分享👥 对宅女我来说真的非常有用!!!
白嫖不好,创作不易,各位的支持和认可,就是我创作的最大动力,我们下篇文章见!
最后感谢每一个认真阅读我文章的人,看着粉丝一路的上涨和关注,礼尚往来总是要有的,虽然不是什么很值钱的东西,如果你用得到的话可以直接拿走:
下方这份完整的软件测试资料已经上传CSDN官方认证的二维码,朋友们如果需要可以自行免费领取 【保证100%免费】
这些资料,对于从事【软件测试】的朋友来说应该是最全面最完整的备战仓库,这个仓库也陪伴我走过了最艰难的路程,希望也能帮助到你!凡事要趁早,特别是技术行业,一定要提升技术功底。希望对大家有所帮助……