Chapter 2: The requests Module
1. Introduction
requests is a network-request module: powerful, simple and convenient to use, and reasonably efficient.
2. Purpose
It simulates a browser sending requests to a server.
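In practice, "simulating a browser" largely comes down to sending the headers a real browser would send, above all User-Agent (the library's default is "python-requests/x.y.z", which many sites reject). A minimal sketch; httpbin.org is assumed here only as a neutral echo endpoint and is not part of these notes:
import requests

# Override the default User-Agent so the request looks like desktop Chrome
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}
response = requests.get('https://httpbin.org/headers', headers=headers)
print(response.text)  # the echoed headers should show the spoofed User-Agent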
3. Usage workflow
- Specify the URL
- Send the request
- Get the response data
- Persist the response data
4. First simple example:
Scrape the Sogou homepage
import requests
# Scrape the Sogou homepage
if __name__ == '__main__':
    # step 1: specify the URL (the correct domain is sogou.com)
    url = 'https://www.sogou.com/'
    # step 2: send a GET request; get() returns a Response object
    response = requests.get(url=url)
    # step 3: get the response data; .text returns the body as a string
    page_text = response.text
    print(page_text)
    # step 4: persist the data
    with open('./sogou.html', 'w', encoding='utf-8') as fp:
        fp.write(page_text)
    print('Scraping finished')
5.requests对响应数据获取的三种方式
responese = requests.text #获取字符串类型的响应数据
responese = requests.json #获取对象类型的响应数据
responese = requests.content #获取的对象时二进制形式
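A quick demonstration of all three accessors; httpbin.org/get is assumed purely as a convenient test endpoint that returns JSON:
import requests

response = requests.get('https://httpbin.org/get')
print(type(response.text))     # <class 'str'>   - decoded text
print(type(response.content))  # <class 'bytes'> - raw bytes
print(type(response.json()))   # <class 'dict'>  - parsed JSON (fails if the body is not valid JSON)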
6. Hands-on practice
(1) Scrape the Sogou search-results page for a given keyword (a simple web collector)
import requests
# UA spoofing: send a browser-like User-Agent header
headers = {
    "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Mobile Safari/537.36"
}
url = 'https://www.sogou.com/web'
# Pack the URL query parameters into a dict
kw = input('please enter a key word:')
param = {
    'query': kw
}
# Send the request with the query parameters attached
response = requests.get(url=url, params=param, headers=headers)
page_text = response.text
filename = kw + '.html'
with open(filename, 'w', encoding='utf-8') as fp:
    fp.write(page_text)
print(filename, 'saved successfully!')
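Passing params as a dict lets requests build and URL-encode the query string for you, which matters for non-ASCII keywords. You can inspect the final URL without sending anything; this snippet is illustrative and not part of the original notes:
import requests

req = requests.Request('GET', 'https://www.sogou.com/web', params={'query': '蜘蛛'})
print(req.prepare().url)  # https://www.sogou.com/web?query=%E8%9C%98%E8%9B%9B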
(2) "Cracking" Baidu Translate (posting directly to its AJAX endpoint)
import requests
import json
post_url = 'https://fanyi.baidu.com/sug'
headers = {
    "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Mobile Safari/537.36"
}
word = input("please input a word:")
data = {
    'kw': word
}
response = requests.post(url=post_url, data=data, headers=headers)
# Get the response data; .json() returns the body deserialized into a Python object
dic_obj = response.json()
# Persist the data
fileName = word + '.json'
# ensure_ascii=False: the result contains Chinese, which cannot be encoded as ASCII
with open(fileName, 'w', encoding='utf-8') as fp:
    # json.dump() serializes the object to the file as JSON
    json.dump(dic_obj, fp=fp, ensure_ascii=False)
print('over! Scraping finished')
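For reference, the sug endpoint has been observed to return a dict of the form {'errno': 0, 'data': [{'k': ..., 'v': ...}, ...]}; that shape is an assumption about the live service, not something these notes guarantee. A hedged sketch for printing the suggestions:
# Assumes the {'errno', 'data': [{'k', 'v'}, ...]} shape described above
for entry in dic_obj.get('data', []):
    print(entry['k'], '->', entry['v'])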
(3) Scrape the Douban movie chart for a given category
import requests
import json
url = "https://movie.douban.com/j/chart/top_list"
headers = {
    "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Mobile Safari/537.36"
}
list_data = []
# Page through the chart 20 movies at a time (offsets 0, 20, 40, 60, 80)
for i in range(0, 100, 20):
    param = {
        'type': '24',            # chart category id
        'interval_id': '100:90',
        'start': str(i),         # offset: which movie in the chart to start from
        'limit': '20',           # how many movies to fetch per request
        'action': ''
    }
    response = requests.get(url=url, params=param, headers=headers)
    # Each page is a JSON array, so += concatenates the pages
    list_data += response.json()
with open('./douban.json', 'w', encoding='utf-8') as fp:
    json.dump(list_data, fp=fp, ensure_ascii=False)
print('over!!')