The Requests Module
Environment setup:
pip install requests
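If the install succeeded, the module can be imported and its version printed; a quick sanity check (any recent version works for the examples below):

import requests
print(requests.__version__)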
Hands-on coding:
Task 1: scrape the page data of the Sogou homepage
# Task 1: scrape the page data of the Sogou homepage
import requests

if __name__ == "__main__":
    # step 1: specify the URL
    url = 'https://www.sogou.com/'
    # step 2: send the request
    # get() returns a response object
    response = requests.get(url=url)
    # step 3: get the response data; .text returns the response body as a string
    page_text = response.text
    print(page_text)
    # persist the data to disk
    with open("./sogou.html", "w", encoding="utf-8") as fp:
        fp.write(page_text)
    print("Scraping finished!!!")
Practice exercises
01. Requests in practice: a web page collector
import requests

if __name__ == "__main__":
    # UA spoofing: wrap the corresponding User-Agent in a dict
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36 Edg/87.0.664.55'
    }
    url = "https://www.sogou.com/web"
    # handle the parameters carried by the URL (params): wrap them in a dict
    kw = input("enter a word:")
    param = {
        'query': kw
    }
    # the request goes to a URL that carries the parameters, and the parameters are handled during the request
    response = requests.get(url=url, params=param, headers=headers)
    page_text = response.text
    fileName = kw + '.html'
    with open(fileName, 'w', encoding='utf-8') as fp:
        fp.write(page_text)
    print(fileName, 'saved successfully!!!')
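Note that requests URL-encodes the values in params and appends them to the URL as a query string, so the keyword never has to be encoded by hand; response.url shows the final URL that was actually requested. A minimal sketch (keyword chosen arbitrarily):

import requests

headers = {'User-Agent': 'Mozilla/5.0'}
param = {'query': '爬虫'}
response = requests.get(url='https://www.sogou.com/web', params=param, headers=headers)
# the Chinese keyword is percent-encoded automatically,
# e.g. https://www.sogou.com/web?query=%E7%88%AC%E8%99%AB
print(response.url)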
02. Requests in practice: cracking Baidu Translate
import requests
import json

if __name__ == "__main__":
    # 1. specify the URL
    post_url = 'https://fanyi.baidu.com/sug'
    # 2. UA spoofing
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36 Edg/87.0.664.55'
    }
    # 3. POST request parameter handling (same as for a GET request)
    word = input("enter a word:")
    data = {
        'kw': word
    }
    # 4. send the request
    response = requests.post(url=post_url, data=data, headers=headers)
    # 5. get the response data: json() returns a Python object
    dic_obj = response.json()
    # 6. persist the data
    fileName = word + '.json'
    with open(fileName, 'w', encoding='utf-8') as fp:
        json.dump(dic_obj, fp=fp, ensure_ascii=False)
    print("over!!!!")
03. Requests in practice: scraping Douban movies
import requests
import json

if __name__ == '__main__':
    url = 'https://movie.douban.com/j/search_subjects'
    num = input("Start:")
    param = {
        'type': 'movie',
        'tag': '喜剧',           # tag value, "comedy"
        'sort': 'recommend',
        'page_limit': '20',      # how many movies to fetch per request
        'page_start': num,       # index of the first movie to fetch
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36 Edg/87.0.664.55'
    }
    response = requests.get(url=url, params=param, headers=headers)
    list_data = response.json()
    with open('./douban.json', 'w', encoding='utf-8') as fp:
        json.dump(list_data, fp=fp, ensure_ascii=False)
    print("over!!")