# Hands-on coding — goal: fetch translation suggestions from Baidu Fanyi's `sug` endpoint via a POST request
import json
import requests
if __name__ == '__main__':
    # Baidu Fanyi suggestion endpoint; takes a POST with form data
    # (parameter handling works the same as for a GET request).
    post_url = 'https://fanyi.baidu.com/sug'
    word = input('enter a word:')
    data = {
        'kw': word
    }
    # A browser User-Agent sidesteps the trivial UA-based anti-crawler check.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36'
    }
    response = requests.post(url=post_url, data=data, headers=headers)
    # Fail fast on HTTP errors instead of trying to parse an error page as JSON.
    response.raise_for_status()
    # .json() deserializes the body into a Python object; only call it when the
    # response is actually JSON.
    dic_obj = response.json()
    print(dic_obj)
    # Persist the result to <word>.json. Using `with` guarantees the file is
    # closed even if json.dump raises (the previous commented-out version
    # opened the file and never closed it).
    file_name = word + '.json'
    with open(file_name, 'w', encoding='utf-8') as fp:
        json.dump(dic_obj, fp=fp, ensure_ascii=False)
    print('over!!')