# 准备工作 (Preparation):
# (1) Import the requests library (must be installed first)
# (2) Get the target URL
# (3) Find a User-Agent (identifies the request client) and use it as headers, to avoid anti-crawler blocking
# (4) Store the fetched data
import requests
if __name__ == "__main__":
    # Script entry point: fetch a Sogou web-search result page for a
    # user-supplied keyword and save the raw HTML to "<keyword>.html".

    # Base URL of Sogou's web search endpoint.
    url = "https://www.sogou.com/web"

    # User-Agent copied from a real browser so the server does not
    # reject the request as an obvious crawler (basic anti-bot evasion).
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 SLBrowser/8.0.0.3161 SLBChan/105"
    }

    kw = input("目标")

    # "query" is the URL parameter Sogou uses for the search term.
    param = {
        "query": kw
    }

    # requests.get() returns the response; timeout prevents the script
    # from hanging forever on an unresponsive connection.
    response = requests.get(url=url, params=param, headers=headers, timeout=10)

    # Fail fast on HTTP errors (4xx/5xx) instead of silently saving an
    # error page as if it were the search result.
    response.raise_for_status()

    r = response.text
    fileName = kw + ".html"

    # encoding="utf-8" so Chinese page content is written without mojibake.
    with open(fileName, 'w', encoding="utf-8") as fp:
        fp.write(r)
    print(fileName, "保存成功")