1. Create a .py file
2. Install the third-party dependencies with pip:
pip install beautifulsoup4
pip install requests
3. Run the script to start it: python <filename>.py
import requests
from bs4 import BeautifulSoup

def scan_website(url):
    # Send a GET request to fetch the page content
    response = requests.get(url, timeout=10)
    # Check whether the request succeeded
    if response.status_code == 200:
        # Parse the page content with BeautifulSoup
        soup = BeautifulSoup(response.text, 'html.parser')
        # Find all <a> link tags
        links = soup.find_all('a')
        # Print the href attribute of every link
        for link in links:
            href = link.get('href')
            print(href)
    else:
        print("Request failed with status code:", response.status_code)

# Call the function with the target website's URL
scan_website("http://www.xiankabao.com")
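
Note that the hrefs printed above may be relative paths (such as /about) rather than full URLs. The sketch below is one possible extension, not part of the original script: it resolves each href against the page URL with urllib.parse.urljoin and skips duplicates. The function name scan_absolute_links and the seen set are illustrative additions.

import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup

def scan_absolute_links(url):
    # Fetch the page the same way scan_website does
    response = requests.get(url, timeout=10)
    if response.status_code != 200:
        print("Request failed with status code:", response.status_code)
        return
    soup = BeautifulSoup(response.text, 'html.parser')
    seen = set()
    for link in soup.find_all('a'):
        href = link.get('href')
        if not href:
            continue
        # Resolve relative hrefs (e.g. /about) against the page URL
        absolute = urljoin(url, href)
        if absolute not in seen:
            seen.add(absolute)
            print(absolute)

scan_absolute_links("http://www.xiankabao.com")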