import requests
from bs4 import BeautifulSoup
def get_links(url, timeout=10):
    """Fetch *url* and return every absolute http(s) link on the page.

    Args:
        url: Page to download and scan for ``<a href="...">`` tags.
        timeout: Seconds to wait for the HTTP response before giving up
            (new keyword, defaults to 10 — callers passing only ``url``
            are unaffected).

    Returns:
        list[str]: href values that start with ``http`` (absolute links
        only; relative links are skipped, as in the original behavior).

    Raises:
        requests.RequestException: on network failure or timeout.
        requests.HTTPError: if the server returns a 4xx/5xx status.
    """
    # A timeout is essential: without one a stalled server hangs the
    # script forever (requests has no default timeout).
    response = requests.get(url, timeout=timeout)
    # Fail fast on HTTP errors instead of scraping an error page's HTML.
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    # Pull each anchor's href once, then keep only absolute http(s) URLs.
    hrefs = (anchor.get('href') for anchor in soup.find_all('a'))
    return [href for href in hrefs if href and href.startswith('http')]
if __name__ == '__main__':
    # Site whose outbound links we want to list.
    target = 'https://www.example.com'
    # Fetch the page, extract the absolute links, and print one per line.
    for found_link in get_links(target):
        print(found_link)