import requests
from bs4 import BeautifulSoup
def scrape_website(url, timeout=10):
    """Fetch *url* and print the ``href`` of every ``<a>`` tag on the page.

    Parameters
    ----------
    url : str
        Address of the page to scrape.
    timeout : float, optional
        Seconds to wait for the HTTP response (default 10). Without a
        timeout, ``requests.get`` can block indefinitely on a stalled server.

    Errors are reported on stdout rather than raised to the caller.
    """
    try:
        response = requests.get(url, timeout=timeout)
        # Surface HTTP 4xx/5xx responses as RequestException.
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        # Example task: extract all links from the page.
        links = soup.find_all('a')
        for link in links:
            print(link.get('href'))
    except requests.RequestException as e:
        print(f"请求网页时发生错误: {e}")
    except Exception as e:
        print(f"发生了一个未知错误: {e}")
if __name__ == "__main__":
    # Replace with the URL of the page you actually want to scrape.
    target_url = "https://example.com"
    scrape_website(target_url)
# Analyze the data in a CSV file
import csv
def analyze_csv_file(file_path, column_index):
try:
values = []
with open(file_path, 'r', encoding='utf-8') as file:
reader = csv.reader(file)
next(reader) # 跳过标题行
for row in reader:
try:
value = float(row[column_index])
values.append(value)
except (IndexError, ValueError):
continue
if values:
average = sum(values) / len(values)
max_value = max(values)
min_value = min(values)
print(f"平均值: {average}")
print(f"最大值: {max_value}")
print(f"最小值: {min_value}")
else:
print("未找到有效的数值数据。")
except FileNotFoundError:
print("错误: 指定的 CSV 文件未找到!")
except Exception as e:
print(f"错误: 发生了一个未知错误: {e}")
if __name__ == "__main__":
    # Replace these with the real CSV path and the column to analyze.
    csv_path = "your_file.csv"
    target_column = 0
    analyze_csv_file(csv_path, target_column)