网址链接:https://auto.163.com/
准备工作(库):
requests
os
bs4
urllib
re
html2text
代码实现:
import requests
import os
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import re
import html2text
# --- Text scraping: fetch the NetEase Auto homepage, extract readable text,
# split it into navigation-bar text and main content, and save both to disk. ---
url = "https://auto.163.com/"
response = requests.get(url, timeout=10)  # timeout so the script cannot hang forever
response.encoding = response.apparent_encoding  # page charset may not be UTF-8; detect it

# html2text converter configured to strip links/images/emphasis, keeping plain prose.
h = html2text.HTML2Text()
h.ignore_links = True
h.ignore_images = True
h.ignore_emphasis = True

text = response.text
# Remove the city-list section between "本地车市" and "舟山" (navigation noise).
start_index = text.find("本地车市")
end_index = text.find("舟山")
if start_index != -1 and end_index != -1:
    # len("舟山") instead of a magic 2: skip past the end marker itself.
    trimmed_text = text[:start_index] + text[end_index + len("舟山"):]
else:
    trimmed_text = text

soup = BeautifulSoup(trimmed_text, 'html.parser')
for script in soup(["script", "style"]):
    script.extract()  # drop non-visible content before text extraction
text = soup.get_text()
text = re.sub(r'\s+', ' ', text).strip()  # collapse all whitespace runs
text = h.handle(text)

# Split at known landmarks: "网易汽车" starts the navigation bar,
# "热门车型" starts the main content.
nav_start = text.find("网易汽车")
nav_end = text.find("热门车型")
# Guard missing landmarks: the original sliced with -1, silently producing
# wrong sections (nav lost its last char, content became a single char).
if nav_start == -1:
    nav_start = 0
if nav_end == -1:
    nav_end = nav_start  # no content marker: treat everything as content
nav_text = text[nav_start:nav_end]
content_text = text[nav_end:]

nav_text = h.handle(re.sub(r'\s+', ' ', nav_text).strip())
content_text = h.handle(re.sub(r'\s+', ' ', content_text).strip())

save_path = r'D:/python/Project/text/text.txt'
# Ensure the target directory exists; open(..., 'w') does not create it.
os.makedirs(os.path.dirname(save_path), exist_ok=True)
with open(save_path, 'w', encoding='utf-8') as file:
    file.write("导航栏文本:\n\n")
    file.write(nav_text)
    file.write("\n\n内容文本:\n\n")
    file.write(content_text)
print('文本保存成功!')
# --- Image scraping: walk every car-brand page on product.auto.163.com and
# download each image exactly once into save_dir. ---
save_dir = r"D:\python\Project\img"
os.makedirs(save_dir, exist_ok=True)  # idempotent: no need for the exists() pre-check

# Build the detail-page URL for every brand listed on the product index.
# NOTE: the original set request.encoding = "GBK", but parsing uses raw
# .content, so the attribute was dead code and is dropped here.
index_resp = requests.get("http://product.auto.163.com/", timeout=10)
index_soup = BeautifulSoup(index_resp.content, 'html.parser')
brand_urls = [
    "http://product.auto.163.com/new_daquan/brand/" + brand["id"] + ".html"
    for brand in index_soup.select(".brand_cont .brand_name")
]

saved_images = set()  # filenames already written, so duplicates are skipped
for brand_url in brand_urls:
    page = requests.get(brand_url, timeout=10)
    page_soup = BeautifulSoup(page.content, 'html.parser')
    for image in page_soup.find_all("img"):
        # .get avoids KeyError on <img> tags without a src attribute.
        image_url = image.get("src")
        if not image_url or not image_url.startswith("http"):
            continue  # skip relative/data URLs and srcless tags
        filename = os.path.basename(urlparse(image_url).path)
        # Empty filename (URL path ends in '/') would clobber the directory path.
        if not filename or filename in saved_images:
            continue
        save_path = os.path.join(save_dir, filename)
        try:
            with requests.get(image_url, timeout=5) as r:
                r.raise_for_status()  # treat HTTP errors as download failures
                with open(save_path, 'wb') as f:
                    f.write(r.content)
            saved_images.add(filename)  # 将已保存的图片文件名加入集合
            # Original message printed a literal placeholder; show the real URL.
            print(f"保存图像成功: {image_url}")
        except requests.exceptions.RequestException as e:
            print(f"保存图像失败: {image_url}")
            print(e)
print("所有图像均已保存")
上述保存路径可自行修改;爬取图片时,是对该网站的品牌页面进行全站图片爬取。