Python: Scraping Text and Images from NetEase Auto (网易汽车)

Site URL:

https://auto.163.com/

Prerequisites (libraries):

requests
os
bs4
urllib
re
html2text
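
Of these, os, urllib, and re ship with the Python standard library; the other three are third-party packages (bs4 is installed as beautifulsoup4) and can be added with pip:

pip install requests beautifulsoup4 html2text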

Code implementation:

import requests
import os
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import re
import html2text

url = "https://auto.163.com/"
response = requests.get(url)
# Guess the encoding from the response body to avoid garbled Chinese text
response.encoding = response.apparent_encoding

# Configure html2text to emit plain prose without links, images, or emphasis
h = html2text.HTML2Text()
h.ignore_links = True
h.ignore_images = True
h.ignore_emphasis = True

# Cut the "本地车市" (local car market) city list out of the raw HTML;
# it ends with the two-character city name "舟山"
text = response.text
start_index = text.find("本地车市")
end_index = text.find("舟山")
if start_index != -1 and end_index != -1:
    trimmed_text = text[:start_index] + text[end_index + 2:]
else:
    trimmed_text = text

soup = BeautifulSoup(trimmed_text, 'html.parser')
# Remove <script> and <style> elements so only visible text remains
for tag in soup(["script", "style"]):
    tag.extract()

text = soup.get_text()
text = re.sub(r'\s+', ' ', text)  # collapse runs of whitespace
text = text.strip()
text = h.handle(text)

# Split the result into the navigation bar (from "网易汽车" up to
# "热门车型") and the body content that follows it
nav_start = text.find("网易汽车")
nav_end = text.find("热门车型")
if nav_start == -1 or nav_end == -1:
    nav_start = nav_end = 0  # markers not found: treat everything as body text
nav_text = text[nav_start:nav_end].strip()
content_text = text[nav_end:].strip()

save_path = r'D:/python/Project/text/text.txt'
os.makedirs(os.path.dirname(save_path), exist_ok=True)  # ensure the folder exists
with open(save_path, 'w', encoding='utf-8') as file:
    file.write("导航栏文本:\n\n")   # "navigation bar text"
    file.write(nav_text)
    file.write("\n\n内容文本:\n\n")  # "body text"
    file.write(content_text)
print('文本保存成功!')  # text saved successfully
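
To sanity-check the result, you can read the file back and preview the beginning (same path as in the script above):

with open(r'D:/python/Project/text/text.txt', encoding='utf-8') as f:
    print(f.read(500))  # preview the first 500 characters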

save_dir = r"D:\python\Project\img"
if not os.path.exists(save_dir):
    os.makedirs(save_dir)

# Collect one detail-page URL per brand from the product index page
response = requests.get("http://product.auto.163.com/")
soup = BeautifulSoup(response.content, 'html.parser')
brands = soup.select(".brand_cont .brand_name")
brand_urls = []
for brand in brands:
    brand_urls.append("http://product.auto.163.com/new_daquan/brand/" + brand["id"] + ".html")

saved_images = set()  # filenames already written, used to skip duplicates
for url in brand_urls:
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    for image in soup.find_all("img"):
        image_url = image.get("src", "")  # some <img> tags carry no src
        if not image_url.startswith("http"):
            continue  # skip relative and protocol-relative URLs
        filename = os.path.basename(urlparse(image_url).path)
        if not filename or filename in saved_images:
            continue
        save_path = os.path.join(save_dir, filename)
        try:
            with requests.get(image_url, timeout=5) as r:
                r.raise_for_status()
                with open(save_path, 'wb') as f:
                    f.write(r.content)
            saved_images.add(filename)
            print(f"保存图像成功: {filename}")  # image saved
        except requests.exceptions.RequestException as e:
            print(f"保存图像失败: {image_url}")  # failed to save image
            print(e)
print("所有图像均已保存")  # all images have been saved

The save paths above can be changed to suit your setup. Note that the image step crawls the brand pages of the entire site.
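
If you just want to test the script or reduce load on the site, you can cap the number of brand pages and pause between requests. A minimal sketch, where the five-page cap and one-second delay are arbitrary choices, not part of the original script:

import time

for url in brand_urls[:5]:   # hypothetical cap: only the first five brand pages
    time.sleep(1)            # pause between requests to be polite to the server
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    print(url, "->", len(soup.find_all("img")), "images on the page")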
