Scraping supplier information from the Huiparts (汇配) auto-parts inventory-sharing platform

Given a search keyword, the scripts below first collect the links of matching merchants on the Huiparts auto-parts inventory-sharing platform, then visit each link to extract supplier details such as WeChat, QQ, and address. This makes it easy to gather contact information and reach out to suppliers in bulk instead of contacting them one by one. For personal use only.
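
The search-results URL hard-coded in the first script carries the keyword 汽车配件 in percent-encoded form. To search for a different keyword, a small sketch like the one below builds that URL from plain text; the query parameter names (search_btn, keywords, city, area_id) are taken from the URL that appears in the script, and whether the site accepts arbitrary keywords this way is an assumption.

import urllib.parse

def build_search_url(keyword, city="", area_id=""):
    # Parameter names copied from the hard-coded search URL used later in the script.
    params = {
        "search_btn": "1",
        "keywords": keyword,
        "city": city,
        "area_id": area_id,
    }
    return "https://www.huiparts.com/home/search?" + urllib.parse.urlencode(params)

print(build_search_url("汽车配件"))
# -> https://www.huiparts.com/home/search?search_btn=1&keywords=%E6%B1%BD%E8%BD%A6%E9%85%8D%E4%BB%B6&city=&area_id=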

Log in automatically and collect the links of all suppliers matching the keyword

import os
import time
import urllib.parse
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import openpyxl

def setup_webdriver(headless=True):
    # 设置Chrome选项
    chrome_options = Options()
    if headless:
        chrome_options.add_argument("--headless")  # 无头模式
    chrome_options.add_argument("--disable-gpu")  # 禁用GPU
    chrome_options.add_argument("--no-sandbox")  # 禁用沙盒
    chrome_options.add_argument("--disable-dev-shm-usage")  # 解决资源限制问题

    # 设置WebDriver路径
    webdriver_path = 'D:\\chromedriver\\chromedriver.exe'  # 替换为你的chromedriver路径
    service = Service(webdriver_path)

    # 启动WebDriver
    driver = webdriver.Chrome(service=service, options=chrome_options)
    return driver

def login(driver, username, password, login_url="https://www.huiparts.com/user/login"):
    try:
        driver.get(login_url)
        print(f"打开登录页面: {login_url}")

        wait = WebDriverWait(driver, 30)
        # 等待用户名输入框出现
        username_field = wait.until(
            EC.visibility_of_element_located((By.NAME, 'username'))
        )
        print("找到用户名输入框")

        # 输入用户名和密码
        username_field.send_keys(username)
        print(f"输入用户名: {username}")

        password_field = driver.find_element(By.NAME, 'password')
        password_field.send_keys(password)
        print("输入密码")

        # 点击登录按钮
        login_button = driver.find_element(By.XPATH, '//button[@class="loginBtn"]')
        login_button.click()
        print("点击登录按钮")

        # 等待登录完成,可以通过等待URL变化或特定元素出现
        wait.until(EC.url_changes(login_url))
        current_url = driver.current_url
        print(f"当前URL: {current_url}")

        # 可选:确认是否成功登录,可以根据页面内容进一步判断
        time.sleep(5)  # 确保页面完全加载

    except Exception as e:
        print(f"登录过程中出现异常: {e}")
        driver.quit()
        raise e

def extract_company_links(driver):
    wait = WebDriverWait(driver, 30)
    try:
        # 等待商户信息块加载
        wait.until(
            EC.visibility_of_element_located((By.CLASS_NAME, 'rowTr'))
        )
        # 找到所有包含公司信息的div
        company_divs = driver.find_elements(By.CSS_SELECTOR, 'div.rowTr div.pClick.rComp')
        print(f"找到 {len(company_divs)} 个公司信息块")

        company_links = []

        for div in company_divs:
            try:
                uid = div.get_attribute('uid')
                msg = div.get_attribute('msg')
                company_name = div.find_element(By.CSS_SELECTOR, 'a.company_name_box').text.strip()

                # URL-encode msg
                encoded_msg = urllib.parse.quote(msg)

                # 构建链接
                link = f"https://www.huiparts.com/home/chat?uid={uid}&msg={encoded_msg}"

                company_links.append({
                    '公司名称': company_name,
                    '链接': link
                })
                # print(f"提取公司: {company_name}, 链接: {link}")  # 可选,防止日志过多

            except Exception as e:
                # print(f"提取单个公司信息时出错: {e}")  # 可选,防止日志过多
                continue

        return company_links

    except Exception as e:
        print(f"提取公司链接时出现异常: {e}")
        return []

def extract_all_company_links(driver):
    all_company_links = []
    scroll_pause_time = 2  # 每次滚动后等待加载新内容的时间(秒)
    max_scroll_attempts = 50  # 最大滚动次数,防止无限循环

    last_height = driver.execute_script("return document.body.scrollHeight")
    print(f"初始页面高度: {last_height}")

    scroll_attempt = 0
    while scroll_attempt < max_scroll_attempts:
        # 执行滚动操作
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        scroll_attempt += 1  # 每次滚动都计数,保证 max_scroll_attempts 是真正的上限
        print(f"执行滚动操作,第 {scroll_attempt} 次")

        # 等待新内容加载
        time.sleep(scroll_pause_time)

        # 获取新的页面高度
        new_height = driver.execute_script("return document.body.scrollHeight")
        print(f"新的页面高度: {new_height}")

        # 提取当前可见的公司链接,去重后并入结果
        company_links = extract_company_links(driver)
        unique_new_links = [link for link in company_links if link not in all_company_links]
        if unique_new_links:
            all_company_links.extend(unique_new_links)
            print(f"提取到 {len(unique_new_links)} 个新商户链接")

        if new_height == last_height:
            if not unique_new_links:
                # 页面高度未变化且没有新链接,认为已到达底部
                print("页面高度未变化且没有新商户链接,已到达底部")
                break
            print("页面高度未变化,但仍提取到新商户链接,继续滚动")
        else:
            # 页面高度变化,继续滚动
            last_height = new_height

    print(f"\n总共提取到 {len(all_company_links)} 个公司链接")
    return all_company_links

def initialize_excel(file_name):
    if not os.path.exists(file_name):
        wb = openpyxl.Workbook()
        ws = wb.active
        ws.title = "配件商信息"
        # 写入表头
        ws.append(["公司名称", "链接"])
        wb.save(file_name)
        print(f"创建新的Excel文件: {file_name}")
    else:
        print(f"Excel文件已存在: {file_name}")

def append_to_excel(file_name, data):
    if not data:
        print("没有数据可追加")
        return

    wb = openpyxl.load_workbook(file_name)
    ws = wb.active
    for entry in data:
        ws.append([entry['公司名称'], entry['链接']])
    wb.save(file_name)
    print(f"已追加 {len(data)} 行数据到Excel文件")

def main():
    # 推荐使用环境变量存储敏感信息
    USERNAME = os.getenv('HUIPARTS_USERNAME', '8888')   # 替换为你的用户名
    PASSWORD = os.getenv('HUIPARTS_PASSWORD', '88885')  # 替换为你的密码

    # Excel文件名
    excel_file = "配件商信息.xlsx"
    initialize_excel(excel_file)

    # 设置WebDriver并登录
    driver = setup_webdriver(headless=False)  # 设置为 False 以便调试时看到浏览器操作
    try:
        login(driver, USERNAME, PASSWORD)

        # 导航到搜索结果页面,假设已经登录后页面会跳转到一个搜索结果页面
        # 如果需要手动导航到某个特定页面,请在此处添加
        target_search_url = "https://www.huiparts.com/home/search?search_btn=1&keywords=%E6%B1%BD%E8%BD%A6%E9%85%8D%E4%BB%B6&city=&area_id="  # 替换为实际的搜索结果URL
        driver.get(target_search_url)
        print(f"导航到搜索结果页面: {target_search_url}")
        time.sleep(5)  # 等待页面加载

        # 提取所有公司链接
        all_company_links = extract_all_company_links(driver)

        # 过滤重复项
        unique_company_links = { (item['公司名称'], item['链接']) for item in all_company_links }
        unique_company_links = [ {'公司名称': name, '链接': link} for name, link in unique_company_links ]

        # 追加到Excel
        append_to_excel(excel_file, unique_company_links)

    except Exception as e:
        print(f"主程序出现异常: {e}")

    finally:
        # 截图页面,帮助调试
        driver.save_screenshot("final_screenshot.png")
        print("页面截图已保存到 'final_screenshot.png'")

        # 保存页面源代码
        with open("final_page_source.html", "w", encoding='utf-8') as f:
            f.write(driver.page_source)
        print("页面源代码已保存到 'final_page_source.html'")

        # 关闭WebDriver
        driver.quit()
        print("WebDriver已关闭")

if __name__ == "__main__":
    main()
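
Note that the first script saves its results to 配件商信息.xlsx with the company name in column A and the link in column B, while the second script below reads URLs from column A of Sheet1 in a file named 公司链接.xlsx. A small bridge sketch, assuming those default file names are kept, copies the links into the format the second script expects:

import openpyxl

def export_links(src="配件商信息.xlsx", dst="公司链接.xlsx"):
    # Copy the link column (B) of the first script's output into column A of
    # the workbook the second script reads; file names are the defaults used above.
    src_wb = openpyxl.load_workbook(src)
    src_ws = src_wb.active

    dst_wb = openpyxl.Workbook()
    dst_ws = dst_wb.active
    dst_ws.title = "Sheet1"
    dst_ws.append(["链接"])  # header row; the second script starts reading at row 2

    for row in src_ws.iter_rows(min_row=2, min_col=2, max_col=2, values_only=True):
        if row[0]:
            dst_ws.append([row[0]])

    dst_wb.save(dst)

if __name__ == "__main__":
    export_links()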

Collect supplier contact information from the links gathered above

import os
import time
import openpyxl
import logging
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# 配置日志记录
logging.basicConfig(
    filename='scraper.log',  # 日志文件名
    filemode='a',             # 追加模式
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=logging.INFO         # 日志级别
)

def setup_webdriver(headless=True):
    """
    设置并启动Chrome WebDriver
    """
    chrome_options = Options()
    if headless:
        chrome_options.add_argument("--headless")  # 无头模式
    chrome_options.add_argument("--disable-gpu")  # 禁用GPU
    chrome_options.add_argument("--no-sandbox")  # 禁用沙盒
    chrome_options.add_argument("--disable-dev-shm-usage")  # 解决资源限制问题

    # 设置WebDriver路径
    webdriver_path = 'D:\\chromedriver\\chromedriver.exe'  # 替换为您的chromedriver路径
    if not os.path.exists(webdriver_path):
        logging.error(f"WebDriver路径不存在: {webdriver_path}")
        raise FileNotFoundError(f"WebDriver路径不存在: {webdriver_path}")

    service = Service(webdriver_path)

    # 启动WebDriver
    driver = webdriver.Chrome(service=service, options=chrome_options)
    driver.maximize_window()
    logging.info("WebDriver已启动")
    return driver

def login(driver, username, password, login_url="https://www.huiparts.com/user/login"):
    """
    登录到目标网站
    """
    try:
        driver.get(login_url)
        logging.info(f"打开登录页面: {login_url}")

        wait = WebDriverWait(driver, 30)
        # 等待用户名输入框出现
        username_field = wait.until(
            EC.visibility_of_element_located((By.NAME, 'username'))
        )
        logging.info("找到用户名输入框")

        # 输入用户名和密码
        username_field.send_keys(username)
        logging.info(f"输入用户名: {username}")

        password_field = driver.find_element(By.NAME, 'password')
        password_field.send_keys(password)
        logging.info("输入密码")

        # 点击登录按钮
        login_button = driver.find_element(By.XPATH, '//button[contains(@class, "loginBtn")]')
        login_button.click()
        logging.info("点击登录按钮")

        # 等待URL变化,确认登录成功
        wait.until(EC.url_changes(login_url))
        current_url = driver.current_url
        logging.info(f"当前URL: {current_url}")

        # 根据需要调整登录后的跳转逻辑
        target_home_url = "https://www.huiparts.com/home"  # 根据实际情况修改
        if "home" not in current_url:
            driver.get(target_home_url)
            logging.info(f"导航到主页: {target_home_url}")
            wait.until(EC.url_contains("home"))
            logging.info("已导航到主页")

        time.sleep(5)  # 确保页面完全加载

    except Exception as e:
        logging.error(f"登录过程中出现异常: {e}")
        driver.quit()
        raise e

def load_target_urls(input_excel='公司链接.xlsx', sheet_name='Sheet1', url_column=1):
    """
    从Excel文件中读取目标URL列表
    """
    if not os.path.exists(input_excel):
        logging.error(f"输入的Excel文件不存在: {input_excel}")
        return []

    wb = openpyxl.load_workbook(input_excel)
    if sheet_name not in wb.sheetnames:
        logging.error(f"指定的工作表 '{sheet_name}' 不存在")
        return []
    ws = wb[sheet_name]

    urls = []
    # 假设第一行为标题,从第二行开始读取
    for row in ws.iter_rows(min_row=2, min_col=url_column, max_col=url_column, values_only=True):
        url = row[0]
        if url and isinstance(url, str):
            urls.append(url.strip())
        else:
            logging.warning(f"发现无效的URL: {url}")

    logging.info(f"共加载到 {len(urls)} 个URL")
    return urls

def initialize_excel(file_name):
    """
    初始化输出的Excel文件,若不存在则创建并写入表头
    """
    if not os.path.exists(file_name):
        wb = openpyxl.Workbook()
        ws = wb.active
        ws.title = "商户信息"
        # 写入表头
        ws.append(["配件商", "联系人", "手机号", "微信", "QQ", "联系电话", "公司地址", "URL"])
        wb.save(file_name)
        logging.info(f"创建新的Excel文件: {file_name}")
    else:
        logging.info(f"Excel文件已存在: {file_name}")

def append_to_excel(file_name, data):
    """
    将一行数据追加到Excel文件
    """
    try:
        wb = openpyxl.load_workbook(file_name)
        ws = wb.active
        ws.append(data)
        wb.save(file_name)
        logging.info("已追加一行数据到Excel文件")
    except Exception as e:
        logging.error(f"追加数据到Excel文件时出现异常: {e}")

def extract_data(driver, url):
    """
    提取目标URL页面中的所需数据
    """
    wait = WebDriverWait(driver, 30)
    try:
        driver.get(url)
        logging.info(f"打开目标页面: {url}")

        # 增加等待时间以确保页面完全加载
        time.sleep(5)

        # 检查是否存在iframe
        iframes = driver.find_elements(By.TAG_NAME, 'iframe')
        if len(iframes) > 0:
            driver.switch_to.frame(iframes[0])  # 根据实际情况选择正确的iframe
            logging.info("已切换到第一个iframe")
            time.sleep(2)  # 等待iframe内容加载

        # 等待目标元素出现
        com_info_div = wait.until(
            EC.visibility_of_element_located((By.CSS_SELECTOR, 'div.comInfo.none'))
        )
        logging.info("找到 div.comInfo.none 元素")

        # 为了确保正确提取内容,记录当前页面的HTML(可选)
        # with open("current_page.html", "w", encoding='utf-8') as f:
        #     f.write(driver.page_source)

        # 提取所需信息
        com_name = safe_get_text(com_info_div, By.CSS_SELECTOR, 'p.comName > span.name')
        contact_person = safe_get_text(com_info_div, By.XPATH, './/p[contains(text(), "联系人")]/span')
        mobile = safe_get_text(com_info_div, By.XPATH, './/img[@alt="手机号"]/following-sibling::span')
        wechat = safe_get_text(com_info_div, By.XPATH, './/img[@alt="微信"]/following-sibling::span')
        qq = safe_get_text(com_info_div, By.XPATH, './/img[@alt="QQ"]/following-sibling::span')
        phone = safe_get_text(com_info_div, By.XPATH, './/img[@alt="联系电话"]/following-sibling::span')
        company_address = safe_get_text(com_info_div, By.XPATH, './/img[@alt="公司地址"]/following-sibling::span')

        data = [
            com_name,
            contact_person,
            mobile,
            wechat,
            qq,
            phone,
            company_address,
            url
        ]

        logging.info(f"提取的数据: {data}")
        return data

    except Exception as e:
        logging.error(f"在URL {url} 处提取数据时出现异常: {e}")
        # 返回空值或特定标记,以便后续处理
        return [None, None, None, None, None, None, None, url]

def safe_get_text(parent, by, locator):
    """
    安全地获取元素的文本内容,若元素不存在则返回None
    """
    try:
        element = parent.find_element(by, locator)
        text = element.text.strip()
        return text if text else None
    except Exception as e:
        logging.warning(f"无法找到元素 {locator} 或提取文本: {e}")
        return None

def is_url_processed(file_name, url):
    """
    检查URL是否已在输出Excel中处理过
    """
    if not os.path.exists(file_name):
        return False
    try:
        wb = openpyxl.load_workbook(file_name)
        ws = wb.active
        for row in ws.iter_rows(min_row=2, values_only=True):
            if row and row[-1] == url:
                return True
        return False
    except Exception as e:
        logging.error(f"检查URL是否已处理时出现异常: {e}")
        return False

def main():
    # 用户名和密码,建议使用环境变量或配置文件存储
    USERNAME = os.getenv('HUIPARTS_USERNAME', 'zzzz')  # 替换为您的用户名
    PASSWORD = os.getenv('HUIPARTS_PASSWORD', 'zzzz')  # 替换为您的密码

    # 输入和输出的Excel文件名
    input_excel = "公司链接.xlsx"   # 包含URL的输入文件
    sheet_name = "Sheet1"           # 输入文件中包含URL的工作表名称
    url_column = 1                  # URL所在的列(1表示A列)
    output_excel = "商户信息.xlsx"  # 输出文件

    # 初始化输出Excel文件
    initialize_excel(output_excel)

    # 设置WebDriver并登录
    driver = setup_webdriver(headless=False)  # 设置为 False 显示浏览器,便于调试
    try:
        login(driver, USERNAME, PASSWORD)

        # 加载目标URL列表
        target_urls = load_target_urls(input_excel=input_excel, sheet_name=sheet_name, url_column=url_column)
        logging.info(f"需要爬取的URL数量: {len(target_urls)}")

        for idx, url in enumerate(target_urls, start=1):
            print(f"\n开始处理第 {idx} 个URL: {url}")
            logging.info(f"开始处理第 {idx} 个URL: {url}")

            if is_url_processed(output_excel, url):
                logging.info(f"URL已处理,跳过: {url}")
                continue

            data = extract_data(driver, url)
            append_to_excel(output_excel, data)
            # 可选:添加延时,避免过快访问导致被封禁
            time.sleep(2)

    except Exception as e:
        logging.error(f"主程序出现异常: {e}")

    finally:
        # 截图页面,帮助调试
        try:
            driver.save_screenshot("final_screenshot.png")
            logging.info("页面截图已保存到 'final_screenshot.png'")
        except Exception as e:
            logging.error(f"保存截图时出现异常: {e}")

        # 保存页面源代码
        try:
            with open("final_page_source.html", "w", encoding='utf-8') as f:
                f.write(driver.page_source)
            logging.info("页面源代码已保存到 'final_page_source.html'")
        except Exception as e:
            logging.error(f"保存页面源代码时出现异常: {e}")

        # 关闭WebDriver
        driver.quit()
        logging.info("WebDriver已关闭")

if __name__ == "__main__":
    main()
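
One possible refinement: is_url_processed reloads 商户信息.xlsx for every URL, which gets slow as the sheet grows. A sketch that reads the already-processed URLs into a set once before the loop (using the same column layout as the script above) could look like this:

import os
import openpyxl

def load_processed_urls(file_name="商户信息.xlsx"):
    # Read the URL column (last column) of the output file once and return it as a set.
    processed = set()
    if not os.path.exists(file_name):
        return processed
    wb = openpyxl.load_workbook(file_name, read_only=True)
    ws = wb.active
    for row in ws.iter_rows(min_row=2, values_only=True):
        if row and row[-1]:
            processed.add(row[-1])
    return processed

# In main(), instead of calling is_url_processed() for every URL:
#     processed_urls = load_processed_urls(output_excel)
#     ...
#     if url in processed_urls:
#         continue
#     ...
#     processed_urls.add(url)  # after append_to_excel(output_excel, data)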
