Baidu search crawler for scraping result page links (API), 2023.4.28

import requests
from bs4 import BeautifulSoup
import os


filename = "config.txt"
if not os.path.exists(filename):  # create a default config.txt in the same directory if it is missing
    with open(filename, "w", encoding="utf-8") as f:
        f.write("### Baidu search keyword to crawl:\npython\n### Baidu search page to crawl [starts at 0; 0 means the first page]\n0\n### Welcome to 画中云Joy's crawler program")
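# The generated config.txt therefore has five lines (comment, keyword, comment, page
# index, comment), which is why the code below reads lines[1] for the keyword and
# lines[3] for the page index:
#
#     ### Baidu search keyword to crawl:
#     python
#     ### Baidu search page to crawl [starts at 0; 0 means the first page]
#     0
#     ### Welcome to 画中云Joy's crawler program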


headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    # "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Cookie": "BIDUPSID=F5F53404896484619083727D20E476F8; PSTM=1682631299; BAIDUID=F5F5340489648461083E51C38DBB41E4:FG=1; delPer=0; BD_CK_SAM=1; BD_UPN=12314753; BA_HECTOR=al0g8g0l812h0h058g04a4251i4lqk51n; BAIDUID_BFESS=F5F5340489648461083E51C38DBB41E4:FG=1; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; ZFY=CyBq:BF0EQcgwzKHU3:BSkrQSNaRfM3RzeSzkw2HWWU9U:C; H_PS_PSSID=38516_36552_38529_38470_38468_38485_37933_26350_38545; B64_BOT=1; ZD_ENTRY=baidu; __bid_n=187c4b0bf1552fbe904207; ab_sr=1.0.1_NzhkYjk1NDU2Mzg0ZWY4ZWU2ZDE3ZjY3YTU2Mzg0YmU4MWQ1NzU0ZDJiMTI5Y2VjMzg4YTNmMWFmZDk2YjBkMTZlNjk1MThmZDBmOGQ5ZWVkZmI1MTViYjE2ODQ4YjQyYzZkYmQyMjVhZmZlNGZiYjMzMDQ1MDc0NjQ3MzdhNTgzZjA3MGExNzRhYjNkZGIwMjlhZTcyMzUwNzhmMTRmMg==; FPTOKEN=pTYDJtDLqZgzHFhac1EFGONPeHZngXDGWXeoDZwq/dXystICu8R2PR0FD2qpHV6QwwCTpeGxpcF4u6alLx8VN0sNDhd0UGVjBzR7FA3iRd+aU0KYvFxx2bBFBA/tRX9//P05sGn1T3IaKricJN+k+S3dSgmNSqTi7stegk1/rdub/NpFnXGwAI0zyjExpoLqF8si2alR0qwMnGzxfDnZbDHK54904TbcZ2WBUR8pxEaSiHRkZxOvmBQ5EUW5O0nKVgfi/Or05ZuW1jAN8eLdrNAF9N2Z9pk7RCKx+GpJAsdkmV72uCQ+muGRRwJs/02YmAuacHVAkO/feSGCgdOHFiSpW8CHu7OoyKXZ0u3cOOnjNsG0UF5S7olaB7o01QvMp8EUaffNvsakWQ/qsCCKGw==|HqkxH5/QuUvVA6TxjKDBI2T8PRFQ0IzdtgtW+HJNg2s=|10|80a76d2820674f4fa9d7953ebd5645a6; BD_HOME=1; PSINO=7; H_PS_645EC=0b06c0sWBMMx7bAHRyGClmtHzu4HYlBGPcimiXoGt6H6iPs1lgxw8ARgyzI; BDSVRTM=207; baikeVisitId=fe929bf2-16a1-467d-98cf-6b06990bebac; COOKIE_SESSION=874_0_1_2_0_1_1_0_1_1_1_0_0_0_0_0_0_0_1682632174%7C2%230_0_1682632174%7C1",
    # "Host": "www.baidu.com",
    "sec-ch-ua": '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"Windows"',
    # "Sec-Fetch-Dest": "document",
    # "Sec-Fetch-Mode": "navigate",
    # "Sec-Fetch-Site": "same-origin",
    # "Sec-Fetch-User": "?1",
    # "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
}
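# NOTE: the Cookie value above belongs to one Baidu session and will eventually expire;
# without a usable cookie Baidu may return a verification page instead of real results.
# As a minimal sketch (the BAIDU_COOKIE environment variable is an assumption, not part
# of the original script), an exported cookie can override the hard-coded one:
if os.environ.get("BAIDU_COOKIE"):
    headers["Cookie"] = os.environ["BAIDU_COOKIE"]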

with open("config.txt", "r", encoding="utf-8") as f:
    lines = f.readlines()
    wd = lines[1].strip()    # line 2 of config.txt: the search keyword (strip the trailing newline)
    pn = lines[3].strip()    # line 4 of config.txt: the page index (strip the trailing newline)
url = "https://www.baidu.com/s"  # base search URL; the query string is built from `params` below

params = {
    "ie": "utf-8",
    "wd": wd,                     # search keyword
    "pn": str(int(pn) * 10),      # Baidu's pn is a result offset: page 0 -> 0, page 1 -> 10, ...
}
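# requests URL-encodes the parameters above onto the base URL, producing something like
# https://www.baidu.com/s?ie=utf-8&wd=python&pn=0. As a quick sanity check (an addition,
# not part of the original script), the prepared URL can be printed before sending:
preview_url = requests.Request("GET", url, params=params).prepare().url
print("Requesting:", preview_url)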

response = requests.get(url, headers=headers, params=params)
response.encoding = "utf-8"  # Baidu serves UTF-8; set it explicitly so titles are not garbled
soup = BeautifulSoup(response.text, "html.parser")
results = soup.find_all("div", class_="result")  # each organic search result is a div with class "result"

with open(f"{wd}_results.txt", "w", encoding="utf-8") as f:
    for result in results:
        h3 = result.find("h3")
        if h3 is None:                 # skip blocks without a title element
            continue
        a = h3.find("a")
        if a is None or not a.has_attr("href"):
            continue                   # skip titles without a link
        title = a.get_text(strip=True)
        link = a["href"]               # Baidu redirect URL pointing to the result page
        f.write(f"{title}\n{link}\n\n")
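# Each saved entry is the result title, its (redirect) link, and a blank line. A short
# summary print (an addition, not part of the original script) makes the run visible:
print(f"Found {len(results)} result blocks for '{wd}'; links saved to {wd}_results.txt")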
