import requests
from lxml import etree


def get_web_page(wd, pn):
    """Request one page of Baidu search results for keyword `wd` at offset `pn`."""
    url = 'https://www.baidu.com/s'
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.62',
        # Session cookie copied from a logged-in browser; replace it with your own,
        # or Baidu is likely to serve a verification page instead of results
        'Cookie': 'BAIDUID=C58C4A69E08EF11BEA25E73D71F452FB:FG=1; PSTM=1564970099; BIDUPSID=87DDAF2BDABDA37DCF227077F0A4ADAA; __yjs_duid=1_351e08bd1199f6367d690719fdd523a71622540815502; MAWEBCUID=web_goISnQHdIuXmTRjWmrvZPZVKYQvVAxETmIIzcYfXMnXsObtoEz; MCITY=-%3A; BD_UPN=12314353; BDUSS_BFESS=003VTlGWFZGV0NYZU1FdFBTZnFYMGtPcUs2VUtRSERVTWRNcFM5cmtHaGoyb1ZpRUFBQUFBJCQAAAAAAAAAAAEAAABCyphcYWRkZDgyMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGNNXmJjTV5iT; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; H_PS_PSSID=34813_35915_36166_34584_36120_36195_36075_36125_36226_26350_36300_22160_36061; ab_sr=1.0.1_ODllMjlmYmJlNjY5NzBjYTRkN2VlMDU3ZGI5ODJhNzA4YzllOTM3OTAwMWNmZTFlMTQ3ZmY3MmRlNDYyYWZjNTI5MzcwYmE3MDk0NGNkOGFmYThkN2FlMDdlMzA0ZjY0MmViNWIzNjc0ZjhmZWZmZGJmMTA3MGI5ZGM5MDM4NmQ3MWI0ZDUyMDljZWU4ZDExZjA1ZTg5MDYyYmNiNDc4ODFkOTQ2MmYxN2EwYTgwOTFlYTRlZjYzMmYwNzQ0ZDI3; BAIDUID_BFESS=C58C4A69E08EF11BEA25E73D71F452FB:FG=1; delPer=0; BD_CK_SAM=1; PSINO=1; H_PS_645EC=c87aPHArHVd30qt4cjwBEzjR%2BwqcUnQjjApbQetZm98YZVXUtN%2FOXOxNv3A; BA_HECTOR=25a0850k0l8h002kio1h5v7ud0q; baikeVisitId=61a414fd-dde7-41c2-9aa5-aa8044420d33',
        'Host': 'www.baidu.com'
    }
    params = {
        'wd': wd,  # search keyword
        'pn': pn   # result offset: 0 for page 1, 10 for page 2, ...
    }
    response = requests.get(url, headers=headers, params=params)
    response.encoding = 'utf-8'
    return response.text
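

# Note: get_web_page leans entirely on the copied Cookie header. When Baidu
# rejects the cookie it still answers 200 OK, but with its human-verification
# page rather than search results. A minimal check, sketched as a helper of my
# own (not part of the original script; the banner string is an assumption):
def looks_blocked(page_text):
    # "百度安全验证" is the banner text commonly seen on Baidu's verification page
    return '百度安全验证' in page_text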


def parse_page(response):
    """Extract the title and link from every result container on the page."""
    html = etree.HTML(response)
    selectors = html.xpath('//div[@class="c-container"]')
    data = []
    nub = 0
    for selector in selectors:
        title = ''.join(selector.xpath('.//h3/a//text()'))
        hrefs = selector.xpath('.//h3/a/@href')
        if not title or not hrefs:
            # Skip containers without a linked heading (ads, widget blocks, etc.),
            # which would otherwise raise an IndexError below
            continue
        title_url = hrefs[0]
        print(title)
        print(title_url)
        nub += 1
        data.append([title, title_url])
    print(f"This page yielded {nub} title/URL records in total!")
    return data
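

# The @href values above are usually Baidu redirect links of the form
# https://www.baidu.com/link?url=..., not the target sites themselves. A sketch
# for resolving one to its real address, assuming Baidu answers with a plain
# 302 redirect carrying a Location header (the helper name is my own):
def resolve_baidu_link(redirect_url, headers):
    resp = requests.get(redirect_url, headers=headers, allow_redirects=False)
    # Fall back to the redirect link itself if no Location header came back
    return resp.headers.get('Location', redirect_url)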


def save_data(datas, kw, page):
    # Open the output file once and append one record per line
    with open(f'./baidu_{kw}_page_{page}_data_xpath.csv', 'a', encoding='utf-8') as fp:
        for data in datas:
            fp.write(str(data) + '\n')
    print(f"Page {page} of Baidu results for '{kw}' saved successfully!")
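

# The file above is CSV only by name: str(data) writes Python list literals,
# and commas inside titles would confuse a real CSV parser. The same step with
# the standard csv module might look like this (the filename is my own placeholder):
import csv


def save_data_csv(datas, kw, page):
    with open(f'./baidu_{kw}_page_{page}.csv', 'a', encoding='utf-8', newline='') as fp:
        csv.writer(fp).writerows(datas)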


def main():
    kw = input("Enter the search keyword: ").strip()
    page = input("Enter the page number: ").strip()
    # Baidu's pn parameter counts results, not pages: page n -> pn = (n - 1) * 10
    page_pn = str((int(page) - 1) * 10)
    resp = get_web_page(kw, page_pn)
    datas = parse_page(resp)
    save_data(datas, kw, page)


if __name__ == '__main__':
    main()
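
# Example session (inputs are illustrative; the script file name is hypothetical):
#   $ python baidu_xpath_spider.py
#   Enter the search keyword: python
#   Enter the page number: 2
# This requests https://www.baidu.com/s?wd=python&pn=10 and appends each
# [title, url] pair to the page-2 output file.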