Scraping Baidu Baike with Python and saving the result to CSV

import urllib.parse  # needed for urllib.parse.quote
import io
import sys
import requests
from bs4 import BeautifulSoup
from lxml import etree

# Re-wrap stdout so GBK/GB18030 output displays correctly on a Chinese Windows console
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')
# Prompt for the Baidu Baike entry you want to look up
book_type = input('Enter the Baidu Baike entry you want to look up: ')
# Percent-encode the keyword so it can be used in the URL
url_decode = urllib.parse.quote(book_type)
# Show the encoded keyword
print("Percent-encoded keyword:", url_decode)
# Baidu Baike base URL + encoded keyword = the page to fetch
url = "https://baike.baidu.com/item/%s" % url_decode
print("Baidu Baike URL:", url)
# Request headers (cookie and User-Agent copied from the browser)
headers = {
    'cookie': 'zhishiTopicRequestTime=1627564866290; BIDUPSID=5C268C7FF7FB8C56D0FA1A757DDDD769; PSTM=1611022176; __yjs_duid=1_119e156c8ecbbe19504605adca52a48e1619009500707; BDUSS=G1qYVpHdDlwLXNDR0hHUDhGfkM5bjhkd34wU0hvQ1VWTERHTDNhdTlyVDdjc2xnSVFBQUFBJCQAAAAAAAAAAAEAAAA46re8x~HRq8zO0KHLp7jnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPvloWD75aFgU; BDUSS_BFESS=G1qYVpHdDlwLXNDR0hHUDhGfkM5bjhkd34wU0hvQ1VWTERHTDNhdTlyVDdjc2xnSVFBQUFBJCQAAAAAAAAAAAEAAAA46re8x~HRq8zO0KHLp7jnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPvloWD75aFgU; BAIDUID=D8D2F36DE37F03FEBCDB8F84B786B882:FG=1; MCITY=-%3A; BDSFRCVID_BFESS=mXCOJexroG0Y0N7eIvOtqMNb-gKK0gOTDYLEOwXPsp3LGJLVgK9uEG0Pt_U-mEt-J8jwogKKL2OTHmIF_2uxOjjg8UtVJeC6EG0Ptf8g0M5; H_BDCLCKID_SF_BFESS=tbkD_C-MfIvDqTrP-trf5DCShUFsKxjRB2Q-XPoO3KJZhhu6bfr-eMKLXtTae4biWbRM2MbgylRp8P3y0bb2DUA1y4vpKMP8bmTxoUJ2XMKVDq5mqfCWMR-ebPRiWTj9QgbLalQ7tt5W8ncFbT7l5hKpbt-q0x-jLTnhVn0MBCK0hI0ljj82e5PVKgTa54cbb4o2WbCQ2P3m8pcN2b5oQT84D-IfKxcqfn6D2lrtHR3vOIJTXpOUWfAkXpJvQnJjt2JxaqRC5hkBfq5jDh3MBpQDhtoJexIO2jvy0hvcBIocShnzyfjrDRLbXU6BK5vPbNcZ0l8K3l02V-bIe-t2XjQhDNtDt60jfn3aQ5rtKRTffjrnhPF33--PXP6-hnjy3bRkopu55lT-8pvlXfTDqt-UyN3MWh3RymJ42-39LPO2hpRjyxv4bUn-5toxJpOJXaILWl52HlFWj43vbURvX5Dg3-7LBx5dtjTO2bc_5KnlfMQ_bf--QfbQ0hOhqP-jBRIEoK0hJC-2bKvPKITD-tFO5eT22-usBJ6d2hcHMPoosIJXDMIbbfFBXprDWx6aHCviaKJjBMbUoqRHXnJi0btQDPvxBf7p5208Ll5TtUJM_UKzhfoMqfTbMlJyKMniBnr9-pnEWlQrh459XP68bTkA5bjZKxtq3mkjbPbDfn028DKuDTtajj3QeaRabK6aKC5bL6rJabC3DfnJXU6q2bDeQN3kJUCf5m5q2-bhBlrDKJ3oyT3JXp0vWtv4WbbvLT7johRTWqR4epkw5fonDh83Bn_L2xQJHmLOBt3O5hvv8KoO3M7VyfKmDloOW-TB5bbPLUQF5l8-sq0x0bOte-bQXH_E5bj2qRIO_CIK3f; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; H_PS_PSSID=34299_34099_33971_34333_31254_34330_34004_34093_34094_26350_34288; BAIDUID_BFESS=D8D2F36DE37F03FEBCDB8F84B786B882:FG=1; BDRCVFR[feWj1Vr5u3D]=mk3SLVN4HKm; delPer=0; PSINO=3; SL_GWPT_Show_Hide_tmp=1; SL_wptGlobTipTmp=1; BA_HECTOR=2hak01a180800405051gg5apu0r; Hm_lvt_55b574651fcae74b0a9f1cf9c8d7c93a=1625184028,1627564747,1627564757,1627564866; zhishiTopicRequestTime=1627564875049; BK_SEARCHLOG=%7B%22key%22%3A%5B%22%E9%B9%BF%E6%99%97%22%2C%22%E5%A5%A5%E8%BF%90%E4%BC%9A%22%2C%22%E9%82%B1%E5%8B%8B%E6%B6%9B%22%2C%22%E4%BA%8E%E6%88%91%E4%B8%8E%E4%BD%A0%22%2C%22%E6%89%AC%E5%B7%9E%E7%98%A6%E8%A5%BF%E6%B9%96%22%2C%22%E6%96%BD%E5%90%AF%E6%96%8C%22%2C%22%E6%96%B9%E9%92%BB%22%2C%22%E5%86%89%E5%86%89%22%5D%7D; Hm_lpvt_55b574651fcae74b0a9f1cf9c8d7c93a=1627565520; ab_sr=1.0.1_YTIwMWIxZDY2ODgwZDBiZDVlZTg3MzQwYmExM2RlNzZjNzY2NWI1ZDgyNTU5ZTBmZTBhMmQyYzU0MDE1MDg1NjY2NGU0NTU3NzQ4NzkwMTdiOWFlN2M4MTMzMWVhODE1MDU0N2JlODVhNGZhOWJlZDMwZmEzMzFkMmNhMmRkZThmNjI0MWFiOWRmZDI3ZjFjN2M4ZWZkMzM5YTcxZjUzNA==',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.106 Safari/537.36'}

# Fetch the page HTML as text
r = requests.get(url=url, headers=headers).text
# Uncomment to inspect the raw HTML
# print(r)
# Parse the page with lxml
html = etree.HTML(r.encode('utf-8'))
# print(html)
# XPath uses path expressions to select nodes from the tree
# First block: the lemma-summary paragraphs at the top of the entry
jianjie = html.xpath("//div[@class='lemma-summary']/div[@class='para']//text()")
# Strip newlines
remove_empty2 = [item.strip('\n') for item in jianjie]
# Strip non-breaking spaces (\xa0)
remove_xa0 = [item.strip('\xa0') for item in remove_empty2]
# print(remove_xa0)
# Join the list into one string
b = " ".join(remove_xa0)
print(b)
# Second block: the main tab content of the entry
luhan = html.xpath("//div[@class='main_tab main_tab-defaultTab curTab']//text()")
# Strip newlines
remove_empty1 = [item.strip('\n') for item in luhan]
# Strip non-breaking spaces (\xa0)
remove_xa01 = [item.strip('\xa0') for item in remove_empty1]
# nbsp=[item.strip('   ') for item in remove_xa01]
# Join the list into one string
a = " ".join(remove_xa01)
# print(a)
# Concatenate the two blocks
c = b + a
# print(c)
# Pass the joined text through BeautifulSoup to drop any leftover tags or HTML entities
d = BeautifulSoup(c, 'lxml')
ps = d.text
print(ps)
# Save the result (appends the plain text to a file with a .csv extension)
with open('%s.csv' % book_type, 'a', encoding='utf-8') as f:
    f.write(str(ps))

The comments are detailed, so I won't go over it line by line.
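One note: the script writes the scraped text into a file that merely has a .csv extension, so it is really a plain text dump. If you want an actual CSV with columns, the standard library's csv module can do it. Below is a minimal sketch under that assumption; the 'keyword'/'summary' column names are my own choice and not part of the original script, and it assumes the book_type and ps variables produced above.

import csv

# Minimal sketch: save the keyword and the scraped text as one CSV row.
# Uses 'w' mode, so it overwrites the file instead of appending to it.
with open('%s.csv' % book_type, 'w', newline='', encoding='utf-8-sig') as f:
    writer = csv.writer(f)
    writer.writerow(['keyword', 'summary'])  # header row
    writer.writerow([book_type, ps])         # data row

utf-8-sig adds a BOM so Excel displays the Chinese text correctly, and newline='' is the file mode the csv module expects.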
