[Liao Xuefeng] Python crawler: learning log, day 07 (bs4)

from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""

# 1. Convert the raw HTML string into a BeautifulSoup object
# By default bs4 picks whichever parser is installed on your system and prints a warning about the choice;
# set the parser explicitly -- 'lxml'
soup = BeautifulSoup(html_doc,'lxml')

# Pretty-print the document (the parser also completes missing tags)
# result = soup.prettify()
# print(result)
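
prettify() returns the whole document as one indented string, and the parser repairs broken markup along the way; a quick check on a throwaway soup (not part of the original script):

# lxml wraps the fragment in <html><body> and closes the unclosed <p>
print(BeautifulSoup("<p>hi", 'lxml').prettify())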

# 2. Parse the data
'''
The four bs4 object types (check with type(result)):
Tag             ----  soup.head
NavigableString ----  soup.a.string
BeautifulSoup   ----  soup = BeautifulSoup(html_doc, 'lxml')
Comment         ----  comment content such as <!--...-->
'''
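
A quick sanity check of the four types against the soup built above; the Comment case needs a tag whose only child is a comment, which this html_doc lacks, so a tiny throwaway document stands in for it:

print(type(soup))                    # <class 'bs4.BeautifulSoup'>
print(type(soup.head))               # <class 'bs4.element.Tag'>
print(type(soup.a.string))           # <class 'bs4.element.NavigableString'>
comment_soup = BeautifulSoup("<p><!--a comment--></p>", 'lxml')
print(type(comment_soup.p.string))   # <class 'bs4.element.Comment'>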

# Get a tag object; dot access always returns the first match only
result = soup.head
result = soup.p
# Get the tag's text (first match only)
result = soup.a.string
# Get a tag attribute (first match only)
result = soup.a['href']
print(result)
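
Indexing like soup.a['href'] raises a KeyError when the attribute is missing; tag.get() is the forgiving variant (a small sketch on the same soup):

result = soup.a.get('href')  # the value, or None if 'href' were absent
print(result)

The next snippet switches to the query API: find / find_all and the CSS selectors.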
from bs4 import BeautifulSoup

html_doc = """
<html><head>
<title id="one">The Dormouse's story</title>
</head>
<body>
<p class="story"><!--...--></p>
<p class="title">
    text inside the p tag
    <b>The Dormouse's story</b>
</p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>


"""

# 1. Convert to a bs4.BeautifulSoup object
soup = BeautifulSoup(html_doc, 'lxml')
# 2. General query methods
#  find -- returns the first tag object matching the query
result = soup.find(name="a")
result = soup.find(attrs={"class":"title"})
result = soup.find(text="Tillie")  # newer bs4 spells this string="Tillie"; text= is the legacy alias
result = soup.find(
    name = "p",
    attrs = {"class":"story"}
)
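
find() returns None when nothing matches, so guard before chaining calls onto the result (a quick sketch):

missing = soup.find(name="table")  # no <table> in this document
if missing is None:
    print("no match")  # chaining missing.get_text() here would raise AttributeError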
#  find_all -- returns a list of tag objects
result = soup.find_all("a")
result = soup.find_all("a", limit=1)[0]  # same result as find
result = soup.find_all(attrs={"class": "sister"})
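
find_all does what the dot-access shortcut cannot: it collects every match. A quick loop over the returned list:

for tag in soup.find_all("a"):
    print(tag.get_text(), tag.get('href'))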

#  select_one -- CSS selector, returns the first match
result = soup.select_one('.sister')

#  select -- CSS selector, returns a list
result = soup.select(".sister")
result = soup.select("#one")
result = soup.select("head title")
result = soup.select('title,.title')
result = soup.select('a[id="link3"]')
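
On no match the two differ: select_one returns None while select returns an empty list (checked with a selector that matches nothing here):

print(soup.select_one('.missing'))  # None
print(soup.select('.missing'))      # []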

# Text wrapped by the tag (select returns a list, so index into it first)
result = soup.select('.title')[0].get_text()

# A tag's attribute
result = soup.select('#link1')[0].get('href')
print(result)
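
get_text() keeps each text node's surrounding whitespace (the .title paragraph above has indented content); strip=True trims the pieces and separator controls how they are joined (a quick sketch):

print(soup.select('.title')[0].get_text(separator=' ', strip=True))
# text inside the p tag The Dormouse's story

The last snippet puts bs4 to work in a small spider that scrapes list pages from the chainnode.com forum.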
import requests
from bs4 import BeautifulSoup
import json

class BtcSpider(object):
    def __init__(self):
        self.url = 'https://www.chainnode.com/forum/61-{}'
        self.headers = {
            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0'
        }
        # parsed rows from the list pages
        self.data_list = []
        # parsed question/answer pairs from the detail pages
        self.data_detail = []

    # 1. Send the request and return the decoded body
    def get_response(self, url):
        response = requests.get(url, headers=self.headers)
        data = response.content.decode()  # bytes -> str; decode() defaults to utf-8
        return data
    # 2. Parse the list page
    def parse_list_data(self, data):
        # 1. Convert to a BeautifulSoup object
        soup = BeautifulSoup(data, 'lxml')
        # 2. Extract the content
        title_list = soup.select("a")  # note: "a" matches every link on the page, nav links included
        for title in title_list:
            list_dict_data = {}
            list_dict_data['title'] = title.get_text()
            list_dict_data['detail_url'] = title.get('href')
            self.data_list.append(list_dict_data)
    '''
    # Parse the detail page
    def parse_detail_data(self, data):
        html_data = BeautifulSoup(data, "lxml")
        # extract the question
        # (selector kept as in the original; note that "bbt-html" here is a tag-name
        # selector, not a class -- ".bbt-editor-text.bbt-html" may have been intended)
        question = html_data.select(".bbt-editor-text bbt-html")[0].get_text()
        print(question)
        answer = html_data.select(".bbt-editor-text bbt-html")[2].get_text()
        detail_data = {
            "question": question,
            "answer": answer
        }
        self.data_detail.append(detail_data)
    '''
    # 3. Save the data
    def save_data(self, data, file_path):
        data_str = json.dumps(data, ensure_ascii=False)  # keep non-ASCII titles readable instead of \u escapes
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(data_str)
    def start(self):
        # request the list page
        url = self.url.format(1)
        data = self.get_response(url)
        self.parse_list_data(data)
        self.save_data(self.data_list, 'news01.json')
        '''
        # request each detail page
        for data in self.data_list:
            detail_url = data["detail_url"]
            detail_data = self.get_response(detail_url)

            # parse the detail page
            self.parse_detail_data(detail_data)
        self.save_data(self.data_detail, 'detail.json')
        '''
BtcSpider().start()
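
The url template takes a page number through format(); a small sketch (hypothetical page range, reusing the methods above) that walks the first three list pages before saving:

spider = BtcSpider()
for page in range(1, 4):  # hypothetical: list pages 1-3
    html = spider.get_response(spider.url.format(page))
    spider.parse_list_data(html)
spider.save_data(spider.data_list, 'news_pages.json')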