2.5 爬虫案例2

一、解析标题数据list

1、xpath解析
x_data = etree.HTML(data)
title_list = x_data.xpath('//a[@class="link-dark-major font-bold bbt-block"]/text()')
url_list = x_data.xpath('//a[@class="link-dark-major font-bold bbt-block"]/@href')
# Pack each title with its detail URL into a list of dicts.
data_list = [
    {'name': title, 'url': url_list[idx]}
    for idx, title in enumerate(title_list)
]
[
    {
        "title":" Bitxce交易所周年庆,狂撒2000万USDT
",
        "detail_url":"/post/365325"
    },
    {
        "title":" 【扬帆起航】社区新人必看及好贴索引-2019.2.21更新
",
        "detail_url":"/post/15508"
    },
    {
        "title":" 首个币圈相亲群在组CP,再不报名,小姐姐都被抢走了!
",
        "detail_url":"/post/366360"}
]

 url 为 /post/365325,这是相对路径,无法直接请求;后续需要在前面拼接站点域名(如 https://www.chainnode.com)才能抓取详情页。

2、bs4解析

soup = BeautifulSoup(data, 'lxml')
# Select every anchor carrying the .bbt-block class on the list page.
title_list = soup.select('.bbt-block')

# NOTE(review): the original paste had broken indentation (stray leading
# space before `for`, over-indented body) and would not parse; normalized here.
data_list = []
for title in title_list:
    list_dict_data = {}
    list_dict_data['title'] = title.get_text()
    list_dict_data['detail_url'] = title.get('href')
    data_list.append(list_dict_data)

二、解析详细页数据

1、标题解析

 

 

2、回答解析

answer = soup.select('.comment__content ')

# Collect the visible text of every comment node.
answer_list = [node.get_text() for node in answer]
print(answer_list)
# 3.解析数据详情页
    def parse_detail_data(self, data):
        """Parse one thread detail page: extract the question title and
        all answers, and append the record to self.data_detail."""
        html_data = BeautifulSoup(data, 'lxml')

        # The #thread_subject node holds the question title.
        question = html_data.select('#thread_subject')[0].get_text()
        print(question)
        # BUG FIX: the original re-created answer_list INSIDE the loop,
        # so only the last answer survived. Accumulate all answers instead.
        answer_list = [answer.get_text() for answer in html_data.select('.t_f')]

        detail_data = {
            "question": question,
            "answer": answer_list
        }

        self.data_detail.append(detail_data)

 

三、连接

    def start(self):
        """Entry point: crawl list pages, save them, then crawl and save
        every detail page."""
        # Request the list page(s); range(1, 2) currently means page 1 only.
        for i in range(1, 2):
            # BUG FIX: the original hard-coded self.url.format(1), so every
            # iteration re-fetched page 1; use the loop variable instead.
            url = self.url.format(i)
            data = self.get_response(url)
            print(data)
            self.parse_list_data(data)
        self.save_data(self.data_list, "04list.json")

        # Request each detail page; detail_url is site-relative
        # (e.g. /post/365325), so prepend the host first.
        for data in self.data_list:
            base_url = 'https://www.chainnode.com'
            detail_url = base_url + data['detail_url']

            detail_data = self.get_response(detail_url)

            # Parse the detail-page data.
            self.parse_detail_data(detail_data)

        self.save_data(self.data_detail, 'detail1.json')

 

四、完整代码

import requests
from bs4 import BeautifulSoup
from lxml import etree
import json


class BtcSpider(object):
    """Crawler for the 8btc forum: fetches list pages, parses thread
    titles and detail URLs, and (optionally) parses thread detail pages."""

    def __init__(self):
        # List-page URL template; {} is the page number.
        self.url = 'http://8btc.com/forum-61-{}.html'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"}

        # Parsed list-page records: [{'title': ..., 'detail_url': ...}, ...]
        self.data_list = []
        # Parsed detail-page records: [{'question': ..., 'answer': [...]}, ...]
        self.data_detail = []

    # 1. Send the request.
    def get_response(self, url):
        """GET *url* with the browser User-Agent and return the raw body bytes."""
        response = requests.get(url, headers=self.headers)
        data = response.content
        return data

    # 2. Parse the list-page data.
    def parse_list_data(self, data):
        """Extract (title, detail_url) pairs from a list page into self.data_list."""
        # 1. Convert raw bytes to a soup tree.
        soup = BeautifulSoup(data, 'lxml')
        # 2. Select every anchor carrying the .bbt-block class.
        title_list = soup.select('.bbt-block')
        for title in title_list:
            list_dict_data = {}
            list_dict_data['title'] = title.get_text()
            list_dict_data['detail_url'] = title.get('href')
            self.data_list.append(list_dict_data)

    # 3. Parse the detail-page data.
    def parse_detail_data(self, data):
        """Extract the question title and every answer from a detail page."""
        html_data = BeautifulSoup(data, 'lxml')

        # The #thread_subject node holds the question title.
        question = html_data.select('#thread_subject')[0].get_text()
        print(question)
        # BUG FIX: the original reset answer_list inside the loop, keeping
        # only the last answer; accumulate all answers instead.
        answer_list = [answer.get_text() for answer in html_data.select('.t_f')]

        detail_data = {
            "question": question,
            "answer": answer_list
        }

        self.data_detail.append(detail_data)

    # 4. Save the data.
    def save_data(self, data, file_path):
        """Serialize *data* to JSON and write it to *file_path* as UTF-8."""
        # ensure_ascii=False keeps Chinese text readable in the output file;
        # an explicit encoding avoids platform-dependent defaults on write.
        data_str = json.dumps(data, ensure_ascii=False)
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(data_str)

    def start(self):
        """Entry point: crawl list page(s) and save the parsed records."""
        # Request the list page(s); range(1, 2) means page 1 only.
        for i in range(1, 2):
            # BUG FIX: was self.url.format(1) — every iteration fetched page 1.
            url = self.url.format(i)
            data = self.get_response(url)
            print(data)
            self.parse_list_data(data)
        self.save_data(self.data_list, "04list.json")
        #
        # # Send the detail-page requests.
        # for data in self.data_list:
        #     detail_url = data['detail_url']
        #     detail_data = self.get_response(detail_url)
        #
        #     # Parse the detail-page data.
        #     self.parse_detail_data(detail_data)
        #
        # self.save_data(self.data_detail, 'detail1.json')


# Guard the entry point so importing this module does not trigger a crawl.
if __name__ == "__main__":
    BtcSpider().start()

"""
html_data = etree.HTML(data)

        result_list = html_data.xpath('//div[contains(@id,"stickthread")]')
        result_list = html_data.xpath('//head/following-sibling::*[1]')
        print(len(result_list))
        print(result_list)
"""

 

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值