Qt在linux下实现kafka客户端开发(三)

一.  创建消费者:

// ConsumerKafka.h
#ifndef CONSUMERKAFKA_H
#define CONSUMERKAFKA_H

#include <Qt>
#include "lib/rdkafkacpp.h"
using std::string;

//class THTFKafkaCpp : public

#include <iostream>
#include <string>
#include <cstdlib>
#include <cstdio>
#include <csignal>
#include <cstring>
#include <getopt.h>
#include <list>
#include <vector>
#include<fstream>

using std::string;
using std::list;
using std::cout;
using std::endl;
using std::vector;
using std::fstream;

// Main-loop flag: cleared by sigterm() or by msg_consume() on fatal errors.
static bool run = true;
// When true, reaching the end of a partition (ERR__PARTITION_EOF) also clears `run`.
static bool exit_eof = true;

// Consume messages from kafka and store them into the msg_data array.


// One decoded record pulled from Kafka (binlog-style metadata).
struct protodata
{
    uint64_t uuid;           // record identifier
    uint64_t position;       // current binlog position — TODO confirm semantics with producer
    uint64_t next_position;  // position of the following record
    string gtid;             // global transaction id string
};

// Accumulates every protodata record consumed from the topic.
static vector<protodata> fulltopic;



// Receives out-of-band events (errors, stats, logs) from librdkafka.
class MyEventCb : public RdKafka::EventCb {
public:
  void event_cb (RdKafka::Event &event) {
    const RdKafka::Event::Type kind = event.type();

    if (kind == RdKafka::Event::EVENT_ERROR) {
      std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
        event.str() << std::endl;
      // All brokers unreachable: stop the consume loop.
      if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
        run = false;
    } else if (kind == RdKafka::Event::EVENT_STATS) {
      std::cerr << "\"STATS\": " << event.str() << std::endl;
    } else if (kind == RdKafka::Event::EVENT_LOG) {
      fprintf(stderr, "LOG-%i-%s: %s\n",
              event.severity(), event.fac().c_str(), event.str().c_str());
    } else {
      // Unknown event type: dump everything we know about it.
      std::cerr << "EVENT " << kind <<
        " (" << RdKafka::err2str(event.err()) << "): " <<
        event.str() << std::endl;
    }
  }
};


void msg_consume(RdKafka::Message* message, void* opaque) ;


// Adapter so consumer->consume_callback() funnels messages into msg_consume().
class MyConsumeCb : public RdKafka::ConsumeCb {
public:
  void consume_cb (RdKafka::Message &msg, void *opaque) {
    msg_consume(&msg, opaque);
  }
};

// SIGINT/SIGTERM handler: ask the consume loop to exit cleanly.
static void sigterm (int sig) {
  run = false;
}

// Thin wrapper around the librdkafka C++ consumer API for use from a Qt app.
// Typical lifecycle: LoadCppLiberary() -> init_kafka() -> pull_data_from_kafka()
// (blocking loop; stop with pull_data_stop()) -> destroy().
class ConsumerKafka
{
public:
    ConsumerKafka();
    ~ConsumerKafka(){}

    // Loads the rdkafka++ shared library at runtime via QLibrary.
    bool LoadCppLiberary(const QString& dllName);
    // Guarded variant: returns -1 unless LoadCppLiberary() succeeded first.
    int InitKafka(int _partition, string broker, string _topic);

    // Creates configs, consumer and topic handles; exits the process on failure.
    int init_kafka(int partition, string brokers, string topic);
    // Blocking consume loop; pumps the Qt event loop between polls.
    int pull_data_from_kafka();
    // Clears the loop flag so pull_data_from_kafka() returns.
    void pull_data_stop();
    // Stops the consumer and releases the topic/consumer handles.
    void destroy();

private:
    RdKafka::Conf * global_conf;      // global librdkafka configuration
    RdKafka::Conf * topic_conf;       // per-topic configuration
    string brokers;                   // "host:port[,host:port...]" broker list
    string errstr;                    // last error text from librdkafka calls
    RdKafka::Consumer *consumer;      // owned; created in init_kafka()
    string topic_name ;
    RdKafka::Topic *topic;            // owned; created in init_kafka()
    int32_t partition;
    int64_t start_offset;             // OFFSET_BEGINNING by default
    RdKafka::Message *msg;

    bool m_bRun = false;              // consume-loop flag, see pull_data_stop()
};

#endif // CONSUMERKAFKA_H


	// ConsumerKafka.cpp
   
#include "ConsumerKafka.h"

#include <QLibrary>
#include <QDebug>
#include <QElapsedTimer>
#include <QCoreApplication>

// Set once LoadCppLiberary() succeeds; InitKafka() refuses to run before that.
bool g_bLoadedCppLib = false;

//typedef Conf *(*rd_conf_create_decl) (Conf::ConfType type);
// Fix: the raw pointer members were left uninitialized, so calling destroy()
// (or any method) before init_kafka() dereferenced/deleted garbage pointers.
// Initialize every member to a known-safe state.
ConsumerKafka::ConsumerKafka()
    : global_conf(nullptr),
      topic_conf(nullptr),
      consumer(nullptr),
      topic(nullptr),
      partition(0),
      start_offset(0),
      msg(nullptr)
{
}

// Dynamically loads the rdkafka C++ wrapper library.
// Returns true and marks the global flag on success; logs the loader error otherwise.
bool ConsumerKafka::LoadCppLiberary(const QString& dllName)
{
    QLibrary lib(dllName);

    if( lib.load() )
    {
        qDebug() << QString("加载dll成功!") + dllName;
        g_bLoadedCppLib = true;
        return true;
    }

    // Loading failed — report the library name together with QLibrary's reason.
    qDebug() << QString("加载dll失败") + dllName + "\n error:" + lib.errorString();
    return false;
}


// Guarded initialization entry point: refuses to proceed until
// LoadCppLiberary() has successfully loaded the rdkafka library.
// Returns -1 when the library is not loaded, 0 otherwise.
int ConsumerKafka::InitKafka(int _partition, string broker, string _topic)
{
    if( g_bLoadedCppLib == false )
    {
        return -1;
    }

    //global_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    //topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

    return 0;
}

int ConsumerKafka::init_kafka(int _partition, string broker, string _topic)
{
      global_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
      topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

      brokers = broker;
      partition = _partition;
      topic_name = _topic;
      start_offset = RdKafka::Topic::OFFSET_BEGINNING;
      global_conf->set("metadata.broker.list", brokers, errstr);

      MyEventCb ex_event_cb;
      global_conf->set("event_cb", &ex_event_cb, errstr);


      /*
        * Create consumer using accumulated global configuration.
        */
        consumer = RdKafka::Consumer::create(global_conf, errstr);
       if (!consumer) {
         std::cerr << "Failed to create consumer: " << errstr << std::endl;
         exit(1);
       }
         /* Create topic */
      topic = RdKafka::Topic::create(consumer, topic_name, topic_conf, errstr);
      if (!topic) {
        std::cerr << "Failed to create topic: " << errstr << std::endl;
        exit(1);
      }
}

void ConsumerKafka::destroy()
{
      consumer->stop(topic, partition);
      consumer->poll(1000);

      delete topic;
      delete consumer;
}

// Blocking consume loop: reads one message at a time (1 s timeout per call),
// hands it to msg_consume(), and pumps the Qt event loop so the GUI stays
// responsive. Runs until pull_data_stop() clears m_bRun.
// Returns 0 on normal loop exit; exits the process if the consumer cannot
// be started (original behavior).
//
// Fixes:
//  - The function is declared `int` but had no return statement (UB).
//  - Removed dead locals: `use_ccb` was always 0 with its callback branch
//    commented out, `ex_consume_cb` was therefore never used, and the
//    QElapsedTimer was started but never read.
int ConsumerKafka::pull_data_from_kafka()
{
    RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset);
    if (resp != RdKafka::ERR_NO_ERROR) {
        std::cerr << "Failed to start consumer: " <<
          RdKafka::err2str(resp) << std::endl;
        exit(1);
    }

    /*
     * Consume messages
     */
    m_bRun = true;
    while (m_bRun)
    {
        // Keep Qt's event loop alive while we block-poll Kafka.
        QCoreApplication::processEvents();

        // consume() returns a message object even on timeout/error;
        // msg_consume() inspects msg->err(), and we always free it.
        RdKafka::Message *msg = consumer->consume(topic, partition, 1000);
        msg_consume(msg, NULL);
        delete msg;

        // Serve queued callbacks (events, stats) without blocking.
        consumer->poll(0);
    }

    return 0;
}

// Requests pull_data_from_kafka() to exit: the loop checks m_bRun each pass,
// so the consumer stops within one consume timeout (~1 s).
void ConsumerKafka::pull_data_stop()
{
    m_bRun = false;
}

// Dispatches on the per-message error code: prints real messages, treats
// timeouts as no-ops, and clears the global `run` flag on EOF (when
// exit_eof is set) or on fatal errors.
//
// Fix: the payload was printed with `cout << (const char*)payload`, which
// assumes NUL termination. Kafka payloads are length-delimited binary and
// are NOT guaranteed to be NUL-terminated, so that was an out-of-bounds
// read. We now write exactly message->len() bytes.
void msg_consume(RdKafka::Message* message, void* opaque) {
  switch (message->err()) {
  case RdKafka::ERR__TIMED_OUT:
    // No message within the consume timeout — not an error.
    break;

  case RdKafka::ERR_NO_ERROR:
    /* Real message */
    std::cout << "Read msg at offset " << message->offset() << std::endl;
    if (message->key()) {
      std::cout << "Key: " << *message->key() << std::endl;
    }
    if (message->payload()) {
      // Print exactly len() bytes — the payload is not NUL-terminated.
      std::cout.write(static_cast<const char *>(message->payload()),
                      message->len());
      std::cout << endl;
    }
    break;

  case RdKafka::ERR__PARTITION_EOF:
    cout << "reach last message" << endl;
    /* Last message */
    if (exit_eof) {
      run = false;
    }
    break;

  case RdKafka::ERR__UNKNOWN_TOPIC:
  case RdKafka::ERR__UNKNOWN_PARTITION:
    std::cerr << "Consume failed: " << message->errstr() << std::endl;
    run = false;
    break;

  default:
    /* Errors */
    std::cerr << "Consume failed: " << message->errstr() << std::endl;
    run = false;
  }
}

//调用运行
int main(int argc, char **argv) {  
  // Process kill signal, quit from the loop  
  
  signal(SIGINT, sigterm);  
  signal(SIGTERM, sigterm);  
  
   ConsummerKafka test;  
   test.init_kafka(0, "localhost", "Hello-Kafka");  
   test.pull_data_from_kafka();  
  
}  

参考文档: https://blog.csdn.net/sinat_25929227/article/details/73614367




  • 1
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
Kafka 客户端工具是一种用于与 Kafka 消息队列进行通信和交互的工具。它提供了一系列命令行接口 (CLI) 和 API,可用于在 Kafka 集群中发布、订阅和使用消息。 Kafka 客户端工具具有以下主要功能: 1. 发布消息:可以使用 Kafka 客户端工具将消息发布到 Kafka 集群中的指定主题。可以通过命令行或程序化方式指定消息的内容、主题和其他属性。 2. 订阅消息:可以使用 Kafka 客户端工具从 Kafka 集群中的指定主题订阅消息。可以通过指定消费者组、分区和其他属性来灵活控制订阅行为。 3. 检查主题和分区:可以使用 Kafka 客户端工具查看 Kafka 集群中的所有主题和相应的分区信息。可以检查每个分区的偏移量、副本分布和其他有关分区的元数据。 4. 控制消费者组:可以使用 Kafka 客户端工具管理消费者组。可以列出当前活动的消费者组、查看组内消费者的偏移量以及重置偏移量等操作。 5. 监控和性能测试:Kafka 客户端工具还提供了一些监控工具和性能测试工具,用于监测和调优 Kafka 集群的性能。可以使用这些工具检查消息的产生和消费速率、分区偏移量的变化等。 总之,Kafka 客户端工具是一种便捷的工具,可以帮助开发人员和管理员与 Kafka 集群进行交互,并用于操作和管理消息的发布和订阅。无论是通过命令行还是API,它们都为处理 Kafka 数据流提供了强大的功能和灵活性。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值