C++ Kafka environment setup

This article walks through, on Ubuntu, how to set up the Java environment, install Kafka, and start the ZooKeeper and Kafka services. It then covers creating and managing Kafka topics and using a console producer and consumer to send and receive messages. Finally, it shows how to produce and consume messages from C++ using Qt and the librdkafka library, with example code illustrating the concrete steps and the key functions involved.

This article covers setting up Kafka and doing basic message send/receive with C++ and Qt on Ubuntu. It is adapted from a post by @草上爬; original post: https://blog.csdn.net/caoshangpa/article/details/79786100
Many thanks to the original author.

Installing Kafka's dependencies

Kafka needs a Java runtime; OpenJDK 11 will do:

sudo apt install openjdk-11-jdk

Check the Java version:

java -version

Installing and test-running Kafka

First, download Kafka from http://kafka.apache.org/downloads and pick one of the binary builds (the ones labelled with a Scala version).
Extract the archive; no further installation is needed.
A complete Kafka pipeline consists of four parts: ZooKeeper, the Kafka broker, a producer, and a consumer.
Go into the extracted directory, open a terminal there, and run the following commands.
Start ZooKeeper:

bin/zookeeper-server-start.sh config/zookeeper.properties

Start the Kafka broker:

bin/kafka-server-start.sh config/server.properties

Kafka is a publish/subscribe messaging system, so messages are organized into topics. Create a topic named test (note: on Kafka 2.2 and later the topic tools accept --bootstrap-server localhost:9092 in place of --zookeeper localhost:2181, and in Kafka 3.x the --zookeeper option has been removed):

bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test

List the existing topics:

bin/kafka-topics.sh --list --zookeeper localhost:2181

Start a console producer:

bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test

Start a console consumer:

bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning

Now type messages in the producer terminal; they will show up in the consumer terminal.

Installing librdkafka

First, clone the librdkafka repository:

git clone https://github.com/edenhill/librdkafka.git

Then build and install it:

cd librdkafka/
./configure 
make
sudo make install
This installs both the C library (link with -lrdkafka) and the C++ wrapper (link with -lrdkafka++) under /usr/local. You may need to run sudo ldconfig afterwards so the dynamic linker picks up the new libraries.
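To confirm that the library and headers were installed correctly before involving Qt, a minimal check like the one below (the file name check.cpp and build command are just assumptions) can be compiled straight from the command line and should print the installed librdkafka version:

// check.cpp - sanity check that the librdkafka C++ wrapper is usable.
// Assumed build command: g++ check.cpp -o check -I/usr/local/include/librdkafka -L/usr/local/lib -lrdkafka++
#include <iostream>
#include "rdkafkacpp.h"

int main() {
    // version_str() reports the version of the librdkafka build that was linked in.
    std::cout << "librdkafka version: " << RdKafka::version_str() << std::endl;
    return 0;
}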

Trying out librdkafka

Create a new Qt project and add librdkafka to the .pro file:

INCLUDEPATH += /usr/local/include/librdkafka
LIBS += -L/usr/local/lib -lrdkafka
LIBS += -L/usr/local/lib -lrdkafka++

The producer source file is as follows:

#include <iostream>
#include <string>
#include <cstdlib>
#include <cstdio>
#include <csignal>
#include <cstring>
 
#include <getopt.h>
 
#include "rdkafkacpp.h"
 
static bool run = true;
 
static void sigterm (int sig) {
    run = false;
}
 
class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
public:
    void dr_cb (RdKafka::Message &message) {
        std::cout << "Message delivery for (" << message.len() << " bytes): " <<
                     message.errstr() << std::endl;
        if (message.key())
            std::cout << "Key: " << *(message.key()) << ";" << std::endl;
    }
};
 
class ExampleEventCb : public RdKafka::EventCb {
public:
    void event_cb (RdKafka::Event &event) {
        switch (event.type())
        {
        case RdKafka::Event::EVENT_ERROR:
            std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
                         event.str() << std::endl;
            if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
                run = false;
            break;
 
        case RdKafka::Event::EVENT_STATS:
            std::cerr << "\"STATS\": " << event.str() << std::endl;
            break;
 
        case RdKafka::Event::EVENT_LOG:
            fprintf(stderr, "LOG-%i-%s: %s\n",
                    event.severity(), event.fac().c_str(), event.str().c_str());
            break;
 
        default:
            std::cerr << "EVENT " << event.type() <<
                         " (" << RdKafka::err2str(event.err()) << "): " <<
                         event.str() << std::endl;
            break;
        }
    }
};
 
int main ()
{
    std::string brokers = "localhost";
    std::string errstr;
    std::string topic_str="test";
    int32_t partition = RdKafka::Topic::PARTITION_UA;
 
    RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
 
    conf->set("bootstrap.servers", brokers, errstr);
 
    ExampleEventCb ex_event_cb;
    conf->set("event_cb", &ex_event_cb, errstr);
 
    signal(SIGINT, sigterm);
    signal(SIGTERM, sigterm);
 
    ExampleDeliveryReportCb ex_dr_cb;
    conf->set("dr_cb", &ex_dr_cb, errstr);
 
    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
    if (!producer) {
        std::cerr << "Failed to create producer: " << errstr << std::endl;
        exit(1);
    }
    std::cout << "% Created producer " << producer->name() << std::endl;
 
    RdKafka::Topic *topic = RdKafka::Topic::create(producer, topic_str,
                                                   tconf, errstr);
    if (!topic) {
        std::cerr << "Failed to create topic: " << errstr << std::endl;
        exit(1);
    }
 
    for (std::string line; run && std::getline(std::cin, line);) {
        if (line.empty()) {
            producer->poll(0);
            continue;
        }
 
        RdKafka::ErrorCode resp =
                producer->produce(topic, partition,
                                  RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
                                  const_cast<char *>(line.c_str()), line.size(),
                                  NULL, NULL);
        if (resp != RdKafka::ERR_NO_ERROR)
            std::cerr << "% Produce failed: " <<
                         RdKafka::err2str(resp) << std::endl;
        else
            std::cerr << "% Produced message (" << line.size() << " bytes)" <<
                         std::endl;
 
        producer->poll(0);
    }
 
    run = true;
    // Drain the output queue before exiting
    while (run && producer->outq_len() > 0) {
        std::cerr << "Waiting for " << producer->outq_len() << std::endl;
        producer->poll(1000);
    }
 
    delete conf;
    delete tconf;
    delete topic;
    delete producer;
 
    RdKafka::wait_destroyed(5000);
 
    return 0;
}
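
The example above uses the legacy Topic-object produce() API to match the original post. Newer librdkafka releases also expose an overload of produce() that takes the topic name directly, plus flush() for shutdown. The short sketch below shows that variant; the file name producer_min.cpp, the broker address localhost:9092, and the topic test are assumptions matching the setup above, and the exact overload set depends on your librdkafka version:

// producer_min.cpp - minimal sketch using the topic-name based produce()
// overload available in recent librdkafka releases.
#include <iostream>
#include <string>
#include "rdkafkacpp.h"

int main() {
    std::string errstr;
    RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    conf->set("bootstrap.servers", "localhost:9092", errstr);

    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
    if (!producer) {
        std::cerr << "Failed to create producer: " << errstr << std::endl;
        return 1;
    }

    std::string payload = "hello from the minimal sketch";
    RdKafka::ErrorCode err = producer->produce(
        "test",                               // topic name (assumed, as created above)
        RdKafka::Topic::PARTITION_UA,         // let librdkafka pick the partition
        RdKafka::Producer::RK_MSG_COPY,       // copy the payload into librdkafka
        const_cast<char *>(payload.c_str()), payload.size(),
        NULL, 0,                              // no key
        0,                                    // let librdkafka assign the timestamp
        NULL);                                // no per-message opaque
    if (err != RdKafka::ERR_NO_ERROR)
        std::cerr << "Produce failed: " << RdKafka::err2str(err) << std::endl;

    producer->flush(5000);                    // wait up to 5 s for outstanding delivery
    delete producer;
    delete conf;
    return 0;
}

Here flush() plays the role of the manual outq_len()/poll() drain loop used in the full example above.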
 

The consumer uses the same project file; its source file is as follows:

#include <iostream>
#include <string>
#include <cstdlib>
#include <cstdio>
#include <csignal>
#include <cstring>
 
#include <sys/time.h>
#include <getopt.h>
#include <unistd.h>
 
#include "rdkafkacpp.h"
 
static bool run = true;
static bool exit_eof = true;
static int eof_cnt = 0;
static int partition_cnt = 0;
static int verbosity = 1;
static long msg_cnt = 0;
static int64_t msg_bytes = 0;
 
static void sigterm (int sig) {
    run = false;
}
 
class ExampleEventCb : public RdKafka::EventCb {
public:
    void event_cb (RdKafka::Event &event) {
        switch (event.type())
        {
        case RdKafka::Event::EVENT_ERROR:
            std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
                         event.str() << std::endl;
            if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
                run = false;
            break;
 
        case RdKafka::Event::EVENT_STATS:
            std::cerr << "\"STATS\": " << event.str() << std::endl;
            break;
 
        case RdKafka::Event::EVENT_LOG:
            fprintf(stderr, "LOG-%i-%s: %s\n",
                    event.severity(), event.fac().c_str(), event.str().c_str());
            break;
 
        case RdKafka::Event::EVENT_THROTTLE:
            std::cerr << "THROTTLED: " << event.throttle_time() << "ms by " <<
                         event.broker_name() << " id " << (int)event.broker_id() << std::endl;
            break;
 
        default:
            std::cerr << "EVENT " << event.type() <<
                         " (" << RdKafka::err2str(event.err()) << "): " <<
                         event.str() << std::endl;
            break;
        }
    }
};
 
void msg_consume(RdKafka::Message* message, void* opaque) {
    switch (message->err()) {
    case RdKafka::ERR__TIMED_OUT:
        //std::cerr << "RdKafka::ERR__TIMED_OUT"<<std::endl;
        break;
 
    case RdKafka::ERR_NO_ERROR:
        /* Real message */
        msg_cnt++;
        msg_bytes += message->len();
        if (verbosity >= 3)
            std::cerr << "Read msg at offset " << message->offset() << std::endl;
        RdKafka::MessageTimestamp ts;
        ts = message->timestamp();
        if (verbosity >= 2 &&
                ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) {
            std::string tsname = "?";
            if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
                tsname = "create time";
            else if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME)
                tsname = "log append time";
            std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl;
        }
        if (verbosity >= 2 && message->key()) {
            std::cout << "Key: " << *message->key() << std::endl;
        }
        if (verbosity >= 1) {
            printf("%.*s\n",
                   static_cast<int>(message->len()),
                   static_cast<const char *>(message->payload()));
        }
        break;
 
    case RdKafka::ERR__PARTITION_EOF:
        /* Last message */
        if (exit_eof && ++eof_cnt == partition_cnt) {
            std::cerr << "%% EOF reached for all " << partition_cnt <<
                         " partition(s)" << std::endl;
            run = false;
        }
        break;
 
    case RdKafka::ERR__UNKNOWN_TOPIC:
    case RdKafka::ERR__UNKNOWN_PARTITION:
        std::cerr << "Consume failed: " << message->errstr() << std::endl;
        run = false;
        break;
 
    default:
        /* Errors */
        std::cerr << "Consume failed: " << message->errstr() << std::endl;
        run = false;
    }
}
 
class ExampleConsumeCb : public RdKafka::ConsumeCb {
public:
    void consume_cb (RdKafka::Message &msg, void *opaque) {
        msg_consume(&msg, opaque);
    }
};
 
int main () {
    std::string brokers = "localhost";
    std::string errstr;
    std::string topic_str="test";
    std::vector<std::string> topics;
    std::string group_id="101";
 
    RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
    // group.id must be set
    if (conf->set("group.id", group_id, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
    }
 
    topics.push_back(topic_str);
    // bootstrap.servers can be replaced with metadata.broker.list
    conf->set("bootstrap.servers", brokers, errstr);
 
    ExampleConsumeCb ex_consume_cb;
    conf->set("consume_cb", &ex_consume_cb, errstr);
 
    ExampleEventCb ex_event_cb;
    conf->set("event_cb", &ex_event_cb, errstr);
    conf->set("default_topic_conf", tconf, errstr);
 
    signal(SIGINT, sigterm);
    signal(SIGTERM, sigterm);
 
    RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr);
    if (!consumer) {
        std::cerr << "Failed to create consumer: " << errstr << std::endl;
        exit(1);
    }
    std::cout << "% Created consumer " << consumer->name() << std::endl;
 
    RdKafka::ErrorCode err = consumer->subscribe(topics);
    if (err) {
        std::cerr << "Failed to subscribe to " << topics.size() << " topics: "
                  << RdKafka::err2str(err) << std::endl;
        exit(1);
    }
 
    while (run) {
        // If no message arrives within 5000 ms, consume() returns a message with err RdKafka::ERR__TIMED_OUT
        RdKafka::Message *msg = consumer->consume(5000);
        msg_consume(msg, NULL);
        delete msg;
    }
 
    consumer->close();
 
    delete conf;
    delete tconf;
    delete consumer;
 
    std::cerr << "% Consumed " << msg_cnt << " messages ("
              << msg_bytes << " bytes)" << std::endl;
 
    // Wait for rdkafka to clean up its resources before the application exits
    RdKafka::wait_destroyed(5000);
 
    return 0;
}
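
A note on the consumer's design: KafkaConsumer::consume() always returns a Message object, even when the timeout expires, in which case its err() is RdKafka::ERR__TIMED_OUT; that is why msg_consume() silently ignores the timeout case and why the loop deletes every returned message. Also, because group.id is set, running several instances of this consumer with the same group id splits the topic's partitions among them, while instances with different group ids each receive the full stream.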

Start the ZooKeeper and Kafka services, then build and run these two programs, and you get QQ-style message send/receive between them.



