linux qt封装so库,Linux qtcreator下kafka之librdkafka库的C++语言封装,实现生产和消费(★firecat推荐★)...

配置文件参考来源

源码参考https://github.com/edenhill/librdkafka/tree/master/examples/rdkafka_example.cpp

编译用户自己的应用程序,编译选项要加上-lrdkafka -lz -lpthread -lrt这些选项。

例如,我使用QtCreator之cmake模式,CMakeLists.txt如下:

cmake_minimum_required(VERSION 2.8)

project(KafkaProducerClient)

#查找当前目录下的所有源文件,并将名称保存到DIR_SRCS变量

aux_source_directory(. DIR_SRCS)

#指定编译选项,方法1

#ADD_DEFINITIONS(-lz -lpthread -lrt)

#指定编译选项,方法2

#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -lz -lpthread -lrt")

#指定生成目标

add_executable(${PROJECT_NAME} ${DIR_SRCS})

#指定在链接目标文件的时候需要链接的外部库,其效果类似gcc的编译参数“-l”,可以解决外部库的依赖问题

TARGET_LINK_LIBRARIES(${PROJECT_NAME} rdkafka)

TARGET_LINK_LIBRARIES(${PROJECT_NAME} rdkafka++)

编译通过,但是运行时会报错:error while loading shared libraries: librdkafka.so.1: cannot open shared object file: No such file or directory

此时需要在/etc/ld.so.conf中加入librdkafka.so所在的目录:/usr/local/lib/

然后在终端执行命令,使之生效:

[root@localhost etc]# ldconfig

注意,/usr/local/lib/每次有库文件更新,都需要终端重新运行一次ldconfig这条命令。

2、生产者源码

(1)main.cpp

#include <cstdio>
#include <cstring>

#include "kafkaproducerclient.h"

using namespace std;

int main()

{

//KafkaProducerClient* KafkaprClient_ = new KafkaProducerClient("localhost:9092", "test", 0);

KafkaProducerClient* KafkaprClient_ = new KafkaProducerClient("172.16.6.161:9092", "test", 0);

KafkaprClient_->Init();

KafkaprClient_->Send("hello world!");

char str_msg[] = "Hello Kafka!";

while (fgets(str_msg, sizeof(str_msg), stdin))

{

size_t len = strlen(str_msg);

if (str_msg[len - 1] == '\n')

{

str_msg[--len] = '\0';

}

if (strcmp(str_msg, "end") == 0)

{

break;

}

KafkaprClient_->Send(str_msg);

}

return 0;

}

(2)kafkaproducerclient.h

#ifndef KAFKAPRODUCERCLIENT_H

#define KAFKAPRODUCERCLIENT_H

#include <string>

#include <iostream>

#include <list>

#include <vector>

#include <fstream>

#include <cstdio>

#include <cstdlib>

#include <cstring>

#include <csignal>

#include <librdkafka/rdkafkacpp.h>

using std::string;

using std::list;

using std::cout;

using std::endl;

using std::vector;

using std::fstream;

// Delivery-report callback: librdkafka invokes dr_cb() once per produced
// message (from poll()) to report whether the broker accepted it.
class KafkaProducerDeliveryReportCallBack : public RdKafka::DeliveryReportCb {
public:
    void dr_cb(RdKafka::Message &message) {
        std::cout << "Message delivery for (" << message.len()
                  << " bytes): " << message.errstr() << std::endl;

        // A key is optional; print it only when one was attached.
        const std::string *pKey = message.key();
        if (pKey) {
            std::cout << "Key: " << *pKey << ";" << std::endl;
        }
    }
};

// Event callback: receives librdkafka errors, statistics and log lines.
class KafkaProducerEventCallBack : public RdKafka::EventCb {
public:
    void event_cb(RdKafka::Event &event) {
        switch (event.type())
        {
        case RdKafka::Event::EVENT_ERROR:
            std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
                event.str() << std::endl;
            // fix: the original only broke out for ERR__ALL_BROKERS_DOWN and
            // fell through into EVENT_STATS for every other error, printing a
            // bogus "STATS" line. Errors should always end here.
            break;

        case RdKafka::Event::EVENT_STATS:
            std::cerr << "\"STATS\": " << event.str() << std::endl;
            break;

        case RdKafka::Event::EVENT_LOG:
            fprintf(stderr, "LOG-%i-%s: %s\n",
                    event.severity(), event.fac().c_str(), event.str().c_str());
            break;

        default:
            std::cerr << "EVENT " << event.type() <<
                " (" << RdKafka::err2str(event.err()) << "): " <<
                event.str() << std::endl;
            break;
        }
    }
};

// Thin wrapper over librdkafka's C++ producer API: connects to a broker
// list, binds a single topic/partition pair, and sends string payloads.
// Typical usage: construct -> Init() -> Send() repeatedly -> Stop()/destroy.
class KafkaProducerClient
{
public:
    // brokers: "host:port[,host:port...]" broker list
    // topics:  topic name to produce to
    // nPpartition: fixed target partition (default 0)
    KafkaProducerClient(const string &brokers, const string &topics, int nPpartition = 0);

    virtual ~KafkaProducerClient();

    // Creates the producer and topic handles; returns false on failure.
    bool Init();

    // Produces one message (blocks until the delivery queue drains).
    void Send(const string &msg);

    // Releases the topic and producer handles.
    void Stop();

private:
    RdKafka::Producer *m_pProducer = NULL; // owned; created in Init()
    RdKafka::Topic *m_pTopic = NULL;       // owned; created in Init()
    KafkaProducerDeliveryReportCallBack m_producerDeliveryReportCallBack;
    KafkaProducerEventCallBack m_producerEventCallBack;
    std::string m_strTopics;
    std::string m_strBroker;
    bool m_bRun = false;     // gates Send(); set true by the constructor
    int m_nPpartition = 0;   // fixed partition passed to produce()
};

#endif // KAFKAPRODUCERCLIENT_H

(3)kafkaproducerclient.cpp

#include "kafkaproducerclient.h"

// Stores the connection parameters; no librdkafka objects are created until
// Init() is called.
KafkaProducerClient::KafkaProducerClient(const string &brokers, const string &topics, int nPpartition /*= 0*/)
    : m_strTopics(topics), m_strBroker(brokers), m_bRun(true), m_nPpartition(nPpartition)
{
    // fix: initializer list reordered to match declaration order (members
    // always initialize in declaration order; the old order drew -Wreorder),
    // and the stale /*= 1*/ comment corrected to the header's default of 0.
}

// Releases the topic and producer handles via Stop().
KafkaProducerClient::~KafkaProducerClient()
{
    Stop();
}

bool KafkaProducerClient::Init()

{

string errstr = "";

/*

* Create configuration objects

*/

RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);

RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

/*Set configuration properties,设置broker list*/

if (conf->set("metadata.broker.list", m_strBroker, errstr) != RdKafka::Conf::CONF_OK){

std::cerr << "RdKafka conf set brokerlist failed :" << errstr.c_str() << endl;

}

/* Set delivery report callback */

conf->set("dr_cb", &m_producerDeliveryReportCallBack, errstr);

conf->set("event_cb", &m_producerEventCallBack, errstr);

/*

* Create producer using accumulated global configuration.

*/

m_pProducer = RdKafka::Producer::create(conf, errstr);

if (!m_pProducer) {

std::cerr << "Failed to create producer: " << errstr << std::endl;

return false;

}

std::cout << "% Created producer " << m_pProducer->name() << std::endl;

/*

* Create topic handle.

*/

m_pTopic = RdKafka::Topic::create(m_pProducer, m_strTopics,

tconf, errstr);

if (!m_pTopic) {

std::cerr << "Failed to create topic: " << errstr << std::endl;

return false;

}

return true;

}

// Produces one message to the bound topic/partition and blocks until the
// internal delivery queue drains (delivery reports fire via poll()).
void KafkaProducerClient::Send(const string &msg)
{
    if (!m_bRun)
        return;

    /*
     * Produce message. RK_MSG_COPY makes librdkafka copy the payload, so the
     * const_cast is safe: the buffer is neither modified nor retained.
     * fix: the original's `const_cast(msg.c_str())` was missing the template
     * argument and did not compile.
     */
    RdKafka::ErrorCode resp = m_pProducer->produce(m_pTopic, m_nPpartition,
                                                   RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
                                                   const_cast<char *>(msg.c_str()), msg.size(),
                                                   NULL, NULL);
    if (resp != RdKafka::ERR_NO_ERROR)
        std::cerr << "Produce failed: " << RdKafka::err2str(resp) << std::endl;
    else
        std::cerr << "Produced message (" << msg.size() << " bytes)" << std::endl;

    // Serve delivery-report/event callbacks without blocking.
    m_pProducer->poll(0);

    /* Wait for messages to be delivered */ //firecat add
    while (m_bRun && m_pProducer->outq_len() > 0) {
        std::cerr << "Waiting for " << m_pProducer->outq_len() << std::endl;
        m_pProducer->poll(100);
    }
}

void KafkaProducerClient::Stop()

{

delete m_pTopic;

delete m_pProducer;

}

3、消费者源码

(1)main.cpp

#include <cstdio>

#include "kafkaconsumerclient.h"

using namespace std;

int main()

{

KafkaConsumerClient *KafkaConsumerClient_ = new KafkaConsumerClient("localhost:9092", "test", "1", 0, RdKafka::Topic::OFFSET_BEGINNING);//OFFSET_BEGINNING,OFFSET_END

if (!KafkaConsumerClient_->Init())

{

fprintf(stderr, "kafka server initialize error\n");

return -1;

}

KafkaConsumerClient_->Start(1000);

return 0;

}

(2)kafkaconsumerclient.h

#ifndef KAFKACONSUMERCLIENT_H

#define KAFKACONSUMERCLIENT_H

#include <string>

#include <iostream>

#include <list>

#include <vector>

#include <fstream>

#include <cstdio>

#include <cstdlib>

#include <cstring>

#include <csignal>

#include <librdkafka/rdkafkacpp.h>

using std::string;

using std::list;

using std::cout;

using std::endl;

using std::vector;

using std::fstream;

// Thin wrapper over librdkafka's legacy (simple) C++ consumer API: reads a
// single topic/partition from a given start offset and prints each message.
// Typical usage: construct -> Init() -> Start() (blocks) -> Stop() to exit.
class KafkaConsumerClient {

public:
    // brokers:    "host:port[,host:port...]" broker list
    // topics:     topic name to consume from
    // groupid:    consumer group id (set via "group.id")
    // nPartition: fixed partition to read (the simple consumer API requires one)
    // offset:     start offset (e.g. RdKafka::Topic::OFFSET_BEGINNING)
    KafkaConsumerClient(const std::string& brokers, const std::string& topics, std::string groupid, int32_t nPartition = 0, int64_t offset = 0);

    virtual ~KafkaConsumerClient();

    //初始化 — create consumer + topic handles and start fetching
    bool Init();

    //开始获取消息 — blocking consume loop; timeout_ms is the per-poll timeout
    void Start(int timeout_ms);

    //停止 — request the Start() loop to exit (teardown happens in Start())
    void Stop();

private:
    // Dispatches one received message/error to the appropriate handler.
    void Msg_consume(RdKafka::Message* message, void* opaque);

private:
    std::string m_strBrokers;
    std::string m_strTopics;
    std::string m_strGroupid;
    int64_t m_nLastOffset = 0; // offset of the last successfully read message
    RdKafka::Consumer *m_pKafkaConsumer = NULL; // owned; created in Init()
    RdKafka::Topic *m_pTopic = NULL;            // owned; created in Init()
    int64_t m_nCurrentOffset = RdKafka::Topic::OFFSET_BEGINNING; // start offset
    int32_t m_nPartition = 0;
    bool m_bRun = false; // loop flag for Start(); cleared by Stop()
};

#endif // KAFKACONSUMERCLIENT_H

(3)kafkaconsumerclient.cpp

#include "kafkaconsumerclient.h"

// Stores connection parameters; librdkafka objects are created in Init().
KafkaConsumerClient::KafkaConsumerClient(const std::string& brokers, const std::string& topics, std::string groupid, int32_t nPartition /*= 0*/, int64_t offset /*= 0*/)
    :m_strBrokers(brokers),
     m_strTopics(topics),
     m_strGroupid(groupid),
     m_nCurrentOffset(offset),
     m_nPartition(nPartition)
{
    // fix: m_nCurrentOffset now precedes m_nPartition to match declaration
    // order (members initialize in declaration order; old order drew -Wreorder).
}

// Requests the consume loop to exit; actual handle teardown happens at the
// end of Start() (Stop() only clears the run flag).
KafkaConsumerClient::~KafkaConsumerClient()
{
    Stop();
}

// Builds the consumer configuration, creates the consumer and topic handles,
// and starts fetching from m_nCurrentOffset. Returns false on any fatal
// failure (fix: the original continued after a failed Consumer::create and
// dereferenced NULL, ignored Topic::create/start() failures, and leaked conf).
bool KafkaConsumerClient::Init() {
    std::string errstr;

    RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    if (!conf) {
        std::cerr << "RdKafka create global conf failed" << endl;
        return false;
    }

    /*设置broker list*/
    if (conf->set("metadata.broker.list", m_strBrokers, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << "RdKafka conf set brokerlist failed ::" << errstr.c_str() << endl;
    }

    /*设置consumer group*/
    if (conf->set("group.id", m_strGroupid, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << "RdKafka conf set group.id failed :" << errstr.c_str() << endl;
    }

    std::string strfetch_num = "10240000";
    /*每次从单个分区中拉取消息的最大尺寸*/
    if (conf->set("max.partition.fetch.bytes", strfetch_num, errstr) != RdKafka::Conf::CONF_OK){
        std::cerr << "RdKafka conf set max.partition failed :" << errstr.c_str() << endl;
    }

    /*创建kafka consumer实例*/ //Create consumer using accumulated global configuration.
    m_pKafkaConsumer = RdKafka::Consumer::create(conf, errstr);
    delete conf; // create() copied the settings; we own conf on every path
    if (!m_pKafkaConsumer) {
        std::cerr << "failed to ceate consumer" << endl;
        return false; // fix: original fell through and crashed on ->name()
    }

    std::cout << "% Created consumer " << m_pKafkaConsumer->name() << std::endl;

    /*创建kafka topic的配置*/
    RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
    if (!tconf) {
        std::cerr << "RdKafka create topic conf failed" << endl;
        return false;
    }

    if (tconf->set("auto.offset.reset", "smallest", errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << "RdKafka conf set auto.offset.reset failed:" << errstr.c_str() << endl;
    }

    /*
     * Create topic handle.
     */
    m_pTopic = RdKafka::Topic::create(m_pKafkaConsumer, m_strTopics, tconf, errstr);
    delete tconf;
    if (!m_pTopic) {
        std::cerr << "RdKafka create topic failed :" << errstr.c_str() << endl;
        return false; // fix: original went on to start() with a NULL topic
    }

    /*
     * Start consumer for topic+partition at start offset
     */
    RdKafka::ErrorCode resp = m_pKafkaConsumer->start(m_pTopic, m_nPartition, m_nCurrentOffset);
    if (resp != RdKafka::ERR_NO_ERROR) {
        // fix: report the actual error code (errstr is not set by start()).
        std::cerr << "failed to start consumer : " << RdKafka::err2str(resp) << endl;
        return false;
    }

    return true;
}

// Handles one consume() result: prints real messages, ignores timeouts,
// notes partition EOF, and stops the loop on fatal errors.
void KafkaConsumerClient::Msg_consume(RdKafka::Message* message, void* opaque) {
    switch (message->err()) {
    case RdKafka::ERR__TIMED_OUT:
        // Poll timeout with no data — nothing to do.
        break;

    case RdKafka::ERR_NO_ERROR:
        /* Real message */
        std::cout << "Read msg at offset " << message->offset() << std::endl;
        if (message->key()) {
            std::cout << "Key: " << *message->key() << std::endl;
        }
        // fix: both static_casts below lost their template arguments in the
        // original and did not compile. %.*s prints exactly len() bytes, so a
        // non-NUL-terminated payload is safe.
        printf("%.*s\n",
               static_cast<int>(message->len()),
               static_cast<const char *>(message->payload()));
        m_nLastOffset = message->offset();
        break;

    case RdKafka::ERR__PARTITION_EOF:
        /* Last message */
        cout << "Reached the end of the queue, offset: " << m_nLastOffset << endl;
        //Stop();
        break;

    case RdKafka::ERR__UNKNOWN_TOPIC:
    case RdKafka::ERR__UNKNOWN_PARTITION:
        std::cerr << "Consume failed: " << message->errstr() << std::endl;
        Stop();
        break;

    default:
        /* Errors */
        std::cerr << "Consume failed: " << message->errstr() << std::endl;
        Stop();
        break;
    }
}

void KafkaConsumerClient::Start(int timeout_ms){

RdKafka::Message *msg = NULL;

m_bRun = true;

while (m_bRun) {

msg = m_pKafkaConsumer->consume(m_pTopic, m_nPartition, timeout_ms);

Msg_consume(msg, NULL);

delete msg;

m_pKafkaConsumer->poll(0);

}

m_pKafkaConsumer->stop(m_pTopic, m_nPartition);

m_pKafkaConsumer->poll(1000);

if (m_pTopic) {

delete m_pTopic;

m_pTopic = NULL;

}

if (m_pKafkaConsumer) {

delete m_pKafkaConsumer;

m_pKafkaConsumer = NULL;

}

/*销毁kafka实例*/ //Wait for RdKafka to decommission.

RdKafka::wait_destroyed(5000);

}

// Signals Start()'s consume loop to exit after the current poll; the actual
// handle teardown happens at the end of Start().
void KafkaConsumerClient::Stop()
{
    m_bRun = false;
}

4.注意事项

(1)生产者

建议分区使用int partition = RD_KAFKA_PARTITION_UA;即根据key自动计算分区号

/* Use builtin partitioner to select partition*/

RD_KAFKA_PARTITION_UA,

* \p partition is the target partition, either:

*   - RdKafka::Topic::PARTITION_UA (unassigned) for

*     automatic partitioning using the topic's partitioner function, or

*   - a fixed partition (0..N)

(2)消费者

msg = m_pKafkaConsumer->consume(m_pTopic, m_nPartition, timeout_ms);

virtual Message *consume (Topic *topic, int32_t partition,

int timeout_ms) = 0;

看来消费者函数必须指定分区号,那么建议采用多进程的方式,每个进程订阅一个分区。

---

参考文章:

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值