近期在工程上需要推送信息给kafka,即简单实现kafka的producer功能,基于C++代码,过程介绍如下:
(1)librdkafka的安装:
在centos7下较为简单,配置好源的情况下,直接运行:
yum install librdkafka librdkafka-devel
为了在基于cmake2.8的工程中使用,在CMakeLists.txt加上:
include_directories(/usr/include)
target_link_libraries(ProjectDemo /usr/lib64/librdkafka.so)
在windows下,则建议使用vcpkg工具。安装好后,在vcpkg.exe所在目录下用命令行运行:
#安装64位版本的librdkafka
.\vcpkg.exe install librdkafka:x64-windows
#为了能在visual studio中直接使用,因为安装的是64位的librdkafka,配置管理器一栏也要设置为64位。
.\vcpkg.exe integrate install
(2) librdkafka的producer的样例代码(参考example中produce.cpp):
#include <iostream>
#include <string>
#include <cstdlib>
#include <cstdio>
#include <csignal>
#include <cstring>
#if _AIX
#include <unistd.h>
#endif
#include <librdkafka/rdkafkacpp.h>
// Set to 0 by the signal handler to request a clean shutdown of the
// produce loop in main(). sig_atomic_t + volatile is the only portable
// way to share a flag with a signal handler.
static volatile sig_atomic_t run = 1;

// Handler for SIGINT/SIGTERM: just flag the main loop to stop.
static void sigterm(int /*signum*/) {
  run = 0;
}
//该类实现消息回显(callback)功能
// Delivery-report callback: librdkafka invokes dr_cb() once per produced
// message (during poll()/flush()) to report its final delivery outcome.
class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
public:
  void dr_cb(RdKafka::Message& message) {
    /* A non-zero message.err() means the delivery failed permanently
     * for this message. */
    if (message.err()) {
      std::cerr << "% Message delivery failed: " << message.errstr() << std::endl;
      return;
    }
    std::cerr << "% Message delivered to topic " << message.topic_name()
              << " [" << message.partition() << "] at offset "
              << message.offset() << std::endl;
  }
};
int main(int argc, char** argv) {
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " <brokers> <topic>\n";
exit(1);
}
//kafka核心参数是brokers和top
//borkers可以是一系列地址和端口,例如:127.0.0.1:12345;127.0.0.2:123456
std::string brokers = argv[1];
//kafka的topic,例如:testtopic
std::string topic = argv[2];
printf(brokers.c_str());
printf(topic.c_str());
RdKafka::Conf* conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
std::string errstr;
if (conf->set("bootstrap.servers", brokers, errstr) !=
RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
exit(1);
}
signal(SIGINT, sigterm);
signal(SIGTERM, sigterm);
//设定消息回显功能
ExampleDeliveryReportCb ex_dr_cb;
if (conf->set("dr_cb", &ex_dr_cb, errstr) != RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
exit(1);
}
//在此建立producer实例,conf可以删除了,避免内存泄漏。
RdKafka::Producer* producer = RdKafka::Producer::create(conf, errstr);
if (!producer) {
std::cerr << "Failed to create producer: " << errstr << std::endl;
exit(1);
}
delete conf;
//从键盘输入消息,发送给producer
std::cout << "% Type message value and hit enter " <<
"to produce message." << std::endl;
for (std::string line; run && std::getline(std::cin, line);) {
if (line.empty()) {
producer->poll(0);
continue;
}
//消息发送,此次采用的是异步模式
retry:
RdKafka::ErrorCode err =
producer->produce(
// Topic 参数
topic,
// Partition 参数: 用于根据key参数分配topic;如果无key参数,则为随机。
RdKafka::Topic::PARTITION_UA,
// 复制一遍要发送的值
RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
// 要发送的数据,根据需要在此修改
const_cast<char*>(line.c_str()), line.size(),
// key 参数
NULL, 0,
// 时间戳,末日当前时间
0,
// 消息头
NULL,
// 每条消息的不透明值输出到报告中
NULL);
if (err != RdKafka::ERR_NO_ERROR) {
std::cerr << "% Failed to produce to topic " << topic << ": " <<
RdKafka::err2str(err) << std::endl;
if (err == RdKafka::ERR__QUEUE_FULL) {
//中间消息队列满了,再重发
producer->poll(1000/*block for max 1000ms*/);
goto retry;
}
}
else {
std::cerr << "% Enqueued message (" << line.size() << " bytes) " <<
"for topic " << topic << std::endl;
}
//调用poll函数发送报告
producer->poll(0);
}
//flush函数 功能是等待所有消息发送完
std::cerr << "% Flushing final messages..." << std::endl;
producer->flush(10 * 1000 /* wait for max 10 seconds */);
if (producer->outq_len() > 0)
std::cerr << "% " << producer->outq_len() <<
" message(s) were not delivered" << std::endl;
delete producer;
return 0;
}