前言:网上看到很多关于kafka在windows下的编译及使用cmd运行例子,也有c++在集群上和windows的kafka编程例子。之前自己编译好了librdkafka,也生成了lib和dll文件,例子程序的库文件都配好了,但是运行不了。自己摸索了两天终于可以编译成功并且能够发送信息了。之前以为编译好librdkafka就可以运行了,然而并非如此。简单说明一下它们的关系:我们编程发送消息要成功,消息必须经过kafka队列,而kafka队列并不是我们通过librdkafka编程定义的,需要单独安装kafka服务。关系就是这样的。
总结下步骤:
1.kafka的安装及windows环境配置
2.librdkafka的编译。
1.kafka的安装及windows环境配置
https://www.cnblogs.com/lnice/p/9668750.html感觉这个老哥写的稍微好点,我偷懒了,
划重点,1)不管是通过cmd创建消费者生产者传递消息还是通过编程创建生产者消费者传递消息,下面两个窗口不要关掉。
也就是说,在编程的时候先要通过cmd运行zkserver和.\bin\windows\kafka-server-start.bat config\server.properties,后面运行指令需要先进入到kafka的压缩目录,如上面红色框所示。
重点2),在安装kafka时所有的软件(java,ZOOKEEPER)文件夹名字最好不要有空格,不然容易报错,
重点3),高版本的kafka和低版本的创建消费者cmd指令有差异,低版本的创建消费者指令在高版本无效。
2.https://blog.csdn.net/ke20100808/article/details/79224611参考这位老兄的哈哈,或者这个https://blog.csdn.net/jiao_mrswang/article/details/88201172,总之,先编译好librdkafka的lib和dll两个文件。这个熟悉c++编程的老哥应该不用我多说吧。
3.第三步当然是上实例啦,记得在编程的时候先要通过cmd运行zkserver和.\bin\windows\kafka-server-start.bat config\server.properties,
生产者例子
#include <iostream>
#include <string>
#include <list>
#include <stdint.h>
#include "../src-cpp/rdkafkacpp.h"
static bool run = true;
static bool exit_eof = false;
void dump_config(RdKafka::Conf* conf) {
std::list<std::string> *dump = conf->dump();
printf("config dump(%d):\n", (int32_t)dump->size());
for (auto it = dump->begin(); it != dump->end();) {
std::string name = *it++;
std::string value = *it++;
printf("%s = %s\n", name.c_str(), value.c_str());
}
printf("---------------------------------------------\n");
}
class my_event_cb : public RdKafka::EventCb {
public:
void event_cb(RdKafka::Event &event) override {
switch (event.type())
{
case RdKafka::Event::EVENT_ERROR:
std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
run = false;
break;
case RdKafka::Event::EVENT_STATS:
std::cerr << "\"STATS\": " << event.str() << std::endl;
break;
case RdKafka::Event::EVENT_LOG:
fprintf(stderr, "LOG-%i-%s: %s\n",
event.severity(), event.fac().c_str(), event.str().c_str());
break;
default:
std::cerr << "EVENT " << event.type() <<
" (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
break;
}
}
};
class my_hash_partitioner_cb : public RdKafka::PartitionerCb {
public:
int32_t partitioner_cb(const RdKafka::Topic *topic, const std::string *key,
int32_t partition_cnt, void *msg_opaque) override {
return djb_hash(key->c_str(), key->size()) % partition_cnt;
}
private:
static inline unsigned int djb_hash(const char *str, size_t len) {
unsigned int hash = 5381;
for (size_t i = 0; i < len; i++)
hash = ((hash << 5) + hash) + str[i];
return hash;
}
};
namespace producer_ts {
class my_delivery_report_cb : public RdKafka::DeliveryReportCb {
public:
void dr_cb(RdKafka::Message& message) override {
printf("message delivery %d bytes, error:%s, key: %s\n",
(int32_t)message.len(), message.errstr().c_str(), message.key() ? message.key()->c_str() : "");
}
};
void producer_test() {
printf("producer test\n");
int32_t partition = RdKafka::Topic::PARTITION_UA;
printf("input brokers list(192.168.0.127:9092,192.168.0.128:9092,192.168.0.129:9092):\n");
std::string broker_list;
//std::cin >> broker_list;
broker_list = "127.0.0.1:9092";
printf("input partition:");
//std::cin >> partition;
partition = 0;
// config
RdKafka::Conf* global_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
RdKafka::Conf* topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
my_hash_partitioner_cb hash_partitioner;
my_event_cb event_cb;
my_delivery_report_cb delivery_cb;
std::string err_string;
if (topic_conf->set("partitioner_cb", &hash_partitioner, err_string) != RdKafka::Conf::CONF_OK) {
printf("set partitioner_cb error: %s\n", err_string.c_str());
return;
}
global_conf->set("metadata.broker.list", broker_list, err_string);
global_conf->set("event_cb", &event_cb, err_string);
global_conf->set("dr_cb", &delivery_cb, err_string);
//global_conf->set("retry.backoff.ms", "10", err_string);
//global_conf->set("debug", "all", err_string);
//global_conf->set("debug", "topic,msg", err_string);
//global_conf->set("debug", "msg,queue", err_string);
dump_config(global_conf);
dump_config(topic_conf);
// create producer
RdKafka::Producer* producer = RdKafka::Producer::create(global_conf, err_string);
if (!producer) {
printf("failed to create producer, %s\n", err_string.c_str());
return;
}
printf("created producer %s\n", producer->name().c_str());
std::string topic_name;
while (true) {
printf("input topic to create:\n");
std::cin >> topic_name;
// create topic
RdKafka::Topic* topic =
RdKafka::Topic::create(producer, topic_name, topic_conf, err_string);
if (!topic) {
printf("try create topic[%s] failed, %s\n",
topic_name.c_str(), err_string.c_str());
return;
}
printf(">");
for (std::string line; run && std::getline(std::cin, line);) {
if (line.empty()) {
producer->poll(0);
continue;
}
if (line == "quit") {
run = false;
break;
}
std::string key = "kafka_test";
RdKafka::ErrorCode res = producer->produce(topic, partition,
RdKafka::Producer::RK_MSG_COPY,
(char*)line.c_str(), line.size(), key.c_str(), key.size(), NULL);
if (res != RdKafka::ERR_NO_ERROR) {
printf("produce failed, %s\n", RdKafka::err2str(res).c_str());
}
else {
printf("produced msg, bytes %d\n", (int32_t)line.size());
}
// do socket io
producer->poll(0);
printf("outq_len: %d\n", producer->outq_len());
//producer->flush(1000);
//while (run && producer->outq_len()) {
// printf("wait for write queue( size %d) write finish\n", producer->outq_len());
// producer->poll(1000);
//}
printf(">");
}
delete topic;
if (!run) {
break;
}
}
run = true;
while (run && producer->outq_len()) {
printf("wait for write queue( size %d) write finish\n", producer->outq_len());
producer->poll(1000);
}
delete producer;
}
}
运行时在main函数的cpp加上该代码的头文件,运行函数为void producer_test()。这代码是别人的,写得有点乱,将就看吧,能运行。
消费者代码
#include <stdint.h>

#include <iostream>
#include <list>
#include <memory>
#include <string>

#include "../src-cpp/rdkafkacpp.h"
class kafka_consumer_client
{
public:
kafka_consumer_client(const std::string& brokers, const std::string& topics, std::string groupid, int64_t offset);
//kafka_consumer_client();
virtual ~kafka_consumer_client();
bool initClient();
bool consume(int timeout_ms);
void finalize();
private:
void consumer(RdKafka::Message *msg, void *opt);
std::string brokers_;
std::string topics_;
std::string groupid_;
int64_t last_offset_ = 0;
RdKafka::Consumer *kafka_consumer_ = nullptr;
RdKafka::Topic *topic_ = nullptr;
int64_t offset_ = RdKafka::Topic::OFFSET_BEGINNING;
int32_t partition_ = 0;
};
#include "kafka_consumer_client.h"
bool run_ = true;
kafka_consumer_client::kafka_consumer_client(const std::string& brokers, const std::string& topics, std::string groupid, int64_t offset)
:brokers_(brokers),
topics_(topics),
groupid_(groupid),
offset_(offset){
}
//kafka_consumer_client::kafka_consumer_client(){}
kafka_consumer_client::~kafka_consumer_client(){}
bool kafka_consumer_client::initClient(){
RdKafka::Conf *conf = nullptr;
conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
if (!conf){
fprintf(stderr, "RdKafka create global conf failed\n");
return false;
}
std::string errstr;
/*设置broker list*/
if (conf->set("bootstrap.servers", brokers_, errstr) != RdKafka::Conf::CONF_OK){
fprintf(stderr, "RdKafka conf set brokerlist failed : %s\n", errstr.c_str());
}
/*设置consumer group*/
if (conf->set("group.id", groupid_, errstr) != RdKafka::Conf::CONF_OK){
fprintf(stderr, "RdKafka conf set group.id failed : %s\n", errstr.c_str());
}
std::string strfetch_num = "10240000";
/*每次从单个分区中拉取消息的最大尺寸*/
if (conf->set("max.partition.fetch.bytes", strfetch_num, errstr) != RdKafka::Conf::CONF_OK){
fprintf(stderr, "RdKafka conf set max.partition failed : %s\n", errstr.c_str());
}
/*创建kafka consumer实例*/
kafka_consumer_ = RdKafka::Consumer::create(conf, errstr);
if (!kafka_consumer_){
fprintf(stderr, "failed to ceate consumer\n");
}
delete conf;
RdKafka::Conf *tconf = nullptr;
/*创建kafka topic的配置*/
tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
if (!tconf){
fprintf(stderr, "RdKafka create topic conf failed\n");
return false;
}
/*kafka + zookeeper,当消息被消费时,会想zk提交当前groupId的consumer消费的offset信息,
当consumer再次启动将会从此offset开始继续消费.在consumter端配置文件中(或者是
ConsumerConfig类参数)有个"autooffset.reset"(在kafka 0.8版本中为auto.offset.reset),
有2个合法的值"largest"/"smallest",默认为"largest",此配置参数表示当此groupId下的消费者,
在ZK中没有offset值时(比如新的groupId,或者是zk数据被清空),consumer应该从哪个offset开始
消费.largest表示接受接收最大的offset(即最新消息),smallest表示最小offset,即从topic的
开始位置消费所有消息.*/
if (tconf->set("auto.offset.reset", "smallest", errstr) != RdKafka::Conf::CONF_OK){
fprintf(stderr, "RdKafka conf set auto.offset.reset failed : %s\n", errstr.c_str());
}
topic_ = RdKafka::Topic::create(kafka_consumer_, topics_, tconf, errstr);
if (!topic_){
fprintf(stderr, "RdKafka create topic failed : %s\n", errstr.c_str());
}
delete tconf;
RdKafka::ErrorCode resp = kafka_consumer_->start(topic_, partition_, offset_);
if (resp != RdKafka::ERR_NO_ERROR){
fprintf(stderr, "failed to start consumer : %s\n", RdKafka::err2str(resp).c_str());
}
return true;
}
void kafka_consumer_client::consumer(RdKafka::Message *message, void *opt){
switch (message->err()){
case RdKafka::ERR__TIMED_OUT:
break;
case RdKafka::ERR_NO_ERROR:
printf("%.*s\n",
static_cast<int>(message->len()),
static_cast<const char*>(message->payload()));
last_offset_ = message->offset();
break;
case RdKafka::ERR__PARTITION_EOF:
std::cerr << "%% Reached the end of the queue, offset: " << last_offset_ << std::endl;
break;
case RdKafka::ERR__UNKNOWN_TOPIC:
case RdKafka::ERR__UNKNOWN_PARTITION:
std::cerr << "Consume failed: " << message->errstr() << std::endl;
run_ = false;
break;
default:
std::cerr << "Consume failed: " << message->errstr() << std::endl;
run_ = false;
break;
}
}
bool kafka_consumer_client::consume(int timeout_ms){
RdKafka::Message *msg = nullptr;
while (run_){
msg = kafka_consumer_->consume(topic_, partition_, timeout_ms);
consumer(msg, nullptr);
kafka_consumer_->poll(0);
delete msg;
}
kafka_consumer_->stop(topic_, partition_);
if (topic_){
delete topic_;
topic_ = nullptr;
}
if (kafka_consumer_){
delete kafka_consumer_;
kafka_consumer_ = nullptr;
}
/*销毁kafka实例*/
RdKafka::wait_destroyed(5000);
return true;
}
int main()
{
/*consumer_ts::consumer_test();*/
std::string topics = "linlin";
std::string brokers = "127.0.0.1:9092";
std::string group = "1";
std::shared_ptr<kafka_consumer_client> kafka_consumer_client_ = std::make_shared<kafka_consumer_client>(brokers, topics, group, 0);
//std::shared_ptr<kafka_consumer_client> kafka_consumer_client_ = std::make_shared<kafka_consumer_client>();
if (!kafka_consumer_client_->initClient()){
fprintf(stderr, "kafka server initialize error\n");
}
else{
printf("start kafka consumer\n");
kafka_consumer_client_->consume(1000);
}
fprintf(stderr, "kafka consume exit! \n");
return 0;
}
上面是h文件,中间是cpp文件,最下面是main函数,好了看下效果吧。
左边是生产者,右边是消费者,初始学习kafka望大神指教