c++ 使用RdKafka::KafkaConsumer获取服务器配置信息与消费、同时消费多个topic、设置消费位置

3 篇文章 0 订阅

      Kafka是一个发布/订阅式的消息框架,支持java、python、c/c++等语言。最近与Kafka接触了一段时间,做个分享,主要介绍代码中的接口与使用方法。

      Kafka使用topic消费,消息会保留一段时间;每次消费可以指定位置,从不同的位置开始消费。一个主题可以分为多个分区,其封装的c++库已经能够按顺序从不同的分区中将数据取出来了,所以还是相对方便的。

      我只写了一个简单的生产和消费实例,并没有重写事件,因为暂时还不需要。本文主要教会大家如何使用高级接口RdKafka::KafkaConsumer消费多个topic中的数据,并且能够设置启动时消费的位置,获取Server端的配置及状态信息等。

     生产者如下

#include <iostream>
#include <thread>
#include "rdkafkacpp.h"

int main()
{
	std::string brokers = "192.168.0.127:9092,192.168.0.128:9092,192.168.0.129:9092";
	std::string errorStr;
	// Global (broker list, message sizes) and per-topic configuration objects.
	RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
	RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
	if (!conf || !tconf) {
		std::cout << "Create RdKafka Conf failed" << std::endl;
		return -1;
	}

	conf->set("message.max.bytes", "10240000", errorStr); // max produced message size in bytes
	conf->set("replica.fetch.max.bytes", "20485760", errorStr);
	conf->set("bootstrap.servers", brokers, errorStr);

	RdKafka::Producer *producer = RdKafka::Producer::create(conf, errorStr);
	if (!producer) {
		std::cout << "Create Producer failed: " << errorStr << std::endl;
		return -1;
	}
	// Create the topic handle used by produce().
	RdKafka::Topic *topic = RdKafka::Topic::create(producer, "koala-stqf-03", tconf, errorStr);
	if (!topic) {
		std::cout << "Create Topic failed: " << errorStr << std::endl;
		return -1; // BUG FIX: original fell through and passed a null topic to produce()
	}

	// BUG FIX: original sent 10 bytes of "123456789", i.e. including the NUL
	// terminator; send exactly the 9 payload bytes instead.
	const std::string payload = "123456789";
	while (true)
	{   // Send one message per second.
		RdKafka::ErrorCode resCode = producer->produce(
			topic, RdKafka::Topic::PARTITION_UA, RdKafka::Producer::RK_MSG_COPY,
			const_cast<char*>(payload.data()), payload.size(), nullptr, nullptr);
		if (resCode != RdKafka::ERR_NO_ERROR) {
			std::cerr << "Produce failed: " << RdKafka::err2str(resCode) << std::endl;
		}
		// Serve delivery report callbacks / internal events (was missing).
		producer->poll(0);

		std::this_thread::sleep_for(std::chrono::seconds(1));
	}

	// Unreachable with the endless loop above; kept to show proper shutdown.
	producer->flush(5000); // wait for in-flight messages before tearing down
	delete topic;
	delete producer;
	delete tconf;
	delete conf;

	RdKafka::wait_destroyed(5000);
	return 0;
}

这个已经是很简单了,相信都看得懂吧。接下来是消费者

  

#include "rdkafkacpp.h"

#include <time.h>

#include <algorithm>
#include <chrono>
#include <cstdio>
#include <iomanip>
#include <iostream>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>

// Inspect one message/event returned by KafkaConsumer::consume(): print its
// timestamp, origin (topic/partition/offset) and payload, or report the error.
void consume_cb(RdKafka::Message &message, void *opaque)
{
	switch (message.err()) {
	case RdKafka::ERR__TIMED_OUT:
		// consume() hit its timeout without receiving a message.
		std::cout << "RdKafka::ERR__TIMED_OUT" << std::endl;
		break;
	case RdKafka::ERR_NO_ERROR: {
		/* Real message.
		 * BUG FIX: braces give this case its own scope; the original declared
		 * locals directly in the case, jumping over their initialization from
		 * the following case labels (ill-formed C++). */
		RdKafka::MessageTimestamp ts = message.timestamp();
		if (ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) {
			std::string timeprefix;
			if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME) {
				timeprefix = "created time";
			}
			else if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME) {
				timeprefix = "log append time";
			}

			// Shift epoch milliseconds to UTC+8 (Beijing time); adjust the
			// offset for other time zones as required.
			unsigned long long milli = ts.timestamp + (unsigned long long)8 * 60 * 60 * 1000;
			auto mTime = std::chrono::milliseconds(milli);
			auto tp = std::chrono::time_point<std::chrono::system_clock, std::chrono::milliseconds>(mTime);
			auto tt = std::chrono::system_clock::to_time_t(tp);
			tm timeinfo;
			::gmtime_s(&timeinfo, &tt); // NOTE(review): MSVC-specific; use gmtime_r on POSIX

			// snprintf instead of sprintf: bounds-checked against the buffer.
			char s[60]{ 0 };
			::snprintf(s, sizeof(s), "%04d-%02d-%02d %02d:%02d:%02d",
				timeinfo.tm_year + 1900, timeinfo.tm_mon + 1, timeinfo.tm_mday,
				timeinfo.tm_hour, timeinfo.tm_min, timeinfo.tm_sec);

			std::cout << "TimeStamp " << timeprefix << " " << s << std::endl;
		}

		// BUG FIX: payload() is a raw byte buffer that is NOT guaranteed to be
		// NUL-terminated (and may be null); print exactly len() bytes.
		std::cout << message.topic_name() << " offset " << message.offset()
			<< "  partition " << message.partition() << " message: ";
		if (message.payload() != nullptr) {
			std::cout.write(static_cast<const char*>(message.payload()),
				static_cast<std::streamsize>(message.len()));
		}
		std::cout << std::endl;
		break;
	}

	case RdKafka::ERR__PARTITION_EOF:
		/* Reached the current end of a partition's log (informational). */
		std::cout << "EOF reached for partition" << std::endl;
		break;

	case RdKafka::ERR__UNKNOWN_TOPIC:
	case RdKafka::ERR__UNKNOWN_PARTITION:
		std::cout << "Consume failed: " << message.errstr() << std::endl;
		break;

	default:
		/* All other errors. */
		std::cout << "Consume failed: " << message.errstr() << std::endl;
		break;
	}
}
int main()
{	
	std::string brokers = "192.168.0.127:9092,192.168.0.128:9092,192.168.0.129:9092";
	std::string errstr;
	std::vector<std::string> topics{"koala-stqf-03",
		"klai-seim-alert-koala-test-03"
	};
	std::string group_id = "whl-consumer-group";

	RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
	if (conf->set("group.id", group_id, errstr)) {
		std::cout << errstr << std::endl;
		return -1;
	}

	conf->set("bootstrap.servers", brokers, errstr);
	conf->set("max.partition.fetch.bytes", "1024000", errstr);
	//conf->set("enable-auto-commit", "true", errstr);
	RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
	tconf->set("auto.offset.reset", "latest", errstr);
	conf->set("default_topic_conf", tconf, errstr);

	RdKafka::KafkaConsumer *m_consumer = RdKafka::KafkaConsumer::create(conf, errstr);
	if (!m_consumer) {
		std::cout << "failed to create consumer " << errstr << std::endl;
		return -1;
	}

#if 0 //从上一次消费结束的位置开始消费
	RdKafka::ErrorCode err = m_consumer->subscribe(topics);
	if (err != RdKafka::ERR_NO_ERROR) {
		std::cout << RdKafka::err2str(err) << std::endl;
		return -1;
	}
#else //指定每个topic的每个分区开始消费的位置

    //基本思路为先获取server端的状态信息,将与订阅相关的topic找出来,根据分区,创建TopicPartion;最后使用assign消费
	RdKafka::Metadata *metadataMap{ nullptr };
	RdKafka::ErrorCode err = m_consumer->metadata(true, nullptr, &metadataMap, 2000);
	if (err != RdKafka::ERR_NO_ERROR) {
		std::cout << RdKafka::err2str(err) << std::endl;
	}
	const RdKafka::Metadata::TopicMetadataVector *topicList = metadataMap->topics();
	std::cout << "broker topic size: " << topicList->size() << std::endl;
	RdKafka::Metadata::TopicMetadataVector subTopicMetaVec;
	std::copy_if(topicList->begin(), topicList->end(), std::back_inserter(subTopicMetaVec), [&topics](const RdKafka::TopicMetadata* data) {
		return std::find_if(topics.begin(), topics.end(), [data](const std::string &tname) {return data->topic() == tname; }) != topics.end();
	});
	std::vector<RdKafka::TopicPartition*> topicpartions;
	std::for_each(subTopicMetaVec.begin(), subTopicMetaVec.end(), [&topicpartions](const RdKafka::TopicMetadata* data) {
		auto parVec = data->partitions();
		std::for_each(parVec->begin(), parVec->end(), [&](const RdKafka::PartitionMetadata *value) {
			std::cout << data->topic() << " has partion: " << value->id() << " Leader is : " << value->leader() << std::endl;
			topicpartions.push_back(RdKafka::TopicPartition::create(data->topic(), value->id(), RdKafka::Topic::OFFSET_END));
		});
	});
	m_consumer->assign(topicpartions);
#endif // 0
	RdKafka::ErrorCode err = m_consumer->subscribe(topics);
	if (err != RdKafka::ERR_NO_ERROR) {
		std::cout << RdKafka::err2str(err) << std::endl;
		return -1;
	}
	
	while (true)
	{
		RdKafka::Message *msg = m_consumer->consume(6000);
		consume_cb(*msg, nullptr); //消息一条消息
		delete msg;
	}
	return 0;
}

   使用的是64位工程,源码可在此下载。

  • 2
    点赞
  • 21
    收藏
    觉得还不错? 一键收藏
  • 7
    评论
RdKafka中,您可以使用RdKafka::Conf对象来设置Kafka客户端的配置。要设置security.protocol,您可以使用以下代码: ```c++ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); std::string errstr; // 设置security.protocol为PLAINTEXT conf->set("security.protocol", "plaintext", errstr); // 或者设置security.protocol为SSL // conf->set("security.protocol", "ssl", errstr); // conf->set("ssl.ca.location", "/path/to/ca-cert.pem", errstr); // conf->set("ssl.certificate.location", "/path/to/client-cert.pem", errstr); // conf->set("ssl.key.location", "/path/to/client-key.pem", errstr); // 或者设置security.protocol为SASL_SSL // conf->set("security.protocol", "sasl_ssl", errstr); // conf->set("sasl.mechanisms", "PLAIN", errstr); // conf->set("sasl.username", "your-username", errstr); // conf->set("sasl.password", "your-password", errstr); // conf->set("ssl.ca.location", "/path/to/ca-cert.pem", errstr); // conf->set("ssl.certificate.location", "/path/to/client-cert.pem", errstr); // conf->set("ssl.key.location", "/path/to/client-key.pem", errstr); // 创建Kafka生产者 RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr); if (!producer) { std::cerr << "Failed to create producer: " << errstr << std::endl; exit(1); } ``` 这将设置Kafka客户端的security.protocol为PLAINTEXT。如果您需要更高级别的安全性,例如SSL或SASL_SSL,请相应地设置其他相关配置,例如ssl.ca.location、ssl.certificate.location、ssl.key.location、sasl.mechanisms、sasl.username和sasl.password

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 7
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值