生产者头文件
#include <iostream>
#include <string>
#include <list>
#include <stdint.h>
#include "../src-cpp/rdkafkacpp.h"
class producer
{
public:
producer(const std::string brokers, const std::string topics);
bool produce(std::string infomation);
void initializeprouder();
private:
std::string brokers_;
std::string topics;
RdKafka::Producer *producer_ = nullptr;
RdKafka::Topic *topic_ = nullptr;
};
生产者cpp文件
#include "producer.h"
#include <thread>
// Store the broker list and target topic; no Kafka objects are created here
// (that happens in initializeprouder()).
producer::producer(const std::string brokers, const std::string topics)
    : brokers_(brokers), topics(topics)
{
}
// Create the librdkafka configuration, producer and topic handles.
// Every failure path releases the configuration objects — the original
// leaked `conf`/`tconf` on each early return.
void producer::initializeprouder()
{
std::string errorStr;
RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
if (!conf || !tconf) {
    std::cout << "Create RdKafka Conf failed" << std::endl;
    delete conf;   // deleting nullptr is a no-op
    delete tconf;
    return;
}
// Check each set(): a mistyped property name would otherwise fail silently.
if (conf->set("message.max.bytes", "10240000", errorStr) != RdKafka::Conf::CONF_OK ||  // max message size
    conf->set("replica.fetch.max.bytes", "20485760", errorStr) != RdKafka::Conf::CONF_OK ||
    conf->set("bootstrap.servers", brokers_, errorStr) != RdKafka::Conf::CONF_OK) {
    std::cout << "Conf set failed: " << errorStr << std::endl;
    delete conf;
    delete tconf;
    return;
}
producer_ = RdKafka::Producer::create(conf, errorStr);
if (!producer_) {
    std::cout << "Create Producer failed: " << errorStr << std::endl;
    delete conf;
    delete tconf;
    return;
}
// Create the topic handle used by produce().
topic_ = RdKafka::Topic::create(producer_, topics, tconf, errorStr);
if (!topic_) {
    std::cout << "Create Topic failed: " << errorStr << std::endl;
}
// librdkafka copies the configuration during create(), so ours can be freed.
delete conf;
delete tconf;
}
bool producer::produce(std::string infomation)
{
RdKafka::ErrorCode resCode = producer_->produce(topic_, RdKafka::Topic::PARTITION_UA, RdKafka::Producer::RK_MSG_COPY, (char*)infomation.c_str(), infomation.size(), nullptr, nullptr);
if (resCode != RdKafka::ERR_NO_ERROR) {
std::cerr << "Produce failed: " << RdKafka::err2str(resCode) << std::endl;
}
std::this_thread::sleep_for(std::chrono::seconds(1));
return true;
}
生产者实例化，这段放到一个主函数即可
// Producer instantiation — place inside main().
std::string bloker = "127.0.0.1:9092";
std::string topic = "crowdsend";
producer informationsend(bloker, topic);   // stack object: the original `new` was never deleted
informationsend.initializeprouder();
消费者头文件
#include <iostream>
#include <string>
#include "../src-cpp/rdkafkacpp.h"
// Kafka consumer wrapper around librdkafka's KafkaConsumer API.
// Typical flow: construct with broker list and topic, call initconsume()
// once, then consume() in a loop.
class signal_reception
{
public:
signal_reception(const std::string& brokers, const std::string& topics);
~signal_reception();
// Creates the KafkaConsumer and starts consumption; see the .cpp for details.
bool initconsume();
// Polls one message; returns its payload as a string.
std::string consume(int timeout_ms);
// Examines one message (logs timestamp/errors) and extracts its payload.
std::string consume_cb(RdKafka::Message &message, void *opaque);
private:
std::string brokers_;                           // e.g. "127.0.0.1:9092"
std::string topics_;                            // single topic name to consume
RdKafka::KafkaConsumer *consumer = nullptr;     // owned; created in initconsume()
};
消费者cpp文件
#include "signal_reception.h"
#include <algorithm>
#include <iterator>
#include <time.h>
#include <chrono>
// Remember the broker list and the topic to consume; the KafkaConsumer
// itself is created later, in initconsume().
signal_reception::signal_reception(const std::string& brokers, const std::string& topics)
    : brokers_(brokers), topics_(topics)
{
}
signal_reception::~signal_reception(){};
bool signal_reception::initconsume()
{
std::string brokers = this->brokers_;
std::string errstr;
std::vector<std::string> topics{ this->topics_
};
std::string group_id = "whl-consumer-group";
RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
if (conf->set("group.id", group_id, errstr)) {
std::cout << errstr << std::endl;
return false;
}
conf->set("bootstrap.servers", brokers, errstr);
conf->set("max.partition.fetch.bytes", "1024000", errstr);
//conf->set("enable-auto-commit", "true", errstr);
RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
tconf->set("auto.offset.reset", "latest", errstr);
conf->set("default_topic_conf", tconf, errstr);
consumer = RdKafka::KafkaConsumer::create(conf, errstr);
if (!consumer) {
std::cout << "failed to create consumer " << errstr << std::endl;
return false;
}
#if 0 //从上一次消费结束的位置开始消费
RdKafka::ErrorCode err = m_consumer->subscribe(topics);
if (err != RdKafka::ERR_NO_ERROR) {
std::cout << RdKafka::err2str(err) << std::endl;
return -1;
}
#else //指定每个topic的每个分区开始消费的位置
//基本思路为先获取server端的状态信息,将与订阅相关的topic找出来,根据分区,创建TopicPartion;最后使用assign消费
RdKafka::Metadata *metadataMap{ nullptr };
RdKafka::ErrorCode err = consumer->metadata(true, nullptr, &metadataMap, 2000);
if (err != RdKafka::ERR_NO_ERROR) {
std::cout << RdKafka::err2str(err) << std::endl;
}
const RdKafka::Metadata::TopicMetadataVector *topicList = metadataMap->topics();
std::cout << "broker topic size: " << topicList->size() << std::endl;
RdKafka::Metadata::TopicMetadataVector subTopicMetaVec;
std::copy_if(topicList->begin(), topicList->end(), std::back_inserter(subTopicMetaVec), [&topics](const RdKafka::TopicMetadata* data) {
return std::find_if(topics.begin(), topics.end(), [data](const std::string &tname) {return data->topic() == tname; }) != topics.end();
});
std::vector<RdKafka::TopicPartition*> topicpartions;
std::for_each(subTopicMetaVec.begin(), subTopicMetaVec.end(), [&topicpartions](const RdKafka::TopicMetadata* data) {
auto parVec = data->partitions();
std::for_each(parVec->begin(), parVec->end(), [&](const RdKafka::PartitionMetadata *value) {
std::cout << data->topic() << " has partion: " << value->id() << " Leader is : " << value->leader() << std::endl;
topicpartions.push_back(RdKafka::TopicPartition::create(data->topic(), value->id(), RdKafka::Topic::OFFSET_END));
});
});
consumer->assign(topicpartions);
#endif // 0
RdKafka::ErrorCode err1 = consumer->subscribe(topics);
if (err1 != RdKafka::ERR_NO_ERROR) {
std::cout << RdKafka::err2str(err1) << std::endl;
return -1;
}
}
// Poll one message from Kafka, waiting at most timeout_ms milliseconds.
// Returns the message payload, or an empty string on timeout/error.
std::string signal_reception::consume(int timeout_ms)
{
RdKafka::Message *msg = consumer->consume(timeout_ms);   // honor the caller's timeout (was hard-coded 6000)
std::string result = consume_cb(*msg, nullptr);          // process one message
delete msg;   // in the original this line sat after `return` — unreachable, leaking every message
return result;
}
// Inspect one consumed message: log its timestamp (shifted to UTC+8) and
// return its payload as a string. Non-payload outcomes (timeout, EOF,
// errors) are logged and yield an empty string — the original fell off the
// end of this non-void function on those paths, which is undefined behavior.
std::string signal_reception::consume_cb(RdKafka::Message &message, void *opaque)
{
switch (message.err()) {
case RdKafka::ERR__TIMED_OUT:
    std::cout << "RdKafka::ERR__TIMED_OUT" << std::endl;
    break;
case RdKafka::ERR_NO_ERROR: {   // braced: the case body declares locals
    /* Real message */
    RdKafka::MessageTimestamp ts = message.timestamp();
    if (ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) {
        std::string timeprefix;
        if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME) {
            timeprefix = "created time";
        }
        else if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME) {
            timeprefix = "log append time";
        }
        // Shift to UTC+8 (Beijing time); adjust for other time zones as needed.
        unsigned long long milli = ts.timestamp + (unsigned long long)8 * 60 * 60 * 1000;
        auto mTime = std::chrono::milliseconds(milli);
        auto tp = std::chrono::time_point<std::chrono::system_clock, std::chrono::milliseconds>(mTime);
        auto tt = std::chrono::system_clock::to_time_t(tp);
        tm timeinfo;
        ::gmtime_s(&timeinfo, &tt);   // NOTE(review): MSVC-specific — use gmtime_r on POSIX
        char s[60]{ 0 };
        // snprintf instead of sprintf: bounded write into the fixed buffer.
        ::snprintf(s, sizeof(s), "%04d-%02d-%02d %02d:%02d:%02d", timeinfo.tm_year + 1900, timeinfo.tm_mon + 1, timeinfo.tm_mday, timeinfo.tm_hour, timeinfo.tm_min, timeinfo.tm_sec);
        std::cout << "TimeStamp" << timeprefix << " " << s << std::endl;
    }
    // Use the explicit length: the payload is not guaranteed to be NUL-terminated,
    // so the original `(char*)message.payload()` could read past the buffer.
    return std::string(static_cast<const char*>(message.payload()), message.len());
}
case RdKafka::ERR__PARTITION_EOF:
    /* Last message of a partition */
    std::cout << "EOF reached for" << std::endl;
    break;
case RdKafka::ERR__UNKNOWN_TOPIC:
case RdKafka::ERR__UNKNOWN_PARTITION:
    std::cout << "Consume failed: " << message.errstr();
    break;
default:
    /* Errors */
    std::cout << "Consume failed: " << message.errstr();
    break;
}
return std::string();
}
消费者实例化
// Consumer instantiation — place inside main().
std::string bloker = "127.0.0.1:9092";
std::string receive_topic = "crowdsend";
signal_reception sign_receive(bloker, receive_topic);   // stack object: the original `new` was never deleted
sign_receive.initconsume();
另外可以自己在类里面写一些新建主题的函数等，这样操作起来更方便。