VS2013中使用kafka

编译librdkafka,下载地址:https://github.com/edenhill/librdkafka,不得不感激强大的GitHub,上面几乎你要的开源代码都有。另外需要下载zlib和openssl,下载地址分别为:https://github.com/ShiftMediaProject/openssl 和 https://github.com/ShiftMediaProject/zlib,三个工程解压到同一个目录下。

编译librdkafka可能会遇到的问题:
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: isdigit already defined in LIBCMT.lib(_ctype.obj)
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: isalnum already defined in LIBCMT.lib(_ctype.obj)
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: qsort already defined in LIBCMT.lib(qsort.obj)
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: memchr already defined in LIBCMT.lib(memchr.obj)
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: tolower already defined in LIBCMT.lib(tolower.obj)
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: strspn already defined in LIBCMT.lib(strspn.obj)
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: strerror_s already defined in LIBCMT.lib(strerror.obj)
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: _stat64i32 already defined in LIBCMT.lib(stati32.obj)
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: abort already defined in LIBCMT.lib(abort.obj)
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: _errno already defined in LIBCMT.lib(dosmap.obj)
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: fclose already defined in LIBCMT.lib(fclose.obj)
1>MSVCRT.lib(MSVCR120.dll) : error LNK2005: fflush already defined in LIBCMT.lib(fflush.obj)
这里只取了其中一个部分,函数重定义问题。解决办法:
(原文此处为截图:工程属性 → C/C++ → Code Generation → Runtime Library 设置页)
把Runtime Library 改为:Multi-threaded DLL(/MD),再编译,通过。

封装produce:

// Fix: the original guard __KAFKA_PRODUCE_H begins with a double underscore,
// which is reserved for the implementation by the C++ standard.
#ifndef KAFKA_PRODUCE_H
#define KAFKA_PRODUCE_H

#include <string>
#include <iostream>
#include "kafka/rdkafkacpp.h"

/// Thin wrapper around an RdKafka producer bound to one topic.
/// The object also acts as its own event / delivery-report callback sink
/// (it is registered for both in Init()).
class KafkaProduce :public RdKafka::EventCb, public RdKafka::DeliveryReportCb
{
public:
    KafkaProduce();
    ~KafkaProduce();                 // flushes and releases handles via Destroy()
public:
    // RdKafka callback overrides; both only log to stderr/stdout.
    void event_cb(RdKafka::Event &event);
    void dr_cb(RdKafka::Message &message);
public:
    /// Create producer + topic handles for 'topic' on 'broker'.
    /// Returns false if either handle could not be created.
    bool Init(const char*  topic, const char* broker);
    /// Produce one message (payload is copied by librdkafka).
    void ProduceMsg(std::string msg);
    /// Drain the outbound queue, then release all librdkafka objects.
    void Destroy();
private:
    std::string mTopic;   // topic name passed to Init()
    std::string mBroker;  // broker list ("host:port[,host:port...]")
private:
    RdKafka::Conf*      mConf;      // global configuration
    RdKafka::Conf*      mTConf;     // topic configuration
    RdKafka::Producer*  mProducer;  // producer handle (NULL until Init succeeds)
    RdKafka::Topic *    mKTopic;    // topic handle (NULL until Init succeeds)
    int32_t             mPartition; // target partition (PARTITION_UA = automatic)
};

#endif // KAFKA_PRODUCE_H

实现:

#include "KafkaProduce.h"
#include <iostream>

// Zero out every handle so Destroy() is safe even if Init() was never
// called or failed part-way through.
KafkaProduce::KafkaProduce()
{
    mConf = NULL;
    mTConf = NULL;
    mProducer = NULL;
    mKTopic = NULL;
    // Bug fix: mPartition was left uninitialized. Default to -1, the value
    // of RdKafka::Topic::PARTITION_UA that Init() assigns anyway.
    mPartition = -1;
}


// Destructor: make sure queued messages are flushed and every librdkafka
// handle is released, even if the caller forgot an explicit Destroy().
KafkaProduce::~KafkaProduce()
{
    Destroy();
}

// Event callback: purely a logging sink, never mutates producer state.
void KafkaProduce::event_cb(RdKafka::Event &event)
{
    const RdKafka::Event::Type kind = event.type();

    if (kind == RdKafka::Event::EVENT_ERROR)
    {
        std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
            event.str() << std::endl;
        if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
            ;// run = false;
    }
    else if (kind == RdKafka::Event::EVENT_STATS)
    {
        std::cerr << "\"STATS\": " << event.str() << std::endl;
    }
    else if (kind == RdKafka::Event::EVENT_LOG)
    {
        fprintf(stderr, "LOG-%i-%s: %s\n",
            event.severity(), event.fac().c_str(), event.str().c_str());
    }
    else
    {
        // Anything we do not handle specifically is still worth logging.
        std::cerr << "EVENT " << kind <<
            " (" << RdKafka::err2str(event.err()) << "): " <<
            event.str() << std::endl;
    }
}

// Delivery-report callback: log payload size, delivery status, and the
// message key when one is present.
void KafkaProduce::dr_cb(RdKafka::Message &message)
{
    std::cout << "Message delivery for (" << message.len() << " bytes): " <<
        message.errstr() << std::endl;

    const std::string *key = message.key();
    if (key)
        std::cout << "Key: " << *key << ";" << std::endl;
}

/// Create the producer and topic handles.
/// @param topic   topic name to produce to
/// @param broker  broker list ("host:port[,host:port...]")
/// @return false if either the producer or the topic handle could not be
///         created (the Conf objects are kept and freed later in Destroy()).
bool KafkaProduce::Init(const char*  topic, const char* broker)
{
    mBroker = broker;
    mTopic = topic;
    std::string errstr;
    mPartition = RdKafka::Topic::PARTITION_UA; // let librdkafka pick the partition
    mConf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    mTConf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

    mConf->set("metadata.broker.list", mBroker, errstr);

    // Route events and delivery reports back into this object.
    mConf->set("event_cb", (RdKafka::EventCb*)this, errstr);
    mConf->set("dr_cb", (RdKafka::DeliveryReportCb*)this, errstr);

    mProducer = RdKafka::Producer::create(mConf, errstr);
    if (!mProducer)
    {
        // Improvement: the original returned silently here.
        std::cerr << "Failed to create producer: " << errstr << std::endl;
        return false;
    }
    std::cout << "% Created producer " << mProducer->name() << std::endl;

    /*
    * Create topic handle.
    */
    mKTopic = RdKafka::Topic::create(mProducer, mTopic,
        mTConf, errstr);
    // Bug fix: the original checked '!topic' (the input parameter, never
    // NULL here) instead of the newly created handle, so a failed
    // Topic::create() went undetected.
    if (!mKTopic) {
        std::cerr << "Failed to create topic: " << errstr << std::endl;
        return false;
    }
    return true;
}

void KafkaProduce::ProduceMsg(std::string msg)
{   
    mProducer->poll(0);
    RdKafka::ErrorCode resp =
        mProducer->produce(mKTopic, mPartition,
        RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
        const_cast<char *>(msg.c_str()), msg.size(),
        NULL, NULL);
    if (resp != RdKafka::ERR_NO_ERROR)
        std::cerr << "% Produce failed: " <<
        RdKafka::err2str(resp) << std::endl;
    else
        std::cerr << "% Produced message (" << msg.size() << " bytes)" <<
        std::endl;

    mProducer->poll(0);
}

// Flush the outbound queue, then release every librdkafka object.
// Safe to call more than once and safe before Init() (all pointers start NULL).
void KafkaProduce::Destroy()
{
    // Drain queued messages first so nothing is lost on shutdown.
    if (mProducer)
    {
        while (mProducer->outq_len() > 0)
        {
            mProducer->poll(1000);
        }
    }

    // Release in reverse order of creation: topic, producer, then the two
    // configuration objects. 'delete' on NULL is a no-op, so no guards needed.
    delete mKTopic;
    mKTopic = NULL;

    delete mProducer;
    mProducer = NULL;

    delete mTConf;
    mTConf = NULL;

    delete mConf;
    mConf = NULL;
}

测试代码:

// Demo driver: produce each stdin line to TOPIC_TEST until an empty line.
void TestProduce()
{
    KafkaProduce produce;
    const char* brokers = "127.0.0.1:9000";
    const char* topic = "TOPIC_TEST";

    if (!produce.Init(topic, brokers))
        return;

    // Read messages from stdin and produce to broker.
    std::string line;
    while (std::getline(std::cin, line))
    {
        if (line.empty())
            break;
        produce.ProduceMsg(line);
    }
    produce.Destroy();
}

consumer封装:

// Fix: the original guard __KAFKA_CONSUMER_H begins with a double underscore,
// which is reserved for the implementation by the C++ standard.
#ifndef KAFKA_CONSUMER_H
#define KAFKA_CONSUMER_H

#include <string>
#include <vector>   // fix: rebalance_cb's signature uses std::vector directly
#include <iostream>
#include "kafka/rdkafkacpp.h"

#ifdef _MSC_VER
#include <atltime.h>
#elif _AIX
#include <unistd.h>
#else
#include <getopt.h>
#include <unistd.h>
#endif

/// High-level wrapper around RdKafka::KafkaConsumer subscribed to a single
/// topic. The object also acts as its own rebalance / event callback sink.
class KafkaConsumer :public RdKafka::RebalanceCb, public RdKafka::EventCb
{
public:
    KafkaConsumer();
    ~KafkaConsumer();
public:
    /// Create the consumer and subscribe to 'topic'. Returns false on failure.
    bool Init(const char*  topic, const char* broker, const char* group_id);
    /// Blocking consume loop; as written it never returns.
    void DoConsumeMsg();
    /// Release the consumer handle.
    void Destroy();
    /// Handle one consumed message (or error pseudo-message).
    void MsgConsume(RdKafka::Message* message);
public:
    // RdKafka callback overrides.
    void event_cb(RdKafka::Event &event);
    void rebalance_cb(RdKafka::KafkaConsumer *consumer,
        RdKafka::ErrorCode err,
        std::vector<RdKafka::TopicPartition*> &partitions);
private:
    std::string mTopic;   // subscribed topic name
    std::string mBroker;  // broker list ("host:port[,host:port...]")
private:
    RdKafka::KafkaConsumer* mConsumer; // consumer handle (NULL until Init succeeds)
};

#endif // KAFKA_CONSUMER_H

consumer实现:

#include "KafkaConsumer.h"


// No consumer handle exists until Init() succeeds; keep the pointer NULL so
// Destroy() is always safe to call.
KafkaConsumer::KafkaConsumer()
{
    mConsumer = NULL;
}


// Fix: the original destructor was empty, so a KafkaConsumer that went out
// of scope without an explicit Destroy() call leaked its librdkafka handle.
// This also makes the class consistent with KafkaProduce, whose destructor
// calls Destroy().
KafkaConsumer::~KafkaConsumer()
{
    Destroy();
}

bool KafkaConsumer::Init(const char*  topic, const char* broker, const char* group_id)
{  
    std::string errstr;
    std::vector<std::string> topics;
    RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
    conf->set("rebalance_cb", (RdKafka::RebalanceCb*)this, errstr);

    if (conf->set("group.id", group_id, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        return false;
    }
    topics.push_back(topic);
    conf->set("metadata.broker.list", broker, errstr);

    conf->set("event_cb", (RdKafka::EventCb*)this, errstr);
    conf->set("default_topic_conf", tconf, errstr);
    delete tconf;

    /*
    * Create consumer using accumulated global configuration.
    */
    mConsumer = RdKafka::KafkaConsumer::create(conf, errstr);
    if (!mConsumer) {
        std::cerr << "Failed to create consumer: " << errstr << std::endl;
        return false;
    }

    delete conf;

    std::cout << "% Created consumer " << mConsumer->name() << std::endl;


    /*
    * Subscribe to topics
    */
    RdKafka::ErrorCode err = mConsumer->subscribe(topics);
    if (err) {
        std::cerr << "Failed to subscribe to " << topics.size() << " topics: "
            << RdKafka::err2str(err) << std::endl;
        return false;
    }
    return true;
}

void KafkaConsumer::DoConsumeMsg()
{
    while (1) {
        RdKafka::Message *msg = mConsumer->consume(1000);
        MsgConsume(msg);
        delete msg;
    }
}

// Event callback: purely a logging sink, never mutates consumer state.
void KafkaConsumer::event_cb(RdKafka::Event &event)
{
    const RdKafka::Event::Type kind = event.type();

    if (kind == RdKafka::Event::EVENT_ERROR)
    {
        std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
            event.str() << std::endl;
        if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
            ;// run = false;
    }
    else if (kind == RdKafka::Event::EVENT_STATS)
    {
        std::cerr << "\"STATS\": " << event.str() << std::endl;
    }
    else if (kind == RdKafka::Event::EVENT_LOG)
    {
        fprintf(stderr, "LOG-%i-%s: %s\n",
            event.severity(), event.fac().c_str(), event.str().c_str());
    }
    else if (kind == RdKafka::Event::EVENT_THROTTLE)
    {
        std::cerr << "THROTTLED: " << event.throttle_time() << "ms by " <<
            event.broker_name() << " id " << (int)event.broker_id() << std::endl;
    }
    else
    {
        // Anything we do not handle specifically is still worth logging.
        std::cerr << "EVENT " << kind <<
            " (" << RdKafka::err2str(event.err()) << "): " <<
            event.str() << std::endl;
    }
}

// Rebalance callback: apply or revoke the partition assignment handed to us
// by the group coordinator.
void KafkaConsumer::rebalance_cb(RdKafka::KafkaConsumer *consumer,
    RdKafka::ErrorCode err,
    std::vector<RdKafka::TopicPartition*> &partitions)
{
    std::cerr << "RebalanceCb: " << RdKafka::err2str(err) << ": ";

    //part_list_print(partitions);

    const bool assigning = (err == RdKafka::ERR__ASSIGN_PARTITIONS);
    if (assigning)
    {
        consumer->assign(partitions);
    }
    else
    {
        // Revocation (or any other rebalance error): drop the assignment.
        consumer->unassign();
    }
}

// Dispatch one object returned by consume(): real payloads are printed to
// stdout, benign conditions (timeout, partition EOF) are ignored, and
// everything else is reported to stderr.
void KafkaConsumer::MsgConsume(RdKafka::Message* message)
{
    const RdKafka::ErrorCode code = message->err();

    if (code == RdKafka::ERR__TIMED_OUT)
    {
        // Nothing arrived within the poll interval; not an error.
        return;
    }

    if (code == RdKafka::ERR_NO_ERROR)
    {
        /* Real message */
        printf("%.*s\n",
            static_cast<int>(message->len()),
            static_cast<const char *>(message->payload()));
        return;
    }

    if (code == RdKafka::ERR__PARTITION_EOF)
    {
        // Reached the current end of a partition; keep polling.
        return;
    }

    // ERR__UNKNOWN_TOPIC, ERR__UNKNOWN_PARTITION, and all other errors
    // produce the same diagnostic (as in the original switch fallthrough).
    std::cerr << "Consume failed: " << message->errstr() << std::endl;
}

/// Release the consumer handle. Safe to call multiple times.
void KafkaConsumer::Destroy()
{
    if (mConsumer)
    {
        // Fix: RdKafka::KafkaConsumer must be close()d before destruction so
        // it leaves the consumer group cleanly and commits final offsets
        // (see librdkafka's KafkaConsumer documentation). The original
        // deleted the handle without closing it.
        mConsumer->close();
        delete mConsumer;
        mConsumer = NULL;
    }
}

consumer测试:

// Demo driver: subscribe to TOPIC_TEST and print everything consumed.
void TestConsumer()
{
    const char* brokers = "127.0.0.1:9000";
    const char* topic = "TOPIC_TEST";

    KafkaConsumer consumer;
    if (!consumer.Init(topic, brokers,"0"))
        return;

    // Blocks consuming messages; note DoConsumeMsg() loops forever, so the
    // Destroy() below is only reached if that loop is ever given an exit.
    consumer.DoConsumeMsg();
    consumer.Destroy();
}
  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 2
    评论
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值