Kafka 服务器的搭建、CMake 的编译及 C 语言调用的实现

服务器搭建

 参考如下配置:https://www.cnblogs.com/leju/articles/12787302.html

注意Linux发送的时候需要配置监听IP,要不然发送不成功,在server.properties里面打开这个配置并配置相应的IP

CMake 第三方库

linux下编译,解压第三方库,进入目录下

./configure --cc=/opt/hisi-linux/x86-arm/aarch64-himix100-linux/bin/aarch64-himix100-linux-gcc --prefix=/home/zhubili/Demo/librdkafka-1.4.4 --disable-ssl

(如果不清楚命令 可以输入./configure --help)

然后执行 make,编译完成后在 src 目录下会生成 librdkafka.so 和 librdkafka.so.1。

producer.c

/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2017, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * Simple Apache Kafka producer
 * using the Kafka driver from librdkafka
 * (https://github.com/edenhill/librdkafka)
 */

#include <stdio.h>
#include <signal.h>
#include <string.h>
#include <inttypes.h>   /* PRId32, used by the delivery report callback */


/* Typical include path would be <librdkafka/rdkafka.h>, but this program
 * is builtin from within the librdkafka source tree and thus differs. */
#include "rdkafka.h"


/* Cleared by the SIGINT handler to request an orderly shutdown. */
static volatile sig_atomic_t run = 1;

/**
 * @brief SIGINT handler: flags the program to terminate.
 *
 * Only sets a sig_atomic_t flag, which is the one thing a signal
 * handler may safely do.
 */
static void stop (int sig) {
        run = 0;
}


/**
 * @brief Message delivery report callback.
 *
 * This callback is called exactly once per message, indicating if
 * the message was succesfully delivered
 * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
 * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
 *
 * The callback is triggered from rd_kafka_poll() and executes on
 * the application's thread.
 */
/**
 * @brief Message delivery report callback.
 *
 * This callback is called exactly once per message, indicating if
 * the message was successfully delivered
 * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
 * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
 *
 * The callback is triggered from rd_kafka_poll() and executes on
 * the application's thread.
 */
static void dr_msg_cb (rd_kafka_t *rk,
                       const rd_kafka_message_t *rkmessage, void *opaque) {
        if (rkmessage->err)
                fprintf(stderr, "%% Message delivery failed: %s\n",
                        rd_kafka_err2str(rkmessage->err));
        else
                fprintf(stderr,
                        /* rkmessage->len is a size_t: %zu, not %zd */
                        "%% Message delivered (%zu bytes, "
                        "partition %"PRId32")\n",
                        rkmessage->len, rkmessage->partition);

        /* The rkmessage is destroyed automatically by librdkafka */
}

/* Global producer handle; created lazily by ProducerMsg(). */
rd_kafka_t *rk = NULL;
/* Global configuration object; ownership passes to rd_kafka_new()
 * when the producer handle is created. */
rd_kafka_conf_t *conf = NULL;

/**
 * @brief Create the global Kafka configuration object.
 *
 * Safe to call more than once: re-creating an existing conf would
 * leak the previous one, so creation is guarded.
 */
void init()
{
    if (NULL == conf)
    {
        conf = rd_kafka_conf_new();
    }
}

/**
 * @brief Destroy the global producer handle, if one exists.
 *
 * The NULL check must happen BEFORE rd_kafka_destroy(): the original
 * code destroyed first and checked afterwards, crashing when rk was
 * still NULL (e.g. ProducerMsg() was never called or failed).
 *
 * NOTE(review): name keeps the original spelling ("destory", sic)
 * so existing callers continue to link.
 */
void destory()
{
    if (rk)
    {
        rd_kafka_destroy(rk);
        rk = NULL;
    }
}

int ProducerMsg(char *brokers, char *topic, char *buf, int len)
{
    char errstr[512];
    if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
    {
        printf("bootstrap.servers failed \n");
        return -1;
    }
    rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
    if (NULL == rk)
    {
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
    }
    
    if (!rk)
    {
        printf("%% Failed to create new producer: %s\n", errstr);
        return -1;
    }
    signal(SIGINT, stop);
    rd_kafka_resp_err_t err;
    err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
        RD_KAFKA_V_VALUE(buf, len), RD_KAFKA_V_OPAQUE(NULL), RD_KAFKA_V_END);
    if (err)
    {
        printf("%% Failed to produce to topic %s: %s\n", topic, rd_kafka_err2str(err));
    }
    else
    {
        printf("%% success to produce to topic %d bytes \n", len);
    }
    rd_kafka_poll(rk, 0);
    printf("%% Flushing final messages..\n");
    rd_kafka_flush(rk, 10 * 1000);

    if (rd_kafka_outq_len(rk) > 0)
    {
        printf("%% %d message(s) were not delivered\n", rd_kafka_outq_len(rk));
    }
}


/**
 * @brief Demo entry point: send one hard-coded message and exit.
 *
 * Exit status reflects the produce result (the original always
 * returned 0 and left both `ret` and an `errstr` buffer unused).
 */
int main (int argc, char **argv) {

        char buf[512] = "DDDDadasda";            /* message payload */
        char brokers[1024] = "10.73.1.186:9092"; /* bootstrap broker */
        char topic[63] = "MDH_PASSINF";          /* destination topic */
        int len = strlen(buf);

        init();
        int ret = ProducerMsg(brokers, topic, buf, len);
        if (ret != 0)
        {
                fprintf(stderr, "%% produce failed\n");
        }
        destory();

        return ret == 0 ? 0 : 1;
}
把生成的库和头文件拷贝到相应目录,然后编译CMakeLists.txt

  # Minimum CMake version; omitting this line triggers a warning.
        CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
        PROJECT(kafka) # project name
        # Collect all source files in the current directory (.) into SRC_LIST;
        # headers are found via ./inc, the prebuilt librdkafka via ./lib.
        INCLUDE_DIRECTORIES(./inc)
        LINK_DIRECTORIES(./lib)
        AUX_SOURCE_DIRECTORY(. SRC_LIST)
        # Build the executable "kafka" (on Windows: kafka.exe)
        
        ADD_EXECUTABLE(kafka ${SRC_LIST})
        target_link_libraries(kafka rdkafka)        

目录结构如下:

然后在这个目录下

cmake .

make 

make install 

./kafka 就可以了

Window下Cmake的具体实现

打开源码的相应目录,然后点configure就出现以下界面,选择需要的功能选项

然后Generate,选择VS编译器,最后打开Open Project ,运行里面的demo就可以调试生产者或者接收者

注意(消费者运行时要先启动(zookeeper.properties),然后还要启动kafka服务。)

kafka其它的配置参考如下:https://www.orchome.com/472

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值