Kafka API Programming

Contents

Creating the project

Java version

Kafka producer API operations

Kafka consumer API operations

Utility class

Scala version

Utility class

Producer and consumer


Creating the project

Project creation itself is omitted here; just add the POM dependency below.
<!-- The dependency below already pulls in kafka-clients, so this single dependency is enough -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>1.1.1</version>
</dependency>
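
If you only need the producer and consumer clients (no broker-side classes), a lighter alternative is to depend on kafka-clients directly; the version here is assumed to match the dependency above:

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>1.1.1</version>
</dependency>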

Java version

Kafka producer API operations

package org.aurora.kafka_01.day01;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
public class KafkaProducerDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker address and key/value serializers
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop01:9092");
        props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        ProducerRecord<String, String> record = new ProducerRecord<>("test818", "hello kafka 100");

        producer.send(record);  // send() is asynchronous
        producer.flush();       // make sure buffered records are actually sent
        producer.close();
    }
}
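
send() is asynchronous and only returns a Future; if you want to know whether a record actually reached the broker, you can pass a Callback along with the record. Below is a minimal sketch (the class name KafkaProducerCallbackDemo is made up for illustration; the broker address and the topic test818 are the same as in the demo above):

import org.apache.kafka.clients.producer.*;
import java.util.Properties;

public class KafkaProducerCallbackDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop01:9092");
        props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        // The callback runs once the broker acknowledges (or rejects) the record
        producer.send(new ProducerRecord<>("test818", "hello with callback"), new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    exception.printStackTrace();   // delivery failed
                } else {
                    System.out.println("partition=" + metadata.partition() + " offset=" + metadata.offset());
                }
            }
        });
        producer.close();   // close() also flushes pending records
    }
}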

Alternatively, the configuration can be put in a properties file under the resources directory.

The producer.properties file:

# your broker host:port
bootstrap.servers=hadoop01:9092
compression.type=none
linger.ms=5000
batch.size=1024
buffer.memory=10240
key.serializer=org.apache.kafka.common.serialization.IntegerSerializer
value.serializer=org.apache.kafka.common.serialization.StringSerializer

The Java code:

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.io.IOException;
import java.util.Properties;

public class KafkaProducerDemo2 {
    public static void main(String[] args) throws IOException {
        Properties props = new Properties();
        // Load the producer configuration from the classpath
        props.load(KafkaProducerDemo2.class.getClassLoader().getResourceAsStream("producer.properties"));
        Producer<Integer, String> producer = new KafkaProducer<>(props);
        ProducerRecord<Integer, String> record = new ProducerRecord<>("spark", "11111");
        producer.send(record);

        producer.close();
    }
}
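
Because key.serializer in producer.properties is IntegerSerializer, a record can also carry an explicit Integer key; with the default partitioner, records with the same key land in the same partition. A small sketch that could be dropped into the main method above before producer.close() (the key value 1 is just illustrative):

// ProducerRecord(topic, key, value): same key -> same partition with the default partitioner
ProducerRecord<Integer, String> keyed = new ProducerRecord<>("spark", 1, "11111");
producer.send(keyed);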

Kafka consumer API operations

The consumer.properties file:

# broker host:port
bootstrap.servers=hadoop01:9092

# consumer group id
group.id=g1

auto.offset.reset=earliest

key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
value.deserializer=org.apache.kafka.common.serialization.StringDeserializer

The Java code:

package org.aurora.kafka_01.day01;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.util.Collections;
import java.util.Properties;
public class KafkaConsumerDemo {
    public static void main(String[] args) {

        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop01:9092");
        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "g2");
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // default is latest; earliest makes an unknown group read from the beginning
        props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");   // disable auto-commit; offsets are committed manually below

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // subscribe to the topic
        consumer.subscribe(Collections.singletonList("test818"));

        while (true) {
            ConsumerRecords<String, String> consumerRecords = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : consumerRecords) {
                System.out.println(String.format("key=%s value=%s topic=%s partition=%d offset=%d timestamp=%d", record.key(), record.value(), record.topic(), record.partition(), record.offset(), record.timestamp()));
            }
            consumer.commitAsync(); // commit the offsets of the records just processed
        }

    }
}
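
The no-argument commitAsync() above does not retry and does not report failures. If you want to at least log a failed commit, there is an overload taking an OffsetCommitCallback. A minimal sketch of swapping it into the loop above (extra imports needed: org.apache.kafka.clients.consumer.OffsetCommitCallback, org.apache.kafka.clients.consumer.OffsetAndMetadata, org.apache.kafka.common.TopicPartition, java.util.Map):

// inside the while loop, replacing the plain consumer.commitAsync() call
consumer.commitAsync(new OffsetCommitCallback() {
    @Override
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception != null) {
            // the commit failed; log it so the problem is visible
            System.err.println("offset commit failed: " + exception.getMessage());
        }
    }
});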

We can write a utility class that wraps the repetitive code for loading the configuration files and creating producers and consumers, so that next time we only have to call a method.

Utility class

As follows:

package org.aurora.kafka_01;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;

import java.io.IOException;
import java.util.Properties;

public class KafkaUtil {
    private static final String PATH_PRODUCER_PROPERTIES = "producer.properties"; // producer config file path
    private static final String PATH_CONSUMER_PROPERTIES = "consumer.properties"; // consumer config file path

    // Create a producer from the default producer config file
    public static <K, V> KafkaProducer<K, V> getProducer() {
        return new KafkaProducer<>(loadProperties(PATH_PRODUCER_PROPERTIES));
    }
    // Create a consumer from the default consumer config file
    public static <K, V> KafkaConsumer<K, V> getConsumer() {
        return new KafkaConsumer<>(loadProperties(PATH_CONSUMER_PROPERTIES));
    }
    /**
     * Load a properties file from the classpath.
     * @param path file path on the classpath
     * @return the loaded Properties object
     */
    public static Properties loadProperties(String path) {
        Properties properties = new Properties();
        try {
            properties.load(KafkaUtil.class.getClassLoader().getResourceAsStream(path));
        } catch (IOException e) {
            e.printStackTrace();
            System.err.println("failed to load " + path);
        }
        return properties;
    }
}
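
If a setting occasionally needs to be overridden without editing the file, an extra overload could be added to KafkaUtil, mirroring what the Scala helper below does. A hedged sketch (the parameter name extraConfig is made up; it also needs a java.util.Map import):

    // Hypothetical overload: start from producer.properties, then apply per-call overrides
    public static <K, V> KafkaProducer<K, V> getProducer(Map<String, String> extraConfig) {
        Properties properties = loadProperties(PATH_PRODUCER_PROPERTIES);
        properties.putAll(extraConfig); // entries in extraConfig win over the file defaults
        return new KafkaProducer<>(properties);
    }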

Now producing a message is just a matter of calling the utility method to get a producer. For example, the earlier producer demo can be rewritten as:

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class KafkaProducerDemo2 {
    public static void main(String[] args) {

        KafkaProducer<Integer, String> producer = KafkaUtil.getProducer(); // producer built from producer.properties
        ProducerRecord<Integer, String> record = new ProducerRecord<>("spark", "11111"); // the record to send
        producer.send(record); // send it
        producer.close();

    }
}

Much simpler, and the consumer works the same way, as shown below.
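
A matching consumer sketch built on KafkaUtil.getConsumer(); the class name KafkaConsumerDemo2 is made up, and since consumer.properties configures String deserializers, the type parameters are <String, String>:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.util.Collections;

public class KafkaConsumerDemo2 {
    public static void main(String[] args) {
        KafkaConsumer<String, String> consumer = KafkaUtil.getConsumer(); // consumer built from consumer.properties
        consumer.subscribe(Collections.singletonList("test818"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.key() + " -> " + record.value());
            }
        }
    }
}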

Scala version

Utility class

package org.aurora.kafka_01

import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.clients.producer.KafkaProducer
import scala.collection.JavaConverters._
import java.util.Properties

object KafkaHelper {
    private val PATH_PRODUCER_PROPERTIES = "producer.properties"
    private val PATH_CONSUMER_PROPERTIES = "consumer.properties"

    // Create a producer using the settings from the default producer config file
    def getProducer: KafkaProducer[String, String] = new KafkaProducer[String, String](loadProperties(PATH_PRODUCER_PROPERTIES))
    // Create a consumer using the settings from the default consumer config file
    def getConsumer: KafkaConsumer[String, String] = new KafkaConsumer[String,String](loadProperties(PATH_CONSUMER_PROPERTIES))


    // Create a producer with additional configuration on top of the file defaults
    def getProducer(config:Map[String,String]):KafkaProducer[String,String]={
        val properties: Properties = loadProperties(PATH_PRODUCER_PROPERTIES)
        // Merge the key/value pairs from config into properties (overrides win)
        properties.putAll(config.asJava)
        new KafkaProducer[String,String](properties)
    }

    // Create a consumer with additional configuration on top of the file defaults
    def getConsumer(config:Map[String,String]):KafkaConsumer[String,String]={
        val properties: Properties = loadProperties(PATH_CONSUMER_PROPERTIES)
        // Merge the key/value pairs from config into properties (overrides win)
        properties.putAll(config.asJava)
        new KafkaConsumer[String,String](properties)
    }


    /**
     * Load the properties file at the given classpath location.
     * @param path path of the configuration file on the classpath
     * @return the loaded Properties object
     */
    private def loadProperties(path:String):Properties ={
        // 1. Create an empty Properties object
        val properties = new Properties()
        // 2. Load the file at the given classpath location
        properties.load(KafkaHelper.getClass.getClassLoader.getResourceAsStream(path))
        properties
    }
}

Producer and consumer

Here I put both into a single test class, each as an @Test method.

package org.aurora.kafka_01
import org.apache.kafka.clients.consumer.{ConsumerRecords, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.junit.Test
import java.util.Collections


// JUnit needs an instantiable class, so this is a class rather than an object
class KafkaHelperTest {
    // producer
    @Test def producerTest(): Unit = {
        // 1. Get a producer
        val producer: KafkaProducer[String, String] = KafkaHelper.getProducer
        // 2. Produce a record
        val record = new ProducerRecord[String, String]("test818", "hello spark")
        producer.send(record)
        producer.close() // flush pending records before the test JVM exits
    }

    // consumer
    @Test def consumerTest(): Unit = {
        // 1. Get a consumer
        val consumer: KafkaConsumer[String, String] = KafkaHelper.getConsumer
        // 2. Subscribe to the topic
        consumer.subscribe(Collections.singleton("test818"))
        // 3. Poll in a loop
        while (true) {
            val records: ConsumerRecords[String, String] = consumer.poll(1000)
            val iterator = records.iterator()
            while (iterator.hasNext) {
                val record = iterator.next()
                println(s"topic = ${record.topic()}, partition = ${record.partition()}, offset = ${record.offset()}, key = ${record.key()}, value = ${record.value()}")
            }
        }
    }
}