The simplest Kafka demo

1. Kafka version: kafka_2.11-1.0.1
2. Configure pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>kafka-demo</groupId>
    <artifactId>kafka-demo</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>1.0.1</version>
            <!--<exclusions>
                <exclusion>
                    <artifactId>jmxtools</artifactId>
                    <groupId>com.sun.jdmk</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jmxri</artifactId>
                    <groupId>com.sun.jmx</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jms</artifactId>
                    <groupId>javax.jms</groupId>
                </exclusion>
            </exclusions>-->
        </dependency>
    </dependencies>
</project>
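
Note: the kafka_2.11 artifact pulls in the full broker (Scala) classes. If you only need the Java producer/consumer clients, depending on kafka-clients directly is enough; a minimal alternative dependency (it is already a transitive dependency of kafka_2.11-1.0.1):

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>1.0.1</version>
</dependency>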

Writing the producer demo

package kafka;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import java.util.Properties;

public class KafkaProducer {

    private final Producer<String, String> producer;
    public final static String TOPIC = "TEST-TOPIC";

    private KafkaProducer(){
        Properties props = new Properties();
        //Broker list (host:port) used to bootstrap metadata
        props.put("metadata.broker.list", "192.168.18.140:9092");

        //Serializer class for message values
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        //Serializer class for message keys
        props.put("key.serializer.class", "kafka.serializer.StringEncoder");

        //request.required.acks
        //0, which means that the producer never waits for an acknowledgement from the broker (the same behavior as 0.7). This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails).
        //1, which means that the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost).
        //-1, which means that the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains.
        props.put("request.required.acks","-1");

        producer = new Producer<String, String>(new ProducerConfig(props));
    }

    void produce() {
        int messageNo = 1000;
        final int COUNT = 10000;

        while (messageNo < COUNT) {
            String key = String.valueOf(messageNo);
            String data = "hello kafka message " + key;
            producer.send(new KeyedMessage<String, String>(TOPIC, key ,data));
            System.out.println(data);
            messageNo ++;
        }
    }

    public static void main( String[] args )
    {
        new KafkaProducer().produce();
    }
}

Run output:

Connected to the target VM, address: '127.0.0.1:56726', transport: 'socket'
log4j:WARN No appenders could be found for logger (kafka.utils.VerifiableProperties).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
hello kafka message 1000
hello kafka message 1001
hello kafka message 1002
hello kafka message 1003
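
The kafka.javaapi.producer.Producer used above is the legacy Scala producer API, which is deprecated as of Kafka 1.0.x. Below is a minimal sketch of the same demo on the newer Java client (org.apache.kafka.clients.producer, shipped in the kafka-clients jar that kafka_2.11 already depends on), assuming the same broker address and topic; NewApiProducer is just an illustrative class name, chosen to avoid clashing with the KafkaProducer class above.

package kafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class NewApiProducer {

    public static void main(String[] args) {
        Properties props = new Properties();
        //The new client bootstraps from the brokers directly; no ZooKeeper needed
        props.put("bootstrap.servers", "192.168.18.140:9092");
        //acks=all is the new-client equivalent of request.required.acks=-1
        props.put("acks", "all");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int messageNo = 1000; messageNo < 10000; messageNo++) {
            String key = String.valueOf(messageNo);
            producer.send(new ProducerRecord<String, String>("TEST-TOPIC", key, "hello kafka message " + key));
        }
        producer.close();
    }
}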

Writing the consumer demo

package kafka;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

public class KafkaConsumer {

    private final ConsumerConnector consumer;

    private KafkaConsumer() {
        Properties props = new Properties();
        //ZooKeeper connection (the old high-level consumer coordinates through ZooKeeper)
        props.put("zookeeper.connect", "192.168.18.140:2181");

        //group.id identifies a consumer group
        props.put("group.id", "jd-group");

        //ZooKeeper session timeout and sync settings
        props.put("zookeeper.session.timeout.ms", "4000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        props.put("auto.offset.reset", "smallest");
        //serializer class (carried over from the source example; not actually used on the consumer side)
        props.put("serializer.class", "kafka.serializer.StringEncoder");

        ConsumerConfig config = new ConsumerConfig(props);

        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
    }

    void consume() {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(KafkaProducer.TOPIC, Integer.valueOf(1));

        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

        Map<String, List<KafkaStream<String, String>>> consumerMap =
                consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
        KafkaStream<String, String> stream = consumerMap.get(KafkaProducer.TOPIC).get(0);
        ConsumerIterator<String, String> it = stream.iterator();
        while (it.hasNext()) {
            System.out.println(it.next().message());
        }
    }

    public static void main(String[] args) {
        new KafkaConsumer().consume();
    }
}

Run output:

"C:\Program Files\Java\jdk1.8.0_101\bin\java.exe" -javaagent:D:\Installed\ideaIU-2018.1.5.win-scala\lib\idea_rt.jar=56756:D:\Installed\ideaIU-2018.1.5.win-scala\bin -Dfile.encoding=UTF-8 -classpath "C:\Program Files\Java\jdk1.8.0_101\jre\lib\charsets.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\deploy.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\access-bridge-64.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\cldrdata.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\dnsns.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\jaccess.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\jfxrt.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\localedata.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\nashorn.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\sunec.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\sunjce_provider.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\sunmscapi.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\sunpkcs11.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\ext\zipfs.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\javaws.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\jce.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\jfr.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\jfxswt.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\jsse.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\management-agent.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\plugin.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\resources.jar;C:\Program Files\Java\jdk1.8.0_101\jre\lib\rt.jar;E:\workspace\kafka\kafka-demo\target\classes;D:\maven3.3\localRepository\org\apache\kafka\kafka_2.11\1.0.1\kafka_2.11-1.0.1.jar;D:\maven3.3\localRepository\org\apache\kafka\kafka-clients\1.0.1\kafka-clients-1.0.1.jar;D:\maven3.3\localRepository\org\lz4\lz4-java\1.4\lz4-java-1.4.jar;D:\maven3.3\localRepository\org\xerial\snappy\snappy-java\1.1.4\snappy-java-1.1.4.jar;D:\maven3.3\localRepository\org\slf4j\slf4j-api\1.7.25\slf4j-api-1.7.25.jar;D:\maven3.3\localRepository\com\fasterxml\jackson\core\jackson-databind\2.9.1\jackson-databind-2.9.1.jar;D:\maven3.3\localRepository\com\fasterxml\jackson\core\jackson-annotations\2.9.0\jackson-annotations-2.9.0.jar;D:\maven3.3\localRepository\com\fasterxml\jackson\core\jackson-core\2.9.1\jackson-core-2.9.1.jar;D:\maven3.3\localRepository\net\sf\jopt-simple\jopt-simple\5.0.4\jopt-simple-5.0.4.jar;D:\maven3.3\localRepository\com\yammer\metrics\metrics-core\2.2.0\metrics-core-2.2.0.jar;D:\maven3.3\localRepository\org\scala-lang\scala-library\2.11.12\scala-library-2.11.12.jar;D:\maven3.3\localRepository\org\slf4j\slf4j-log4j12\1.7.25\slf4j-log4j12-1.7.25.jar;D:\maven3.3\localRepository\log4j\log4j\1.2.17\log4j-1.2.17.jar;D:\maven3.3\localRepository\com\101tec\zkclient\0.10\zkclient-0.10.jar;D:\maven3.3\localRepository\org\apache\zookeeper\zookeeper\3.4.10\zookeeper-3.4.10.jar" kafka.KafkaConsumer
log4j:WARN No appenders could be found for logger (kafka.utils.VerifiableProperties).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
hello kafka message 1000
hello kafka message 1002
hello kafka message 1004
hello kafka message 1006
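
Like the producer, the kafka.javaapi.consumer high-level consumer is the deprecated ZooKeeper-based API. A minimal sketch on the new Java consumer (org.apache.kafka.clients.consumer, also in kafka-clients 1.0.1), assuming the same broker, group, and topic; NewApiConsumer is an illustrative name:

package kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Collections;
import java.util.Properties;

public class NewApiConsumer {

    public static void main(String[] args) {
        Properties props = new Properties();
        //The new consumer talks to the brokers directly; ZooKeeper is no longer involved
        props.put("bootstrap.servers", "192.168.18.140:9092");
        props.put("group.id", "jd-group");
        //"earliest" is the new-client spelling of the old "smallest"
        props.put("auto.offset.reset", "earliest");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Collections.singletonList("TEST-TOPIC"));
        while (true) {
            //poll(long) is the 1.0.x signature; later clients take a Duration instead
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.value());
            }
        }
    }
}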

Code source: http://outofmemory.cn/code-snippet/33051/java-kafka-producer-consumer-example

Below is a simple example of reading a Kafka source with Flink SQL. Note that, contrary to what the original note claimed, the connect()/descriptor style used here is the legacy Table API connector API (deprecated since Flink 1.11 and removed in 1.14), so this code targets roughly Flink 1.10/1.11; the original snippet has also been fixed up here (missing Json import, table registration, and a sink so that env.execute() has work to do).

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.Json;
import org.apache.flink.table.descriptors.Kafka;
import org.apache.flink.table.descriptors.Schema;
import org.apache.flink.types.Row;

public class KafkaDemo {

    public static void main(String[] args) throws Exception {
        // Set up the streaming and table environments
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner().inStreamingMode().build();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        // Declare the Kafka source: connection info, topic, and consumer group,
        // with JSON as the format and the schema of the incoming records
        tEnv.connect(new Kafka()
                        .version("universal")
                        .topic("input-topic")
                        .startFromLatest()
                        .property("bootstrap.servers", "localhost:9092")
                        .property("group.id", "flink-group"))
                .withFormat(new Json().failOnMissingField(false))
                .withSchema(new Schema()
                        .field("id", DataTypes.STRING())
                        .field("name", DataTypes.STRING()))
                .createTemporaryTable("input_table");

        // Run the query and print the matching rows as an append stream
        Table result = tEnv.sqlQuery("SELECT * FROM input_table WHERE id = '1'");
        tEnv.toAppendStream(result, Row.class).print();

        env.execute("KafkaDemo");
    }
}
```

In the code above we first create a StreamExecutionEnvironment and a StreamTableEnvironment, which define the Flink execution environment and the Flink SQL environment respectively. We then declare a Kafka source with the connection information, topic name, and consumer group ID, specify JSON as the source format, and define the fields and field types the source contains: withFormat() and withSchema() carry the format and schema, and createTemporaryTable() registers the source as a table named input_table. Finally, the Flink SQL query SELECT * FROM input_table WHERE id = '1' selects the records whose id is 1, and the result is printed. On Flink versions where this connector API has been removed, a different API is needed, as shown in the sketch below.
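For Flink 1.13 and later (where connect() is gone), the same table can be declared with the TableDescriptor API instead. A minimal sketch, assuming the flink-connector-kafka and flink-json dependencies are on the classpath; KafkaDemo113 is an illustrative name:

```java
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.TableDescriptor;
import org.apache.flink.table.api.TableEnvironment;

public class KafkaDemo113 {

    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(
                EnvironmentSettings.newInstance().inStreamingMode().build());

        // Register the Kafka source as a table using the 1.13+ TableDescriptor API
        tEnv.createTemporaryTable("input_table", TableDescriptor.forConnector("kafka")
                .schema(Schema.newBuilder()
                        .column("id", DataTypes.STRING())
                        .column("name", DataTypes.STRING())
                        .build())
                .option("topic", "input-topic")
                .option("properties.bootstrap.servers", "localhost:9092")
                .option("properties.group.id", "flink-group")
                .option("scan.startup.mode", "latest-offset")
                .format("json")
                .build());

        // execute().print() both submits the job and streams results to stdout
        tEnv.sqlQuery("SELECT * FROM input_table WHERE id = '1'").execute().print();
    }
}
```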