Requirement: use the Kafka Streams API to read the data in the test topic, convert every record to upper case, and write the result to the test2 topic.
Step 1: Create a topic
On node01, run the following commands to create a topic named test2:
cd /export/servers/kafka_2.11-1.0.0/
bin/kafka-topics.sh --create --partitions 3 --replication-factor 2 --topic test2 --zookeeper node01:2181,node02:2181,node03:2181
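If you prefer to create the topic from Java instead of the shell, an AdminClient sketch along these lines should also work (the class name CreateTopic is illustrative; the broker list and the 3 partitions / replication factor 2 settings mirror the command above):

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.Collections;
import java.util.Properties;

public class CreateTopic {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "node01:9092,node02:9092,node03:9092");
        AdminClient admin = AdminClient.create(props);
        // 3 partitions, replication factor 2 -- the same settings as the shell command above
        NewTopic test2 = new NewTopic("test2", 3, (short) 2);
        admin.createTopics(Collections.singletonList(test2)).all().get();
        admin.close();
    }
}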
Step 2: Develop the Streams application
Two equivalent implementations are shown below: a minimal one written against the older KStreamBuilder API (deprecated in Kafka 1.0.0 but still functional), followed by a fully commented one written against the newer StreamsBuilder API.
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStreamBuilder;

import java.util.Properties;

public class StreamAPI {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "node01:9092");
        props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        KStreamBuilder builder = new KStreamBuilder();
        // Read from test, upper-case every value, and write the result to test2
        builder.stream("test").mapValues(line -> line.toString().toUpperCase()).to("test2");
        KafkaStreams streams = new KafkaStreams(builder, props);
        streams.start();
    }
}
The same application, rewritten with the newer StreamsBuilder API and commented line by line:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;

import java.util.Properties;

public class Stream {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Unique identifier for this Streams application
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application");
        // Kafka cluster to connect to
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "node01:9092");
        // Default key/value serde classes
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        // Builder for the processing logic
        StreamsBuilder streamsBuilder = new StreamsBuilder();
        // Read from test, upper-case every value, and write the result to test2
        streamsBuilder.stream("test").mapValues(line -> line.toString().toUpperCase()).to("test2");
        // Build the Topology (the processing graph)
        final Topology topology = streamsBuilder.build();
        // Create the Kafka Streams instance
        KafkaStreams streams = new KafkaStreams(topology, props);
        // Start the stream processing
        streams.start();
    }
}
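Both versions keep running until the JVM is killed. A common refinement (not part of the original example; shown only as a sketch against the variables of the StreamsBuilder version above) is to register a shutdown hook so the topology is closed cleanly:

        KafkaStreams streams = new KafkaStreams(topology, props);
        streams.start();
        // Hypothetical addition: close the Streams instance when the JVM exits (e.g. on Ctrl+C)
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));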
Step 3: Produce data
On node01, run the following commands to produce data into the test topic:
cd /export/servers/kafka_2.11-1.0.0
bin/kafka-console-producer.sh --broker-list node01:9092,node02:9092,node03:9092 --topic test
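The console producer above is the quickest way to type in test data by hand; a programmatic equivalent in Java (a sketch only, the class name TestProducer and the sample messages are illustrative) would look roughly like this:

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class TestProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "node01:9092,node02:9092,node03:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        // Each record lands in the test topic; the Streams job should emit an upper-cased copy to test2
        producer.send(new ProducerRecord<>("test", "hello kafka streams"));
        producer.send(new ProducerRecord<>("test", "lower case input"));
        producer.close();
    }
}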
Step 4: Consume data
On node02, run the following commands to consume the data from the test2 topic:
cd /export/servers/kafka_2.11-1.0.0
bin/kafka-console-consumer.sh --from-beginning --topic test2 --zookeeper node01:2181,node02:2181,node03:2181
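To check the output from code instead of the console consumer, a minimal KafkaConsumer sketch (the class name Test2Consumer and the group id are illustrative) is:

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.Collections;
import java.util.Properties;

public class Test2Consumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node01:9092,node02:9092,node03:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "test2-checker");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList("test2"));
        while (true) {
            // poll(long) is the pre-2.0 signature matching the kafka_2.11-1.0.0 client used in this tutorial
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.value()); // should print the upper-cased lines
            }
        }
    }
}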