kafka-Streaming是将一个topic的数据经过业务处理后传输到另一个topic中
一、无业务流程
- 创建topic
kafka-topics.sh --zookeeper 192.168.184.40:2181 --create --topic mystreamin --partitions 3 --replication-factor 1
kafka-topics.sh --zookeeper 192.168.184.40:2181 --create --topic mystreamout --partitions 3 --replication-factor 1
- kafkaStreaming代码除业务部分格式固定
package kafka_stream;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import scala.collection.immutable.Stream;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
/**
* @Author Cai
* @date 2020/12/15
* @Des
*/
/**
 * Minimal Kafka Streams application that pipes records unchanged from topic
 * {@code mystreamin} to topic {@code mystreamout} (no business logic).
 *
 * @Author Cai
 * @date 2020/12/15
 */
public class MyStream {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(StreamsConfig.APPLICATION_ID_CONFIG, "mystream");
        prop.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.184.40:9092");
        // Default serdes: both record keys and values are plain strings.
        prop.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        prop.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // Build the topology: consume from mystreamin and forward every record
        // to mystreamout without any transformation.
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("mystreamin").to("mystreamout");
        Topology topo = builder.build();

        final KafkaStreams streams = new KafkaStreams(topo, prop);
        final CountDownLatch latch = new CountDownLatch(1);

        // Shutdown hook: on Ctrl+C / SIGTERM close the streams client cleanly,
        // then release the latch so main() can return.
        Runtime.getRuntime().addShutdownHook(new Thread("stream") {
            @Override
            public void run() {
                streams.close();
                latch.countDown();
            }
        });

        try {
            streams.start();
            latch.await();
        } catch (InterruptedException e) {
            // Restore the interrupt status and report abnormal termination
            // instead of silently exiting with a success code.
            Thread.currentThread().interrupt();
            System.exit(1);
        }
        System.exit(0);
    }
}
- producer
kafka-console-producer.sh --topic mystreamin --broker-list 127.0.0.1:9092
- consumer
kafka-console-consumer.sh --topic mystreamout --bootstrap-server 127.0.0.1:9092 --from-beginning