package com.kafka.stream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.*;
import org.apache.kafka.streams.state.KeyValueStore;

// NOTE(review): the imports below are unused by this class and reference
// JDK-internal / Scala-internal types (non-portable, removed in newer JDKs).
// They should be deleted once confirmed unused by the rest of the project.
import com.sun.org.apache.bcel.internal.generic.RETURN;
import com.sun.scenario.effect.Merge;
import scala.collection.immutable.Vector1;
import scala.math.Equiv;
import sun.awt.image.IntegerComponentRaster;
import sun.java2d.pipe.SpanIterator;
public class KStream1 {
public static void main(String[] args) {
Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-temperature");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.232.145:9092");
props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
StreamsBuilder builder = new StreamsBuilder();
try {
KStream<String, String> source = builder.stream("t1");
// source.foreach((k,v)->{
// System.out.println(v);
//
// System.out.println(" *************** ");
// });
// source.filter((k,v)->Integer.parseInt(v)>567).foreach((k,v)-> System.out.println(v));
// filter 过滤 参数函数描述:(k,v)->boolean;
// selectKey 操作key 参数描述符 (k,v) -> R;
// KTable<String, Long> count = source.filter((k, v) -> Integer.parseInt(v) > 678).selectKey((k, v) -> v)
// .groupByKey().count();
//
// count.toStream().foreach((k,v)->{
// System.out.println("k:" + k);
// System.out.println("v:" + v);
// });
// map 可以操作k v 函数描述符 (k,v)->R ;但R必须是KeyValue类型的
// source.map((k,v)->{
// return new KeyValue<>(k,v+1);
// }).foreach((k,v)->{
// System.out.println(v);
// });
//mapValue 只操作value的一种rdd 函数描述符 (V)->R
// source.mapValues((v)->v+": v").foreach((k,v)->{
// System.out.println(v);
// });
//flatMap 将list中的数据进行操作不并形成新的list 函数描述 (k,v) ->R R是一个可迭代的集合 每个元素时keyvalue类型
// source.flatMap((k,v)->{
// List<KeyValue<String,String>> keyValues = new ArrayList<>();
// List<String> list = Arrays.asList(v.split("5"));
// for (String v1 : list) {
// keyValues.add(new KeyValue<>(k,v1));
// }
// return keyValues;
// }).foreach((KTable,v)->{
// System.out.println(v +" : value");
// });
//flatMapValues 函数描述符 (k,v) ->R R为可迭代就好
// source.flatMapValues((k,v)->{
// return Arrays.asList(v.split("1"));
// }).foreach((k,v)-> System.out.println(v));
//打印
// source.print(Printed.toSysOut());
//遍历 函数描述符 (k,v)->void
// source.foreach((k,v)->{
// System.out.println(v);
// });
//查看数据 一般用作debug 函数描述符 (k,v)->void
// source.peek((k,v)->{
// System.out.println(v);
// }).print(Printed.toSysOut());
//branch 按条件分割流
// KStream<String, String>[] branch = source.branch((k, v) -> {
// return v.startsWith("1");
// },(k,v)->{
// return v.startsWith("2");
// });
//
// branch[0].print(Printed.toSysOut());
// branch[1].print(Printed.toSysOut());
//Merge 合并连个流
// KStream<String, String> merge = branch[0].merge(branch[1]);
// source.groupBy((k, v) -> {
// return v;
// }).count().toStream().print(Printed.toSysOut());
// source.groupBy((k,v)->v).aggregate(()-> 0L,
// (k,v,agg) -> 1L+agg,
// Materialized.<String,Long, KeyValueStore<Bytes,byte[]>>as("c152")
// .withKeySerde(Serdes.String())
// .withValueSerde(Serdes.Long())).toStream().print(Printed.toSysOut());
// source.peek((k,v)-> System.out.println(v));
KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props);
kafkaStreams.start();
}catch (Exception e){
e.printStackTrace();
}
}
}
// KStream API
// (blog-scrape metadata: latest recommended article published 2022-02-10 22:03:05)