stream方法可将数据从 kafka 传输到 kafka
本文主要演示将数据进行处理,然后上传至kafka
1、创建stream目录并编写
编写ICustomTopology 接口
package nj.zb.stream;
import org.apache.kafka.streams.Topology;
import java.util.Properties;
/**
* @author: 03-CJH
* @date:2020/5/27
* @desc:
*/
/**
 * Strategy interface: implementations assemble a Kafka Streams {@link Topology}
 * that {@code StreamHandler} will execute.
 */
@FunctionalInterface
public interface ICustomTopology {
    /**
     * Builds the processing topology.
     *
     * @param prop the stream configuration properties, available in case the
     *             builder needs to inspect or tune them
     * @return the topology to be run by a {@code KafkaStreams} instance
     */
    Topology buildCustomTopology(Properties prop);
}
编写StreamHandler类
package nj.zb.stream;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
/**
* @author: 03-CJH
* @date:2020/5/27
* @desc:
*/
/**
 * Configures and runs a Kafka Streams application built from a pluggable
 * {@link ICustomTopology}. {@link #execute()} blocks until the JVM shuts down.
 */
public class StreamHandler {
    // Strategy that supplies the topology to run; immutable after construction.
    private final ICustomTopology topology;

    /**
     * @param topology builder for the topology this handler will execute
     */
    public StreamHandler(ICustomTopology topology) {
        this.topology = topology;
    }

    /**
     * Builds the stream configuration, starts the streams client, and blocks
     * until a JVM shutdown hook closes it and releases the latch.
     */
    public void execute() {
        Properties prop = new Properties();
        prop.put(StreamsConfig.APPLICATION_ID_CONFIG, "fei");
        prop.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.48.141:9092");
        prop.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        prop.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        Topology topo = this.topology.buildCustomTopology(prop);
        KafkaStreams streams = new KafkaStreams(topo, prop);
        CountDownLatch latch = new CountDownLatch(1);
        // Close the streams client cleanly when the JVM is asked to exit,
        // then release the latch so execute() can return.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            streams.close();
            latch.countDown();
        }));
        try {
            streams.start();
            latch.await();
        } catch (InterruptedException e) {
            // Restore the interrupt status before bailing out, per the
            // standard InterruptedException contract.
            Thread.currentThread().interrupt();
            e.printStackTrace();
            System.exit(1);
        } finally {
            // Idempotent: safe even if the shutdown hook already closed it.
            streams.close();
        }
    }
}
编写UserFriendsTopology 类继承 ICustomTopology 接口
package nj.zb.stream;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
/**
* @author: 03-CJH
* @date:2020/5/27
* @desc:
*/
/**
 * Topology that normalizes raw user-friends CSV records: each input line
 * "userId,friend1 friend2 ..." is fanned out into one "userId,friendId"
 * record per friend.
 */
public class UserFriendsTopology implements ICustomTopology {
    /**
     * Builds a topology that reads from topic {@code user_friends_raw}, drops
     * the CSV header and malformed rows, expands each row into one record per
     * friend, and writes the results to topic {@code user_friends2}.
     *
     * @param prop stream configuration (not needed by this builder)
     * @return the assembled topology
     */
    @Override
    public Topology buildCustomTopology(Properties prop) {
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("user_friends_raw")
                // Guard against null record values (e.g. tombstones) before
                // calling toString(); also skip the header row ("user...") and
                // any row that does not split into exactly "userId,friends".
                .filter((k, v) -> v != null
                        && !v.toString().startsWith("user")
                        && v.toString().split(",").length == 2)
                .flatMap((k, v) -> {
                    System.out.println(k + " " + v);
                    List<KeyValue<String, String>> keyValues = new ArrayList<>();
                    String[] info = v.toString().split(",");
                    // Friends are space-separated within the second CSV field;
                    // emit one keyless "userId,friendId" record per friend.
                    String[] friends = info[1].split(" ");
                    for (String friend : friends) {
                        keyValues.add(new KeyValue<>(null, info[0] + "," + friend));
                    }
                    return keyValues;
                }).to("user_friends2");
        return builder.build();
    }
}
编写测试类StreamDriver
package nj.zb;
import nj.zb.stream.StreamHandler;
import nj.zb.stream.UserFriendsTopology;
/**
* @author: 03-CJH
* @date:2020/5/27
* @desc:
*/
/**
 * Driver program: wires the user-friends topology into a stream handler
 * and runs it until the JVM shuts down.
 */
public class StreamDriver {
    public static void main(String[] args) {
        // Build the topology strategy, hand it to the handler, and block in execute().
        new StreamHandler(new UserFriendsTopology()).execute();
    }
}
2、使用kafka-topic查看
创建topic
ps:创建topic用下面的命令:
[root@cjh1 ~]# kafka-topics.sh --create --topic user_friends2 --zookeeper 192.168.48.141:2181 --partitions 1 --replication-factor 1
查看topic消息数量
[root@cjh1 kafka-logs]# kafka-run-class.sh kafka.tools.GetOffsetShell --broker-list 192.168.48.141:9092 --topic user_friends2 --time -1 --offsets 1
ps:至此,使用stream方法将数据从kafka处理后再上传至kafka的流程完成!
ps:望多多支持,后续文章还在持续更新中…