Build a Maven quickstart project
Start ZooKeeper and Kafka first.
Create the topics in Kafka (users and users_raw): data is sent to users_raw, cleaned by the Java API, and the cleaned results are then written to users (a programmatic topic-creation sketch follows the dependency list below).
This article uses IDEA to build the project.
Create a Maven quickstart project.
Add the dependencies:
<properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <maven.compiler.source>1.8</maven.compiler.source>
    <maven.compiler.target>1.8</maven.compiler.target>
</properties>
<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.11</version>
        <scope>test</scope>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>2.0.0</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka -->
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka_2.11</artifactId>
        <version>2.0.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-streams</artifactId>
        <version>2.0.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hbase</groupId>
        <artifactId>hbase-client</artifactId>
        <version>1.2.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hbase</groupId>
        <artifactId>hbase-server</artifactId>
        <version>1.2.0</version>
    </dependency>
</dependencies>
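Besides creating the topics on the broker, they can also be created programmatically. A hypothetical sketch using the AdminClient from kafka-clients (the broker address matches the one used below; the partition and replication counts are assumptions for a single-broker setup, and the later examples use their own topic names such as user_friends_raw1):

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.Arrays;
import java.util.Properties;

public class CreateTopics {
    public static void main(String[] args) throws Exception {
        Properties prop = new Properties();
        prop.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.101.130:9092");
        try (AdminClient admin = AdminClient.create(prop)) {
            admin.createTopics(Arrays.asList(
                    new NewTopic("users_raw", 1, (short) 1),   // raw input topic
                    new NewTopic("users", 1, (short) 1)        // cleaned output topic
            )).all().get();  // block until the broker confirms creation
        }
    }
}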
Producer
- Connect a Kafka producer, send some data, and check it from the consumer.
package kafka.mypro_and_con;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class MyProducer {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.101.130:9092");
        prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // prop.put(ProducerConfig.ACKS_CONFIG, "1");  // defaults to "1" (note: acks must be a String)
        KafkaProducer<String, String> producer = new KafkaProducer<>(prop);
        // IDEA try-catch shortcut: Shift+Alt+Z or Ctrl+Alt+T
        try {
            for (int i = 10; i < 20; i++) {
                ProducerRecord<String, String> producerRecord =
                        new ProducerRecord<>("caicai", "caicai love luozi" + i);
                producer.send(producerRecord);
                Thread.sleep(500);  // throttle so the messages are easy to watch arrive
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            producer.close();
        }
    }
}
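send() is asynchronous, and the code above never checks the result. If you want confirmation that each record actually landed, send() also accepts a Callback. A minimal sketch, reusing the producer and record from above:

producer.send(producerRecord, (metadata, exception) -> {
    if (exception != null) {
        exception.printStackTrace();   // delivery failed after any retries
    } else {
        // on success the broker reports where the record was written
        System.out.println("sent to partition " + metadata.partition()
                + " at offset " + metadata.offset());
    }
});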
Consumer
- View the data in the topic.
package kafka.mypro_and_con;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

public class MyConsumer {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.101.130:9092");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        // auto-commit: false disables it, so offsets must be committed manually
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "5000");  // only applies when auto-commit is enabled
        // auto.offset.reset has three options: earliest, latest, none
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // use a fresh group id on each run to re-read the topic from the beginning
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "luozi9");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
        consumer.subscribe(Collections.singletonList("user_friends_raw"));
        while (true) {
            boolean tag = false;
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("offset: " + record.offset()
                        + ", key: " + record.key() + ", value: " + record.value());
                tag = true;
            }
            if (tag) {  // only commit when this poll actually returned records
                consumer.commitAsync(new OffsetCommitCallback() {
                    @Override
                    public void onComplete(Map<TopicPartition, OffsetAndMetadata> map, Exception e) {
                        System.out.println("offset commit complete");
                        System.out.println(map.keySet() + " " + map.values());
                    }
                });
            }
        }
    }
}
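One caveat: commitAsync does not retry on failure. A common pattern, sketched here under the assumption of the same consumer configured above, is to commit asynchronously inside the hot loop and fall back to a blocking commitSync before closing:

try {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.value());
        }
        consumer.commitAsync();      // fast, non-blocking commit in the hot loop
    }
} finally {
    try {
        consumer.commitSync();       // blocking commit; retries until success or fatal error
    } finally {
        consumer.close();
    }
}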
Data cleaning
Pull data from one Kafka topic, clean it, and write it into another topic. This article walks through two data-cleaning examples.
- Cleaning user_friends.csv
package kafka.stream;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.*;

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

public class User_friendsStream {
    public static void main(String[] args) {
        Properties prop = new Properties();
        // use a fresh application id on each run so the topology reprocesses from the beginning
        prop.put(StreamsConfig.APPLICATION_ID_CONFIG, "hgg2");
        prop.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.101.130:9092");
        prop.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        prop.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("user_friends_raw1")
                // drop the CSV header row and any line without exactly two fields
                .filter((k, v) -> (!v.toString().startsWith("user") && v.toString().split(",").length == 2))
                // explode "user,friend1 friend2 ..." into one "user,friend" record per friend
                .flatMap((k, v) -> {
                    System.out.println(k + " " + v);
                    List<KeyValue<String, String>> keyValues = new ArrayList<>();
                    String[] info = v.toString().split(",");
                    String[] friends = info[1].split(" ");
                    for (String friend : friends) {
                        keyValues.add(new KeyValue<>(null, info[0] + "," + friend));
                    }
                    return keyValues;
                }).to("user_friends1");
        Topology topo = builder.build();
        KafkaStreams streams = new KafkaStreams(topo, prop);
        CountDownLatch countDownLatch = new CountDownLatch(1);
        // close the streams app cleanly on Ctrl+C
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                streams.close();
                countDownLatch.countDown();
            }
        });
        try {
            streams.start();
            countDownLatch.await();
        } catch (InterruptedException e) {
            e.printStackTrace();
            System.exit(1);
        } finally {
            streams.close();
        }
    }
}
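To see what the flatMap produces, here is a tiny plain-Java sketch of the same flatten step on one hypothetical input line (the ids are made up; no Kafka needed):

public class UserFriendsExample {
    public static void main(String[] args) {
        String line = "123,456 789";                    // hypothetical raw record: user,space-separated friends
        String[] info = line.split(",");
        for (String friend : info[1].split(" ")) {
            System.out.println(info[0] + "," + friend); // prints "123,456" then "123,789"
        }
    }
}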
- Cleaning event_attendees.csv
package kafka.stream;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.*;
import org.apache.kafka.streams.kstream.KStream;

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

public class Event_attendeesStream {
    public static void main(String[] args) {
        Properties prop = new Properties();
        // use a fresh application id on each run so the topology reprocesses from the beginning
        prop.put(StreamsConfig.APPLICATION_ID_CONFIG, "gea6");
        prop.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.101.130:9092");
        prop.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        prop.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        StreamsBuilder builder = new StreamsBuilder();
        // input columns: event,yes,maybe,invited,no; drop the CSV header row and rows with no attendee columns
        KStream<Object, Object> ea = builder.stream("event_attendees_raw1")
                .filter((k, v) -> (!v.toString().startsWith("event") && v.toString().split(",").length > 1));
        // "yes" column -> one "event,user,yes" record per user
        KStream<String, String> yesStream = ea.flatMap((k, v) -> {
            System.out.println(k + " " + v);
            List<KeyValue<String, String>> lst = new ArrayList<>();
            String[] splits = v.toString().split(",");
            if (splits.length >= 2 && !splits[1].trim().equals("")) {
                String[] yes = splits[1].split(" ");
                for (String y : yes) {
                    lst.add(new KeyValue<>(null, splits[0] + "," + y + ",yes"));
                }
            }
            return lst;
        });
        // "maybe" column -> one "event,user,maybe" record per user
        KStream<String, String> maybeStream = ea.flatMap((k, v) -> {
            System.out.println(k + " " + v);
            List<KeyValue<String, String>> lst = new ArrayList<>();
            String[] splits = v.toString().split(",");
            if (splits.length >= 3 && !splits[2].trim().equals("")) {
                String[] maybe = splits[2].split(" ");
                for (String m : maybe) {
                    lst.add(new KeyValue<>(null, splits[0] + "," + m + ",maybe"));
                }
            }
            return lst;
        });
        // "invited" column -> one "event,user,invited" record per user
        KStream<String, String> invitedStream = ea.flatMap((k, v) -> {
            System.out.println(k + " " + v);
            List<KeyValue<String, String>> lst = new ArrayList<>();
            String[] splits = v.toString().split(",");
            if (splits.length >= 4 && !splits[3].trim().equals("")) {
                String[] invited = splits[3].split(" ");
                for (String inv : invited) {
                    lst.add(new KeyValue<>(null, splits[0] + "," + inv + ",invited"));
                }
            }
            return lst;
        });
        // "no" column -> one "event,user,no" record per user
        KStream<String, String> noStream = ea.flatMap((k, v) -> {
            System.out.println(k + " " + v);
            List<KeyValue<String, String>> lst = new ArrayList<>();
            String[] splits = v.toString().split(",");
            if (splits.length >= 5 && !splits[4].trim().equals("")) {
                String[] no = splits[4].split(" ");
                for (String n : no) {
                    lst.add(new KeyValue<>(null, splits[0] + "," + n + ",no"));
                }
            }
            return lst;
        });
        // merge the four branches back into one stream and write it out
        KStream<String, String> res = yesStream.merge(invitedStream).merge(maybeStream).merge(noStream);
        res.to("event_attendees1");
        Topology topo = builder.build();
        KafkaStreams streams = new KafkaStreams(topo, prop);
        CountDownLatch countDownLatch = new CountDownLatch(1);
        // close the streams app cleanly on Ctrl+C
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                streams.close();
                countDownLatch.countDown();
            }
        });
        try {
            streams.start();
            countDownLatch.await();
        } catch (InterruptedException e) {
            e.printStackTrace();
            System.exit(1);
        }
        System.exit(0);
    }
}
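Again, a plain-Java illustration of what one hypothetical input line becomes; a compact loop stands in for the four flatMap branches above (the ids are made up; no Kafka needed):

public class EventAttendeesExample {
    public static void main(String[] args) {
        // hypothetical raw record: event,yes,maybe,invited,no (space-separated user ids per column)
        String line = "e1,u1 u2,u3,,u4";
        String[] s = line.split(",");               // ["e1", "u1 u2", "u3", "", "u4"]
        String[] labels = {"yes", "maybe", "invited", "no"};
        for (int col = 1; col < s.length; col++) {
            if (s[col].trim().isEmpty()) continue;  // same empty-column guard as the streams code
            for (String user : s[col].split(" ")) {
                System.out.println(s[0] + "," + user + "," + labels[col - 1]);
            }
        }
        // prints: e1,u1,yes  e1,u2,yes  e1,u3,maybe  e1,u4,no (nothing for the empty invited column)
    }
}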