Flume delivers data to Kafka, and Spark Streaming then consumes that data from Kafka for processing.
This post uses the netcat tool as Flume's input source. Without further ado, here is the code.
1. Flume
Flume agent configuration file:
a1.sources = r1
a1.sinks = k1
a1.channels = c1
a1.sources.r1.type=netcat
a1.sources.r1.bind=localhost
a1.sources.r1.port=8888
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.kafka.topic = test3
a1.sinks.k1.kafka.bootstrap.servers = big-data-2:6667
a1.sinks.k1.kafka.flumeBatchSize = 20
a1.sinks.k1.kafka.producer.acks = 1
a1.channels.c1.type=memory
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
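With the configuration saved to a file (the file name netcat-kafka.conf and the conf directory below are assumptions), the agent can be started with the standard flume-ng launcher:
bin/flume-ng agent --conf conf --conf-file conf/netcat-kafka.conf --name a1 -Dflume.root.logger=INFO,console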
On the client side, run nc localhost 8888; anything typed there is sent to Flume as a message.
2. Kafka
The broker listens on port 6667 (this is the default port when Kafka is installed through Ambari).
Create a topic:
./kafka-topics.sh --create --zookeeper big-data-2:2181 --replication-factor 3 --partitions 3 --topic test3
Then the existing topics can be listed with:
./kafka-topics.sh --list --zookeeper big-data-2:2181
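To verify that messages typed into netcat actually arrive in Kafka, a console consumer can be attached to the topic (a sanity check only; on older Kafka versions --zookeeper big-data-2:2181 is used instead of --bootstrap-server):
./kafka-console-consumer.sh --bootstrap-server big-data-2:6667 --topic test3 --from-beginning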
3. Spark Streaming
package com.it18zhang.spark.java;
import java.util.*;
import org.apache.spark.SparkConf;
import org.apache.spark.TaskContext;
import org.apache.spark.api.java.*;
import org.apache.spark.api.java.function.*;
import org.apache.spark.streaming.Seconds;
import org.apache.spark.streaming.api.java.*;
import org.apache.spark.streaming.kafka010.*;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import scala.Tuple2;
/**
 * Consumes messages from the Kafka topic with Spark Streaming and performs a word count per batch.
 */
public class KafkaSparkStreamingDemo {
public static void main(String[] args) throws InterruptedException {
SparkConf conf = new SparkConf();
conf.setAppName("kafkaSpark");
conf.setMaster("local[4]");
// Create the Spark streaming context with a 5-second batch interval
JavaStreamingContext streamingContext = new JavaStreamingContext(conf, Seconds.apply(5));
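// Kafka consumer settings: brokers, key/value deserializers, consumer group, start from the latest offsets, no automatic offset commit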
Map<String, Object> kafkaParams = new HashMap<>();
kafkaParams.put("bootstrap.servers", "big-data-2:6667");
kafkaParams.put("key.deserializer", StringDeserializer.class);
kafkaParams.put("value.deserializer", StringDeserializer.class);
kafkaParams.put("group.id", "g6");
kafkaParams.put("auto.offset.reset", "latest");
kafkaParams.put("enable.auto.commit", false);
Collection<String> topics = Arrays.asList("test3");
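// Create a direct stream on the topic; PreferConsistent distributes the Kafka partitions evenly across the available executors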
final JavaInputDStream<ConsumerRecord<String, String>> stream =
KafkaUtils.createDirectStream(
streamingContext,
LocationStrategies.PreferConsistent(),
ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams)
);
// Flatten: split each record's value into words
JavaDStream<String> wordsDS = stream.flatMap(new FlatMapFunction<ConsumerRecord<String,String>, String>() {
public Iterator<String> call(ConsumerRecord<String, String> r) throws Exception {
String value = r.value();
List<String> list = new ArrayList<String>();
String[] arr = value.split(" ");
for (String s : arr) {
list.add(s);
}
return list.iterator();
}
});
// Map each word to a (word, 1) tuple
JavaPairDStream<String, Integer> pairDS = wordsDS.mapToPair(new PairFunction<String, String, Integer>() {
public Tuple2<String, Integer> call(String s) throws Exception {
return new Tuple2<String, Integer>(s, 1);
}
});
// Aggregate: sum the counts per word within the batch
JavaPairDStream<String, Integer> countDS = pairDS.reduceByKey(new Function2<Integer, Integer, Integer>() {
public Integer call(Integer v1, Integer v2) throws Exception {
return v1 + v2;
}
});
// Print each batch's result
countDS.print();
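// Optional sketch (not part of the original code): because enable.auto.commit is
// false, offsets are never committed; if progress should be recorded in Kafka,
// each batch's offset ranges can be committed back via the kafka010 API:
stream.foreachRDD(new VoidFunction<JavaRDD<ConsumerRecord<String, String>>>() {
    public void call(JavaRDD<ConsumerRecord<String, String>> rdd) throws Exception {
        // RDDs produced by the direct stream expose their Kafka offset ranges
        OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
        // Commit asynchronously for the consumer group configured above ("g6")
        ((CanCommitOffsets) stream.inputDStream()).commitAsync(offsetRanges);
    }
});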
streamingContext.start();
streamingContext.awaitTermination();
}
}
Appendix:
Flume uses a spooldir source to watch a directory and delivers the data to both Kafka and HDFS.
a1.sources = r1
a1.sinks = k1 k2
a1.channels = c1
# properties of r1
a1.sources.r1.type = spooldir
a1.sources.r1.spoolDir =/data/flow_data
a1.sources.r1.inputCharset=utf-8
# properties of c1
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# properties of k1
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://mycluster/flume/events/%y-%m-%d/%H/%M/%S
a1.sinks.k1.hdfs.filePrefix = events-
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 10
a1.sinks.k1.hdfs.roundUnit = minute
a1.sinks.k1.hdfs.useLocalTimeStamp=true
a1.sinks.k1.hdfs.rollSize=0
a1.sinks.k1.hdfs.rollCount=0
#a1.sinks.k1.hdfs.rollInterval=30
# a1-Sink Kafka
a1.sinks.k2.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k2.topic = DBFile
a1.sinks.k2.brokerList = big-data-2:6667,big-data-3:6667,big-data-4:6667
a1.sinks.k2.requiredAcks = 1
a1.sinks.k2.batchSize = 20
# set channel for sources, sinks
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c1
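Note that the k2 sink above uses the legacy Kafka sink property names (topic, brokerList, requiredAcks, batchSize). On Flume 1.7+ the same sink would normally be configured with the kafka.* style properties already used in section 1; a sketch of the equivalent settings (not tested against the cluster above):
a1.sinks.k2.kafka.topic = DBFile
a1.sinks.k2.kafka.bootstrap.servers = big-data-2:6667,big-data-3:6667,big-data-4:6667
a1.sinks.k2.kafka.producer.acks = 1
a1.sinks.k2.kafka.flumeBatchSize = 20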