Getting Started with Spark Streaming

To initialize a Spark Streaming program, a StreamingContext object must be created; it is the main entry point for all Spark Streaming functionality. In Java, a JavaStreamingContext can be created from a SparkConf object:

SparkConf conf = new SparkConf().setMaster("local").setAppName("testLocal");
JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(2));

A JavaStreamingContext can also be created from an existing JavaSparkContext:

SparkConf conf = new SparkConf().setMaster("local").setAppName("testLocal");
JavaSparkContext sc = new JavaSparkContext(conf);
JavaStreamingContext ssc = new JavaStreamingContext(sc, Durations.seconds(2));
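Defining the context only sets up the pipeline; nothing actually runs until ssc.start() is called. A minimal end-to-end sketch (the localhost:9999 socket source is just an illustrative assumption, e.g. fed by nc -lk 9999; the classes used are the same ones imported in the full program further below):

// local[2]: a receiver-based source needs at least two local threads
SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("testLocal");
JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(2));

// assumed source: a plain-text TCP socket on localhost:9999
JavaDStream<String> lines = ssc.socketTextStream("localhost", 9999);
lines.print();          // print a few elements of every 2-second batch

ssc.start();            // start the streaming computation
ssc.awaitTermination(); // block until the job is stopped or fails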


Experiment: Flume + Kafka + Spark Streaming

  1. Start Kafka: ./bin/kafka-server-start.sh ../config/server.properties
  2. Start Flume: ./bin/flume-ng agent --conf conf --conf-file ./conf/kafka.conf --name a1 -Dflume.root.logger=DEBUG,console
  3. Start a Kafka console consumer to verify that messages reach the topic: ./kafka-console-consumer.sh --zookeeper vm04:2181 --topic test_m_brokers --from-beginning
  4. Run the program in Eclipse; the results are printed to the console (see the note on test data right after this list).
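The exec source in kafka.conf tails /root/data/flume/data-produce.log, so test input can be generated simply by appending comma-separated lines to that file, e.g. echo "spark,kafka,flume" >> /root/data/flume/data-produce.log (the exact content is only an illustrative assumption; the program below splits each line on commas).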

kafka.conf

# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = exec
a1.sources.r1.command = tail -f /root/data/flume/data-produce.log

# Describe the sink
a1.sinks.k1.type= org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.brokerList=vm04:9092
a1.sinks.k1.topic=test_m_brokers
#a1.sinks.k1.serializer.class=kafka.serializer.StringEncoder

# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
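Note: brokerList and topic are the property names used by the KafkaSink in Flume 1.6; newer Flume releases (1.7+) expect kafka.bootstrap.servers and kafka.topic instead, so the names should be checked against the documentation of the Flume version actually installed.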

package test;

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import kafka.serializer.StringDecoder;
import scala.Tuple2;

public class KafkaTest {
    public static void main(String[] args) throws InterruptedException {
        String brokers = "192.168.122.250:6667"; // unused below; the broker list is hardcoded in kafkaParams instead
        SparkConf conf = new SparkConf().setAppName("kafkatest").setMaster("local[4]");
        JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(2));

        Map<String, String> kafkaParams = new HashMap<String, String>();
        // Kafka broker list for the direct stream (not the ZooKeeper quorum)
        kafkaParams.put("bootstrap.servers",
                "vm04:9092,vm05:9092,vm06:9092");
        Set<String> topics = new HashSet<String>();
        topics.add("test_m_brokers");
        JavaPairInputDStream<String, String> lines = KafkaUtils.
                createDirectStream(ssc, String.class, String.class,
                        StringDecoder.class, StringDecoder.class, kafkaParams, topics);
        // split each Kafka message value (tuple._2) on commas into individual words
        JavaDStream<String> words = lines.flatMap(new FlatMapFunction<Tuple2<String, String>, String>() {
            public Iterator<String> call(Tuple2<String, String> tuple) throws Exception {
                return Arrays.asList(tuple._2.split(",")).iterator();
            }
        });

        // map each word to a (word, 1) pair
        JavaPairDStream<String, Integer> pairs = words.mapToPair(new PairFunction<String, String, Integer>() {

            public Tuple2<String, Integer> call(String word) throws Exception {
                return new Tuple2<String, Integer>(word, 1);
            }
        });

        // sum the counts for each word within the current batch
        JavaPairDStream<String, Integer> word_count = pairs.reduceByKey(new Function2<Integer, Integer, Integer>() {

            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1+v2;
            }
        });
        // Alternative output using Java 8 lambdas:
//        word_count.foreachRDD(rdd -> {
//            rdd.foreach(x -> {
//                System.out.println(x);
//            });
//        });
        word_count.print();
        ssc.start();
        ssc.awaitTermination();

    }

}
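The imports org.apache.spark.streaming.kafka.KafkaUtils and kafka.serializer.StringDecoder come from the spark-streaming-kafka-0-8 integration, so in addition to spark-core and spark-streaming the matching artifact (presumably spark-streaming-kafka-0-8_2.11 for Spark 2.1.0) has to be on the Eclipse build path.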

At first this threw an error. I am using Spark 2.1.0, and replacing metadata.broker.list with bootstrap.servers in kafkaParams made it work, though I am still not entirely sure of the exact reason.
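For reference, with the newer spark-streaming-kafka-0-10 connector the same direct stream would be built roughly as follows. This is only a sketch under the assumption that the 0-10 integration is on the classpath; KafkaUtils, LocationStrategies and ConsumerStrategies here come from org.apache.spark.streaming.kafka010, and StringDeserializer/ConsumerRecord from the Kafka client library. It is not the code used above:

Map<String, Object> kafkaParams = new HashMap<String, Object>();
kafkaParams.put("bootstrap.servers", "vm04:9092,vm05:9092,vm06:9092");
kafkaParams.put("key.deserializer", StringDeserializer.class);
kafkaParams.put("value.deserializer", StringDeserializer.class);
kafkaParams.put("group.id", "kafkatest");        // any consumer group id
kafkaParams.put("auto.offset.reset", "latest");

JavaInputDStream<ConsumerRecord<String, String>> stream =
        KafkaUtils.createDirectStream(
                ssc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.<String, String>Subscribe(
                        Arrays.asList("test_m_brokers"), kafkaParams));

Each record's value is then read with record.value() instead of tuple._2.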

The results:
[screenshot: the word counts for each batch printed to the console]
