Spark Streaming Reading Kafka Data (3): The Receiver Approach

Spark Streaming Reading Kafka Data (1): Overview of the Two Approaches

Spark Streaming Reading Kafka Data (2): The DirectStream Approach

For the pros and cons of the Receiver and DirectStream approaches, see the two articles above.

Project dependencies

    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-core_2.11</artifactId>
      <version>2.3.0</version>
    </dependency>

    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-sql_2.11</artifactId>
      <version>2.3.0</version>
    </dependency>

    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-streaming_2.11</artifactId>
      <version>2.3.0</version>
    </dependency>

    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
      <version>2.3.0</version>
    </dependency>
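
Note: the Receiver-based createStream API used below is provided only by the spark-streaming-kafka-0-8 integration; the newer 0-10 integration supports only the Direct approach.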

Getting-started example

Every 10 seconds, pull messages from Kafka and recompute from scratch; the result does not include the previous batch's result.

package com.chy.streaming;

import com.chy.util.SparkUtil;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;

/**
 * @Title: KafkaStream
 * @Description: Every 10 seconds, pull messages from Kafka and recompute from
 * scratch; the result does not include the previous batch's result.
 * Messages sent to Kafka:
 * 11
 * 12
 * 11
 * 19
 * Spark output:
 * (19,1)
 * (11,2)
 * (12,1)
 * @author chy
 * @date 2018/11/21 0:36
 */
public class KafkaStream {
    private static final Pattern SPACE = Pattern.compile(" ");

    public static void main(String[] args) {

        String zk = "localhost:2181";
        String groupid = "spark_streaming_group";
        String topics = "spark_topic";

        // Number of receiver threads per topic
        int numThreads = 1;
        Map<String, Integer> topicMap = new HashMap<>();
        String[] topicArray = topics.split(",");
        for (String topic : topicArray) {
            topicMap.put(topic, numThreads);
        }

        // 10 s batch interval
        JavaStreamingContext jssc = SparkUtil.getJavaStreamingContext(10000);

        // Receiver-based stream: connects through ZooKeeper using Kafka's high-level consumer API
        JavaPairReceiverInputDStream<String, String> kafkaStream =
                KafkaUtils.createStream(jssc, zk, groupid, topicMap);


        // Each Kafka record arrives as a (key, message) tuple; keep only the message
        JavaDStream<String> lines = kafkaStream.map(Tuple2::_2);

        lines.print();

        // Split each line on spaces into individual words
        JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());

        words.print();

        // Classic word count over the current 10 s batch only
        JavaPairDStream<String, Integer> wordCounts = words.mapToPair(s -> new Tuple2<>(s, 1))
                .reduceByKey((i1, i2) -> i1 + i2);

        wordCounts.print();

        jssc.start();

        try {
            jssc.awaitTermination();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

    }

}
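
Both examples call a SparkUtil helper that this post does not include. The sketch below shows one plausible implementation; the local[2] master, the app name, and the guess that the boolean flag in the second overload turns on the receiver write-ahead log are all assumptions, not the actual helper:

package com.chy.util;

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class SparkUtil {

    // Overload used by the first example: batch interval in milliseconds, no WAL.
    public static JavaStreamingContext getJavaStreamingContext(long batchIntervalMs) {
        return getJavaStreamingContext(batchIntervalMs, false);
    }

    // Overload used by the checkpoint example; the flag presumably enables the
    // receiver write-ahead log (assumption).
    public static JavaStreamingContext getJavaStreamingContext(long batchIntervalMs, boolean enableWAL) {
        SparkConf conf = new SparkConf()
                .setMaster("local[2]")   // at least 2 threads: one for the receiver, one for processing
                .setAppName("KafkaReceiverDemo");
        if (enableWAL) {
            // Persist received blocks to the checkpoint directory for zero-data-loss recovery
            conf.set("spark.streaming.receiver.writeAheadLog.enable", "true");
        }
        return new JavaStreamingContext(conf, new Duration(batchIntervalMs));
    }
}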

Checkpoint example

The Receiver-based approach uses Kafka's high-level consumer API. With the default configuration it can lose data when a failure occurs; to guarantee zero data loss, you must enable the Write Ahead Log (WAL), which synchronously saves the received data to a distributed file system such as HDFS so that it can be recovered after an error. Because WAL-enabled receivers already persist the data to the log, the storage level should be set to StorageLevel.MEMORY_AND_DISK_SER.

This example uses a sliding window: every 10 seconds it computes over the previous 20 seconds, incrementally subtracting the data that leaves the window and adding the data that enters it.
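
For example (with made-up numbers): if the current window's count for the word 11 is (11,2), the 10-second batch that slides out of the window contributed (11,1), and the batch that slides in contributes (11,3), then the next window's count is computed incrementally as 2 - 1 + 3 = 4 instead of rescanning the full 20 seconds of data.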

package com.chy.streaming;

import com.chy.util.SparkUtil;
import org.apache.spark.api.java.function.Function0;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;

/**
 * @Title: KafkaStreamAndWindow_Two
 * @Description: Sliding window: every 10 seconds, compute over the previous
 * 20 seconds, incrementally subtracting the data that leaves the window and
 * adding the data that enters it.
 * The Receiver-based approach uses Kafka's high-level consumer API; to
 * guarantee zero data loss, enable the WAL and use a serialized storage
 * level (see the notes above).
 * @author chy
 * @date 2018/11/21 23:30
 */
public class KafkaStreamAndWindow_Two {

    private static final Pattern SPACE = Pattern.compile(" ");

    public static void main(String[] args) {
        String zk = "localhost:2181";
        String groupid = "spark_streaming_group";
        String topics = "spark_topic";

        // Number of receiver threads per topic
        int numThreads = 1;
        Map<String, Integer> topicMap = new HashMap<>();
        String[] topicArray = topics.split(",");
        for (String topic : topicArray) {
            topicMap.put(topic, numThreads);
        }

        String directory="src/main/resources/checkpoint/KafkaStreamAndWindow_Two";

        JavaStreamingContext jssc=JavaStreamingContext.getOrCreate(directory, new Function0<JavaStreamingContext>() {
            @Override
            public JavaStreamingContext call() throws Exception {

                final JavaStreamingContext jssc = SparkUtil.getJavaStreamingContext(10000, true);

                // Enable checkpointing so that metadata (and hence the computation)
                // can be recovered after a driver failure
                jssc.checkpoint(directory);

                // With the WAL enabled, received data is already persisted, so a
                // serialized storage level suffices; _2 additionally replicates
                // the data on two executors
                JavaPairReceiverInputDStream<String, String> kafkaStream =
                        KafkaUtils.createStream(jssc, zk, groupid, topicMap, StorageLevel.MEMORY_AND_DISK_SER_2());


                JavaDStream<String> lines = kafkaStream.map(Tuple2::_2);

                lines.print();

                JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(SPACE.split(x)).iterator());

                words.print();

                JavaPairDStream<String, Integer> wordCounts = words.mapToPair(s -> new Tuple2<>(s, 1))
                        // Every 10 s (slideDuration), aggregate over the previous 20 s (windowDuration);
                        // both must be multiples of the batch interval set in SparkUtil.
                        // The second function is the inverse reduce: it subtracts the
                        // counts of the batch that slides out of the window.
                        .reduceByKeyAndWindow((i1, i2) -> i1 + i2, (i1, i2) -> i1 - i2, new Duration(20000), new Duration(10000));

                wordCounts.print();

                return jssc;
            }
        });

        // Register the shutdown hook before blocking on awaitTermination(),
        // otherwise it would only be registered after the context terminates
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                System.out.println("Shutdown hook run!");
                // stop(stopSparkContext = true, stopGracefully = true)
                jssc.stop(true, true);
            }
        });

        jssc.start();

        try {
            jssc.awaitTermination();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
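
One more note on the shutdown hook: rather than registering it by hand, Spark can install its own graceful-stop hook when spark.streaming.stopGracefullyOnShutdown is set. A minimal sketch, assuming you build the SparkConf yourself (e.g., inside the hypothetical SparkUtil helper above):

import org.apache.spark.SparkConf;

public class GracefulShutdownConf {
    // With this flag set, Spark stops the StreamingContext gracefully
    // (letting in-flight batches finish) when the JVM shuts down.
    public static SparkConf build() {
        return new SparkConf()
                .setAppName("KafkaReceiverDemo")
                .set("spark.streaming.stopGracefullyOnShutdown", "true");
    }
}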

 
