头歌 (EduCoder) Spark Streaming -- Java

Level 1: QueueStream

package net.educoder;

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.concurrent.LinkedBlockingQueue;

public class Step1 {
    private static SparkConf conf;

    static {
        conf = new SparkConf().setMaster("local[*]").setAppName("Step1");
        conf.set("spark.streaming.stopGracefullyOnShutdown", "true");
    }

    public static void main(String[] args) throws InterruptedException {
        /*********begin*********/
        // 1. Initialize the JavaStreamingContext and set the batch interval; Durations.seconds(1) --> one batch per second
        JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(1));
        // 2. Obtain the queue-backed stream
        LinkedBlockingQueue queue = QueueStream.queueStream(ssc);
        JavaDStream<String> dStream = ssc.queueStream(queue);

        // 3. Clean and transform each log line: format the timestamp, extract the start URL, and rebuild the record
        SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        JavaDStream<String> map = dStream.map(x -> {
            String[] split = x.split(",");
            String ip = split[0];
            String time = simpleDateFormat.format(new Date(Long.parseLong(split[1])));
            String startUrl = split[2].split(" ")[1];
            String targetUrl = split[3];
            String statusCode = split[4];
            return "Ip:" + ip + ",visitTime:" + time + ",startUrl:" + startUrl + ",targetUrl:" + targetUrl + ",statusCode:" + statusCode;
        });
        // 4. If the RDD is empty, call ssc.stop(false, false) and System.exit(0); otherwise save the results to MySQL via JdbcTools.saveData(Iterator[String])
        map.foreachRDD(rdd -> {
            if (rdd.isEmpty()) {
                ssc.stop(false, false);
                System.exit(0);
            } else {
                rdd.foreachPartition(partitionOfRecords -> {
                    JdbcTools.saveData(partitionOfRecords);
                });
            }
        });
        // 5. Start the streaming computation
        ssc.start();
        // 6. Wait for the computation to finish
        ssc.awaitTermination();
        /*********end*********/
    }
}
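Both QueueStream and JdbcTools above are helper classes supplied by the EduCoder scaffold and are not shown in the exercise. As a rough idea of the queue side only, here is a minimal sketch, assuming the helper simply parallelizes a sample log line into a JavaRDD and returns the queue that ssc.queueStream consumes; the platform's real implementation may differ.

package net.educoder;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

import java.util.Arrays;
import java.util.concurrent.LinkedBlockingQueue;

// Hypothetical stand-in for the platform's QueueStream scaffold class.
public class QueueStream {
    // Builds a queue of RDDs; ssc.queueStream(queue) treats each RDD in the queue as one micro-batch.
    public static LinkedBlockingQueue<JavaRDD<String>> queueStream(JavaStreamingContext ssc) {
        LinkedBlockingQueue<JavaRDD<String>> queue = new LinkedBlockingQueue<>();
        JavaRDD<String> batch = ssc.sparkContext().parallelize(Arrays.asList(
                "100.143.124.29,1509116285000,'GET www/1 HTTP/1.0',https://www.baidu.com/s?wd=test,404"
        ));
        queue.add(batch);
        return queue;
    }
}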

Level 2: File Streams

package com.educoder;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import scala.Tuple2;

import java.io.Serializable;
import java.sql.*;
import java.util.Arrays;
import java.util.Iterator;

public class SparkStreaming {
    public static void main(String[] args) throws Exception {
        SparkConf conf = new SparkConf().setAppName("edu").setMaster("local");
        /********** Begin **********/
        // 1. Initialize the StreamingContext with a 1-second batch interval
        JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(1));
        // 2. Create a file stream that monitors the directory /root/step11_fils
        JavaDStream<String> DStream = ssc.textFileStream("file:///root/step11_fils");
        /*
         * Input format: hadoop hadoop spark spark
         * The delimiter is a space.
         * Requirements:
         *   Count the occurrences of each word in every batch.
         *   Write the results to MySQL.
         *   If a word is not yet in the MySQL table, insert it directly; if it is, add the previously stored count to this batch's count before writing it back.
         *   Database: educoder, table: step, word column: word, count column: count.
         */
        // 3. Clean and transform the data
        JavaPairDStream<String, Integer> wordcount = DStream.flatMap(x -> Arrays.asList(x.split(" ")).iterator())
                .mapToPair(x -> new Tuple2<String, Integer>(x, 1))
                .reduceByKey((x, y) -> x + y);
        // 4. Write the results to MySQL
        wordcount.foreachRDD(rdd -> {
            rdd.foreachPartition(new VoidFunction<Iterator<Tuple2<String, Integer>>>() {
                @Override
                public void call(Iterator<Tuple2<String, Integer>> r) throws Exception {
                    Connection connection = myconn();
                    while (r.hasNext()) {
                        Tuple2<String, Integer> record = r.next();
                        String querySql = "SELECT t.count FROM step t WHERE t.word = '" + record._1 + "'";
                        ResultSet queryResultSet = connection.createStatement().executeQuery(querySql);
                        Boolean hasNext = queryResultSet.next();
                        if (!hasNext) {
                            String insertSql = "insert into step(word,count) values('" + record._1 + "'," + record._2 + ")";
                            connection.createStatement().execute(insertSql);
                        } else {
                            Integer newWordCount = queryResultSet.getInt("count") + record._2;
                            String updateSql = "UPDATE step SET count = " + newWordCount + " where word = '" + record._1 + "'";
                            connection.createStatement().execute(updateSql);
                        }
                    }
                    connection.close();
                }
            });
        });
        // 5. Start the streaming computation
        ssc.start();
        /********** End **********/
        Thread.sleep(15000);
        ssc.stop();
        ssc.awaitTermination();
    }

    /**
     * Obtain a MySQL connection.
     * @return a JDBC Connection
     */
    public static Connection myconn() throws SQLException, Exception {
        Class.forName("com.mysql.jdbc.Driver");
        Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/educoder", "root", "123123");
        return conn;
    }
}
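The query/insert/update above builds SQL by string concatenation, which is what the exercise accepts, but it breaks if a word ever contains a quote. A minimal sketch of the same upsert logic with PreparedStatement, assuming the same educoder.step table and word/count columns (the StepUpsert class name is just for illustration), could look like this:

package com.educoder;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

// Illustrative helper: same check-then-insert/update logic as the accepted solution,
// but with parameterized statements instead of concatenated SQL.
public class StepUpsert {
    public static void upsert(Connection connection, String word, int count) throws Exception {
        PreparedStatement query = connection.prepareStatement("SELECT count FROM step WHERE word = ?");
        query.setString(1, word);
        ResultSet rs = query.executeQuery();
        if (rs.next()) {
            // Word already stored: add the previously stored count before writing it back
            PreparedStatement update = connection.prepareStatement("UPDATE step SET count = ? WHERE word = ?");
            update.setInt(1, rs.getInt("count") + count);
            update.setString(2, word);
            update.executeUpdate();
        } else {
            PreparedStatement insert = connection.prepareStatement("INSERT INTO step(word, count) VALUES(?, ?)");
            insert.setString(1, word);
            insert.setInt(2, count);
            insert.executeUpdate();
        }
    }
}

Inside foreachPartition you would call StepUpsert.upsert(connection, record._1, record._2) for each record and still close the connection once per partition, so the overall structure of the solution stays the same.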

Level 3: socketTextStream

    package com;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.spark.SparkConf;
    import org.apache.spark.api.java.Optional;
    import org.apache.spark.api.java.function.Function2;
    import org.apache.spark.streaming.Durations;
    import org.apache.spark.streaming.api.java.JavaDStream;
    import org.apache.spark.streaming.api.java.JavaPairDStream;
    import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
    import org.apache.spark.streaming.api.java.JavaStreamingContext;
    import scala.Tuple2;
    import java.sql.Connection;
    import java.sql.DriverManager;
    public class JSocketSpark {
        public static void main(String[] args) throws InterruptedException{
            SparkConf conf = new SparkConf().setAppName("socketSparkStreaming").setMaster("local[*]");
            conf.set("spark.streaming.stopGracefullyOnShutdown", "true");
            JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(2));
            /**********begin**********/
            // 1. Connect to the socket stream, host: localhost, port: 5566
            JavaReceiverInputDStream<String> lines = ssc.socketTextStream("localhost", 5566);
            // 2. Split each line on spaces and flatten
            JavaDStream<String> rdd1 = lines.flatMap(x -> Arrays.asList(x.split(" ")).iterator());
            // 3. Pair each word with an initial count of 1
            JavaPairDStream<String, Integer> rdd2 = rdd1.mapToPair(x -> new Tuple2<>(x, 1));
            // 4. Set the checkpoint directory (required by updateStateByKey)
            ssc.checkpoint("/root/check");
            // 5. Accumulate each batch's counts onto the totals of the previous batches (the exercise calls the result "reduced")
            JavaPairDStream<String, Integer> rdd3 = rdd2.updateStateByKey(new Function2<List<Integer>, Optional<Integer>, Optional<Integer>>() {
                // Accumulate the values of the same key (reduced both locally and at the reducer level)
                @Override
                public Optional<Integer> call(List<Integer> values, Optional<Integer> state)
                        throws Exception {
                    // The first parameter holds this batch's values for the key; the second holds the previously accumulated state
                    // On the first batch there is no state yet, so updatedValue stays 0; otherwise take the existing total
                    Integer updatedValue = 0;
                    if (state.isPresent()) {
                        updatedValue = state.get();
                    }
                    // Add this batch's values; over time the count for each key keeps accumulating
                    for (Integer value : values) {
                        updatedValue += value;
                    }
                    return Optional.of(updatedValue); // return the updated total
                }
            });
            // 6. Write the results to MySQL:
            //    if the word already exists, update its count;
            //    otherwise insert it
            rdd3.foreachRDD(rdd -> {
                rdd.foreachPartition(x -> {
                    Connection myconn = myconn();
                    while (x.hasNext()) {
                        Tuple2<String, Integer> record = x.next();
                        String sql = "insert into wordcount (word,wordcount) values('" + record._1 + "'," + record._2 + ") on DUPLICATE key update wordcount=" + record._2;
                        myconn.createStatement().execute(sql);
                    }
                    myconn.close();
                });
            });
            /********** End **********/
            ssc.start();
            ssc.awaitTermination();
        }
        public static Connection myconn() throws Exception {
            Class.forName("com.mysql.jdbc.Driver");
            Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/edu", "root", "123123");
            return conn;
        }
    }
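Note that insert ... on DUPLICATE key update only fires if the wordcount table has a primary or unique key on word, and because updateStateByKey already emits cumulative totals, simply overwriting wordcount with record._2 is correct. The exercise environment normally provides the table; if you need to create it yourself, a minimal sketch, assuming the edu database from myconn() and the column names used above, might be:

package com;

import java.sql.Connection;
import java.sql.Statement;

// Hypothetical one-off setup class; the EduCoder environment usually creates the table for you.
public class CreateWordcountTable {
    public static void main(String[] args) throws Exception {
        Connection conn = JSocketSpark.myconn(); // reuses the JDBC URL jdbc:mysql://localhost:3306/edu
        Statement stmt = conn.createStatement();
        // word is the primary key so that "insert ... on duplicate key update" can take effect
        stmt.execute("CREATE TABLE IF NOT EXISTS wordcount (word VARCHAR(100) PRIMARY KEY, wordcount INT)");
        stmt.close();
        conn.close();
    }
}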

Level 4: KafkaStreaming

package net.educoder;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import java.text.SimpleDateFormat;
import java.util.*;

public class Step2 {
    private static SparkConf conf;
    static {
        conf = new SparkConf().setMaster("local[*]").setAppName("Step2");
        conf.set("spark.streaming.stopGracefullyOnShutdown", "true");
    }

    public static void main(String[] args) throws InterruptedException {
        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put("bootstrap.servers", "127.0.0.1:9092");
        kafkaParams.put("key.deserializer", StringDeserializer.class);
        kafkaParams.put("value.deserializer", StringDeserializer.class);
        kafkaParams.put("group.id", "sparkStreaming");
        kafkaParams.put("enable.auto.commit", "false");
        TopicPartition topicPartition = new TopicPartition("test", 0);
        List<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
        HashMap<TopicPartition, Long> offsets = new HashMap<>();
        offsets.put(topicPartition, 0L);
        
        /********** Begin **********/
            // 1. Initialize the JavaStreamingContext and set the batch interval; Durations.seconds(1) --> one batch per second
            JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(1));
            // 2. Create the stream with KafkaUtils, subscribing to the topic via Assign; the topic list (topicPartitions), Kafka parameters (kafkaParams) and offsets (offsets) are already defined above
            JavaInputDStream<ConsumerRecord<String, String>> javaInputDStream = KafkaUtils.createDirectStream(
                    ssc,
                    LocationStrategies.PreferConsistent(),
                    ConsumerStrategies.Assign(topicPartitions, kafkaParams, offsets));
            JavaDStream<String> dStream = javaInputDStream.map(x -> x.value());
            /**
             * Data format:
             *      100.143.124.29,1509116285000,'GET www/1 HTTP/1.0',https://www.baidu.com/s?wd=反叛的鲁鲁修,404
             * From left to right: user IP, visit timestamp, start URL and related info (request method, start URL, HTTP version), target URL, status code.
             *
             * The raw records are delimited by commas (ASCII commas).
             *
             * Requirements:
             *      1. Convert the timestamp into the format yyyy-MM-dd HH:mm:ss.
             *      2. Extract the start URL from the data (split on spaces).
             *      3. Concatenate the result in the following format:
             * Ip:124.132.29.10,visitTime:2019-04-22 11:08:33,startUrl:www/2,targetUrl:https://search.yahoo.com/search?p=反叛的鲁鲁修,statusCode:200
             *      4. If the RDD is empty, call ssc.stop(false, false) and System.exit(0); otherwise save the results to MySQL by calling JdbcTools.saveData2(Iterator[String]).
             */
            // 3. Take the data from the Kafka stream and clean/transform it according to the requirements above
            SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
            JavaDStream<String> map = dStream.map(x -> {
                String[] split = x.split(",");
                String ip = split[0];
                String time = simpleDateFormat.format(new Date(Long.parseLong(split[1])));
                String startUrl = split[2].split(" ")[1];
                String targetUrl = split[3];
                String statusCode = split[4];
                return "Ip:" + ip + ",visitTime:" + time + ",startUrl:" + startUrl + ",targetUrl:" + targetUrl + ",statusCode:" + statusCode;
            });
            // 4. If the RDD is empty, call ssc.stop(false, false) and System.exit(0); otherwise save the results to MySQL via JdbcTools.saveData2(Iterator[String])
            map.foreachRDD(rdd -> {
                if (rdd.isEmpty()) {
                    ssc.stop(false, false);
                    System.exit(0);
                } else {
                    rdd.foreachPartition(partitionOfRecords -> {
                        JdbcTools.saveData2(partitionOfRecords);
                    });
                }
            });
            // 5. Start the streaming computation
            ssc.start();
            // 6. Wait for the computation to finish
            ssc.awaitTermination();
            /********** End **********/

    }
}
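On the platform the test topic is already populated; to reproduce the input locally you can push a sample line yourself. Here is a minimal sketch of such a producer, assuming a local broker at 127.0.0.1:9092 and partition 0 of topic test as configured above; the SampleLogProducer class name is just for illustration, and the record reuses the sample line from the data-format comment.

package net.educoder;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

// Hypothetical helper for local testing; the exercise environment normally feeds the topic itself.
public class SampleLogProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // One record in the comma-delimited format expected by Step2: ip,timestamp,'GET url HTTP/1.0',targetUrl,statusCode
            String line = "100.143.124.29,1509116285000,'GET www/1 HTTP/1.0',https://www.baidu.com/s?wd=反叛的鲁鲁修,404";
            producer.send(new ProducerRecord<>("test", 0, null, line));
        }
    }
}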
