Structured Streaming demo

The code below is a small Spark demo: it reads JSON records from a Kafka topic through the DStream-based Spark Streaming API, registers each micro-batch as a temporary view, and joins it with a MySQL table loaded over JDBC using Spark SQL. A sketch of the equivalent Structured Streaming pipeline follows the listing.
package com.unistack.tamboo.compute.process.impl;
 
import com.alibaba.fastjson.JSONArray;
import com.google.common.collect.Maps;
import com.unistack.tamboo.compute.process.StreamProcess;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
 
/**
 * Spark SQL processing of streaming data.
 *
 * @author XXXX
 */
public class SqlProcess implements StreamProcess {
    private static final Logger LOGGER = LoggerFactory.getLogger(SqlProcess.class);
 
    private Properties outputInfo;
    private String toTopic;
 
    /**
     * Expected configuration format:
     * {"datasources":[{"password":"welcome1","port":"3308","ip":"192.168.1.192","dbName":"test","dbType":"MYSQL","dataSourceName":"191_test","username":"root","tableName":"t1"},
     *                 {"password":"welcome1","port":"3308","ip":"192.168.1.191","dbName":"test","dbType":"MYSQL","dataSourceName":"191_test","username":"root","tableName":"t1"}],
     *  "sql": "select * from ....",
     *  "windowLen": "time range, a multiple of 2 seconds",
     *  "windowSlide": "slide interval, a multiple of 2"}
     */
 
    public SqlProcess(Properties outputInfo, String toTopic) {
        this.outputInfo = outputInfo;
        this.toTopic = toTopic;
    }
 
 
    @Override
    public void logic(JavaRDD<ConsumerRecord<String, String>> rdd) {
        rdd.foreachPartition(itr -> {
            while (itr.hasNext()) {
                String record = itr.next().value();
                // TODO: per-record processing is not implemented in this demo.
            }
        });
    }
 
 
    public static void main(String[] args) throws InterruptedException {
        // Make sure the MySQL JDBC driver can be loaded before Spark opens the connection.
        try {
            Class.forName("com.mysql.jdbc.Driver");
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        }

        SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount");
        // 1-second micro-batches; the SparkSession reuses the SparkContext created by the streaming context.
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(1));
        SparkSession spark = SparkSession.builder().appName("test_kane").getOrCreate();
 
 
        // JDBC options for the static MySQL lookup table, registered as temp view "t2".
        Map<String, String> map = Maps.newHashMap();
//        map.put("url", "jdbc:mysql://x.x.x.x:3309/test?user=root&password=welcome1&characterEncoding=UTF8");
        map.put("url", "jdbc:mysql://x.x.x.x:3309/test?characterEncoding=UTF8");
        map.put("user", "root");
        map.put("password", "welcome1");
        map.put("dbtable", "t2");
        Dataset<Row> hiveJob = spark.read().format("jdbc").options(map).load();
        hiveJob.createOrReplaceTempView("t2");
 
        // JAAS config for the SASL/PLAIN-secured Kafka cluster.
        System.setProperty("java.security.auth.login.config", "/Users/frank/Desktop/shell/lyh.conf");
        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put("bootstrap.servers", "x.x.x.x:9999");
        kafkaParams.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaParams.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Fresh group id on every run so the job always starts from the earliest offsets.
        kafkaParams.put("group.id", String.valueOf(System.currentTimeMillis()));
        kafkaParams.put("auto.offset.reset", "earliest");
        kafkaParams.put("enable.auto.commit", true);
        kafkaParams.put("sasl.mechanism", "PLAIN");
        kafkaParams.put("security.protocol", "SASL_PLAINTEXT");
 
 
        Collection<String> topics = Arrays.asList("xxTopic");
        JavaInputDStream<ConsumerRecord<String, String>> stream = KafkaUtils.createDirectStream(jssc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams));
 
        // For every micro-batch: parse the JSON records into a DataFrame, expose it as the
        // temporary view "streamData" and join it with the static MySQL table "t2".
        stream.map(r -> r.value())
              .foreachRDD((JavaRDD<String> rdd) -> {
                    if (rdd.count() > 0) {
                        Dataset<Row> df = spark.read().json(spark.createDataset(rdd.rdd(), Encoders.STRING()));
                        df.createOrReplaceTempView("streamData");
                        df.cache();

                        try {
                            Dataset<Row> aggregators = spark.sql("select a.*, b.* from streamData a join t2 b on a.id = b.id");
                            String[] colsName = aggregators.columns();
                            Iterator<Row> itr = aggregators.toLocalIterator();
                            while (itr.hasNext()) {
                                Row row = itr.next();
                                for (int i = 0; i < colsName.length; i++) {
                                    String cn = colsName[i];
                                    Object as = row.getAs(cn);
                                    System.out.print(cn + "=" + as + ",   ");
                                }
                                System.out.println();
                            }
                        } catch (Exception e) {
                            LOGGER.error("failed to run the SQL over the current micro-batch", e);
                        }
                    }
              });
 
        jssc.start();
        jssc.awaitTermination();
    }
}
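
Since the title mentions Structured Streaming while the demo above uses the DStream API, here is a minimal sketch of the same stream-static join written against the Structured Streaming API. The broker address, topic name and JDBC options are the same placeholders as above; the assumption that the JSON payload exposes an "id" field (extracted with get_json_object instead of a full schema) is mine, and the spark-sql-kafka-0-10 package plus the SASL options would still have to be supplied. This is an untested sketch, not part of the original code.

package com.unistack.tamboo.compute.process.impl;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.streaming.StreamingQuery;

/**
 * Structured Streaming variant of the demo above: Kafka source, JDBC lookup table,
 * stream-static join, console sink. All connection details are placeholders.
 */
public class StructuredSqlDemo {

    public static void main(String[] args) throws Exception {
        SparkSession spark = SparkSession.builder()
                .master("local[2]")
                .appName("structured_sql_demo")
                .getOrCreate();

        // Static lookup table, loaded once over JDBC (same "t2" table as in the DStream demo).
        Dataset<Row> t2 = spark.read()
                .format("jdbc")
                .option("url", "jdbc:mysql://x.x.x.x:3309/test?characterEncoding=UTF8")
                .option("user", "root")
                .option("password", "welcome1")
                .option("dbtable", "t2")
                .load();

        // Streaming source: the Kafka value column carries the JSON payload.
        // Requires the spark-sql-kafka-0-10 package on the classpath.
        Dataset<Row> raw = spark.readStream()
                .format("kafka")
                .option("kafka.bootstrap.servers", "x.x.x.x:9999")
                .option("subscribe", "xxTopic")
                .option("startingOffsets", "earliest")
                .load()
                .selectExpr("CAST(value AS STRING) AS json");

        // Assumed: the payload has an "id" field; a real job would use from_json with an explicit schema.
        Dataset<Row> parsed = raw.selectExpr("get_json_object(json, '$.id') AS id", "json");

        // Stream-static join, equivalent to "select ... from streamData a join t2 b on a.id = b.id".
        Dataset<Row> joined = parsed.join(t2, "id");

        StreamingQuery query = joined.writeStream()
                .format("console")
                .outputMode("append")
                .start();
        query.awaitTermination();
    }
}

Unlike the DStream version, the join is declared once and Spark re-evaluates it for every micro-batch, so there is no need to re-register a temporary view per batch.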