Spark Streaming + Redis: real-time daily registration statistics

The workflow for counting newly registered users in real time with Spark Streaming is: raw logs are consumed from Kafka, filtered and parsed into startup records with Spark SQL, aggregated per device, and the results are written to Redis. The code is as follows:

1. Add Maven dependencies

<!-- Hive dependencies -->
<dependency>
	<groupId>mysql</groupId>
	<artifactId>mysql-connector-java</artifactId>
	<version>5.1.17</version>
</dependency>
<dependency>
	<groupId>org.apache.hive</groupId>
	<artifactId>hive-exec</artifactId>
	<version>2.1.0</version>
</dependency>

<!-- Spark SQL dependencies -->
<dependency>
	<groupId>org.apache.spark</groupId>
	<artifactId>spark-hive_2.11</artifactId>
	<version>2.1.0</version>
</dependency>
<dependency>
	<groupId>org.apache.spark</groupId>
	<artifactId>spark-sql_2.11</artifactId>
	<version>2.1.0</version>
</dependency>

<!-- Spark Streaming dependencies -->
<dependency>
	<groupId>org.apache.spark</groupId>
	<artifactId>spark-streaming_2.11</artifactId>
	<version>2.1.0</version>
</dependency>
<dependency>
	<groupId>org.apache.spark</groupId>
	<artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
	<version>2.1.0</version>
</dependency>

<!-- Redis dependency -->
<dependency>
	<groupId>redis.clients</groupId>
	<artifactId>jedis</artifactId>
	<version>2.9.0</version>
</dependency>

2. Start the Spark Streaming computation

SparkConf conf = new SparkConf();
conf.setAppName("kafka");
conf.setMaster("local[3]");

// create the SparkSession first (Hive support is needed for the UDF used in step 3)
final SparkSession spark = SparkSession.builder().config(conf).enableHiveSupport().getOrCreate();

// create the Java streaming context with a 2-second batch interval
JavaStreamingContext ssc = new JavaStreamingContext(
        new JavaSparkContext(spark.sparkContext()), Durations.seconds(2));

// Kafka consumer parameters
Map<String, Object> kafkaParams = new HashMap<String, Object>();
kafkaParams.put("bootstrap.servers", "localhost:9092");
kafkaParams.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
kafkaParams.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
kafkaParams.put("auto.offset.reset", "latest");
kafkaParams.put("group.id", "raw_logs");
kafkaParams.put("enable.auto.commit", "true");

// location strategy: controls on which executors the Kafka consumers are scheduled
LocationStrategy ls = LocationStrategies.PreferConsistent();

// consumer strategy: controls which topic, partition and offset to consume
List<TopicPartition> tps = new ArrayList<TopicPartition>();
tps.add(new TopicPartition("raw_log_handleTopic", 0));
ConsumerStrategy<String, String> cs = ConsumerStrategies.Assign(tps, kafkaParams);

// Kafka message stream
JavaDStream<ConsumerRecord<String, String>> ds1 = KafkaUtils.createDirectStream(ssc, ls, cs);

// steps 3 and 4 below plug in here, before ssc.start():
// 3. filter the raw logs and extract startup records
// 4. query the aggregated data and write it to Redis

ssc.start();
ssc.awaitTermination();
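
For local testing, a small producer can push '#'-delimited lines into the raw_log_handleTopic topic before the streaming job is started. The sketch below is illustrative only: the field order follows the parsing code in step 3, while the content of the last (log) field, which is later decoded by the forkstartuplogs function, is an assumed placeholder.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class TestLogProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);

        long now = System.currentTimeMillis();
        // field order matches the split in step 3:
        // servertimems # servertimestr # clientip # clienttimems # status # log
        // the JSON in the last field is a made-up placeholder for the real startup-log payload
        String value = now + "#2018-06-01 10:00:00#192.168.1.10#" + now + "#200#{\"appId\":\"sdk34734\"}";
        producer.send(new ProducerRecord<String, String>("raw_log_handleTopic", value));

        producer.flush();
        producer.close();
    }
}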

3. Filter the raw logs and extract startup records

// map each Kafka record to a Row by splitting the '#'-delimited log line
JavaDStream<Row> ds2 = ds1.map(new Function<ConsumerRecord<String, String>, Row>() {
    public Row call(ConsumerRecord<String, String> v1) throws Exception {
        String topic = v1.topic();
        int par = v1.partition();
        long offset = v1.offset();
        String value = v1.value();

        String mesg = "topic= " + topic + ", partition= " + par + ", offset= " + offset + ", value= " + value;
        System.out.println("mesg===> " + mesg);

        String[] arr = value.split("#");
        return RowFactory.create(
                Float.parseFloat(arr[0]),
                arr[1],
                arr[2],
                Long.parseLong(arr[3]),
                Integer.parseInt(arr[4]),
                arr[5]);
    }
});
ds2.print();

ds2.foreachRDD(new VoidFunction<JavaRDD<Row>>() {
    public void call(JavaRDD<Row> rdd) throws Exception {
        SparkSession spark = SparkSession.builder()
                .config(rdd.context().getConf())
                .enableHiveSupport()
                .getOrCreate();

        // schema matching the Row fields produced in the map() above
        StructField[] fields = new StructField[6];
        fields[0] = new StructField("servertimems", DataTypes.FloatType, false, Metadata.empty());
        fields[1] = new StructField("servertimestr", DataTypes.StringType, false, Metadata.empty());
        fields[2] = new StructField("clientip", DataTypes.StringType, false, Metadata.empty());
        fields[3] = new StructField("clienttimems", DataTypes.LongType, false, Metadata.empty());
        fields[4] = new StructField("status", DataTypes.IntegerType, false, Metadata.empty());
        fields[5] = new StructField("log", DataTypes.StringType, false, Metadata.empty());
        StructType type = new StructType(fields);

        // build a DataFrame for this batch, filter out invalid records,
        // and extract the startup-log fields with the forkstartuplogs function
        Dataset<Row> df1 = spark.createDataFrame(rdd, type);
        df1.createOrReplaceTempView("_temp");
        Dataset<Row> df2 = spark.sql("select forkstartuplogs(servertimestr, clienttimems, clientip, log) from _temp");
        df2.createOrReplaceTempView("_temp2");
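
Note that the query above relies on a function named forkstartuplogs already being registered in the Hive-enabled session; it turns the raw log column into the startup fields (appid, appversion, brand, createdatms, and so on) used in step 4. If the function is packaged as a Hive UDTF, registering it could look roughly like the line below; the class name is hypothetical and must be replaced by the class that ships with the project's log-parsing jar.

// hypothetical class name for the startup-log UDTF
spark.sql("CREATE TEMPORARY FUNCTION forkstartuplogs AS 'com.example.udtf.ForkStartupLogsUDTF'");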

4. Query the table and write the results to Redis (this code continues inside the foreachRDD call from step 3)

        // aggregate min/max registration timestamps per device inside the SQL statement
        String aggSql = "select concat(appid,'#',appversion,'#',brand,'#',appplatform,'#',devicestyle,'#',ostype,'#',deviceid) key," +
                "min(createdatms) mn," +
                "max(createdatms) mx from _temp2 group by " +
                "concat(appid,'#',appversion,'#',brand,'#',appplatform,'#',devicestyle,'#',ostype,'#',deviceid)";

        spark.sql(aggSql).foreachPartition(new ForeachPartitionFunction<Row>() {
            public void call(Iterator<Row> t) throws Exception {
                // create a Redis client per partition
                Jedis redis = new Jedis("s101", 6379);
                redis.select(1);

                while (t.hasNext()) {
                    Row row = t.next();
                    String key = row.getAs("key");
                    long mn = row.getAs("mn");
                    long mx = row.getAs("mx");

                    // merge this batch's min/max with the values already stored in Redis
                    String oldvalue = redis.get(key);
                    if (oldvalue == null) {
                        redis.set(key, mn + "," + mx);
                    } else {
                        String[] arr = oldvalue.split(",");
                        long oldMin = Long.parseLong(arr[0]);
                        long oldMax = Long.parseLong(arr[1]);
                        redis.set(key, Math.min(mn, oldMin) + "," + Math.max(mx, oldMax));
                    }
                }
                redis.close();
            }
        });
    }
}); // closes the foreachRDD from step 3
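
With the per-device min/max timestamps accumulated in Redis, the daily registration figure from the title can be derived by scanning the stored keys. The following is a minimal offline sketch under an assumed convention: a device counts as newly registered on the calendar day of its stored minimum createdatms, and the ratio is reported as new devices over all devices seen so far.

import java.text.SimpleDateFormat;
import java.util.Date;

import redis.clients.jedis.Jedis;

public class DailyRegistrationStat {
    public static void main(String[] args) {
        Jedis redis = new Jedis("s101", 6379);
        redis.select(1);

        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd");
        String today = fmt.format(new Date());

        long total = 0;
        long newToday = 0;
        // keys("*") is acceptable for a small demo database; prefer SCAN on large key sets
        for (String key : redis.keys("*")) {
            String[] arr = redis.get(key).split(",");
            long mn = Long.parseLong(arr[0]);
            total++;
            if (today.equals(fmt.format(new Date(mn)))) {
                newToday++;
            }
        }
        System.out.println("new devices today: " + newToday
                + ", total devices: " + total
                + ", ratio: " + (total == 0 ? 0 : (double) newToday / total));
        redis.close();
    }
}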