Real-Time Ranking and Statistics with Spark Streaming Sliding Windows
1. Main flow
The driver builds the contexts, registers the UDFs, turns the Kafka stream into (key, 1) click pairs, and runs two sliding-window aggregations (2 hours and 2 days) whose outputs feed the ranking logic.
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.streaming.{Minutes, Seconds, StreamingContext}

def main(args: Array[String]) {
  val conf = new SparkConf().setAppName("StockerRealRank") // .setMaster("local[5]") for local testing
  val sc = new SparkContext(conf)
  val ssc = new StreamingContext(sc, Seconds(5)) // 5-second batch interval
  // Retain 2 days (60 * 48 minutes) of generated RDDs so the long window below can reach back that far
  ssc.remember(Minutes(60 * 48))
  val sqlContext = new HiveContext(sc)
  Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
  Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.ERROR)

  // 1. Register the UDFs
  val udf = UDFUtils()
  udf.registerUdf(sqlContext)

  // 2. Consume the Kafka stream as (key, 1) click pairs
  val kafkaService = KakfaService()
  val urlClickLogPairsDStream = kafkaService.kafkaDStreamForStocker(ssc)

  // 3. Cache reference data from Hive
  val cacheUtils = CacheUtils()
  cacheUtils.cacheStockInfoData(sqlContext)

  // 4. Windowed aggregation: sum clicks over a 2-hour window, sliding every 25 seconds
  val urlClickCountsDStream = urlClickLogPairsDStream.reduceByKeyAndWindow(
    (v1: Int, v2: Int) => v1 + v2,
    Minutes(60 * 2),
    Seconds(25))

  // A second window over the same pairs (effectively a second read of the Kafka data):
  // 2 days, sliding every 35 seconds
  val urlClickCountsDStreamByDay = urlClickLogPairsDStream.reduceByKeyAndWindow(
    (v1: Int, v2: Int) => v1 + v2,
    Minutes(60 * 48),
    Seconds(35))

  // 5. Business logic on each window
  urlClickCnt(urlClickCountsDStream, sqlContext)
  urlClickCntByDay(urlClickCountsDStreamByDay, sqlContext)

  // 6. Start the streaming job and block until it terminates
  ssc.start()
  ssc.awaitTermination()
}
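With windows this long (2 hours and 2 days) sliding every few tens of seconds, the plain reduceByKeyAndWindow above re-reduces every batch in the window on each slide. Spark Streaming also offers an incremental overload that takes an inverse reduce function, adding only the batches that enter the window and subtracting those that leave; it requires a checkpoint directory. A minimal sketch under those assumptions (the checkpoint path is a placeholder, not from the original):

  // Incremental variant of the 2-hour window (sketch, not part of the original code)
  ssc.checkpoint("/tmp/stocker-checkpoint") // placeholder path; required by the inverse-function overload
  val incrementalClickCounts = urlClickLogPairsDStream.reduceByKeyAndWindow(
    (v1: Int, v2: Int) => v1 + v2, // add counts from batches entering the window
    (v1: Int, v2: Int) => v1 - v2, // subtract counts from batches leaving the window
    Minutes(60 * 2),
    Seconds(25))

The trade-off is checkpointing overhead and keys lingering at count 0 (a further overload accepts a filter function to drop them), against not recomputing a 2-hour window every 25 seconds.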
2. Registering the UDFs (implemented with a Scala companion object)
import java.util.regex.Pattern

import org.apache.spark.sql.hive.HiveContext

/**
 * Registers string-handling UDFs on the HiveContext.
 * Note: "concat" and "regexp_extract" shadow Hive's built-in functions of the same names.
 */
class UDFUtils {
  def registerUdf(sqlContext: HiveContext) {
    sqlContext.udf.register("strLen", (str: String) => str.length())
    sqlContext.udf.register("concat", (str1: String, str2: String, str3: String) => str1 + str2 + str3)
    sqlContext.udf.register("concat4", (str1: String, str2: String, str3: String, str4: String) => str1 + str2 + str3 + str4)
    // Returns the last occurrence of the pattern in the input string
    sqlContext.udf.register("regexp_extract", (str: String, pattern: String) => {
      val matcher = Pattern.compile(pattern, Pattern.UNIX_LINES).matcher(str) // flag 1 == UNIX_LINES
      var res = ""
      while (matcher.find()) {
        res = matcher.group()
      }
      res
    })
    // Extracts the host part of a URL: strips the scheme, then the path, then the port
    sqlContext.udf.register("getHost", (url: String) => {
      var strURL = ""
      try {
        strURL = url.toString()
        if (strURL.contains("://") && (strURL.indexOf("://") < 6) && strURL.length() > (strURL.indexOf("://") + 4)) {
          strURL = strURL.substring(strURL.indexOf("://") + 3)
        }
        if (strURL.contains("/")) {
          strURL = strURL.substring(0, strURL.indexOf("/"))
        }
        if (strURL.contains(":")) {
          strURL = strURL.substring(0, strURL.indexOf(":"))
        }
      } catch {
        case e: Exception => println("registerUdf exception: " + e.getMessage)
      }
      strURL
    })
  }
}

object UDFUtils {
  // The companion's apply() lets callers write UDFUtils() instead of new UDFUtils()
  def apply() = new UDFUtils()
}
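Once registered, these functions can be called from Spark SQL like built-ins. A hypothetical usage example (the stock_click_log table and its url column are assumptions for illustration, not from the original):

  // Hypothetical query against an assumed table; only the UDF names come from the code above
  val hosts = sqlContext.sql(
    "SELECT getHost(url) AS host, strLen(url) AS url_len FROM stock_click_log LIMIT 10")
  hosts.show()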
3. Kafka data handling
import java.util.regex.Pattern

import kafka.serializer.StringDecoder
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka.KafkaUtils

/**
 * Consumes Kafka with the receiverless direct API and emits (key, 1) click pairs.
 */
class KakfaService {
  def kafkaDStream(ssc: StreamingContext): DStream[(String, Int)] = {
    val topics = Set("teststreaming")
    val brokers = "bdc46.hexun.com:9092,bdc53.hexun.com:9092,bdc54.hexun.com:9092"
    // "serializer.class" is a producer setting and is ignored by the consumer; harmless here
    val kafkaParams = Map[String, String](
      "metadata.broker.list" -> brokers,
      "serializer.class" -> "kafka.serializer.StringEncoder")
    // Create a direct stream. The original listing is cut off mid-call; the standard
    // Spark 1.x / Kafka 0.8 direct-stream call plus a minimal, assumed continuation complete it.
    val kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, topics)
    // Assumed continuation: count each raw log line once
    kafkaStream.map(_._2).map(line => (line, 1))
  }
}
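The main flow actually calls kafkaDStreamForStocker, which this excerpt does not show. Purely as a sketch of the shape such a method could take, assuming each message value is a log line whose URL embeds a six-digit stock code (the URL shape, regex, and method body are assumptions, not the original implementation):

  // Hypothetical sketch inside KakfaService, reusing the imports above; not the original method
  def kafkaDStreamForStocker(ssc: StreamingContext): DStream[(String, Int)] = {
    val topics = Set("teststreaming")
    val brokers = "bdc46.hexun.com:9092,bdc53.hexun.com:9092,bdc54.hexun.com:9092"
    val kafkaParams = Map[String, String]("metadata.broker.list" -> brokers)
    val stockCodePattern = Pattern.compile("/(\\d{6})\\.shtml") // assumed URL shape
    KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
      .map(_._2) // keep the message value (the log line)
      .flatMap { line =>
        val m = stockCodePattern.matcher(line)
        if (m.find()) Some((m.group(1), 1)) else None // emit (stockCode, 1) pairs for counting
      }
  }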