# Real-Time Website Log Analysis: Hot Items and PV/UV Statistics with Flink

The Scala job below consumes user-behavior logs from a Kafka topic and computes, per sliding window, the top N most-viewed items:

```scala
package com.ongbo.hotAnalysis

import java.sql.Timestamp
import java.util.Properties

import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.scala.function.WindowFunction
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.flink.util.Collector

import scala.collection.mutable.ListBuffer

/** Case class for the input records */
case class UserBehavior(userId: Long, itemId: Long, categoryId: Int,
                        behavior: String, timestamp: Long)

/** Case class for the windowed aggregation result */
case class ItemViewCount(itemId: Long, windowEnd: Long, count: Long)

object HotItems {
  def main(args: Array[String]): Unit = {
    // 1. Create the execution environment
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    // Use event time
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // 2. Read the data (Kafka source)
    val properties = new Properties()
    properties.setProperty("bootstrap.servers",
      "114.116.219.197:5008,114.116.220.98:5008,114.116.199.154:5008")
    properties.setProperty("group.id", "web-consumer-group")
    properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    properties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    properties.setProperty("auto.offset.reset", "latest")

    val dataStream = env
      .addSource(new FlinkKafkaConsumer[String]("weblog", new SimpleStringSchema(), properties))
      // Alternatively, read from a local file:
      // val dataStream = env.readTextFile(".../UserBehavior.csv")
      .map(data => {
        val dataArray = data.split(",")
        UserBehavior(dataArray(0).trim.toLong, dataArray(1).trim.toLong,
          dataArray(2).trim.toInt, dataArray(3).trim, dataArray(4).trim.toLong)
      })
      .assignAscendingTimestamps(_.timestamp * 1000L)

    // 3. Transform the data
    val processStream = dataStream
      // Keep only the "pv" (page view) events
      .filter(_.behavior.equals("pv"))
      // Key by item id
      .keyBy(_.itemId)
      // Sliding window: 1-hour size with a 5-minute slide in production,
      // shortened to 20 s / 10 s here for testing
      .timeWindow(Time.seconds(20), Time.seconds(10))
      // Pre-aggregate per window; since records may arrive out of order,
      // the aggregated results are then keyed by window end
      .aggregate(new CountAgg(), new WindowResult())
      .keyBy(_.windowEnd)
      .process(new TopNHotItems(10))

    // Sink: print the result
    processStream.print("processStream")

    env.execute("hot items job")
  }
}

/** Pre-aggregation function: counts elements incrementally */
class CountAgg() extends AggregateFunction[UserBehavior, Long, Long] {
  // Initial accumulator value
  override def createAccumulator(): Long = 0
  // Increment the count for every incoming element
  override def add(in: UserBehavior, acc: Long): Long = acc + 1
  override def getResult(acc: Long): Long = acc
  override def merge(acc: Long, acc1: Long): Long = acc + acc1
}

/** Window function: wraps the count into an ItemViewCount */
class WindowResult() extends WindowFunction[Long, ItemViewCount, Long, TimeWindow] {
  override def apply(key: Long, window: TimeWindow, input: Iterable[Long],
                     out: Collector[ItemViewCount]): Unit = {
    out.collect(ItemViewCount(key, window.getEnd, input.iterator.next()))
  }
}

/** Keyed process function: sorts all items of a window and emits the top N */
class TopNHotItems(topSize: Int) extends KeyedProcessFunction[Long, ItemViewCount, String] {
  private var itemState: ListState[ItemViewCount] = _

  override def open(parameters: Configuration): Unit = {
    itemState = getRuntimeContext.getListState(
      new ListStateDescriptor[ItemViewCount]("item-state", classOf[ItemViewCount]))
  }

  override def processElement(value: ItemViewCount,
                              ctx: KeyedProcessFunction[Long, ItemViewCount, String]#Context,
                              out: Collector[String]): Unit = {
    // Buffer every record in state
    itemState.add(value)
    // Register a timer that fires once the window is complete
    ctx.timerService().registerEventTimeTimer(value.windowEnd + 1)
  }

  // When the timer fires, sort all buffered counts and emit the result
  override def onTimer(timestamp: Long,
                       ctx: KeyedProcessFunction[Long, ItemViewCount, String]#OnTimerContext,
                       out: Collector[String]): Unit = {
    // Pull all records out of state into a ListBuffer
    val allItems: ListBuffer[ItemViewCount] = new ListBuffer()
    import scala.collection.JavaConversions._
    for (item <- itemState.get()) {
      allItems += item
    }
    // Clear the state to free memory
    itemState.clear()
    // Sort by count descending and take the top N
    val sortedItems = allItems.sortBy(_.count)(Ordering.Long.reverse).take(topSize)
    // Format the output
    val result = new StringBuilder()
    result.append("time: ").append(new Timestamp(timestamp - 1)).append("\n")
    for (i <- sortedItems.indices) {
      val currentItem = sortedItems(i)
      result.append("No").append(i + 1).append(":")
        .append(" itemId=").append(currentItem.itemId)
        .append(" views=").append(currentItem.count)
        .append("\n")
    }
    result.append("==========================")
    out.collect(result.toString())
  }
}
```
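For local testing, it helps to replay a log file into the `weblog` topic the job consumes. Below is a minimal producer sketch; the broker address and the `UserBehavior.csv` path are assumptions for illustration, not part of the original setup. Each line must match the format the job parses: `userId,itemId,categoryId,behavior,timestamp`.

```java
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Properties;

public class UserBehaviorProducer {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Assumed local broker; replace with the cluster from the job config
        props.setProperty("bootstrap.servers", "localhost:9092");
        props.setProperty("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Hypothetical path to a UserBehavior.csv export
            for (String line : Files.readAllLines(Paths.get("UserBehavior.csv"))) {
                // Each line: userId,itemId,categoryId,behavior,timestamp
                producer.send(new ProducerRecord<>("weblog", line));
            }
        }
    }
}
```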

### Answer 1

The following is a complete Java implementation that uses Flink to read Kafka data and compute UV and PV in real time:

```java
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor;
import org.apache.flink.streaming.api.functions.windowing.ProcessAllWindowFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;

import java.util.HashSet;
import java.util.Properties;
import java.util.Set;

public class UVAndPVCalculator {

    public static void main(String[] args) throws Exception {
        // Set up the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Use event time
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        // Kafka consumer configuration (missing from the original answer; adjust to your cluster)
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "localhost:9092");
        properties.setProperty("group.id", "uv-pv-group");

        // Read the stream from Kafka
        DataStream<Tuple2<String, Long>> dataStream = env
                .addSource(new FlinkKafkaConsumer<>("topic", new SimpleStringSchema(), properties))
                .flatMap(new MessageSplitter())
                .assignTimestampsAndWatermarks(new AscendingTimestampExtractor<Tuple2<String, Long>>() {
                    @Override
                    public long extractAscendingTimestamp(Tuple2<String, Long> element) {
                        return element.f1;
                    }
                });

        // Key by the message key and compute UV
        dataStream
                .keyBy(0)
                .process(new UVCounter())
                .print();

        // Group by time window and compute PV
        dataStream
                .timeWindowAll(Time.minutes(1))
                .process(new PVCounter())
                .print();

        // Execute the job
        env.execute("UV and PV Calculator");
    }

    // Custom flatMap function: splits each message into words
    public static class MessageSplitter implements FlatMapFunction<String, Tuple2<String, Long>> {
        @Override
        public void flatMap(String message, Collector<Tuple2<String, Long>> out) {
            String[] words = message.split(" ");
            for (String word : words) {
                out.collect(new Tuple2<>(word, System.currentTimeMillis()));
            }
        }
    }

    // Custom KeyedProcessFunction: computes UV
    public static class UVCounter extends KeyedProcessFunction<Tuple, Tuple2<String, Long>, Tuple2<String, Long>> {
        private Set<String> uniqueVisitors = new HashSet<>();

        @Override
        public void processElement(Tuple2<String, Long> value, Context ctx,
                                   Collector<Tuple2<String, Long>> out) {
            uniqueVisitors.add(value.f0);
            out.collect(new Tuple2<>("UV", (long) uniqueVisitors.size()));
        }
    }

    // Custom ProcessAllWindowFunction: computes PV
    public static class PVCounter
            extends ProcessAllWindowFunction<Tuple2<String, Long>, Tuple2<String, Long>, TimeWindow> {
        @Override
        public void process(Context context, Iterable<Tuple2<String, Long>> input,
                            Collector<Tuple2<String, Long>> out) {
            long pvCount = 0L;
            for (Tuple2<String, Long> element : input) {
                pvCount += 1;
            }
            out.collect(new Tuple2<>("PV", pvCount));
        }
    }
}
```

Note that this code assumes the Flink and Kafka dependencies are already on your classpath, and parameters such as the Kafka topic and other configuration must be adjusted for your environment. Also, this implementation is only an example: it counts each word as the UV unit and does not account for partitioning. In a real application, adapt it to your business needs.
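One caveat about `UVCounter` above: its `HashSet` lives on the JVM heap, so it is not checkpointed and grows without bound. A minimal sketch of a variant backed by Flink's managed keyed state, assuming the same `(key, timestamp)` tuples (an alternative, not part of the original answer):

```java
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

// Sketch: UV counter backed by checkpointed state instead of a heap HashSet
public class StatefulUVCounter
        extends KeyedProcessFunction<Tuple, Tuple2<String, Long>, Tuple2<String, Long>> {

    // MapState used as a set of visitor ids; ValueState holds the running count
    private transient MapState<String, Boolean> seen;
    private transient ValueState<Long> count;

    @Override
    public void open(Configuration parameters) {
        seen = getRuntimeContext().getMapState(
                new MapStateDescriptor<>("seen-visitors", String.class, Boolean.class));
        count = getRuntimeContext().getState(
                new ValueStateDescriptor<>("uv-count", Long.class));
    }

    @Override
    public void processElement(Tuple2<String, Long> value, Context ctx,
                               Collector<Tuple2<String, Long>> out) throws Exception {
        // Only count a visitor the first time it is seen for this key
        if (!seen.contains(value.f0)) {
            seen.put(value.f0, true);
            Long current = count.value();
            long updated = (current == null ? 0L : current) + 1L;
            count.update(updated);
            out.collect(Tuple2.of("UV", updated));
        }
    }
}
```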
### Answer 2

Below is a complete Java example that uses Flink to read Kafka data and compute UV and PV in real time:

```java
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.Properties;

public class KafkaUVAndPV {

    public static void main(String[] args) throws Exception {
        // Set up the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        // Configure the Kafka consumer
        Properties properties = new Properties();
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "test-group");

        // Add the Kafka source
        DataStream<String> stream = env.addSource(
                new FlinkKafkaConsumer<>("topic", new SimpleStringSchema(), properties));

        // Map the input records to UserBehavior objects
        // (UserBehavior is referenced but not defined in this answer)
        DataStream<UserBehavior> userBehaviorStream = stream.map(new MapFunction<String, UserBehavior>() {
            @Override
            public UserBehavior map(String value) throws Exception {
                String[] fields = value.split(",");
                long userId = Long.parseLong(fields[0]);
                long itemId = Long.parseLong(fields[1]);
                String behavior = fields[2];
                long timestamp = Long.parseLong(fields[3]);
                return new UserBehavior(userId, itemId, behavior, timestamp);
            }
        });

        // Extract timestamps and generate watermarks
        DataStream<UserBehavior> withTimestampsAndWatermarks = userBehaviorStream
                .assignTimestampsAndWatermarks(new UserBehaviorTimestampExtractor());

        // Compute UV over a one-hour event-time window
        // (the original answer called countWindow(Time.hours(1)), which does not compile:
        //  countWindow takes an element count, so an event-time window is used here)
        DataStream<Long> uvStream = withTimestampsAndWatermarks
                .filter(userBehavior -> userBehavior.getBehavior().equals("pv"))
                .map(userBehavior -> userBehavior.getUserId())
                .keyBy(userId -> userId)
                .window(TumblingEventTimeWindows.of(Time.hours(1)))
                .trigger(new UVWindowTrigger())          // user-defined, not shown
                .process(new UVWindowProcessFunction()); // user-defined, not shown

        // Compute PV over a one-minute event-time window
        DataStream<Long> pvStream = withTimestampsAndWatermarks
                .filter(userBehavior -> userBehavior.getBehavior().equals("pv"))
                .windowAll(TumblingEventTimeWindows.of(Time.minutes(1)))
                .trigger(new PVWindowTrigger())          // user-defined, not shown
                .process(new PVWindowProcessFunction()); // user-defined, not shown

        // Print the results
        uvStream.print("UV: ");
        pvStream.print("PV: ");

        // Execute the job
        env.execute("Kafka UV and PV");
    }
}
```

This code reads data from Kafka and computes UV and PV from user-behavior events. First it sets up the environment and configures the Kafka consumer; then it adds the Kafka source and maps the input into UserBehavior objects. Next it extracts timestamps and generates watermarks, filters out the "pv" events, keys them by user and applies a one-hour window to compute UV, and uses windowAll with a one-minute TumblingEventTimeWindows for PV. Finally it prints the results and executes the job. Note that UserBehavior, UserBehaviorTimestampExtractor, and the custom trigger/process functions are referenced but not defined in this answer; adjust the parameters and logic to your environment and requirements.
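`UserBehaviorTimestampExtractor` is referenced above but never defined. A plausible sketch, assuming the `timestamp` field is epoch milliseconds and allowing a few seconds of out-of-orderness (both assumptions), based on Flink's `BoundedOutOfOrdernessTimestampExtractor`:

```java
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.time.Time;

// Sketch: assigns event timestamps with a 5-second out-of-orderness bound (assumed)
public class UserBehaviorTimestampExtractor
        extends BoundedOutOfOrdernessTimestampExtractor<UserBehavior> {

    public UserBehaviorTimestampExtractor() {
        super(Time.seconds(5));
    }

    @Override
    public long extractTimestamp(UserBehavior element) {
        // Assumes getTimestamp() returns epoch milliseconds on the answer's UserBehavior POJO
        return element.getTimestamp();
    }
}
```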
### Answer 3

Below is a complete Java implementation that uses Flink to read Kafka data and compute UV and PV in real time. First, make sure Java, Flink, and Kafka are installed and configured correctly.

```java
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;

import java.util.Properties;

public class KafkaUVAndPV {

    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "localhost:9092");
        properties.setProperty("group.id", "flink-kafka-consumer");

        FlinkKafkaConsumer<String> consumer =
                new FlinkKafkaConsumer<>("your-kafka-topic", new SimpleStringSchema(), properties);

        DataStream<String> kafkaStream = env.addSource(consumer);

        // PV: emit a ("pv", 1) counter for every record, then group and sum
        DataStream<Tuple2<String, Integer>> pvStream = kafkaStream
                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
                        out.collect(new Tuple2<>("pv", 1));
                    }
                })
                .keyBy(0)
                .sum(1);

        // UV: emit a counter per distinct user, then group and sum
        DataStream<Tuple2<String, Integer>> uvStream = kafkaStream
                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
                        // Implement the UV extraction logic here:
                        // emit one element per previously unseen user id
                    }
                })
                .keyBy(0)
                .sum(1);

        pvStream.print();
        uvStream.print();

        env.execute("Kafka UV and PV");
    }
}
```

Note that "your-kafka-topic" must be replaced with the Kafka topic you want to read from, and the UV logic inside the flatMap will differ by business requirement; adapt it as needed. The code reads a stream from the Kafka topic, converts each record into a Tuple2 counter via flatMap, and then groups and sums the counters with keyBy and sum to produce PV and UV respectively. This is sample code only; modify it to match your actual data format and requirements.
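The UV flatMap above is left unimplemented. A minimal sketch, assuming each record is a comma-separated log line whose first field is the user id (an assumption about the data format, not stated in the answer):

```java
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

import java.util.HashSet;
import java.util.Set;

// Sketch: emits ("uv", 1) only the first time a user id is seen.
// The in-memory set is a simplification; production code should use Flink state.
public class DistinctUserFlatMap implements FlatMapFunction<String, Tuple2<String, Integer>> {

    private final Set<String> seenUsers = new HashSet<>();

    @Override
    public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
        // Assumed record format: userId,itemId,categoryId,behavior,timestamp
        String userId = value.split(",")[0];
        if (seenUsers.add(userId)) {
            out.collect(new Tuple2<>("uv", 1));
        }
    }
}
```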