Next-Generation Big Data Compute Engine: Flink from Beginner to Practice (18) - Project in Practice (4) - Writing Results to Elasticsearch

1 Elasticsearch Deployment

  • Elasticsearch is a NoSQL store; it will hold the aggregated results of this project.
    Download: https://www.elastic.co/cn/downloads/elasticsearch

  • Note: run Elasticsearch as a regular (non-root) user; it refuses to start as root.


  • Start it in the background (a minimal command sketch follows below).
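A minimal sketch of these deployment steps; the version number, the "es" user name and the installation path are assumptions, adjust them to your environment:

# as root: create a regular user and hand the installation over to it
useradd es
tar -zxvf elasticsearch-6.x.y.tar.gz -C /opt
chown -R es:es /opt/elasticsearch-6.x.y

# as the es user: start the node as a background daemon and check that it answers
su - es
cd /opt/elasticsearch-6.x.y
./bin/elasticsearch -d
curl http://master:9200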

2 Kibana Deployment


  • Start Kibana (a minimal command sketch follows below).
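A minimal sketch, assuming a Kibana 6.x tarball that matches the Elasticsearch version and an Elasticsearch node reachable at master:9200; file names and paths are placeholders:

tar -zxvf kibana-6.x.y-linux-x86_64.tar.gz -C /opt
cd /opt/kibana-6.x.y-linux-x86_64
vi config/kibana.yml        # set server.host: "0.0.0.0" and elasticsearch.url: "http://master:9200"
nohup ./bin/kibana &        # runs in the background; the UI listens on port 5601 by default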

3 Sinking Flink Data to Elasticsearch

https://ci.apache.org/projects/flink/flink-docs-release-1.6/dev/connectors/elasticsearch.html

<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-elasticsearch6_2.11</artifactId>
    <version>${flink.version}</version>
</dependency>

3.1 Creating the Index

curl -XPUT 'http://master:9200/cdn'
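To confirm the index was created, the indices can be listed (assuming the node runs on master:9200 as above):

curl 'http://master:9200/_cat/indices?v'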


curl -H "Content-Type: application/json" -XPOST 'http://master:9200/cdn/traffic/_mapping?pretty' -d '{
    "traffic": {
        "properties": {
            "domain":   {"type": "text"},
            "traffics": {"type": "long"},
            "time":     {"type": "date", "format": "yyyy-MM-dd HH:mm"}
        }
    }
}'
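The mapping can be read back to verify that it took effect:

curl -XGET 'http://master:9200/cdn/_mapping?pretty'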


4 The Flink Cleansing Job

<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-elasticsearch6_2.11</artifactId>
    <version>${flink.version}</version>
</dependency>

<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpclient</artifactId>
    <version>4.5.8</version>
</dependency>

package test.flink.scala.scalaproject

import java.text.SimpleDateFormat
import java.util.{Date, Properties}

import org.apache.flink.api.common.functions.RuntimeContext
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.java.tuple.Tuple
import org.apache.flink.api.scala.createTypeInformation
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.scala.function.WindowFunction
import org.apache.flink.streaming.api.watermark.Watermark
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.streaming.connectors.elasticsearch.{ElasticsearchSinkFunction, RequestIndexer}
import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011
import org.apache.flink.util.Collector
import org.apache.http.HttpHost
import org.elasticsearch.action.index.IndexRequest
import org.elasticsearch.client.Requests
import org.slf4j.LoggerFactory

import scala.collection.mutable.ArrayBuffer

object LogAnalysis {

    // In production it is recommended to log through a named logger like this
    val logger = LoggerFactory.getLogger("LogAnalysis")


    def main(args: Array[String]): Unit = {
        val env = StreamExecutionEnvironment.getExecutionEnvironment

        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)


        val topic = "tzbtest"

        val prop = new Properties()
        prop.setProperty("bootstrap.servers", "master:9092");
        prop.setProperty("group.id", "test-tzb-group")

        // 接收 kafka 的数据
        val consumer = new FlinkKafkaConsumer011[String](topic, new SimpleStringSchema(), prop)

        // 接收 kafka 的数据
        val data = env.addSource(consumer)

        val logData = data.map(x => {
            val splits = x.split("\t")
            val level = splits(2)
            val timeStr = splits(3)

            var time = 0L

            try {
                val sourceFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
                time = sourceFormat.parse(timeStr).getTime
            } catch {
                case e: Exception => {
                    logger.error(s"time parse error: $timeStr", e.getMessage)
                }
            }


            val domain = splits(5)
            val traffic = splits(6).toLong

            // emit a tuple of (level, time, domain, traffic)
            (level, time, domain, traffic)
        }).filter(_._2 != 0).filter(_._1 == "E")
          .map(x => {
              (x._2, x._3, x._4) // drop the level, keep (time, domain, traffic)
          })

        //logData.print().setParallelism(1)

        // assign event-time timestamps and watermarks
        val resData = logData.assignTimestampsAndWatermarks(new AssignerWithPeriodicWatermarks[(Long, String, Long)] {

            val maxOutOfOrderness = 10000L // 10 seconds

            var currentMaxTimestamp: Long = _ // highest timestamp seen so far

            override def getCurrentWatermark: Watermark = {
                // return the watermark as current highest timestamp minus the out-of-orderness bound
                new Watermark(currentMaxTimestamp - maxOutOfOrderness)
            }

            override def extractTimestamp(element: (Long, String, Long), previousElementTimestamp: Long): Long = {
                val timestamp = element._1
                currentMaxTimestamp = Math.max(timestamp, currentMaxTimestamp)
                timestamp
            }
        }).keyBy(1) // key by domain (field index 1 of the tuple)
          .window(TumblingEventTimeWindows.of(Time.seconds(60)))
          .apply(new WindowFunction[(Long, String, Long), (String, String, Long), Tuple, TimeWindow] {
              override def apply(key: Tuple, window: TimeWindow, input: Iterable[(Long, String, Long)], out: Collector[(String, String, Long)]): Unit = {
                  val domain = key.getField(0).toString

                  var sum = 0L

                  val times = ArrayBuffer[Long]()

                  val iterator = input.iterator
                  while (iterator.hasNext) {
                      val next = iterator.next()
                      sum += next._3 // accumulate the traffic

                      // TODO: the event times inside the window are available via next._1
                      times.append(next._1)

                  }

                  /*
                  field 1: the one-minute bucket, e.g. 2020-10-01 10:10
                  field 2: domain
                  field 3: sum of traffic in the window
                   */
                  val time = new SimpleDateFormat("yyyy-MM-dd HH:mm").format(new Date(times.max))
                  out.collect((time, domain, sum))
              }
          }) //.print().setParallelism(1)


        val httpHosts = new java.util.ArrayList[HttpHost]
        httpHosts.add(new HttpHost("192.168.10.100", 9200, "http"))

        val esSinkBuilder = new ElasticsearchSink.Builder[(String, String, Long)](
            httpHosts,
            new ElasticsearchSinkFunction[(String, String, Long)] {
                def createIndexRequest(element: (String, String, Long)): IndexRequest = {
                    val json = new java.util.HashMap[String, Any]
                    json.put("time", element._1)
                    json.put("domain", element._2)
                    json.put("traffics", element._3)

                    val id = element._1 + "-" + element._2

                    Requests.indexRequest()
                      .index("cdn")
                      .`type`("traffic")
                      .id(id)
                      .source(json)
                }

                override def process(t: (String, String, Long), runtimeContext: RuntimeContext, requestIndexer: RequestIndexer): Unit = {
                    requestIndexer.add(createIndexRequest(t))
                }
            }
        )

        // configuration for the bulk requests; this instructs the sink to emit after every element, otherwise they would be buffered
        esSinkBuilder.setBulkFlushMaxActions(1)

        // finally, build and add the sink to the job's pipeline
        resData.addSink(esSinkBuilder.build) //.setParallelism(5)


        env.execute("LogAnalysis")

    }

}
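To run the job, a typical flow is to package it with Maven and submit it to the Flink cluster; the jar name below is hypothetical and depends on your pom configuration:

mvn clean package -DskipTests
flink run -c test.flink.scala.scalaproject.LogAnalysis target/flink-project-1.0-SNAPSHOT.jar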



  • Start the producer that feeds log lines into the Kafka topic
  • Start the consumer, i.e. the Flink job above; a sketch of both steps follows below
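A minimal sketch for feeding test data and checking the result, assuming Kafka runs on master:9092 and the log lines follow the tab-separated format the job expects (field 3 = level, field 4 = time, field 6 = domain, field 7 = traffic); the sample values are made up:

kafka-console-producer.sh --broker-list master:9092 --topic tzbtest
# example input line (fields must be separated by real tab characters):
# imooc	CN	E	2020-10-01 10:10:10	-	www.example.com	1024

# once a window fires, the aggregated documents appear in the index:
curl 'http://master:9200/cdn/traffic/_search?pretty'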


  • Adjust the time zone if the timestamps in Kibana look shifted: the "time" field carries no zone information, so Elasticsearch interprets it as UTC, and Kibana's dateFormat:tz advanced setting controls how it is displayed.
