Flink writing to Elasticsearch (with X-Pack authentication)

package kafka2flink2es

import java.text.SimpleDateFormat
import java.util.{ArrayList, HashMap, Properties}

import caseclass.CaseClass.oLogRunning
import com.alibaba.fastjson.{JSON, JSONArray, JSONObject}
import org.apache.flink.api.common.functions.{FlatMapFunction, RuntimeContext}
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.elasticsearch.{ElasticsearchSinkFunction, RequestIndexer}
import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.flink.util.Collector
import org.apache.http.HttpHost
import org.elasticsearch.client.Requests
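// oLogRunning is defined in caseclass.CaseClass, which the original post does not show.
// A minimal sketch consistent with how its fields are used below (an assumption, not the original definition):
// case class oLogRunning(timestamp: String, serviceName: String, version: String,
//                        nodeIp: String, logLevel: String, traceId: String,
//                        message: String, path: String)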

object OLogRunning {
  def main(args: Array[String]): Unit = {
    // Prepare the Kafka source environment
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)

    val properties = new Properties()
    properties.setProperty("bootstrap.servers", "192.168.193.150:9092")
    properties.setProperty("group.id", "consumer-group")

    val sourceData: DataStream[String] = env
      .addSource(new FlinkKafkaConsumer[String]("o-log-running", new SimpleStringSchema(), properties))


    // Parse each record and emit one case class instance per log entry
    val data: DataStream[oLogRunning] = sourceData.flatMap(new FlatMapFunction[String, oLogRunning] {
      override def flatMap(in: String, out: Collector[oLogRunning]): Unit = {
        // Each Kafka record is expected to be a JSON array of log objects
        val jsonArray: JSONArray = JSON.parseArray(in)
        for (i <- 0 until jsonArray.size()) {
          val jsonObject: JSONObject = jsonArray.getJSONObject(i)
          val timestamp = jsonObject.getString("timestamp")
          val serviceName = jsonObject.getString("serviceName")
          val version = jsonObject.getString("version")
          val nodeIp = jsonObject.getString("nodeIp")
          val logLevel = jsonObject.getString("logLevel")
          val traceId = jsonObject.getString("traceId")
          val message = jsonObject.getString("message")
          val path = jsonObject.getString("path")
          val operation = oLogRunning(timestamp, serviceName, version, nodeIp,
            logLevel, traceId, message, path)
          out.collect(operation)
        }
      }
    })

    data.print() // print parsed records for debugging


    // Define the Elasticsearch hosts
    val httpHosts = new ArrayList[HttpHost]
    httpHosts.add(new HttpHost("192.168.193.150", 9200))


    // Custom ElasticsearchSinkFunction that maps each record to an index request
    val myEsSinkFunc = new ElasticsearchSinkFunction[oLogRunning] {
      override def process(t: oLogRunning, runtimeContext: RuntimeContext, requestIndexer: RequestIndexer): Unit = {
        val datamap = new HashMap[String, String]()
        val df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
        datamap.put("logType","07")
        datamap.put("status","01")
        datamap.put("time",df.format(System.currentTimeMillis()))
        datamap.put("timestamp",t.timestamp)
        datamap.put("serviceName",t.serviceName)
        datamap.put("version",t.version)
        datamap.put("nodeIp",t.nodeIp)
        datamap.put("logLevel",t.logLevel)
        datamap.put("traceId",t.traceId)
        datamap.put("message",t.message.toString)
        datamap.put("path",t.path)

        // Create the index request that will be sent to Elasticsearch
        val indexRequest = Requests.indexRequest()
          .index("o-log-running")
          .`type`("readingdata")
          .source(datamap)


        // Hand the request to the indexer; the sink batches and sends it
        requestIndexer.add(indexRequest)
      }
    }

    val esSinkBuilder = new ElasticsearchSink.Builder[oLogRunning](httpHosts, myEsSinkFunc)

    // Flush after every single record (handy for testing; use a larger batch size in production)
    esSinkBuilder.setBulkFlushMaxActions(1)

    data.addSink(esSinkBuilder.build())

    env.execute()
  }
}
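
For reference, the flatMap above expects each Kafka record to be a JSON array of log objects. A hypothetical example message (all field values invented for illustration):

[{"timestamp":"2021-06-01 12:00:00","serviceName":"order-service","version":"1.0.0",
  "nodeIp":"192.168.193.151","logLevel":"INFO","traceId":"3f2a1b8c",
  "message":"request handled","path":"/api/orders"}]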

The code above does not include Elasticsearch username/password authentication.

Flink writing to Elasticsearch (with X-Pack authentication)
Key code
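A minimal sketch of the usual way to do this with the elasticsearch6 connector: set a RestClientFactory on the sink builder so the underlying REST client attaches basic-auth credentials. The username "elastic" and the password value here are placeholder assumptions; substitute your own X-Pack credentials.

// Extra imports for authentication
import org.apache.flink.streaming.connectors.elasticsearch6.RestClientFactory
import org.apache.http.auth.{AuthScope, UsernamePasswordCredentials}
import org.apache.http.impl.client.BasicCredentialsProvider
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder
import org.elasticsearch.client.RestClientBuilder

// Register basic-auth credentials on the REST client used by the sink.
// "elastic" / "your-password" are placeholders for your X-Pack credentials.
esSinkBuilder.setRestClientFactory(new RestClientFactory {
  override def configureRestClientBuilder(restClientBuilder: RestClientBuilder): Unit = {
    val credentialsProvider = new BasicCredentialsProvider
    credentialsProvider.setCredentials(
      AuthScope.ANY,
      new UsernamePasswordCredentials("elastic", "your-password"))
    restClientBuilder.setHttpClientConfigCallback(
      new RestClientBuilder.HttpClientConfigCallback {
        override def customizeHttpClient(httpClientBuilder: HttpAsyncClientBuilder): HttpAsyncClientBuilder =
          httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider)
      })
  }
})

With this factory in place, the rest of the job is identical to the unauthenticated version above.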
