Flink 1.11: consuming Kafka and streaming writes into Hive

There is still relatively little material online about using Flink SQL 1.11 to consume Kafka and write to Hive in streaming fashion, and most of what exists creates a Kafka table directly in Flink and writes it into Hive, where it is unclear how well deeply nested JSON is handled. Here I take a different route: use the DataStream API to consume Kafka, flatten the nested JSON first, convert the result into a temporary table, and finally stream it into Hive. The code is below:
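As a quick illustration of what "flattening" means here, a minimal Jackson sketch (the JSON document and field names below are hypothetical, purely for illustration): nested values are pulled up into flat columns before the record ever reaches the Table API.

import com.fasterxml.jackson.databind.ObjectMapper

val mapper = new ObjectMapper
// Hypothetical nested nginx-style record
val root = mapper.readTree("""{"http":{"request":{"path":"/api/v1/ping"}},"status":200}""")
// JSON Pointer walks the nesting in one call; the default values keep it null-safe
val path   = root.at("/http/request/path").asText("")   // "/api/v1/ping"
val status = root.at("/status").asInt(999)               // 200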

package com.xxx.xxx

import java.sql.Timestamp
import java.time.Duration
import java.util.{Date, Properties}

import com.fasterxml.jackson.databind.node.JsonNodeType
import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper}
import org.apache.flink.configuration.RestOptions
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend
import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.java.tuple.Tuple
import org.apache.flink.api.scala._
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup
import org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions
import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks
import org.apache.flink.streaming.api.functions.timestamps.{AscendingTimestampExtractor, BoundedOutOfOrdernessTimestampExtractor}
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.scala.function.ProcessWindowFunction
import org.apache.flink.streaming.api.watermark.Watermark
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.streaming.api.{CheckpointingMode, TimeCharacteristic}
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.flink.table.api.EnvironmentSettings

import scala.collection.JavaConversions._
import scala.util.{Failure, Success, Try}
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.catalog.hive.HiveCatalog


object NginxLog2HivePartitionTime {

  def main(args: Array[String]): Unit = {

    val ifCreateHiveTable = args(0)
    val parallelism = 3
    val kafkaBrokers = "x.x.x.x:9092"
    val jobName = "xxx"
    val topicNames = List("xxx")
    val groupName = "xxx"
    val properties = new Properties()
    properties.setProperty("bootstrap.servers", kafkaBrokers)
    properties.setProperty("group.id", groupName)
    // Build the streaming execution environment
    val conf: Configuration = new Configuration()
    val streamEnv = StreamExecutionEnvironment.getExecutionEnvironment
    streamEnv.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
    val rocksDBStateBackend = new RocksDBStateBackend("hdfs:///user/hdfs/flink1_11backend", true)
    streamEnv.setStateBackend(rocksDBStateBackend)
    streamEnv.enableCheckpointing(1000)
    streamEnv.setParallelism(parallelism)
    // Build the table environment
    val blinkTableEnvSettings = EnvironmentSettings.newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()

    val tableEnv = StreamTableEnvironment.create(streamEnv, blinkTableEnvSettings)
    tableEnv.getConfig.getConfiguration.set(ExecutionCheckpointingOptions.CHECKPOINTING_MODE, CheckpointingMode.EXACTLY_ONCE)
    tableEnv.getConfig.getConfiguration.set(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofSeconds(60))
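    // Checkpointing is required for this pipeline: the Hive streaming sink only finalizes its files
    // (and subsequently commits the partitions) when a checkpoint completes.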
    tableEnv.getConfig.setSqlDialect(SqlDialect.HIVE)


    // Build the Hive catalog
    // Catalog name
    val name = "myhive"
    // Default database name
    val defaultDatabase = "default"
    // Directory containing hive-site.xml
    val hiveConfDir = "/etc/hive/conf.cloudera.hive"
    // Hive version
    val version = "2.1.1"
    val hiveCatalog = new HiveCatalog(name, defaultDatabase, hiveConfDir, version)
    tableEnv.registerCatalog(name, hiveCatalog)
    tableEnv.useCatalog(name)
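    // With the HiveCatalog registered and selected, the existing Hive databases and tables (resolved
    // via hive-site.xml in hiveConfDir) become visible to Flink SQL, so the job can write into them directly.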
    val myConsumer = new FlinkKafkaConsumer[String](topicNames, new SimpleStringSchema(), properties)
    // Uncomment to start consuming from the latest Kafka offsets instead of the committed group offsets
    // myConsumer.setStartFromLatest()

    val mapper = new ObjectMapper

    val srcStream = streamEnv.addSource(myConsumer)
      .filter(_.nonEmpty)
      .map(line => {
        val rootNode = mapper.readTree(line)
        val keys = rootNode.fieldNames().toList
        val eventTime = DateUtil.dateFormatUTC2Local(rootNode.get("@timestamp").asText())
        val eventTimeStamp = DateUtil.dateTimeToTimestampJdk8(eventTime)
        // Convert the epoch millis (Long) into a java.sql.Timestamp
        // Flink SQL handles timestamps internally in UTC, 8 hours behind the local event time, so the
        // partition would also be 8 hours off (an 18:00 event would end up in the 10:00 Hive partition).
        // Therefore add 8 hours to the event time and use that as the partition time, which is what we want.
        val eventTimeStamp1 = new Timestamp(eventTimeStamp + 8 * 60 * 60 * 1000L)
        val eventLocalTime = DateUtil.timeStampTodateTimeJdk8(eventTimeStamp, isHyphen = true)
        val systemLocalTime = DateUtil.timeStampTodateTimeJdk8(System.currentTimeMillis, isHyphen = true)
        val path = ""
        val upstream_response_time = if (keys.contains("upstream_response_time")) rootNode.get("upstream_response_time").asText() else ""
        val status = if (keys.contains("status")) rootNode.get("status").asInt() else 999
        val body_bytes_sent = if (keys.contains("body_bytes_sent")) rootNode.get("body_bytes_sent").asInt() else 999

        (systemLocalTime, eventLocalTime, eventTimeStamp1, path, upstream_response_time, status, body_bytes_sent)
      })
      .assignAscendingTimestamps(row => {
        row._3.getTime
      })
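
    // assignAscendingTimestamps above supplies the event-time watermarks; 'eventTimeStamp1.rowtime()
    // below exposes that timestamp as the table's event-time attribute, which the Hive sink's
    // partition-time commit trigger compares the watermark against.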

    val nginxTmpTable = tableEnv.fromDataStream(srcStream, 'systemLocalTime, 'eventLocalTime, 'eventTimeStamp1.rowtime(), 'path, 'upstream_response_time, 'status, 'body_bytes_sent)

    tableEnv.createTemporaryView("nginxTmpTable", nginxTmpTable)
    //    tableEnv.sqlQuery("select systemLocalTime,eventLocalTime, eventTimeStamp1 from nginxTmpTable").toAppendStream[Row].print("www")


    val createSql =
      """create table xxx.ods_nginx_log_partition_time(
        |process_time STRING,
        |event_time STRING,
        |path STRING,
        |upstream_response_time STRING,
        |status INT,
        |body_bytes_sent BIGINT
        |) PARTITIONED BY (
        |  pday STRING,
        |  phour STRING,
        |  pminute STRING
        |) STORED AS PARQUET
        |TBLPROPERTIES (
        |  'sink.partition-commit.trigger' = 'partition-time',
        |  'sink.partition-commit.delay' = '5s',
        |  'sink.partition-commit.policy.kind' = 'metastore,success-file',
        |  'format' = 'parquet',
        |  'parquet.compression' = 'SNAPPY',
        |  'partition.time-extractor.timestamp-pattern' = '$pday $phour:$pminute:00'
        |)""".stripMargin

    if (ifCreateHiveTable.toInt == 1) {
      tableEnv.executeSql("drop table if exists xxx.ods_nginx_log_partition_time")
      tableEnv.executeSql(createSql)
    }

    tableEnv.executeSql(
      """
        |insert into xxx.ods_nginx_log_partition_time
        |select
        |systemLocalTime,
        |eventLocalTime,
        |path,
        |upstream_response_time,
        |status,
        |body_bytes_sent,
        |DATE_FORMAT(eventTimeStamp1,'yyyy-MM-dd'),
        |DATE_FORMAT(eventTimeStamp1,'HH'),
        |DATE_FORMAT(eventTimeStamp1,'mm')
        |from nginxTmpTable
        |""".stripMargin
    )

    streamEnv.execute()

  }

}
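
The job references a DateUtil helper that the post does not show. Below is a minimal sketch of what those three methods could look like, assuming the Kafka @timestamp field is an ISO-8601 UTC string; the method names mirror the calls above, the bodies are assumptions:

import java.time.{Instant, LocalDateTime, ZoneId}
import java.time.format.DateTimeFormatter

object DateUtil {

  private val hyphenFmt  = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")
  private val compactFmt = DateTimeFormatter.ofPattern("yyyyMMddHHmmss")
  private val zone = ZoneId.of("Asia/Shanghai")

  // e.g. "2020-09-01T10:00:00.000Z" (UTC) -> "2020-09-01 18:00:00" (local time)
  def dateFormatUTC2Local(utc: String): String =
    LocalDateTime.ofInstant(Instant.parse(utc), zone).format(hyphenFmt)

  // "2020-09-01 18:00:00" -> epoch millis
  def dateTimeToTimestampJdk8(dt: String): Long =
    LocalDateTime.parse(dt, hyphenFmt).atZone(zone).toInstant.toEpochMilli

  // epoch millis -> "2020-09-01 18:00:00" (isHyphen = true) or "20200901180000"
  def timeStampTodateTimeJdk8(ts: Long, isHyphen: Boolean): String = {
    val fmt = if (isHyphen) hyphenFmt else compactFmt
    LocalDateTime.ofInstant(Instant.ofEpochMilli(ts), zone).format(fmt)
  }
}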

pom.xml:

 <properties>
        <scala.version>2.11.12</scala.version>
        <flink.version>1.11.1</flink.version>
        <jredis.version>2.9.0</jredis.version>
        <maven.version.min>3.5.2</maven.version.min>
        <scala.minor.version>2.11.12</scala.minor.version>
        <scala.complete.version>${scala.minor.version}</scala.complete.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-scala_2.11</artifactId>
            <version>${flink.version}</version>
           <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-json</artifactId>
            <version>${flink.version}</version>
           <scope>provided</scope>

        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-scala_2.11</artifactId>
            <version>${flink.version}</version>
           <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-scala_2.11</artifactId>
            <version>${flink.version}</version>
           <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-scala-bridge_2.11</artifactId>
            <version>${flink.version}</version>
           <scope>provided</scope>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner-blink_2.11</artifactId>
            <version>${flink.version}</version>
           <scope>provided</scope>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-statebackend-rocksdb_2.11</artifactId>
            <version>${flink.version}</version>
           <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-common</artifactId>
            <version>${flink.version}</version>
           <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka_2.11</artifactId>
            <version>${flink.version}</version>
           <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-hive_2.11</artifactId>
            <version>${flink.version}</version>
           <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-exec</artifactId>
            <version>2.1.1</version>
           <scope>provided</scope>
            <exclusions>
                <exclusion>
                    <groupId>org.codehaus.janino</groupId>
                    <artifactId>janino</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>org.codehaus.janino</groupId>
                    <artifactId>commons-compiler</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.hive/hive-metastore -->
        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-metastore</artifactId>
            <version>2.1.1</version>
           <scope>provided</scope>
        </dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>2.2.1</version>
           <scope>provided</scope>
        </dependency>


        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-shaded-hadoop-2-uber</artifactId>
            <version>2.6.5-8.0</version>
        </dependency>

        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-core</artifactId>
            <version>2.9.8</version>
           <scope>provided</scope>
        </dependency>

        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
            <version>2.9.8</version>
           <scope>provided</scope>
        </dependency>

All of the dependencies in pom.xml are set to provided scope, and the corresponding jars are placed in Flink's lib directory instead; otherwise, bundling the dependencies into a fat jar leads to all kinds of jar conflicts.
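
In this setup that roughly means copying the jars of the provided-scoped dependencies above, e.g. flink-connector-hive_2.11, flink-connector-kafka_2.11, flink-statebackend-rocksdb_2.11, hive-exec, hive-metastore, kafka-clients and the two jackson jars, into /opt/flink/flink-1.11.1/lib; the exact list depends on what your Flink distribution already ships.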

Submitting the job:

export HADOOP_CLASSPATH=`hadoop classpath`

/opt/flink/flink-1.11.1/bin/flink run \
-m yarn-cluster  \
-ytm 1024 \
-ynm flink-xxxx \
-c com.xxx.xxx.NginxLog2HivePartitionTime \
flink-xxxx-1.0-SNAPSHOT-jar-with-dependencies.jar
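
The HADOOP_CLASSPATH export is what makes the cluster's Hadoop dependencies visible to the Flink client when submitting to YARN.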

Result:
