When a Flink job dies immediately after startup and its logs cannot be viewed, ship the runtime logs to Kafka.

Log dependency jars (logback): apm-toolkit-logback-1.x-6.2.0.jar, kafka-logback-appender.jar
Log dependency jars (log4j): kafka-log4j-appender-2.0.1.jar

For both Flink 1.7 and 1.12, this approach requires placing kafka-log4j-appender-2.0.1.jar in the lib directory of the Flink client.
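If the jar is not already at hand, the appender is published on Maven Central under the coordinates org.apache.kafka:kafka-log4j-appender:2.0.1; it also ships in the libs directory of the Kafka distribution.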

Flink 1.7 (log4j.properties)

log4j.rootLogger=info,console,RFA,kafka

# appender kafka
log4j.appender.kafka=org.apache.kafka.log4jappender.KafkaLog4jAppender
# topic to which log records are sent
log4j.appender.kafka.topic=test
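# send asynchronously so that a slow or unreachable Kafka does not block the job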
log4j.appender.kafka.syncSend=false
# multiple brokers are separated by comma ",".
log4j.appender.kafka.brokerList=master:9092
# settings for a secured (Kerberos) cluster
#log4j.appender.kafka.saslKerberosServiceName = kafka
#log4j.appender.kafka.securityProtocol = SASL_PLAINTEXT
# path to krb5.conf; required in secure deployments
#log4j.appender.kafka.kerb5ConfPath = D:\\work\\test\\kafka\\src\\resource\\conf\\krb5.conf
# path to jaas.conf; required in secure deployments
#log4j.appender.kafka.clientJaasConfPath = C:\\xxx\\xxx\\xxx\\xxx\\xxx\\xxx\\jaas.conf
log4j.appender.kafka.layout=org.apache.log4j.PatternLayout
#log4j.appender.kafka.layout.ConversionPattern=%d [%-5p] [%t] - [%l] %m%n
# custom JSON-style log pattern
log4j.appender.kafka.layout.ConversionPattern={"log_level":"%p",\
  "log_timestamp":"%d{ISO8601}",\
  "log_package":"%C",\
  "log_thread":"%t",\
  "log_file":"%F",\
  "log_line":"%L",\
  "log_message":"%m",\
  "log_path":"%X{log_path}",\
  "flink_job_name":"${flink_job_name}",\
  "appId":"${env:_APP_ID}"\
  }
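# Note: log4j 1.x resolves ${...} placeholders against system properties, so flink_job_name
# normally has to be supplied as a -D JVM option; the ${env:...} lookup syntax comes from
# log4j 2 and may not resolve here unless the vendor build supports it.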

# log to the console
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Threshold=all
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%-d{yyyy-MM-dd HH\:mm\:ss} [%c\:%L]-[%p] %m%n

# Log all infos in the given file
log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${log.file}
log4j.appender.RFA.MaxFileSize=1MB
log4j.appender.RFA.Append=true
log4j.appender.RFA.MaxBackupIndex=10
log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
log4j.appender.RFA.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %t %-5p %-60c %x - %m%n
# Suppress the irrelevant (wrong) warnings from the Netty channel handler
log4j.logger.org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline=ERROR, RFA

#kafka
#log4j.logger.com.teligen=info,kafka
#log4j.logger.akka=info,kafka
# Do not send Kafka's own logs to the kafka appender: the Kafka client inside the appender also logs via log4j, and feeding its logs back into itself causes problems.
#log4j.logger.org.apache.kafka=error,kafka
#log4j.logger.org.apache.hadoop=info,kafka
#log4j.logger.org.apache.zookeeper=info,kafka
#log4j.logger.org.apache.flink=info,kafka
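
To confirm the appender is actually delivering before digging into the failing job, tailing the log topic with a plain Kafka consumer is enough. A minimal sketch, assuming the topic test and broker master:9092 from the config above (both are placeholders to adjust):

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class FlinkLogTail {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "master:9092"); // broker list from the appender config
        props.put("group.id", "flink-log-tail");       // any unused consumer group
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());
        props.put("auto.offset.reset", "earliest");    // also show logs sent before we attached

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("test")); // log topic from the config
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.value()); // one log event per record
                }
            }
        }
    }
}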

Flink 1.12 (log4j2 properties)

# This affects logging for both user code and Flink
rootLogger.appenderRef.rolling.ref = RollingFile
rootLogger.appenderRef.kafka.ref = Kafka
rootLogger.level = ERROR

# The following lines keep the log level of common libraries/connectors on
# log level INFO. The root logger does not override this. You have to manually
# change the log levels here.
logger.akka.name = akka
logger.akka.level = ERROR
logger.kafka.name= org.apache.kafka
logger.kafka.level = ERROR
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = ERROR
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR

# Log all infos in the given rolling file
appender.rolling.type = RollingFile
appender.rolling.name = RollingFile
appender.rolling.append = false
appender.rolling.fileName = ${sys:log.file}
appender.rolling.filePattern = ${sys:log.file}.%i
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
appender.rolling.policies.type = Policies
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 200MB
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.max = 10


# Suppress the irrelevant (wrong) warnings from the Netty channel handler
logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
logger.netty.level = OFF

# kafka appender config
appender.kafka.type=Kafka
appender.kafka.name=Kafka
appender.kafka.topic=flink_runtime_job_logs
appender.kafka.property.type=Property
appender.kafka.property.name=bootstrap.servers
appender.kafka.property.value=xxxxxxxxxxxx:9092
## log pattern for records written to Kafka
appender.kafka.layout.type=PatternLayout
appender.kafka.layout.pattern=%d{yyyy-MM-dd HH:mm:ss,SSS}  ${sys:log.file}  ${sys:flink_per_job_name} %-5p %-60c %x - %m%n
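## flink_per_job_name is a custom system property, not something Flink sets by itself;
## supply it per job, e.g. env.java.opts: -Dflink_per_job_name=myJob in flink-conf.yaml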
## alternative: emit JSON-formatted log records
#appender.kafka.layout.type=JSONLayout
#appender.kafka.layout.compact=true
#appender.kafka.layout.complete=false
#appender.kafka.layout.includeTimeMillis=true
#appender.kafka.layout.additionalField1.type=KeyValuePair
#appender.kafka.layout.additionalField1.key=logdir
#appender.kafka.layout.additionalField1.value=${sys:log.file}
#appender.kafka.layout.additionalField2.type=KeyValuePair
#appender.kafka.layout.additionalField2.key=flink_per_job_name
#appender.kafka.layout.additionalField2.value=${sys:flink_per_job_name}
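
With the JSONLayout variant above, every Kafka record is a self-describing JSON object, and the additionalField entries (logdir, flink_per_job_name) make it easy to filter the shared topic per job. A minimal parsing sketch, fed by the same consumer loop as the Flink 1.7 example, assuming Jackson is on the classpath (field names follow the layout config above):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class JobLogFilter {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Returns the log text if the record belongs to the given job, otherwise null.
    static String messageForJob(String recordValue, String jobName) throws Exception {
        JsonNode event = MAPPER.readTree(recordValue);
        // "flink_per_job_name" is the additionalField key defined in the layout config.
        String job = event.path("flink_per_job_name").asText();
        if (jobName.equals(job)) {
            // JSONLayout stores the formatted log text under "message".
            return event.path("message").asText();
        }
        return null;
    }
}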
