log4j2写入带kerberos认证的kafka
使用springboot集成log4j2,写入带kerberos认证的kafka
yml配置文件
异步写入
可以参考文章log4j2写入带kerberos认证的kafka
pom引入依赖
<dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-log4j2</artifactId> </dependency> 排除springboot自带的logback依赖 <!-- 加上这个才能辨认到log4j2.yml文件 --> <dependency> <groupId>com.fasterxml.jackson.dataformat</groupId> <artifactId>jackson-dataformat-yaml</artifactId> <version>2.10.1</version> </dependency> 引入disruptor依赖,log4j2采用disruptor实现异步模式 <!-- https://mvnrepository.com/artifact/com.lmax/disruptor --> <dependency> <groupId>com.lmax</groupId> <artifactId>disruptor</artifactId> <version>3.4.2</version> </dependency> 引入自己需要的kafka的client包
以上pom.xml完成 ,开始配置yml文件
# Log levels, lowest to highest: ALL < TRACE < DEBUG < INFO < WARN < ERROR < FATAL < OFF.
Configuration:
  status: warn
  monitorInterval: 30
  Properties:
    # Global variables shared by the appenders below.
    Property:
      # Defaults are for development. Override per environment via JVM args, e.g.:
      #   test: -Dlog.level.console=warn -Dlog.level.xjj=trace
      #   prod: -Dlog.level.console=warn -Dlog.level.xjj=info
      - name: log.level.console
        value: info
      - name: log.path
        value: log
      - name: project.name
        value: opendoc
      - name: log.pattern
        value: "%d{yyyy-MM-dd HH:mm:ss.SSS} -%5p ${PID:-} [%15.15t] %-30.30C{1.} : %m%n"
  Appenders:
    # Console output
    Console:
      name: CONSOLE
      target: SYSTEM_OUT
      PatternLayout:
        pattern: ${log.pattern}
    # Kafka appender; the Property entries are passed to the Kafka producer,
    # including the SASL/GSSAPI (Kerberos) settings.
    Kafka:
      name: KAFKA_ZULL
      topic: log-test
      PatternLayout:
        pattern: ${log.pattern}
      Property:
        - name: bootstrap.servers
          value: node5:6667,node6:6667,node7:6667
        - name: acks
          value: 1
        - name: security.protocol
          value: SASL_PLAINTEXT
        - name: sasl.mechanism
          value: GSSAPI
        - name: sasl.kerberos.service.name
          value: kafka
    RollingFile:
      # Startup / run log
      - name: ROLLING_FILE
        fileName: ${log.path}/${project.name}.log
        filePattern: "${log.path}/historyRunLog/$${date:yyyy-MM}/${project.name}-%d{yyyy-MM-dd}-%i.log"
        PatternLayout:
          pattern: ${log.pattern}
        Filters:
          # Deny the unwanted level first, then accept the levels to keep
          # (errors are routed to the exception log instead).
          ThresholdFilter:
            - level: error
              onMatch: DENY
              onMismatch: NEUTRAL
            - level: info
              onMatch: ACCEPT
              onMismatch: DENY
        Policies:
          # Roll over daily.
          TimeBasedTriggeringPolicy:
            modulate: true
            interval: 1
        DefaultRolloverStrategy:
          # Keep at most 100 files.
          max: 100
      # Platform log
      - name: PLATFORM_ROLLING_FILE
        ignoreExceptions: false
        fileName: ${log.path}/platform/${project.name}_platform.log
        filePattern: "${log.path}/platform/$${date:yyyy-MM}/${project.name}-%d{yyyy-MM-dd}-%i.log"
        PatternLayout:
          pattern: ${log.pattern}
        Policies:
          TimeBasedTriggeringPolicy:
            modulate: true
            interval: 1
        DefaultRolloverStrategy:
          max: 100
      # Business log
      - name: BUSINESS_ROLLING_FILE
        ignoreExceptions: false
        fileName: ${log.path}/business/${project.name}_bussiness.log
        filePattern: "${log.path}/business/$${date:yyyy-MM}/${project.name}-%d{yyyy-MM-dd}-%i.log"
        PatternLayout:
          pattern: ${log.pattern}
        Policies:
          TimeBasedTriggeringPolicy:
            modulate: true
            interval: 1
        DefaultRolloverStrategy:
          max: 100
      # Exception (error-level) log
      - name: EXCEPTION_ROLLING_FILE
        ignoreExceptions: false
        fileName: ${log.path}/exception/${project.name}_exception.log
        filePattern: "${log.path}/exception/$${date:yyyy-MM}/${project.name}-%d{yyyy-MM-dd}-%i.log"
        ThresholdFilter:
          level: error
          onMatch: ACCEPT
          onMismatch: DENY
        PatternLayout:
          pattern: ${log.pattern}
        Policies:
          TimeBasedTriggeringPolicy:
            modulate: true
            interval: 1
        DefaultRolloverStrategy:
          max: 100
      # DB log
      - name: DB_ROLLING_FILE
        ignoreExceptions: false
        fileName: ${log.path}/db/${project.name}_db.log
        filePattern: "${log.path}/db/$${date:yyyy-MM}/${project.name}-%d{yyyy-MM-dd}-%i.log"
        PatternLayout:
          pattern: ${log.pattern}
        Policies:
          TimeBasedTriggeringPolicy:
            modulate: true
            interval: 1
        DefaultRolloverStrategy:
          max: 100
  Loggers:
    # Async root logger (requires the disruptor dependency on the classpath).
    AsyncRoot:
      level: info
      AppenderRef:
        - ref: CONSOLE
        - ref: ROLLING_FILE
        - ref: EXCEPTION_ROLLING_FILE
    # Async named loggers, looked up by category name (see LogEnum/LogUtils).
    AsyncLogger:
      - name: platform
        level: info
        additivity: false
        AppenderRef:
          - ref: CONSOLE
          - ref: PLATFORM_ROLLING_FILE
      - name: business
        level: info
        additivity: false
        AppenderRef:
          - ref: BUSINESS_ROLLING_FILE
          - ref: KAFKA_ZULL
      - name: exception
        level: debug
        additivity: true
        AppenderRef:
          - ref: EXCEPTION_ROLLING_FILE
      - name: db
        level: info
        additivity: false
        AppenderRef:
          - ref: DB_ROLLING_FILE
如果kafka带有kerberos认证
在springboot启动类里进行认证
// Spring Boot entry point for the Zuul gateway.
// NOTE(review): relies on @EnableZuulProxy / @EnableHystrixDashboard from
// Spring Cloud Netflix and on RequestLogFilter defined elsewhere in the project.
@SpringBootApplication
@EnableZuulProxy
@EnableHystrixDashboard
public class BigdataZuulApplication {
public static void main(String[] args) {
// Set the Kerberos JAAS/krb5 system properties BEFORE Spring starts, so the
// log4j2 Kafka appender's producer can authenticate when it is created.
ConfigApplication.setConfig();
SpringApplication.run(BigdataZuulApplication.class, args);
}
// Registers the request-logging Zuul filter as a Spring bean.
@Bean
public ZuulFilter simpleFilter(){
return new RequestLogFilter();
}
}
/**
 * Sets the JVM system properties required for Kerberos (GSSAPI) authentication
 * of the Kafka producer used by the log4j2 Kafka appender.
 *
 * <p>Must be invoked before Spring / log4j2 initializes the Kafka appender
 * (see {@code BigdataZuulApplication.main}).</p>
 */
public class ConfigApplication {

    /** Utility class; not meant to be instantiated. */
    private ConfigApplication() {
    }

    /**
     * Reads {@code kerberos.version} from {@code application.yml} and points
     * {@code java.security.auth.login.config} / {@code java.security.krb5.conf}
     * at the matching {@code kafka_jaas.conf} and {@code krb5.conf} files.
     *
     * @throws IllegalStateException if application.yml cannot be loaded
     */
    public static void setConfig() {
        YamlPropertiesFactoryBean yaml = new YamlPropertiesFactoryBean();
        yaml.setResources(new ClassPathResource("application.yml"));
        Properties properties = yaml.getObject();
        if (properties == null) {
            // getObject() returns null when the resource is missing/unreadable;
            // failing fast beats an obscure NPE on the next line.
            throw new IllegalStateException("application.yml could not be loaded");
        }
        String version = properties.getProperty("kerberos.version");
        // Use the platform separator instead of sniffing os.name: the original
        // startsWith("Lin") check picked the Windows separator on macOS.
        String path = version + System.getProperty("file.separator");
        System.out.println(path);
        System.setProperty("java.security.auth.login.config", path + "kafka_jaas.conf");
        System.setProperty("java.security.krb5.conf", path + "krb5.conf");
    }
}
/**
 * Log category names used to look up the dedicated log4j2 loggers
 * (business, platform, db, exception) declared in log4j2.yml.
 */
public enum LogEnum {
    BUSINESS("business"),
    PLATFORM("platform"),
    DB("db"),
    EXCEPTION("exception");

    // Logger name exactly as declared in the log4j2 configuration.
    private String category;

    LogEnum(String loggerName) {
        this.category = loggerName;
    }

    /** Returns the logger name bound to this category. */
    public String getCategory() {
        return category;
    }

    /** Replaces the logger name. Note: this mutates a shared enum constant. */
    public void setCategory(String category) {
        this.category = category;
    }
}
/**
 * Accessors for the category loggers (business, platform, db, exception)
 * declared in log4j2.yml. SLF4J caches logger instances, so these getters
 * are cheap to call repeatedly.
 */
public class LogUtils {

    /** Utility class; not meant to be instantiated. */
    private LogUtils() {
    }

    /**
     * @return the business logger (routed to BUSINESS_ROLLING_FILE and the
     *         Kafka appender per log4j2.yml)
     */
    public static Logger getBusinessLogger() {
        return LoggerFactory.getLogger(LogEnum.BUSINESS.getCategory());
    }

    /** @return the platform logger */
    public static Logger getPlatformLogger() {
        return LoggerFactory.getLogger(LogEnum.PLATFORM.getCategory());
    }

    /** @return the database logger */
    public static Logger getDBLogger() {
        return LoggerFactory.getLogger(LogEnum.DB.getCategory());
    }

    /** @return the exception logger */
    public static Logger getExceptionLogger() {
        return LoggerFactory.getLogger(LogEnum.EXCEPTION.getCategory());
    }
}
example:
protected Logger logger = LogUtils.getBusinessLogger(); logger.info(format.format(new Date())+"---"+ ip+"---"+url+"---"+requestBody);
以上经测试,可通过kerberos认证写入kafka