# Server port
server.port=8080

# Rows per page
pager.size=10

# Logging
logging.config=classpath:logback.xml

# Thymeleaf template engine
spring.thymeleaf.prefix=classpath:/templates/
spring.thymeleaf.suffix=.html
spring.thymeleaf.mode=HTML5
spring.thymeleaf.encoding=UTF-8
spring.thymeleaf.cache=false

# MySQL datasource
# NOTE(review): credentials are stored in plain text here — consider externalizing
# them (environment variables / config server / vault) before deployment.
spring.datasource.url=jdbc:mysql://192.168.1.141:3306/bigdata?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai
spring.datasource.driver-class-name=com.mysql.cj.jdbc.Driver
spring.datasource.username=bigdata
spring.datasource.password=Goodwe!123

# Redis
spring.redis.host=192.168.1.183
spring.redis.port=6385
spring.redis.password=goodwe

# Kafka
# Broker address
spring.kafka.bootstrap-servers=192.168.1.43:9092
# Consumer group
spring.kafka.consumer.group-id=group_gops_data3
# earliest: if a committed offset exists for a partition, resume from it; otherwise consume from the beginning
# latest:   if a committed offset exists for a partition, resume from it; otherwise consume only newly produced records
# none:     resume from committed offsets only; throw an exception if any partition has no committed offset
spring.kafka.consumer.auto-offset-reset=earliest
# If true, consumer offsets are committed periodically in the background
spring.kafka.consumer.enable-auto-commit=true
# Auto-commit interval (milliseconds)
spring.kafka.consumer.auto-commit-interval=5000
# Maximum number of records returned by a single poll() call
spring.kafka.consumer.max-poll-records=10
# Number of consumer threads per listener
spring.kafka.listener.concurrency=5
# Topic name (custom application property)
spring.kafka.topic=datatest
# Maximum number of message chunks buffered by the consumer (default 10)
# NOTE(review): not a standard Spring Boot/Spring-Kafka property name — verify the
# application actually reads this key; it may be a leftover from the old scala client.
spring.kafka.queued.max.message.chunks=50

# HBase
hbase.zookeeper.quorum=slave1,slave2,slave3
# NOTE(review): the standard HBase client key is hbase.zookeeper.property.clientPort —
# confirm this custom key is the one the application code reads.
hbase.zookeeper.port=2181

# Static resources
spring.mvc.static-path-pattern=/static/**

# Print SQL statements to stdout (MyBatis-Plus)
mybatis-plus.configuration.log-impl=org.apache.ibatis.logging.stdout.StdOutImpl
# Source: https://my.oschina.net/u/4100033/blog/3041327