ELK分布式日志收集

一、环境部署

es 要求Linux 的 参数 vm.max_map_count 至少为 262144,目录需要赋权限: chmod -R 777 search

新建docker-compose.yml

version: '3.1'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
    container_name: elasticsearch
    restart: always
    volumes:
      - ./search/es/esdata:/usr/share/elasticsearch/data
      #- ./search/config/es01/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./search/es/logs/elasticsearch:/usr/share/elasticsearch/logs
      - ./search/es/plugins/analysis-ik:/usr/share/elasticsearch/plugins/analysis-ik
    environment:
      - discovery.type=single-node
      - "ES_JAVA_OPTS=-Xms4g -Xmx4g"
      - TZ=Asia/Shanghai
    ports:
      - "9200:9200"
      - "9300:9300"
    networks:
      - elk

  kibana:
    image: docker.elastic.co/kibana/kibana:7.6.2
    links:
      - elasticsearch
    environment:
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    volumes:
      - ./kibana_config/:/usr/local/kibana/config/
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch 
    networks:
      - elk
    container_name: kibana
    restart: always

  logstash:
    image: docker.elastic.co/logstash/logstash:7.6.2
    links:
      - elasticsearch
    command: logstash -f /etc/logstash/config/logstash.conf  #logstash 启动时使用的配置文件
    volumes:
      - ./search/logstash/config/logstash.conf:/etc/logstash/config/logstash.conf  #logstash 配置文件位置
      - ./search/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml  #logstash 配置文件位置
     # - ./search/logstash/log/pv.log:/logs/pv.log #挂载要读取日志文件
    depends_on:
      - elasticsearch  #后于elasticsearch启动
    ports:
      - "4560:4560"
      # - "8065:8065"
      # - "9600:9600"
    networks:
      - elk 
    container_name: logstash

networks:
  elk:

新建 ./search/es/plugins/analysis-ik 目录，下载下方的 zip 并解压到该目录以安装 ik 分词器

https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.6.2/elasticsearch-analysis-ik-7.6.2.zip

新建./search/logstash/config/logstash.conf

input {
	tcp {
		mode => "server"
		port => 4560
		codec => json #json格式读取
	}
}
filter {
	json{
		source => "message"
		remove_field => ["host","port","message","@version"] #去掉不要的字段
  }
}

output {
	elasticsearch {
		action => "index"
		hosts  => "172.18.42.142:9200" # 与 es 在同一 compose 网络时也可写 "elasticsearch:9200"
		index  => "test_log"
	}
}

新建./search/logstash/config/logstash.yml

http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: ["http://127.0.0.1:9200"] #设置es地址（原文 172.0.0.1 疑为 127.0.0.1 笔误，请按实际地址填写）

二、与项目整合

springboot微服务的pom文件添加

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-elasticsearch</artifactId>
</dependency>
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>6.6</version>
</dependency>

resources/application.yml加入

spring:
    elasticsearch:
        rest:
          uris: http://127.0.0.1:9200 #es地址（原文 172.0.0.1 疑为 127.0.0.1 笔误）

resources/logback.xml

<?xml version="1.0" encoding="UTF-8"?>
<!--该日志将日志级别不同的log信息保存到不同的文件中-->
<configuration>
	<include resource="org/springframework/boot/logging/logback/defaults.xml" />

<!--	<springProperty scope="context" name="destination" source="net.logstash.server.hosts"  />-->
	<springProperty scope="context" name="springAppName" source="spring.application.name"  />

	<!-- 控制台输出	-->
	<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
		<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
			<level>INFO</level>
		</filter>
		<!-- 日志输出编码 -->
		<encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
			<providers class="net.logstash.logback.composite.loggingevent.LoggingEventJsonProviders">
				<pattern>
					<pattern>
						{
						"date":"%date{\"yyyy-MM-dd HH:mm:ss.SSS\",UTC}",
						"level":"%level",
						"msg":"%msg"
						}
					</pattern>
				</pattern>
			</providers>
		</encoder>
	</appender>

	<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
		<destination>127.0.0.1:4560</destination><!-- logstash 地址，原文 172.0.0.1 疑为 127.0.0.1 笔误 -->
		<encoder charset="UTF-8"
				 class="net.logstash.logback.encoder.LogstashEncoder">
		</encoder>
	</appender>

	<!--异步日志-->
	<appender name="async" class="ch.qos.logback.classic.AsyncAppender">
		<!--指定某个具体的 appender-->
		<appender-ref ref="LOGSTASH" />
	</appender>

	<root level="INFO">
<!--		<appender-ref ref="LOGSTASH" />-->
		<appender-ref ref="CONSOLE" />
	</root>

	<!-- 自定义 logger 对象
	name="com.demo.weblog" 设置传入logstash的日志
	additivity="false" 自定义 logger 对象是否继承 rootlogger
	-->
	<logger name="com.demo.weblog" level="info" additivity="false">
	    <appender-ref ref="async" />
	</logger>

</configuration>

es的整合

# domain/WebLog
@Data
@Document(indexName = "test_log",shards = 1,replicas = 1)
public class WebLog {

    // Elasticsearch document id.
    @Id
    private String id;

    // TCP source port recorded by logstash's tcp input.
    @Field(type = FieldType.Integer)
    private Integer port;

    // Raw log line; full-text searchable (analyzed as text).
    @Field(type = FieldType.Text)
    private String message;

    // Maps the "@version" field that logstash adds to every event.
    @Field(name = "@version",type = FieldType.Keyword)
    private String version;

    // Event timestamp set by logstash ("@timestamp").
    @Field(name = "@timestamp", type = FieldType.Date,format = DateFormat.date_time)
    private Date timestamp;

    // Originating host, exact-match only (keyword).
    @Field(type = FieldType.Keyword)
    private String host;

    // Parsed log payload; mapped dynamically by ES (no @Field) — confirm this is intended.
    private LogMessage lm;
}

# repository/TestRepository
/** Spring Data repository for {@link WebLog} documents in the "test_log" index; CRUD methods are inherited. */
public interface TestRepository extends ElasticsearchRepository<WebLog,String> {
}

# controller/WebLogController
@Api(tags = "es信息接口")
@RestController
public class WebLogController {

    /** Repository for WebLog documents; injected via constructor (preferred over field @Autowired). */
    private final TestRepository testRepository;

    public WebLogController(TestRepository testRepository) {
        this.testRepository = testRepository;
    }

    /**
     * Look up a single WebLog document by its Elasticsearch id.
     *
     * @param id the document id
     * @return wrapped Optional — empty when no document with that id exists
     */
    @ApiOperation(value = "test详情")
    @GetMapping("/testfindById/{id}")
    public R<Optional<WebLog>> testfindById(@PathVariable String id) {
        return R.ok(testRepository.findById(id));
    }
}

三、esAPI用法

pom.xml配置

<properties>
    <java.version>1.8</java.version>
    <elasticsearch.version>7.6.2</elasticsearch.version>
</properties>
<dependency>
   <groupId>org.elasticsearch.client</groupId>
   <artifactId>elasticsearch-rest-high-level-client</artifactId>
   <version>${elasticsearch.version}</version>
</dependency>

测试代码

/**
 * Demo: query the "test_log" index with the high-level REST client.
 * Fix vs. original: the client is now opened in try-with-resources so it is
 * closed even when search() throws (the original leaked it on any exception);
 * the address was corrected from 172.0.0.1 (public-range typo) to 127.0.0.1.
 */
public static void main(String[] args) throws IOException {
        // RestHighLevelClient is Closeable — ensure release on all paths.
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(
                        new HttpHost("127.0.0.1", 9200, "http")))) {

            // 1. Build the query: exact (non-analyzed) match on serviceName.
            SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
            TermQueryBuilder termQueryBuilder = QueryBuilders.termQuery("serviceName", "11111");
            sourceBuilder.query(termQueryBuilder); // 精确
//            sourceBuilder.from(0);
            sourceBuilder.size(1);
//            sourceBuilder.collapse(new CollapseBuilder("serviceName.keyword"));
//            QueryBuilders.rangeQuery("requestTime").gte("2021-10-27T15:38:00").lte("2021-10-27T15:39:00");
//            sourceBuilder.query(QueryBuilders.matchAllQuery());
//            AggregationBuilder aggregationBuilder = AggregationBuilders.dateHistogram("by_time").field("requestTime").fixedInterval(DateHistogramInterval.hours(5));
//            SumAggregationBuilder aggregation =
//                    AggregationBuilders
//                            .sum("timeConsuming")
//                            .field("timeConsuming");
//            aggregationBuilder.subAggregation(aggregation);
//            sourceBuilder.aggregation(aggregationBuilder);

            // 2. Create the SearchRequest targeting the index, attach the source.
            SearchRequest searchRequest = new SearchRequest("test_log");
            sourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS));
            searchRequest.source(sourceBuilder);

            // 3. Execute and print hits ("status" field of each document).
            SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);
            log.info(JSON.toJSONString(searchResponse.getHits(), true));
            System.out.println("==============================");
            for (SearchHit documentFields : searchResponse.getHits().getHits()) {
                System.out.println(documentFields.getSourceAsMap().get("status"));
            }
        }
    }

四、es问题修复

1,text转keyword（注意：已有字段的类型不能通过 PUT mapping 直接修改，需新建索引并 reindex；以下写法仅适用于新增字段）

PUT /test_log/_mapping?include_type_name=false
{
  "properties": {
    "type": { 
      "ignore_above": 10, //大于10个字符将不进行索引,即无法检索
      "type":     "keyword"
    }
  }
}
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值