Scripts used to deploy ELK on Docker

1. docker-compose.yml

version: '2.2'
services:
  zookeeper:
    image: zookeeper:latest
    container_name: zookeeper
    ports:
      - "2181:2181"                 
  kafka:
    image: wurstmeister/kafka:latest 
    container_name: kafka
    volumes:
      - /etc/localtime:/etc/localtime
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 10.30.0.12   ## IP of the host running Kafka
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181       
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_LOG_RETENTION_HOURS: 120
      KAFKA_MESSAGE_MAX_BYTES: 10000000
      KAFKA_REPLICA_FETCH_MAX_BYTES: 10000000
      KAFKA_GROUP_MAX_SESSION_TIMEOUT_MS: 60000
      KAFKA_NUM_PARTITIONS: 3
      KAFKA_DELETE_RETENTION_MS: 1000                
  elasticsearch:
    image: elasticsearch:7.8.0   ## bump this version as needed
    restart: always
    container_name: es
    environment:
      - discovery.type=single-node # single-node startup; not acceptable in production
      - "ES_JAVA_OPTS=-Xms2048m -Xmx2048m"
      - "ELASTIC_PASSWORD=elastic"  ## login password for ES
    ports:
      - "9200:9200"
    volumes:
      # Note: to map the ES data out of the container, the host directory /home/elk/elasticsearch must have 777 permissions
      - /home/elk/elasticsearch/:/usr/share/elasticsearch/data
      - /home/elk/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
  kibana:
    image: kibana:7.8.0   ## bump this version as needed
    restart: always
    container_name: kibana
    ports:
      - "5601:5601"
    volumes:
      ### map the local file /home/elk/kibana/kibana.yml into the container
      - /home/elk/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml
    depends_on:
      - elasticsearch
  logstash:
    image: logstash:7.8.0   ## bump this version as needed
    volumes:
      - /home/elk/logstash/pipeline/:/usr/share/logstash/pipeline/
      - /home/elk/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /home/elk/logstash/config/pipelines.yml:/usr/share/logstash/config/pipelines.yml
    restart: always
    container_name: logstash
    ports:
      - "9600:9600"
    depends_on:
      - elasticsearch
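
With the configuration files from the sections below created under /home/elk/, the whole stack can be brought up from the directory containing this file with docker-compose up -d.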

2. /home/elk/elasticsearch/elasticsearch.yml

## Default Elasticsearch configuration from Elasticsearch base image.
## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml
#
cluster.name: "docker-cluster"
network.host: 0.0.0.0

## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
#
xpack.license.self_generated.type: trial
xpack.security.enabled: true
xpack.monitoring.collection.enabled: true

3. /home/elk/kibana/kibana.yml

## Default Kibana configuration from Kibana base image.
### https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.js
server.name: kibana
server.host: 0.0.0.0
## Localize Kibana's UI to Chinese. Note: this setting cannot go at the bottom of
## the file; for reasons that are unclear, it has no effect there.
i18n.locale: "zh-CN"
elasticsearch.hosts: [ "http://10.30.0.12:9200" ]
## Replace the IP above with your Elasticsearch host's address
monitoring.ui.container.elasticsearch.enabled: true
#
### X-Pack security credentials
##
elasticsearch.username: elastic
elasticsearch.password: elastic
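
Note that elasticsearch.username and elasticsearch.password must match the ELASTIC_PASSWORD value set for the elasticsearch service in the docker-compose file (elastic in this example).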

4. /home/elk/logstash/config/logstash.yml

## Default Logstash configuration from Logstash base image.
### https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://10.30.0.12:9200" ]   ## ES host address; replace the IP with your own
## X-Pack security credentials
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: elastic
xpack.monitoring.elasticsearch.password: elastic

5. /home/elk/logstash/config/pipelines.yml

# List of pipelines to be loaded by Logstash
#
# This document must be a list of dictionaries/hashes, where the keys/values are pipeline settings.
# Default values for omitted settings are read from the `logstash.yml` file.
# When declaring multiple pipelines, each MUST have its own `pipeline.id`.
#
# Example pipeline:
  - pipeline.id: kafka
    pipeline.workers: 2 # worker threads; defaults to the number of CPU cores
    pipeline.batch.size: 1 # events per batch; the default is 125
    path.config: "/usr/share/logstash/pipeline"
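
path.config points at /usr/share/logstash/pipeline inside the container, which the docker-compose file maps to /home/elk/logstash/pipeline/ on the host, so the logstash-kafka.conf from the next section is picked up automatically.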

6. /home/elk/logstash/pipeline/logstash-kafka.conf

input {
  kafka {
    bootstrap_servers => "10.30.0.12:9092"   # IP of the host running Kafka; replace with your actual IP
    topics => ["tst-store-center"]   ## topic the application publishes messages to
  }
}
filter {
  # Only matched events are sent to output
}
output  {
  elasticsearch{
    hosts => ["10.30.0.12:9200"] # IP of the host running Elasticsearch; replace with your actual IP
    index => "tst-log-%{+YYYY-MM-dd}"  # index name pattern
    user => "elastic"
    password => "elastic"
  }
  stdout{
    codec => rubydebug
  }
}
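
Once the stack is up, the pipeline can be smoke-tested by publishing a message to the topic Logstash consumes. A minimal sketch, assuming the org.apache.kafka:kafka-clients dependency is on the classpath (the class name PipelineSmokeTest is made up for illustration):

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class PipelineSmokeTest {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Kafka host/port from the docker-compose file above; replace with your own IP
        props.put("bootstrap.servers", "10.30.0.12:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // The topic must match the "topics" setting in logstash-kafka.conf
            producer.send(new ProducerRecord<>("tst-store-center", "smoke-test", "hello elk"));
            producer.flush();
        }
        // If everything is wired correctly, the message shows up on Logstash's
        // stdout (rubydebug codec) and in today's tst-log-* index.
    }
}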

7. Kafka configuration in Spring Boot

The settings in the YAML file:

kafka:
  # Producer settings
  producer:
    servers: 10.30.0.12:9092
    retries: 0
    batchSize: 4096
    linger: 1
    bufferMemory: 1048576

  # Consumer settings
  consumer:
    servers: 10.30.0.12:9092
    concurrency: 3
    enableAutoCommit: false
    sessionTimeout: 6000
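
These keys are bound to the property classes shown further below via @ConfigurationProperties with the prefixes kafka.producer and kafka.consumer.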

The Java configuration classes:

package com.tst.config;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.stereotype.Component;

@Slf4j
@Component
public class KafkaSenderListener implements ProducerListener {

    public void onSuccess(String topic, Integer partition, Object key, Object value, RecordMetadata recordMetadata) {
        log.info(String.format(
                "Producer listener - topic: %s, partition: %d, offset: %d, key: %s, value: %s",
                recordMetadata.topic(), recordMetadata.partition(),
                recordMetadata.offset(), key, value));
    }

    public void onError(String topic, Integer partition, Object key, Object value, Exception exception) {
        // Log the failure rather than printing the stack trace to stderr
        log.error("Producer listener - send to topic {} failed", topic, exception);
    }

    public boolean isInterestedInSuccess() {
        return true;
    }
}

package com.tst.config;

import com.tst.property.KafkaProducerProperty;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

import java.util.HashMap;
import java.util.Map;

@Configuration
@EnableKafka
// Registers the @ConfigurationProperties class as a bean so it can be autowired below
@EnableConfigurationProperties(KafkaProducerProperty.class)
public class KafkaProducerConfig {

    @Autowired
    private KafkaProducerProperty producerProperty;


    @Autowired
    private KafkaSenderListener senderListener;

    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, producerProperty.getServers());
        props.put(ProducerConfig.RETRIES_CONFIG, producerProperty.getRetries());
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, producerProperty.getBatchSize());
        // Wait for acknowledgement from all replicas before a send is considered successful
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.LINGER_MS_CONFIG, producerProperty.getLinger());
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, producerProperty.getBufferMemory());
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 30000);
        return props;
    }

    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate kafkaTemplate() {
        KafkaTemplate kafkaTemplate = new KafkaTemplate(producerFactory()) ;
        kafkaTemplate.setProducerListener(senderListener); // the producer listener can be attached this way
        return kafkaTemplate;
    }
}

package com.tst.config;

import com.tst.property.KafkaConsumerProperty;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;
import java.util.HashMap;
import java.util.Map;


@Configuration
@EnableKafka
// Registers the @ConfigurationProperties class as a bean so it can be autowired below
@EnableConfigurationProperties(KafkaConsumerProperty.class)
public class KafkaConsumerConfig {

    @Autowired
    private KafkaConsumerProperty consumerProperty;

    // ======================================== manual-acknowledge mode configuration ========================================
    public Map<String, Object> consumerConfigsAck() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, consumerProperty.getServers());
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Apply the configured session timeout (6000 ms in the YAML above)
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, consumerProperty.getSessionTimeout());
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return propsMap;
    }

    public ConsumerFactory<String, String> consumerFactoryAck() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigsAck());
    }

    @Bean("listenerAck")
    public CustomListenerAck listenerAck() {
        return new CustomListenerAck();
    }

    @Bean("factoryAckContainer")
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactoryAck() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactoryAck());
        factory.setConcurrency(consumerProperty.getConcurrency());
        factory.getContainerProperties().setPollTimeout(500);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }
}

package com.tst.config;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;

@Slf4j
public class CustomListenerAck {

//    @KafkaListener(groupId = "business-log", topics = {"tst-store-center"},containerFactory = "factoryAckContainer")
    public void listen(ConsumerRecord<?, ?> record, Acknowledgment ack) {
        try {
            log.info("自行确认方式收到消息的key: " + record.key());
            log.info("自行确认方式收到消息的value: " + record.value().toString());

            // todo 做具体的业务

            log.info("消息确认!");
            ack.acknowledge();
        } catch (Exception e) {
            // On failure, skip the acknowledgement; redeliver this record after a 10 s pause
            ack.nack(10000);
        }
    }
}
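
To activate this listener, uncomment the @KafkaListener annotation: groupId names the consumer group, topics must match the topic the producer sends to, and containerFactory references the factoryAckContainer bean defined in KafkaConsumerConfig above.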

package com.tst.config;

import com.tst.util.ExceptionUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

@Component
public class KafkaSender {

    @Autowired
    private KafkaTemplate kafkaTemplate;

    public void messageSender(String topic, String key, Object message) {
        try {
            kafkaTemplate.send(topic, key, message);
        } catch (Exception e) {
            // If the send throws, publish the stack trace to the same topic instead
            kafkaTemplate.send(topic, key, ExceptionUtil.printExStack(e));
        }
    }
}
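
For completeness, a hedged usage sketch of KafkaSender: a hypothetical service (StoreLogService and recordOperation are invented names for illustration) that ships a business-log line to the topic Logstash consumes:

package com.tst.service;

import com.tst.config.KafkaSender;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

@Service
public class StoreLogService {

    @Autowired
    private KafkaSender kafkaSender;

    public void recordOperation(String orderId, String detail) {
        // Key by order id so related messages land in the same partition;
        // Logstash then indexes the value into the tst-log-* index.
        kafkaSender.messageSender("tst-store-center", orderId, detail);
    }
}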

package com.tst.property;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.context.config.annotation.RefreshScope;

@Data
@RefreshScope
@ConfigurationProperties(prefix = "kafka.producer")
public class KafkaProducerProperty {

    /**
     * Kafka server address(es)
     */
    private String servers;

    /**
     * Number of send retries
     */
    private Integer retries;

    /**
     * Batch size in bytes
     */
    private Integer batchSize;

    /**
     * Linger time in milliseconds
     */
    private Integer linger;

    /**
     * Total producer buffer memory in bytes
     */
    private String bufferMemory;
}

package com.tst.property;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.context.config.annotation.RefreshScope;

@Data
@RefreshScope
@ConfigurationProperties(prefix = "kafka.consumer")
public class KafkaConsumerProperty {

    /**
     * Kafka server address(es)
     */
    private String servers;

    /**
     * Listener concurrency (number of consumer threads)
     */
    private Integer concurrency;

    /**
     * Whether offsets are committed automatically
     */
    private Boolean enableAutoCommit;

    /**
     * Consumer session timeout in milliseconds
     */
    private Integer sessionTimeout;
}
