docker部署elk

docker部署elk

es1镜像

两个容器 172.111.0.11

172.111.0.12

npm可以不装

L:logstash 直接使用官方镜像

K:kibana 同样使用官方镜像

docker安装es1
[root@docker2 es1]# ls
Dockerfile  elasticsearch-6.7.2.rpm  elasticsearch.yml  node-v8.2.1.tar.gz  phantomjs-2.1.1-linux-x86_64.tar.bz2
​
[root@docker2 es1]# vim Dockerfile 
FROM centos:7
# centos:7 vault mirrors are dead; switch to the Aliyun repo before any yum call
RUN rm -rf /etc/yum.repos.d/*
ADD http://mirrors.aliyun.com/repo/Centos-7.repo /etc/yum.repos.d/Centos-7.repo
RUN yum clean all && yum makecache
RUN yum -y install epel-release
# gcc/make are needed to compile node below; java is the ES runtime
RUN yum -y install gcc gcc-c++ make
RUN yum -y install java
RUN yum clean all

# Install Elasticsearch 6.7.2 from the local RPM and drop in the cluster config
COPY elasticsearch-6.7.2.rpm /opt
RUN rpm -ivh /opt/elasticsearch-6.7.2.rpm
COPY elasticsearch.yml /etc/elasticsearch/elasticsearch.yml

# node.js is required to build head-style plugins
WORKDIR /opt/
ADD node-v8.2.1.tar.gz /opt/
RUN cd node-v8.2.1 \
    && ./configure \
    && make -j 2 \
    && make install

# phantomjs headless browser, symlinked onto PATH
ADD phantomjs-2.1.1-linux-x86_64.tar.bz2 /opt/es1
RUN ln -s /opt/es1/phantomjs-2.1.1-linux-x86_64/bin/phantomjs /usr/local/bin/phantomjs

# ES refuses to run as root: hand ownership to the elasticsearch user created by the RPM
RUN chown -R elasticsearch:elasticsearch /usr/share/elasticsearch \
    && chown -R elasticsearch:elasticsearch /etc/elasticsearch
USER elasticsearch

# 9200 = HTTP API, 9300 = transport port used for node-to-node cluster traffic
EXPOSE 9200 9300
CMD ["/usr/share/elasticsearch/bin/elasticsearch"]
​
[root@docker2 es1]# vim elasticsearch.yml 
# Same cluster.name on both nodes is what lets them form one cluster
cluster.name: elk-cluster
node.name: node1
# node1 is master-eligible and also stores data
node.master: true
node.data: true
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
# Listen on all interfaces so the mapped host port can reach the container
network.host: 0.0.0.0
http.port: 9200
# Transport port for inter-node cluster communication
transport.tcp.port: 9300
# Unicast discovery: the fixed container IPs of both es nodes
discovery.zen.ping.unicast.hosts: ["172.111.0.11","172.111.0.12"]
​
[root@docker2 es1]# docker build -t es1:elk .
​
[root@docker2 es1]# docker run -itd --name es1 -p 9200:9200 --net mynetwork --ip 172.111.0.11 es1:elk
58ed4d572bece8ae75ec9be63d5f1335ee9b3e7c0ccfd40ba23909faf4223402
[root@docker2 es1]# docker ps
CONTAINER ID   IMAGE     COMMAND                   CREATED         STATUS         PORTS                                       NAMES
58ed4d572bec   es1:elk   "/usr/share/elastics…"   3 seconds ago   Up 3 seconds   0.0.0.0:9200->9200/tcp, :::9200->9200/tcp   es1

es2镜像

docker创建es2
[root@docker2 es2]# vim elasticsearch.yml 
# Must match node1's cluster.name to join the same cluster
cluster.name: elk-cluster
node.name: node2
# node2 is data-only: not eligible for master election
node.master: false
node.data: true
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
# Listen on all interfaces so the mapped host port can reach the container
network.host: 0.0.0.0
http.port: 9200
# Transport port for inter-node cluster communication
transport.tcp.port: 9300
# Unicast discovery: the fixed container IPs of both es nodes
discovery.zen.ping.unicast.hosts: ["172.111.0.11","172.111.0.12"]
​
[root@docker2 es2]# vim Dockerfile 
FROM centos:7
# centos:7 vault mirrors are dead; switch to the Aliyun repo before any yum call
RUN rm -rf /etc/yum.repos.d/*
ADD http://mirrors.aliyun.com/repo/Centos-7.repo /etc/yum.repos.d/Centos-7.repo
RUN yum clean all && yum makecache
RUN yum -y install epel-release
# gcc/make are needed to compile node below; java is the ES runtime
RUN yum -y install gcc gcc-c++ make
RUN yum -y install java
RUN yum clean all

# Install Elasticsearch 6.7.2 from the local RPM and drop in the node2 config
COPY elasticsearch-6.7.2.rpm /opt
RUN rpm -ivh /opt/elasticsearch-6.7.2.rpm
COPY elasticsearch.yml /etc/elasticsearch/elasticsearch.yml

# node.js is required to build head-style plugins
WORKDIR /opt/
ADD node-v8.2.1.tar.gz /opt/
RUN cd node-v8.2.1 \
    && ./configure \
    && make -j 2 \
    && make install

# phantomjs headless browser, symlinked onto PATH
# (extraction dir kept as /opt/es1 to match the es1 image layout)
ADD phantomjs-2.1.1-linux-x86_64.tar.bz2 /opt/es1
RUN ln -s /opt/es1/phantomjs-2.1.1-linux-x86_64/bin/phantomjs /usr/local/bin/phantomjs

# ES refuses to run as root: hand ownership to the elasticsearch user created by the RPM
RUN chown -R elasticsearch:elasticsearch /usr/share/elasticsearch \
    && chown -R elasticsearch:elasticsearch /etc/elasticsearch
USER elasticsearch

# 9200 = HTTP API, 9300 = transport port used for node-to-node cluster traffic
EXPOSE 9200 9300
CMD ["/usr/share/elasticsearch/bin/elasticsearch"]
​
[root@docker2 es2]# docker build -t es2:elk .
[root@docker2 es2]# docker run -itd --name es2 -p 9201:9200 --net mynetwork --ip 172.111.0.12 es2:elk

logstash镜像

拉取logstash和kibana的v6.7.2版本镜像

docker pull logstash:6.7.2
docker pull kibana:6.7.2

logstash镜像编排

cd /opt/logstash
vim logstash.yml
# 编写yml文件
http.host: "0.0.0.0"
# Inside the docker network both ES containers listen on 9200.
# 9201 is only the *host-side* mapping for es2, so es2:9201 would be unreachable here.
xpack.monitoring.elasticsearch.url: [ "http://es1:9200","http://es2:9200" ]
​
vim logstash.conf
# 编写 conf 文件,定义输入输出,指定输入之一为宿主机Apache的日志文件
input {
  # Accept JSON events over TCP (e.g. from beats-style shippers)
  tcp {
    mode => "server"
    host => "0.0.0.0"
    port => 5044
    codec => "json"
  }
  # Tail the Apache logs bind-mounted from the host into /opt/logs
  file {
    path => "/opt/logs/*_log"
    start_position => "beginning"
  }
}
output {
  elasticsearch {
    # Inside the docker network both ES containers listen on 9200;
    # 9201 is only the host-side port mapping for es2.
    hosts => [ "http://es1:9200","http://es2:9200" ]
    index => "elk-%{+YYYY.MM.dd}"
  }
}

为了方便,把logstash镜像的权限设置为root,否则宿主机共享过去的Apache日志文件logstash都无权限访问

vim Dockerfile
​
FROM logstash:6.7.2
# Run as root so the pipeline can read the Apache logs bind-mounted from the host
USER root
# Relative paths resolve against the base image WORKDIR (/usr/share/logstash)
COPY logstash.yml ./config/logstash.yml
COPY logstash.conf ./pipeline/logstash.conf
docker build -t logstash:elk . 
docker run -itd --name logstash -v /etc/httpd/logs/:/opt/logs/ --net mynetwork --ip 172.111.0.30 -p 5044:5044 -p 9600:9600 logstash:elk
docker exec -it logstash bash
cd /opt/logs/
ls   # 确认宿主机 Apache 日志已共享进容器

访问几次宿主机的Apache服务,并查看Apache的日志文件是否共享到容器内

kibana镜像编排

cd /opt/kibana
vim kibana.yml
server.name: kibana
# "0" shorthand: bind all interfaces so the mapped host port works
server.host: "0"
# Container IPs are used here, so both nodes are reached on their container
# port 9200 — 9201 is only the host-side mapping for es2.
elasticsearch.hosts: [ "http://172.111.0.11:9200","http://172.111.0.12:9200" ]
​
vim Dockerfile
FROM kibana:6.7.2
COPY kibana.yml ./config/kibana.yml
# Temporarily switch to root to fix ownership of the copied config,
# then drop back to the unprivileged kibana user for runtime.
USER root
# ':' is the standard owner:group separator (the '.' form is deprecated)
RUN chown kibana:kibana ./config/kibana.yml
USER kibana
docker build -t kibana:elk . 
docker run -itd --name kibana --net mynetwork --ip 172.111.0.40 -p 5601:5601 kibana:elk
docker exec -it kibana bash

访问kibana的web页面,查看日志数据并建立对应索引

  • 16
    点赞
  • 12
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
你可以使用Docker部署ELK(Elasticsearch, Logstash, Kibana)堆栈。以下是一些步骤: 1. 安装Docker和Docker Compose:请确保你的机器上已经安装了Docker和Docker Compose。 2. 创建一个新的目录并在该目录下创建一个`docker-compose.yml`文件。 3. 在`docker-compose.yml`文件中添加以下内容: ```yaml version: '3' services: elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:7.14.0 container_name: elasticsearch environment: - discovery.type=single-node ports: - 9200:9200 - 9300:9300 volumes: - ./data:/usr/share/elasticsearch/data logstash: image: docker.elastic.co/logstash/logstash:7.14.0 container_name: logstash volumes: - ./logstash/config:/usr/share/logstash/pipeline ports: - 5044:5044 kibana: image: docker.elastic.co/kibana/kibana:7.14.0 container_name: kibana ports: - 5601:5601 ``` 这个`docker-compose.yml`文件定义了三个服务:Elasticsearch、Logstash和Kibana。每个服务都使用了ELK堆栈的官方Docker镜像。 4. 创建一个`data`目录,用于保存Elasticsearch的数据。 5. 在一个终端窗口中,导航到包含`docker-compose.yml`文件的目录,并运行以下命令来启动ELK堆栈: ```bash docker-compose up ``` 这将启动Elasticsearch、Logstash和Kibana容器,并将它们连接在一起。 6. 访问Kibana:在浏览器中访问`http://localhost:5601`,你将看到Kibana的登录界面。 现在,你已经成功地使用Docker部署ELK堆栈。你可以通过Logstash将日志数据发送到Elasticsearch,并使用Kibana来可视化和分析这些日志数据。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值