第十七周-20231119

1.实现Java日志实现多行合并收集

# 收集java日志,多行合并
root@elk-logstash:/etc/logstash/conf.d# cat es-log-to-es.conf 
# Tail the Elasticsearch JVM log and merge multi-line Java stack traces
# into single events before indexing.
input {
  file {
    path => "/data/eslogs/magedu-es-cluster.log"
    type => "eslog"
    stat_interval => "1"
    start_position => "beginning"
    codec => multiline {
      #pattern => "^\["
      pattern => "^\[[0-9]{4}\-[0-9]{2}\-[0-9]{2}" # a line starting with "[YYYY-MM-DD" begins a new event
      negate => "true"
      what => "previous"  # non-matching lines (stack-trace continuations) are appended to the previous event
    }
  }
}

# Ship merged events to Elasticsearch; "%{+YYYY.ww}" rolls a weekly index.
output {
  if [type] == "eslog" {
    elasticsearch {
      hosts =>  ["172.18.10.170:9200"]
      index => "magedu-eslog-%{+YYYY.ww}"
      user => "magedu"
      password => "123456"
    }}
}
root@elk-logstash:/data/eslogs# systemctl restart logstash.service 
# 

在这里插入图片描述

2.实现TCP日志收集、Syslog日志收集及基于ES API历史index

# TCP日志收集
root@elk-logstash:/etc/logstash/conf.d# vim tcp-log-to-es.conf 
# Listen as a TCP server on 9889; each line written to the socket
# becomes one event.
input {
  tcp {
    port => 9889
    type => "magedu-tcplog"
    mode => "server"  # logstash accepts inbound connections (vs. "client", which dials out)
  }
}


output {
  if [type] == "magedu-tcplog" {
    elasticsearch {
      hosts => ["172.18.10.170:9200"]
      index => "magedu-tcplog-%{+YYYY.MM.dd}"  # daily index
      user => "magedu"
      password => "123456"
  }}
}
root@elk-logstash:/etc/logstash/conf.d# systemctl restart logstash
# 向logstash发送日志
root@es-node1:/data/eslogs# echo "ERROR tcplog message1" > /dev/tcp/172.18.10.173/9889
root@es-node1:/data/eslogs# nc 172.18.10.173 9889 < /etc/passwd
# 日志展示

在这里插入图片描述

# Syslog日志收集
root@elk-logstash:~# apt update && apt install -y haproxy
root@elk-logstash:~# vim /etc/haproxy/haproxy.cfg 
listen  kibana
  bind 0.0.0.0:5602
  log global
  server 172.18.10.173 172.18.10.173:5601 check inter 2s fall 3 rise 3

listen elasticsearch-9200
  bind 0.0.0.0:9200
  log global
  server 172.18.10.170 172.18.10.170:9200 check inter 2s fall 3 rise 3
  server 172.18.10.171 172.18.10.171:9200 check inter 2s fall 3 rise 3
root@elk-logstash:~# systemctl  restart  haproxy.service
root@elk-logstash:~# vim /etc/rsyslog.d/49-haproxy.conf 
# Forward every message whose programname starts with "haproxy" to the
# remote logstash syslog input ("@@" = TCP forwarding; a single "@" would
# be UDP), then "stop" so it is not also written to local log files.
:programname, startswith, "haproxy" {
#  /var/log/haproxy.log
  @@172.18.10.173:514
  stop
}
root@elk-logstash:~# systemctl  restart  rsyslog.service
root@elk-logstash:/etc/logstash/conf.d# vim rsyslog-haproxy-to-es.conf 
# Receive rsyslog-forwarded HAProxy messages and index them weekly.
input{
  syslog {
    type => "rsyslog-haproxy"
    port => "514"  # local port to listen on
    # NOTE(review): binding a port below 1024 normally requires logstash to
    # run privileged or with CAP_NET_BIND_SERVICE — confirm on this host
}}

output{
  if [type] == "rsyslog-haproxy" {
    elasticsearch {
      hosts =>  ["172.18.10.170:9200"]
      index => "magedu-rsyslog-haproxy-%{+YYYY.ww}"  # weekly index
      user => "magedu"
      password => "123456"
    }}
}
root@elk-logstash:/etc/logstash/conf.d# systemctl  restart  logstash.service
# 基于脚本调用ES API实现索引的自动周期删除
# 删除单个索引
root@elk-logstash:~# curl -u magedu:123456 -X DELETE "http://172.31.2.102:9200/test_index?pretty"
# #基于脚本批量删除
root@elk-logstash:~# cat es-index-delete.sh
#!/bin/bash
# Delete day-partitioned Elasticsearch indices that are two days old,
# matching the "%{+YYYY.MM.dd}" index pattern used by the logstash outputs.
# Bug fix: the original echoed the undefined variable $FULL_NAME.
set -u

# Date stamp of the indices to remove (two days ago).
DATE=$(date -d "2 days ago" +%Y.%m.%d)

# Index name prefixes, one per line; intentional word-splitting of the
# unquoted expansion below turns them into the loop list.
index="
logstash-magedu-accesslog
magedu-app1-errorlog
"

for NAME in ${index}; do
  INDEX_NAME="${NAME}-${DATE}"
  echo "${INDEX_NAME}"  # was: echo $FULL_NAME (undefined variable)
  # Best-effort delete: a missing index returns 404 and the loop continues.
  curl -u magedu:123456 -XDELETE "http://172.18.10.170:9200/${INDEX_NAME}"
done

3.logstash基于Redis实现日志收集缓存后再消费至ES集群及filebeat-logstash-redis-logstash-es架构

# 部署redis  172.18.10.174
root@ubuntu-virtual-machine:~# apt install redis-server
root@ubuntu-virtual-machine:~# vim /etc/redis/redis.conf 
requirepass 123456
root@ubuntu-virtual-machine:~# systemctl restart redis-server.service 

# 安装filebeat  172.18.10.173
root@elk-logstash:/apps# dpkg -i filebeat-8.5.1-amd64.deb 
root@elk-logstash:/apps# grep -Ev "^$|^#" /etc/filebeat/filebeat.yml |grep -v "#"
# Filebeat: tail nginx error/access logs and forward them to logstash over
# the beats protocol. The custom fields.type value is read downstream as
# [fields][type] to route events to the correct redis list / ES index.
filebeat.inputs:
- type: filestream
  id: magedu-app1-errorlog   # fix: filestream ids must be unique per input
  enabled: true
  paths:
    - /apps/nginx/logs/error.log
  fields:
    project: magedu
    type: magedu-app1-errorlog

- type: filestream
  id: magedu-app1-accesslog  # fix: was "magedu-app1", colliding with the input above
  enabled: true
  paths:
    - /apps/nginx/logs/access.log
  fields:
    project: magedu
    type: magedu-app1-accesslog
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:
output.logstash:
  enabled: true
  hosts: ["172.18.10.174:5044"]  # logstash beats input
  loadbalance: true
  worker: 1
  compression_level: 3
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
root@elk-logstash:/apps# systemctl restart filebeat.service 

input {
  beats {
    port => 5044
    codec => "json"
  }
}

# 172.18.10.174 logstash配置
root@ubuntu-virtual-machine:/etc/logstash/conf.d# vim beats-magedu-to-redis.conf 
# Buffer beats events into per-type redis lists; a second logstash instance
# later consumes these lists and writes to Elasticsearch.
output {
  #stdout {
  #  codec => "rubydebug"
  #}
####################################
  if [fields][type] == "magedu-app1-accesslog" {
  redis {
    host => "172.18.10.174"
    password => "123456"
    port => "6379"
    db => "0"
    key => "magedu-app1-accesslog"  # list name mirrors the event type
    data_type => "list"
   }
  }
  if [fields][type] == "magedu-app1-errorlog" {
  redis {
    host => "172.18.10.174"
    password => "123456"
    port => "6379"
    db => "0"
    key => "magedu-app1-errorlog"
    data_type => "list"
     }
  }
}
root@ubuntu-virtual-machine:/etc/logstash/conf.d# systemctl restart logstash
# 验证redis数据

在这里插入图片描述

# 将redis数据写入es
root@elk-logstash:/etc/logstash/conf.d# vim magedu-filebeat-redis-to-es.conf 
# Drain the buffered events from the two redis lists and index them into
# Elasticsearch, routed by the filebeat-supplied [fields][type] value.
input {
  redis {
    data_type => "list"
    key => "magedu-app1-accesslog"
    host => "172.18.10.174"
    port => "6379"
    db => "0"
    password => "123456"
    codec => "json"  # decode the JSON-serialized events
  }

  redis {
    data_type => "list"
    key => "magedu-app1-errorlog"
    host => "172.18.10.174"
    port => "6379"
    db => "0"
    password => "123456"
    # NOTE(review): no explicit codec here; the redis input defaults to json,
    # so this should behave like the accesslog input above — confirm
  }
}

output {
  if [fields][type] == "magedu-app1-accesslog" {
    elasticsearch {
      hosts => ["172.18.10.170:9200"]
      index => "magedu-app1-accesslog-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
    }
  }

  if [fields][type] == "magedu-app1-errorlog" {
    elasticsearch {
      hosts => ["172.18.10.170:9200"]
      index => "magedu-app1-errorlog-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
    }
  }
}
root@elk-logstash:/etc/logstash/conf.d# systemctl restart logstash
# es验证

在这里插入图片描述

4.在Kibana 创建图像及Dashboard

垂直条形图-访问量最高的客户端前十IP

在这里插入图片描述

​ 表-访问量最高的页面

在这里插入图片描述

​ 饼图-状态码百分比

在这里插入图片描述

​ 创建仪表盘

在这里插入图片描述

5.在K8S环境部署kafka集群(kafka课程-基于Strimzi Operator部署kafka集群)或使用已有的kafka集群,用于后续K8S环境的日志收集

# 创建NS并部署strimzi-cluster-operator
[root@k8s-deployer 20231102]# kubectl create namespace myserver
[root@k8s-deployer 20231102]# kubectl apply -f https://strimzi.io/install/latest?namespace=myserver
[root@k8s-deployer 20231102]# kubectl get pod -n myserver
NAME                                        READY   STATUS             RESTARTS        AGE
strimzi-cluster-operator-95d88f6b5-75pq4    1/1     Running            0               3m26s
# 部署kafka集群
[root@k8s-deployer 20231102]# wget https://strimzi.io/examples/latest/kafka/kafka-persistent-single.yaml --no-check-certificate
[root@k8s-deployer 20231102]#  mv kafka-persistent-single.yaml 1.kafka-persistent-single.yaml
[root@k8s-deployer 7.strimzi-kafka-operator]# vim 1.kafka-persistent-single.yaml 
# Strimzi Kafka CR: 3-broker / 3-zookeeper persistent cluster with an extra
# NodePort listener so clients outside the k8s cluster can connect.
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
  name: myserver-kafka-cluster
  namespace: myserver # target namespace
spec:
  kafka:
    version: 3.5.1
    replicas: 3 # broker count (operator default is 1)
    listeners:
      - name: plain
        port: 9092
        type: internal
        tls: false
      - name: tls
        port: 9093
        type: internal
        tls: true
      - name: external # extra listener for clients outside the cluster
        port: 9094 # listener port
        type: nodeport # exposed through a NodePort service
        tls: false
        configuration:
          bootstrap:
            nodePort: 31092 # fixed host nodePort for the bootstrap address
    config:
      offsets.topic.replication.factor: 3
      transaction.state.log.replication.factor: 3
      transaction.state.log.min.isr: 3
      default.replication.factor: 1
      min.insync.replicas: 1
      inter.broker.protocol.version: "3.5"
    storage:
      type: jbod
      volumes:
      - id: 0
        type: persistent-claim
        class: nfs-csi 
        size: 100Gi
        deleteClaim: false
  zookeeper:
    replicas: 3
    storage:
      type: persistent-claim
      class: nfs-csi 
      size: 10Gi
      deleteClaim: false
  entityOperator:
    topicOperator: {}
    userOperator: {}
# 创建存储类
[root@k8s-deployer 1.storageclasses-dir]# kubectl apply -f 1-rbac.yaml 
[root@k8s-deployer 1.storageclasses-dir]# kubectl apply -f 2-storageclass.yaml 
[root@k8s-deployer 1.storageclasses-dir]# kubectl apply -f 3-nfs-provisioner.yaml
[root@k8s-deployer 1.storageclasses-dir]# kubectl get sc
NAME                PROVISIONER                                   RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-csi (default)   k8s-sigs.io/nfs-subdir-external-provisioner   Retain          Immediate           false                  67s
[root@k8s-deployer 7.strimzi-kafka-operator]# kubectl apply -f 1.kafka-persistent-single.yaml 
kafka.kafka.strimzi.io/myserver-kafka-cluster created
#验证pod,确认zk及kafka分别为running状态
[root@k8s-deployer tmp]#  kubectl get pod  -n myserver

6、Kubernetes日志收集—基于DaemonSet实现容器日志

# 构建镜像
[root@k8s-deployer 1.logstash-image-Dockerfile]# ll
总用量 16
-rw-r--r-- 1 root root 350 1119 19:50 build-commond.sh
-rw-r--r-- 1 root root 221 1119 19:50 Dockerfile
-rw-r--r-- 1 root root 805 1119 19:50 logstash.conf
-rw-r--r-- 1 root root  92 1119 19:50 logstash.yml
[root@k8s-deployer 1.logstash-image-Dockerfile]# vim Dockerfile 
# Custom logstash image with the pipeline and settings below baked in.
FROM logstash:7.12.1
USER root
WORKDIR /usr/share/logstash
#RUN rm -rf config/logstash-sample.conf
ADD logstash.yml /usr/share/logstash/config/logstash.yml
ADD logstash.conf /usr/share/logstash/pipeline/logstash.conf
[root@k8s-deployer 1.logstash-image-Dockerfile]# vim build-commond.sh 
#!/bin/bash
# Build the customized logstash image and push it to the Harbor registry.
# Fix: abort immediately if the build fails so a stale image is never pushed.
set -euo pipefail

nerdctl build -t harbor.linuxarchitect.io/baseimages/logstash:v7.12.1-json-file-log-v1 .

nerdctl push harbor.linuxarchitect.io/baseimages/logstash:v7.12.1-json-file-log-v1

# 采集配置文件
[root@k8s-deployer 1.logstash-image-Dockerfile]# vim logstash.conf 
# DaemonSet pipeline: tail containerd pod logs plus host syslog files and
# ship both to kafka; connection details come from container env vars.
input {
  file {
    #path => "/var/lib/docker/containers/*/*-json.log" #docker
    path => "/var/log/pods/*/*/*.log"
    start_position => "beginning"
    type => "jsonfile-daemonset-applog"
  }

  file {
    path => "/var/log/*.log"
    start_position => "beginning"
    type => "jsonfile-daemonset-syslog"
  }
}

output {
  if [type] == "jsonfile-daemonset-applog" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}" # injected via env in the k8s manifest
      topic_id => "${TOPIC_ID}"
      batch_size => 16384  # bytes per producer batch
      codec => "${CODEC}"
   } }

  if [type] == "jsonfile-daemonset-syslog" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}"
      topic_id => "${TOPIC_ID}"
      batch_size => 16384
      codec => "${CODEC}" # NOTE(review): syslog lines are not JSON; CODEC=json may mis-encode here — confirm
  }}
}
# 配置监听地址
[root@k8s-deployer 1.logstash-image-Dockerfile]# vim logstash.yml 
http.host: "0.0.0.0"

# 构建镜像
[root@k8s-deployer 1.logstash-image-Dockerfile]# sh build-commond.sh 

# 创建DaemonSet收集日志
[root@k8s-deployer 1.daemonset-logstash]# vim 2.DaemonSet-logstash.yaml 
# One logstash pod per node (masters included via the toleration) tailing
# host syslog and containerd pod logs, shipping to kafka.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: logstash-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: logstash-logging
spec:
  selector:
    matchLabels:
      name: logstash-elasticsearch
  template:
    metadata:
      labels:
        name: logstash-elasticsearch
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      containers:
      - name: logstash-elasticsearch
        image: harbor.linuxarchitect.io/baseimages/logstash:v7.12.1-json-file-log-v1
        env:
        - name: "KAFKA_SERVER"
          #value: "172.31.2.107:9092,172.31.2.108:9092,172.31.2.109:9092"
          value: "172.18.10.141:39092,172.18.10.142:39092,172.18.10.143:39092"
        - name: "TOPIC_ID"
          value: "jsonfile-log-topic"
        - name: "CODEC"
          value: "json"
#        resources:
#          limits:
#            cpu: 1000m
#            memory: 1024Mi
#          requests:
#            cpu: 500m
#            memory: 1024Mi
        volumeMounts:
        - name: varlog # host system-log mount
          mountPath: /var/log # matches the "/var/log/*.log" input path in logstash.conf
        - name: varlibdockercontainers # container-log mount; must match the logstash collection path
          #mountPath: /var/lib/docker/containers # docker mount path
          mountPath: /var/log/pods # containerd mount path; must equal the logstash file input path
          readOnly: false
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log # host system logs
      - name: varlibdockercontainers
        hostPath:
          #path: /var/lib/docker/containers # docker host log path
          path: /var/log/pods # containerd host log path

[root@k8s-deployer 1.daemonset-logstash]# kubectl apply -f 2.DaemonSet-logstash.yaml 

# 从kafka中获取日志写入es中
root@elk-logstash:/etc/logstash/conf.d# cd /etc/logstash/conf.d
root@elk-logstash:/etc/logstash/conf.d# vim logsatsh-daemonset-jsonfile-kafka-to-es.conf 

# Consume the daemonset-collected events from kafka and fan out to
# per-type daily Elasticsearch indices.
input {
  kafka {
    bootstrap_servers => "172.18.10.141:39092,172.18.10.142:39092,172.18.10.143:39092"
    topics => ["jsonfile-log-topic"]
    codec => "json"  # restore the event structure serialized by the producer
  }
}

output {
  #if [fields][type] == "app1-access-log" {
  if [type] == "jsonfile-daemonset-applog" {
    elasticsearch {
      hosts => ["172.18.10.170:9200","172.18.10.171:9200"]
      index => "jsonfile-daemonset-applog-%{+YYYY.MM.dd}"
      user => magedu
      password => "123456"
    }}

  if [type] == "jsonfile-daemonset-syslog" {
    elasticsearch {
      hosts => ["172.18.10.170:9200","172.18.10.171:9200"]
      index => "jsonfile-daemonset-syslog-%{+YYYY.MM.dd}"
      user => magedu
      password => "123456"
    }}
}

root@elk-logstash:/etc/logstash/conf.d# systemctl restart logstash

7、Kubernetes日志收集—以Sidecar容器实现Pod中的日志收集

# 使用sidecar容器(一个pod多容器)收集当前pod内一个或者多个业务容器的日志(通常基于emptyDir实现业务容器与sidecar之间的日志共享)。
# 构建镜像
[root@k8s-deployer 1.logstash-image-Dockerfile]# ll
总用量 16
-rw-r--r-- 1 root root 313 1119 19:50 build-commond.sh
-rw-r--r-- 1 root root 221 1119 19:50 Dockerfile
-rw-r--r-- 1 root root 740 1119 19:50 logstash.conf
-rw-r--r-- 1 root root  92 1119 19:50 logstash.yml
[root@k8s-deployer 1.logstash-image-Dockerfile]# cat Dockerfile 
FROM logstash:7.12.1
USER root
WORKDIR /usr/share/logstash 
#RUN rm -rf config/logstash-sample.conf
ADD logstash.yml /usr/share/logstash/config/logstash.yml
ADD logstash.conf /usr/share/logstash/pipeline/logstash.conf 
[root@k8s-deployer 1.logstash-image-Dockerfile]# cat logstash.conf 
# Sidecar pipeline: tail the tomcat logs shared through the pod's emptyDir
# volume and ship them to kafka; connection details come from env vars.
input {
  file {
    path => "/var/log/applog/catalina.out"
    start_position => "beginning"
    type => "app1-sidecar-catalina-log"
  }
  file {
    path => "/var/log/applog/localhost_access_log.*.txt"
    start_position => "beginning"
    type => "app1-sidecar-access-log"
  }
}

output {
  if [type] == "app1-sidecar-catalina-log" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}"
      topic_id => "${TOPIC_ID}"
      batch_size => 16384  # bytes per producer batch
      codec => "${CODEC}"
   } }

  if [type] == "app1-sidecar-access-log" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}"
      topic_id => "${TOPIC_ID}"
      batch_size => 16384
      codec => "${CODEC}"
  }}
}

[root@k8s-deployer 1.logstash-image-Dockerfile]# sh build-commond.sh 
# Tomcat app pod with a logstash sidecar; the two containers share the
# tomcat log directory through the "applogs" emptyDir volume.
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-tomcat-app1-deployment-label
  name: magedu-tomcat-app1-deployment # deployment name for this release
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: magedu-tomcat-app1-selector
  template:
    metadata:
      labels:
        app: magedu-tomcat-app1-selector
    spec:
      containers:
      - name: magedu-tomcat-app1-container
        image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/tomcat-app1:v1
        imagePullPolicy: IfNotPresent
        #imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"
        resources:
          limits:
            cpu: 1
            memory: "512Mi"
          requests:
            cpu: 500m
            memory: "512Mi"
        volumeMounts:
        - name: applogs # tomcat writes its logs onto the shared volume here
          mountPath: /apps/tomcat/logs
        startupProbe:
          httpGet:
            path: /myapp/index.html
            port: 8080
          initialDelaySeconds: 5 # delay before the first probe
          failureThreshold: 3  # consecutive failures before the container counts as failed
          periodSeconds: 3 # probe interval
        readinessProbe:
          httpGet:
            #path: /monitor/monitor.html
            path: /myapp/index.html
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
        livenessProbe:
          httpGet:
            #path: /monitor/monitor.html
            path: /myapp/index.html
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
      - name: sidecar-container # logstash sidecar shipping the shared tomcat logs to kafka
        image: harbor.linuxarchitect.io/baseimages/logstash:v7.12.1-sidecar
        imagePullPolicy: IfNotPresent
        #imagePullPolicy: Always
        env:
        - name: "KAFKA_SERVER"
          #value: "172.31.2.107:9092,172.31.2.108:9092,172.31.2.109:9092"
          value: "172.18.10.141:39092"
        - name: "TOPIC_ID"
          value: "tomcat-app1-topic"
        - name: "CODEC"
          value: "json"
        volumeMounts:
        - name: applogs # same volume as the app container, mounted where the sidecar pipeline reads
          mountPath: /var/log/applog
      volumes:
      - name: applogs # emptyDir shared between app and sidecar so the sidecar can read the app's logs
        emptyDir: {}

# 创建service
[root@k8s-deployer 2.sidecar-logstash]# cat 3.tomcat-service.yaml 
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-tomcat-app1-service-label
  name: magedu-tomcat-app1-service
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 40080
  selector:
    app: magedu-tomcat-app1-selector



# logstash配置文件
root@elk-logstash:/etc/logstash/conf.d# vim logsatsh-sidecar-kafka-to-es.conf 
# Consume the sidecar-shipped tomcat events from kafka and index them into
# daily Elasticsearch indices, routed by event type.
input {
  kafka {
    #bootstrap_servers => "172.31.4.101:9092,172.31.4.102:9092,172.31.4.103:9092"
    bootstrap_servers => "172.18.10.141:39092"
    topics => ["tomcat-app1-topic"]
    codec => "json"
  }
}



output {
  #if [fields][type] == "app1-access-log" {
  if [type] == "app1-sidecar-access-log" {
    elasticsearch {
      hosts => ["172.18.10.170:9200","172.18.10.171:9200"]
      index => "app1-sidecar-accesslog-%{+YYYY.MM.dd}"
      user => magedu
      password => "123456"
    }
  }


  if [type] == "app1-sidecar-catalina-log" {
    elasticsearch {
      hosts => ["172.18.10.170:9200","172.18.10.171:9200"]
      index => "app1-sidecar-catalinalog-%{+YYYY.MM.dd}"
      user => magedu
      password => "123456"
    }
  }

}

root@elk-logstash:~# systemctl restart logstash

8、Kubernetes日志收集—容器内置日志收集进程filebeat实现pod日志收集

# 容器内置日志收集服务进程
# 构建镜像
[root@k8s-deployer 1.webapp-filebeat-image-Dockerfile]# ll
总用量 31896
-rw-r--r-- 1 root root      300 1119 19:50 build-command.sh # 构建镜像并上传到本地仓库
-rw-r--r-- 1 root root    23611 1119 19:50 catalina.sh	# tomcat启动文件
-rw-r--r-- 1 root root      544 1119 19:50 Dockerfile	# dockerfile
-rw-r--r-- 1 root root 32600353 1119 19:51 filebeat-7.12.1-x86_64.rpm
-rw-r--r-- 1 root root      805 1119 19:51 filebeat.yml # filebeat配置文件
-rw-r--r-- 1 root root       63 1119 19:50 index.html 
drwxr-xr-x 2 root root       24 1119 19:51 myapp
-rw-r--r-- 1 root root      149 1119 19:50 myapp.tar.gz
-rw-r--r-- 1 root root      372 1119 19:51 run_tomcat.sh # tomcat启动脚本
-rw-r--r-- 1 root root     6462 1119 19:51 server.xml # tomcat配置文件

[root@k8s-deployer 1.webapp-filebeat-image-Dockerfile]# cat Dockerfile 
#tomcat web1
FROM harbor.linuxarchitect.io/pub-images/tomcat-base:v8.5.43 

ADD catalina.sh /apps/tomcat/bin/catalina.sh
ADD server.xml /apps/tomcat/conf/server.xml
#ADD myapp/* /data/tomcat/webapps/myapp/
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
ADD filebeat.yml /etc/filebeat/filebeat.yml 

ADD myapp.tar.gz /data/tomcat/webapps/myapp/
RUN chown  -R tomcat.tomcat /data/ /apps/
#ADD filebeat-7.5.1-x86_64.rpm /tmp/
#RUN cd /tmp && yum localinstall -y filebeat-7.5.1-amd64.deb

EXPOSE 8080 8443

CMD ["/apps/tomcat/bin/run_tomcat.sh"]

[root@k8s-deployer 1.webapp-filebeat-image-Dockerfile]# cat Dockerfile 
#tomcat web1
FROM harbor.linuxarchitect.io/pub-images/tomcat-base:v8.5.43 

ADD catalina.sh /apps/tomcat/bin/catalina.sh
ADD server.xml /apps/tomcat/conf/server.xml
#ADD myapp/* /data/tomcat/webapps/myapp/
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
ADD filebeat.yml /etc/filebeat/filebeat.yml 

ADD myapp.tar.gz /data/tomcat/webapps/myapp/
RUN chown  -R tomcat.tomcat /data/ /apps/
#ADD filebeat-7.5.1-x86_64.rpm /tmp/
#RUN cd /tmp && yum localinstall -y filebeat-7.5.1-amd64.deb

EXPOSE 8080 8443

CMD ["/apps/tomcat/bin/run_tomcat.sh"]

[root@k8s-deployer 1.webapp-filebeat-image-Dockerfile]# cat filebeat.yml 
# In-container filebeat: tail the tomcat logs and publish them directly to
# kafka; fields.type is read downstream as [fields][type] for routing.
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /apps/tomcat/logs/catalina.out
  fields:
    type: filebeat-tomcat-catalina  # routing key consumed by logstash
- type: log
  enabled: true
  paths:
    - /apps/tomcat/logs/localhost_access_log.*.txt 
  fields:
    type: filebeat-tomcat-accesslog
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:

output.kafka: # publish events to kafka
  hosts: ["172.18.10.141:39092","172.18.10.142:39092","172.18.10.143:39092"]
  required_acks: 1
  topic: "filebeat-magedu-app1"
  compression: gzip
  max_message_bytes: 1000000
#output.redis: # alternative output: buffer in redis instead of kafka
#  hosts: ["172.18.10.141:6379"]
#  key: "k8s-magedu-app1"
#  db: 1
#  timeout: 5
#  password: "123456"

[root@k8s-deployer 1.webapp-filebeat-image-Dockerfile]# cat run_tomcat.sh 
#!/bin/bash
# Container entrypoint: start filebeat in the background, launch tomcat as
# the tomcat user, then block forever so PID 1 stays alive.
#echo "nameserver 223.6.6.6" > /etc/resolv.conf
#echo "192.168.7.248 k8s-vip.example.com" >> /etc/hosts

/usr/share/filebeat/bin/filebeat -e -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat &
su - tomcat -c "/apps/tomcat/bin/catalina.sh start"
tail -f /etc/hosts  # keep a foreground process running (and thus the container)

# 构建上传镜像到本地仓库
[root@k8s-deployer 1.webapp-filebeat-image-Dockerfile]# sh build-command.sh 

[root@k8s-deployer 3.container-filebeat-process]# cat 2.filebeat-serviceaccount.yaml 
---
---
# Read-only cluster access (namespaces/pods/nodes) bound to the default
# ServiceAccount in "magedu" — presumably for filebeat's kubernetes
# metadata enrichment; TODO confirm against the filebeat config in use.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat-serviceaccount-clusterrole
  labels:
    k8s-app: filebeat-serviceaccount-clusterrole
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  - nodes
  verbs:
  - get
  - watch
  - list

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat-serviceaccount-clusterrolebinding
subjects:
- kind: ServiceAccount
  name: default
  namespace: magedu
roleRef:
  kind: ClusterRole
  name: filebeat-serviceaccount-clusterrole
  apiGroup: rbac.authorization.k8s.io

[root@k8s-deployer 3.container-filebeat-process]# cat 3.tomcat-app1.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-tomcat-app1-filebeat-deployment-label
  name: magedu-tomcat-app1-filebeat-deployment
  namespace: magedu
spec:
  replicas: 4
  selector:
    matchLabels:
      app: magedu-tomcat-app1-filebeat-selector
  template:
    metadata:
      labels:
        app: magedu-tomcat-app1-filebeat-selector
    spec:
      containers:
      - name: magedu-tomcat-app1-filebeat-container
        image: harbor.linuxarchitect.io/magedu/tomcat-app1:v1 
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"
        resources:
          limits:
            cpu: 1
            memory: "512Mi"
          requests:
            cpu: 500m
            memory: "512Mi"

[root@k8s-deployer 3.container-filebeat-process]# cat 4.tomcat-service.yaml 
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-tomcat-app1-filebeat-service-label
  name: magedu-tomcat-app1-filebeat-service
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30092
  selector:
    app: magedu-tomcat-app1-filebeat-selector

# 部署web服务
[root@k8s-deployer 3.container-filebeat-process]# kubectl apply -f 2.filebeat-serviceaccount.yaml 
[root@k8s-deployer 3.container-filebeat-process]# kubectl apply -f 3.tomcat-app1.yaml 
[root@k8s-deployer 3.container-filebeat-process]# kubectl apply -f 4.tomcat-service.yaml 

# 配置logstash
root@elk-logstash:~# cd /etc/logstash/conf.d/
root@elk-logstash:/etc/logstash/conf.d# vim 5.logstash-filebeat-process-kafka-to-es.conf 
# Consume filebeat-produced tomcat events from kafka and index them into
# daily Elasticsearch indices, routed by the filebeat [fields][type] value.
input {
  kafka {
    bootstrap_servers => "172.18.10.141:39092,172.18.10.142:39092,172.18.10.143:39092"
    topics => ["filebeat-magedu-app1"]
    codec => "json"
  }
}

output {
  if [fields][type] == "filebeat-tomcat-catalina" {
    elasticsearch {
      hosts => ["172.18.10.170:9200","172.18.10.171:9200"]
      index => "filebeat-tomcat-catalina-%{+YYYY.MM.dd}"
      user => magedu
      password => "123456"
    }}

  if [fields][type] == "filebeat-tomcat-accesslog" {
    elasticsearch {
      hosts => ["172.18.10.170:9200","172.18.10.171:9200"]
      index => "filebeat-tomcat-accesslog-%{+YYYY.MM.dd}"
      user => magedu
      password => "123456"
    }}

}

root@elk-logstash:/etc/logstash/conf.d# systemctl restart logstash
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值