11-2 Logging practice with log-pilot + Elasticsearch + Kibana

First, set up the base services: Elasticsearch, Kibana, and log-pilot.

elasticsearch.yaml 

---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-api
  namespace: kube-system
  labels:
    name: elasticsearch
spec:
  selector:
    app: es
  ports:
  - name: http
    port: 9200
    protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-discovery
  namespace: kube-system
  labels:
    name: elasticsearch
spec:
  selector:
    app: es
  ports:
  - name: transport
    port: 9300
    protocol: TCP
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: elasticsearch
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
spec:
# 3 replicas for high availability
  replicas: 3
  serviceName: "elasticsearch-service"
  selector:
    matchLabels:
      app: es
  template:
    metadata:
      labels:
        app: es
    spec:
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      serviceAccountName: dashboard-admin
      initContainers:
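      # Elasticsearch requires vm.max_map_count >= 262144 on the host,
      # so raise it from a privileged init container before ES starts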
      - name: init-sysctl
        image: busybox:1.27
        command:
        - sysctl
        - -w
        - vm.max_map_count=262144
        securityContext:
          privileged: true
      containers:
      - name: elasticsearch
        image: registry.cn-hangzhou.aliyuncs.com/imooc/elasticsearch:5.5.1
        ports:
        - containerPort: 9200
          name: http
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        securityContext:
          capabilities:
            add:
              - IPC_LOCK
              - SYS_RESOURCE
        resources:
          limits:
            memory: 4000Mi
          requests:
            cpu: 100m
            memory: 2000Mi
        env:
          - name: "http.host"
            value: "0.0.0.0"
          - name: "network.host"
            value: "_eth0_"
          - name: "cluster.name"
            value: "docker-cluster"
          - name: "bootstrap.memory_lock"
            value: "false"
          - name: "discovery.zen.ping.unicast.hosts"
            value: "elasticsearch-discovery"
          - name: "discovery.zen.ping.unicast.hosts.resolve_timeout"
            value: "10s"
          - name: "discovery.zen.ping_timeout"
            value: "6s"
          - name: "discovery.zen.minimum_master_nodes"
            value: "2"
          - name: "discovery.zen.fd.ping_interval"
            value: "2s"
          - name: "discovery.zen.no_master_block"
            value: "write"
          - name: "gateway.expected_nodes"
            value: "2"
          - name: "gateway.expected_master_nodes"
            value: "1"
          - name: "transport.tcp.connect_timeout"
            value: "60s"
          - name: "ES_JAVA_OPTS"
            value: "-Xms2g -Xmx2g"
        livenessProbe:
          tcpSocket:
            port: transport
          initialDelaySeconds: 20
          periodSeconds: 10
        volumeMounts:
        - name: es-data
          mountPath: /data
      terminationGracePeriodSeconds: 30
      volumes:
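      # each replica keeps its data in /es-data on the node it lands on
      # (hostPath, not a PersistentVolume)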
      - name: es-data
        hostPath:
          path: /es-data

If the dashboard-admin ServiceAccount has not been created before, create it first:

kubectl create sa dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
kubectl describe secret -n kube-system ${ADMIN_SECRET} |grep -E '^token' |awk '{print $2}'
 

 

kubectl  apply -f elasticsearch.yaml 

kubectl  get svc -n kube-system -o wide

kubectl get statefulset -n kube-system
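
Once the pods are Running, you can optionally check the cluster health through the elasticsearch-api Service. This is only a quick sketch using a throwaway busybox pod; any in-cluster shell with curl or wget works the same way, and if the image enforces the default elastic/changeme X-Pack credentials you will need to pass them as basic auth.

kubectl run es-health --rm -it --image=busybox:1.27 --restart=Never -n kube-system \
  -- wget -qO- "http://elasticsearch-api:9200/_cluster/health?pretty"

A healthy 3-node cluster should report status green (or yellow while replicas are still being allocated).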

After Elasticsearch is up, deploy log-pilot.

log-pilot.yaml 

---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: log-pilot
  namespace: kube-system
  labels:
    k8s-app: log-pilot
    kubernetes.io/cluster-service: "true"
spec:
  template:
    metadata:
      labels:
        k8s-app: log-es
        kubernetes.io/cluster-service: "true"
        version: v1.22
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      serviceAccountName: dashboard-admin
      containers:
      - name: log-pilot
        image: registry.cn-hangzhou.aliyuncs.com/imooc/log-pilot:0.9-filebeat
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        env:
          - name: "FILEBEAT_OUTPUT"
            value: "elasticsearch"
          - name: "ELASTICSEARCH_HOST"
            value: "elasticsearch-api"
          - name: "ELASTICSEARCH_PORT"
            value: "9200"
          - name: "ELASTICSEARCH_USER"
            value: "elastic"
          - name: "ELASTICSEARCH_PASSWORD"
            value: "changeme"
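        # log-pilot watches container start/stop events through docker.sock and
        # reads container stdout json files and pod volumes from the host root,
        # which is mounted read-only at /host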
        volumeMounts:
        - name: sock
          mountPath: /var/run/docker.sock
        - name: root
          mountPath: /host
          readOnly: true
        - name: varlib
          mountPath: /var/lib/filebeat
        - name: varlog
          mountPath: /var/log/filebeat
        securityContext:
          capabilities:
            add:
            - SYS_ADMIN
      terminationGracePeriodSeconds: 30
      volumes:
      - name: sock
        hostPath:
          path: /var/run/docker.sock
      - name: root
        hostPath:
          path: /
      - name: varlib
        hostPath:
          path: /var/lib/filebeat
          type: DirectoryOrCreate
      - name: varlog
        hostPath:
          path: /var/log/filebeat
          type: DirectoryOrCreate
 

 

kubectl  apply -f log-pilot.yaml

kubectl get ds -n kube-system

Only 8 pods show up here because one of my 9 nodes carries a taint that the DaemonSet does not tolerate, so no pod was scheduled on it.
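
To confirm which node is affected, you can inspect the taints on each node; the custom-columns query below is just one convenient way to list them all at once:

kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints

If you also want log-pilot to run on that node, add a matching toleration to the DaemonSet spec (it already tolerates the master taint).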

Start Kibana

kibana.yaml 
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: kube-system
  labels:
    component: kibana
spec:
  selector:
    component: kibana
  ports:
  - name: http
    port: 80
    targetPort: http
---
#ingress
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kibana
  namespace: kube-system
spec:
  rules:
  - host: kibana.pdabc.com
    http:
      paths:
      - path: /
        backend:
          serviceName: kibana
          servicePort: 80
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: kibana
  namespace: kube-system
  labels:
    component: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      component: kibana
  template:
    metadata:
      labels:
        component: kibana
    spec:
      containers:
      - name: kibana
        image: registry.cn-hangzhou.aliyuncs.com/imooc/kibana:5.5.1
        env:
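        # ELASTICSEARCH_URL points at the elasticsearch-api Service created above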
        - name: CLUSTER_NAME
          value: docker-cluster
        - name: ELASTICSEARCH_URL
          value: http://elasticsearch-api:9200/
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 5601
          name: http
 

Deploy Kibana:

kubectl  apply -f kibana.yaml

 

Check whether Kibana is in a healthy state:

kubectl  get deploy -n kube-system

 

Add a hosts entry for the Ingress hostname and open Kibana in a browser.
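
For example, on the machine you browse from, point the hostnames used in this article at the IP of a node running the ingress controller (the IP below is only a placeholder):

# /etc/hosts
192.168.0.100  kibana.pdabc.com
192.168.0.100  web.pdabc.com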

Check whether log-pilot is producing logs:

docker ps |grep log-pilot

docker logs -f 24a5110e9983

The "enable pilot: filebeat" line in the output shows that log collection is handled by filebeat.
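
If you prefer not to shell into the node, the same output can be read through kubectl; pick any pod from the DaemonSet (the pod name below is a placeholder):

kubectl get pods -n kube-system -l k8s-app=log-es
kubectl logs -f -n kube-system <log-pilot-pod-name>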

 

Next, pick a service and configure its logging so that log-pilot collects it.

Create web.yaml:

#deploy
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-demo
spec:
  selector:
    matchLabels:
      app: web-demo
  replicas: 3
  template:
    metadata:
      labels:
        app: web-demo
    spec:
      containers:
      - name: web-demo
        image: harbor.pdabc.com/kubernetes/web:v3
        ports:
        - containerPort: 8080
        env:
# The env name must start with aliyun_logs_ followed by a name of your choice.
# When the output is Elasticsearch this name becomes the index name; for Kafka it is the topic name.
        - name: aliyun_logs_catalina
# collect the container's stdout
          value: "stdout"
        - name: aliyun_logs_access
          value: "/usr/local/tomcat/logs/*"
# mount the log directory onto an emptyDir volume so it ends up on the host, where log-pilot can read it
        volumeMounts:
        - mountPath: /usr/local/tomcat/logs
          name: accesslogs
      volumes:
      - name: accesslogs
        emptyDir: {}
---
#service
apiVersion: v1
kind: Service
metadata:
  name: web-demo
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app: web-demo
  type: ClusterIP


---
#ingress
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: web-demo
spec:
  rules:
  - host: web.pdabc.com
    http:
      paths:
      - path: /
        backend:
          serviceName: web-demo
          servicePort: 80
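
The aliyun_logs_<name> prefix is the whole contract between the application and log-pilot: each such env var declares one log source, its value is either stdout or a file glob inside the container, and the chosen name becomes the Elasticsearch index (or Kafka topic). As a sketch, a hypothetical extra file-based source would just be one more env var plus a matching volume; the names and paths below are illustrative and not part of the original manifest:

        env:
        - name: aliyun_logs_errorlog
          value: "/app/logs/error*.log"
        volumeMounts:
        - mountPath: /app/logs
          name: errorlogs
      volumes:
      - name: errorlogs
        emptyDir: {}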
 

Start the service:

kubectl  apply -f web.yaml

kubectl  get pods -o wide

Check the log-pilot logs again on the node where a web-demo pod was scheduled:

time="2020-01-03T07:07:58Z" level=debug msg="Process container start event: 5f151b787624ed954014b642d9cd27148b2b402a3bc1ea99e0c7d4403f5d6f93" 
time="2020-01-03T07:07:58Z" level=debug msg="5f151b787624ed954014b642d9cd27148b2b402a3bc1ea99e0c7d4403f5d6f93 has not log config, skip" 
time="2020-01-03T07:09:05Z" level=debug msg="Process container destory event: 5f151b787624ed954014b642d9cd27148b2b402a3bc1ea99e0c7d4403f5d6f93" 
time="2020-01-03T07:09:05Z" level=info msg="begin to watch log config: 5f151b787624ed954014b642d9cd27148b2b402a3bc1ea99e0c7d4403f5d6f93.yml" 
time="2020-01-03T07:09:18Z" level=debug msg="Process container start event: 87cbcc9bcded05e77d89bac6700c77cea1b28ab207e1e9c2c5d7fc2b97626ff7" 
time="2020-01-03T07:09:18Z" level=debug msg="87cbcc9bcded05e77d89bac6700c77cea1b28ab207e1e9c2c5d7fc2b97626ff7 has not log config, skip" 
time="2020-01-03T07:09:25Z" level=debug msg="Process container start event: 0b6b7d9edbb82bf035bbb17fce7a5c150022f239cb577bb35a84a7db33489568" 
# access is the log name declared via aliyun_logs_access
# the /host prefix in the path shows that the host root filesystem is mounted at /host inside the log-pilot container
time="2020-01-03T07:09:25Z" level=info msg="logs: 0b6b7d9edbb82bf035bbb17fce7a5c150022f239cb577bb35a84a7db33489568 = &{access /host/var/lib/kubelet/pods/842fcda4-093c-4e10-b2d4-160e763abf2a/volumes/kubernetes.io~empty-dir/accesslogs /usr/local/tomcat/logs nonex map[time_key:_timestamp] * map[index:access topic:access]  true false}" 
time="2020-01-03T07:09:25Z" level=info msg="logs: 0b6b7d9edbb82bf035bbb17fce7a5c150022f239cb577bb35a84a7db33489568 = &{catalina /host/var/lib/docker/containers/0b6b7d9edbb82bf035bbb17fce7a5c150022f239cb577bb35a84a7db33489568  nonex map[time_format:%Y-%m-%dT%H:%M:%S.%NZ] 0b6b7d9edbb82bf035bbb17fce7a5c150022f239cb577bb35a84a7db33489568-json.log* map[index:catalina topic:catalina]  false true}" 
time="2020-01-03T07:09:25Z" level=info msg="Reload filebeat" 
time="2020-01-03T07:09:25Z" level=info msg="Start reloading" 
time="2020-01-03T07:09:25Z" level=debug msg="not need to reload filebeat" 
time="2020-01-03T07:09:49Z" level=info msg="log config 5f151b787624ed954014b642d9cd27148b2b402a3bc1ea99e0c7d4403f5d6f93.yml has been removed and ignore" 

 

Check those directories on the host:

ls /var/lib/kubelet/pods/842fcda4-093c-4e10-b2d4-160e763abf2a/volumes/kubernetes.io~empty-dir/accesslogs

ls /var/lib/docker/containers/0b6b7d9edbb82bf035bbb17fce7a5c150022f239cb577bb35a84a7db33489568
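
Before building the Kibana index patterns, you can optionally confirm that the indices have actually been created in Elasticsearch. Again, a throwaway busybox pod is used here only as a sketch; add the elastic/changeme credentials if the image enforces X-Pack security.

kubectl run es-indices --rm -it --image=busybox:1.27 --restart=Never -n kube-system \
  -- wget -qO- "http://elasticsearch-api:9200/_cat/indices?v"

The access and catalina indices (typically with a date suffix, which is why the Kibana pattern below uses access*) should appear in the list.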

 

Configure the Kibana index patterns

Set the index name to access*

Choose @timestamp as the time field

Create an index pattern for catalina in the same way

 

Then view the logs in Discover.

You can also filter by the docker_container name and the k8s_pod field.

Access http://web.pdabc.com/hello?name=jiaminxu

The request then shows up in Kibana.
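
To have more than a single entry to search for, you can generate a bit of traffic against the Ingress first (this assumes web.pdabc.com resolves to an ingress node, e.g. via the hosts entry added earlier):

for i in $(seq 1 20); do
  curl -s "http://web.pdabc.com/hello?name=jiaminxu" > /dev/null
done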

 

 
