k8s安装EFK日志服务

1、下载安装包并解压

# Download the Kubernetes v1.16.6 server tarball, which bundles the
# cluster addon manifests used below.
wget https://dl.k8s.io/v1.16.6/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes
# The server tarball contains a nested source tarball; the EFK addon
# manifests live inside it under cluster/addons/fluentd-elasticsearch.
tar xf kubernetes-src.tar.gz
cd cluster/addons/fluentd-elasticsearch

2、修改配置文件

cat es-statefulset.yaml

# RBAC authn and authz
# ServiceAccount the Elasticsearch pods run under (referenced by
# serviceAccountName in the StatefulSet later in this file).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    # Managed by the cluster addon manager in Reconcile mode
    addonmanager.kubernetes.io/mode: Reconcile
---
# Read-only cluster role: "get" on core-API services, namespaces and
# endpoints, which Elasticsearch's k8s discovery needs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: elasticsearch-logging
  labels:
    k8s-app: elasticsearch-logging
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""            # "" denotes the core API group
  resources:
  - "services"
  - "namespaces"
  - "endpoints"
  verbs:
  - "get"
---
# Grants the ClusterRole above to the elasticsearch-logging ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: elasticsearch-logging
  labels:
    k8s-app: elasticsearch-logging
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: elasticsearch-logging
  namespace: kube-system
  apiGroup: ""    # ServiceAccount subjects use the core ("") group
roleRef:
  kind: ClusterRole
  name: elasticsearch-logging
  # NOTE(review): rbac/v1 normally expects "rbac.authorization.k8s.io"
  # here; "" is what this manifest ships and the apply output later in
  # this document shows it was accepted — confirm on your cluster version.
  apiGroup: ""
---
# Elasticsearch deployment itself
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    version: v7.2.0
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # Governing service name that gives each pod a stable network identity
  serviceName: elasticsearch-logging
  replicas: 2
  selector:
    matchLabels:
      k8s-app: elasticsearch-logging
      version: v7.2.0
  template:
    metadata:
      labels:
        k8s-app: elasticsearch-logging
        version: v7.2.0
    spec:
      # Uses the ServiceAccount/RBAC objects defined earlier in this file
      serviceAccountName: elasticsearch-logging
      containers:
      - image: quay.io/fluentd_elasticsearch/elasticsearch:v7.2.0
        name: elasticsearch-logging
        # modified: use a locally cached image when present (k8s image
        # pull policy) — adjust to your own environment
        imagePullPolicy: IfNotPresent
        resources:
          # need more cpu upon initialization, therefore burstable class
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 9200
          name: db          # REST/HTTP port
          protocol: TCP
        - containerPort: 9300
          name: transport   # node-to-node transport port
          protocol: TCP
        volumeMounts:
        - name: elasticsearch-logging
          mountPath: /data
        env:
        - name: "NAMESPACE"
          valueFrom:
            fieldRef:
              # Exposes the pod's own namespace to the container
              fieldPath: metadata.namespace
      volumes:
      - name: elasticsearch-logging
        # NOTE(review): emptyDir is node-local scratch storage — index data
        # is lost when a pod is rescheduled; consider volumeClaimTemplates
        # with a PVC for durable storage.
        emptyDir: {}
      # Elasticsearch requires vm.max_map_count to be at least 262144.
      # If your OS already sets up this number to a higher value, feel free
      # to remove this init container.
      initContainers:
      - image: alpine:3.6
        command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
        name: elasticsearch-logging-init
        securityContext:
          privileged: true    # needed to set a sysctl on the host kernel
cat kibana-deployment.yaml

# Kibana front-end for the elasticsearch-logging service above.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana-logging
  namespace: kube-system
  labels:
    k8s-app: kibana-logging
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kibana-logging
  template:
    metadata:
      labels:
        k8s-app: kibana-logging
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      containers:
      - name: kibana-logging
        image: docker.elastic.co/kibana/kibana-oss:7.2.0
        resources:
          # need more cpu upon initialization, therefore burstable class
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
          # In-cluster Elasticsearch endpoint (service defined alongside)
          - name: ELASTICSEARCH_HOSTS
            value: http://elasticsearch-logging:9200
          - name: SERVER_NAME
            value: kibana-logging
          - name: SERVER_BASEPATH
            # modified: empty base path so Kibana is served from "/" —
            # suits the direct NodePort access used in step 5 below
            value: ""
          - name: SERVER_REWRITEBASEPATH
            value: "false"
        ports:
        - containerPort: 5601
          name: ui
          protocol: TCP
        # Restart the container if /api/status stops responding
        livenessProbe:
          httpGet:
            path: /api/status
            port: ui
          initialDelaySeconds: 5
          timeoutSeconds: 10
        # Only route traffic to the pod once /api/status responds
        readinessProbe:
          httpGet:
            path: /api/status
            port: ui
          initialDelaySeconds: 5
          timeoutSeconds: 10

3、安装yaml文件

kubectl apply -f .
service/elasticsearch-logging created
serviceaccount/elasticsearch-logging created
clusterrole.rbac.authorization.k8s.io/elasticsearch-logging created
clusterrolebinding.rbac.authorization.k8s.io/elasticsearch-logging created
statefulset.apps/elasticsearch-logging created
configmap/fluentd-es-config-v0.2.0 created
serviceaccount/fluentd-es created
clusterrole.rbac.authorization.k8s.io/fluentd-es created
clusterrolebinding.rbac.authorization.k8s.io/fluentd-es created
daemonset.apps/fluentd-es-v2.7.0 created
deployment.apps/kibana-logging created
service/kibana-logging created

4、查看状态

[root@master1 ~]# kubectl get pod --all-namespaces
NAMESPACE      NAME                                      READY   STATUS    RESTARTS   AGE
istio-system   istio-citadel-8648cbcbf8-wnb9l            1/1     Running   0          40h
istio-system   istio-galley-6bcbfb89fc-h5g8s             2/2     Running   0          40h
istio-system   istio-ingressgateway-58bdd8c5f6-zzmjn     1/1     Running   0          40h
istio-system   istio-pilot-65ffb49779-dr24l              2/2     Running   0          40h
istio-system   istio-policy-7fbfbbd67f-wms8c             2/2     Running   0          40h
istio-system   istio-sidecar-injector-7c9f94b474-dfnfh   1/1     Running   0          40h
istio-system   istio-telemetry-d79967d58-rj5ll           2/2     Running   0          40h
istio-system   prometheus-946f9f9d8-8lkv2                1/1     Running   0          40h
kube-system    coredns-66bff467f8-trdxz                  1/1     Running   0          45h
kube-system    coredns-66bff467f8-zkqzz                  1/1     Running   0          45h
kube-system    elasticsearch-logging-0                   1/1     Running   0          48m
kube-system    elasticsearch-logging-1                   1/1     Running   0          39m
kube-system    etcd-master1                              1/1     Running   0          45h
kube-system    etcd-master2                              1/1     Running   1          45h
kube-system    etcd-master3                              1/1     Running   0          45h
kube-system    fluentd-es-v2.7.0-9jt5q                   1/1     Running   0          48m
kube-system    fluentd-es-v2.7.0-g48wd                   1/1     Running   0          48m
kube-system    fluentd-es-v2.7.0-qj76t                   1/1     Running   0          48m
kube-system    fluentd-es-v2.7.0-vp29s                   1/1     Running   0          48m
kube-system    kibana-logging-dc6454c9c-fzhql            1/1     Running   8          48m
kube-system    kube-apiserver-master1                    1/1     Running   0          45h
kube-system    kube-apiserver-master2                    1/1     Running   2          45h
kube-system    kube-apiserver-master3                    1/1     Running   0          45h
kube-system    kube-controller-manager-master1           1/1     Running   6          45h
kube-system    kube-controller-manager-master2           1/1     Running   2          45h
kube-system    kube-controller-manager-master3           1/1     Running   1          45h
kube-system    kube-flannel-ds-kbq9k                     1/1     Running   0          45h
kube-system    kube-flannel-ds-ndpzj                     1/1     Running   0          45h
kube-system    kube-flannel-ds-pvf7p                     1/1     Running   0          45h
kube-system    kube-flannel-ds-wzf7w                     1/1     Running   0          45h
kube-system    kube-proxy-7h5sb                          1/1     Running   0          45h
kube-system    kube-proxy-qfmww                          1/1     Running   0          45h
kube-system    kube-proxy-v8sdn                          1/1     Running   0          45h
kube-system    kube-proxy-zrdhj                          1/1     Running   0          45h
kube-system    kube-scheduler-master1                    1/1     Running   2          45h
kube-system    kube-scheduler-master2                    1/1     Running   2          45h
kube-system    kube-scheduler-master3                    1/1     Running   1          45h

5、暴露服务供外网访问

[root@master1 ~]# kubectl patch -n kube-system svc kibana-logging -p '{"spec":{"type":"NodePort"}}'
service/kibana-logging patched


[root@master1 fluentd-elasticsearch]# kubectl get svc kibana-logging -n kube-system
NAME             TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
kibana-logging   NodePort   10.102.215.9   <none>        5601:31937/TCP   38m

6、web端访问,打开浏览器输入http://192.168.200.3:31937进行操作

  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值