Docker Kubernetes Monitoring----Kubernetes High-Availability Cluster

1. Environment cleanup

[root@server2 ~]# kubectl delete nodes server4    ## remove server3 and server4 from the cluster (run on server2)
node "server4" deleted
[root@server2 ~]# kubectl delete nodes server3
node "server3" deleted
[root@server2 ~]# kubectl delete nodes server2
node "server2" deleted
[root@server2 ~]# kubeadm reset    ## reset kubeadm on each server
[root@server3 ~]# kubeadm reset 
[root@server4 ~]# kubeadm reset

[root@server2 ~]# ipvsadm --clear
[root@server3 ~]# ipvsadm --clear
[root@server4 ~]# ipvsadm --clear
[root@server2 ~]# ipvsadm -l     ## on server2/3/4, confirm no ipvs rules remain; if any exist, run ipvsadm --clear
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
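
Note that kubeadm reset does not clean up CNI configuration or iptables/IPVS rules on its own (it prints a reminder at the end), which is why ipvsadm --clear is run above. If the nodes will be re-initialized, the leftover CNI configuration is also commonly removed; a sketch of the usual extra cleanup:

[root@server2 ~]# rm -rf /etc/cni/net.d      ## remove leftover CNI configuration
[root@server2 ~]# iptables -F && iptables -t nat -F && iptables -X   ## flush remaining iptables rules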

2. Using pacemaker to build k8s high availability (HA for haproxy)

2.1 Install and configure haproxy
[root@foundation50 addons]# cd /var/www/html/rhel7.6/addons
[root@foundation50 addons]# ls
HighAvailability  ResilientStorage

[root@server5 sysconfig]# cd /etc/yum.repos.d/
[root@server5 yum.repos.d]# vim westos.repo 
[dvd]
name=rhel7.6
baseurl=http://192.168.0.100/rhel7.6
gpgcheck=0

[HighAvailability]
name=HighAvailability
baseurl=http://192.168.0.100/rhel7.6/addons/HighAvailability/
gpgcheck=0

[root@server5 yum.repos.d]# cd /etc/haproxy/
[root@server5 haproxy]# ls
haproxy.cfg
[root@server5 haproxy]# vim haproxy.cfg 
[root@server5 haproxy]# systemctl enable --now haproxy.service  ## start haproxy and enable it at boot
[root@server5 haproxy]# systemctl status haproxy.service 
[root@server5 haproxy]# netstat -antlp
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      13549/haproxy 
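
The haproxy.cfg edit itself is not shown above; the essential part is a TCP frontend on port 6443 that balances across the three master apiservers. A minimal sketch (the addresses of server3 and server4 are assumed to follow the 192.168.0.x numbering used elsewhere in this post):

frontend k8s-apiserver
    bind *:6443
    mode tcp
    default_backend k8s-masters

backend k8s-masters
    mode tcp
    balance roundrobin
    server server2 192.168.0.2:6443 check
    server server3 192.168.0.3:6443 check
    server server4 192.168.0.4:6443 check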

2.2 Install pacemaker
## 1. Passwordless SSH
[root@server5 haproxy]# ssh-keygen   
[root@server5 haproxy]# ssh-copy-id server6
[root@server5 haproxy]# cd /etc/yum.repos.d/
[root@server5 yum.repos.d]# ls
redhat.repo  westos.repo
[root@server5 yum.repos.d]# scp westos.repo server6:/etc/yum.repos.d/  ## copy the repo file to server6

## 2. Install pacemaker
[root@server5 haproxy]# yum install -y pacemaker pcs psmisc policycoreutils-python
[root@server5 yum.repos.d]# ssh server6 yum install -y haproxy pacemaker pcs psmisc policycoreutils-python  
[root@server5 yum.repos.d]# cd /etc/haproxy/
[root@server5 haproxy]# ls
haproxy.cfg  kubeadm-init.yaml
[root@server5 haproxy]# scp haproxy.cfg server6:/etc/haproxy/
[root@server6 haproxy]# systemctl start haproxy.service                         
[root@server6 haproxy]# netstat -antlp
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      3922/haproxy  

## 3. Start the pcsd service
[root@server5 haproxy]# systemctl stop haproxy.service
[root@server6 haproxy]# systemctl stop haproxy.service 
[root@server5 haproxy]# systemctl enable --now pcsd.service   
Created symlink from /etc/systemd/system/multi-user.target.wants/pcsd.service to /usr/lib/systemd/system/pcsd.service.
[root@server5 haproxy]# ssh server6 systemctl enable --now pcsd.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/pcsd.service to /usr/lib/systemd/system/pcsd.service.

2.3 Configure pacemaker
## 1. Set a password for the hacluster user (created by the pcs package)
[root@server5 haproxy]# echo westos | passwd --stdin hacluster
[root@server5 haproxy]# ssh server6 'echo westos | passwd --stdin hacluster'
Changing password for user hacluster.
passwd: all authentication tokens updated successfully.
[root@server6 haproxy]# cat /etc/shadow  ## verify the password has been set

## 2. Authenticate the two cluster nodes
[root@server5 haproxy]# pcs cluster auth server5 server6
Username: hacluster
Password: 
server5: Authorized
server6: Authorized
## 3. Set up the cluster
[root@server5 haproxy]# pcs cluster setup --name mycluster server5 server6

## 4. Start the cluster and enable it at boot
[root@server5 haproxy]# pcs cluster start --all
[root@server5 haproxy]# pcs cluster enable --all
[root@server5 haproxy]# pcs property set stonith-enabled=false   ## no fence device yet; fencing is re-enabled once fence_xvm is configured
[root@server5 haproxy]# crm_verify -LV   ## validate the configuration
[root@server5 haproxy]# pcs status

2.4 Configure resources
### 1. Configure the VIP resource
[root@server5 haproxy]# pcs resource create vip ocf:heartbeat:IPaddr2 ip=192.168.0.101 cidr_netmask=32 op monitor interval=30s
[root@server5 haproxy]# pcs status
[root@server5 haproxy]# ip addr

### 2. Configure the haproxy service resource
[root@server5 haproxy]# pcs resource create haproxy systemd:haproxy op monitor interval=60s

### 3. Put both resources in one group
[root@server5 haproxy]# pcs resource group add hagroup vip haproxy   ## grouping keeps vip and haproxy together on the same node
[root@server5 haproxy]# pcs status
[root@server5 haproxy]# ip addr
[root@server5 haproxy]# netstat -antlp | grep :6443

2.5 Test active/standby failover (hot standby)
[root@server5 haproxy]# pcs node standby   ## put server5 into standby; resources fail over to server6
[root@server5 haproxy]# pcs status
[root@server6 haproxy]# ip addr
[root@server6 haproxy]# netstat -antlp | grep :6443
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      3673/haproxy     

[root@server5 haproxy]# pcs node unstandby   ## bring server5 back online
[root@server5 haproxy]# pcs status

3. Deploy the k8s cluster

3.1 k8s configuration (the three master nodes)
3.1.1 Modify the initialization file
# 1. Modify the initialization file
[root@server2 ~]# kubeadm config print init-defaults > kubeadm-init.yaml  ## generate the init file
[root@server2 ~]# vim kubeadm-init.yaml    ## edit the file; the final contents are shown below
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.2
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: server2
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.0.101:6443"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: reg.westos.org/k8s
#imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.2
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs     ## use the ipvs module for kube-proxy
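
Once the cluster has been initialized (section 3.1.4), a quick way to confirm that kube-proxy is really running in ipvs mode is to look at the virtual-server table on a master; the kubernetes service ClusterIP (10.96.0.1, the first address of the serviceSubnet above) should show up as a virtual server pointing at the apiservers:

[root@server2 ~]# ipvsadm -ln    ## a TCP 10.96.0.1:443 entry should appear once kube-proxy is up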

3.1.2 Delete all images on server2/3/4
## 2. Delete all images on server2/3/4, then pull the images k8s needs from the local registry
[root@server2 ~]# docker rmi `docker images | grep -v ^REPOSITORY | awk '{print $1":"$2}'`
3.1.3 Push the required images to the local registry
[root@server1 harbor]# docker pull registry.aliyuncs.com/google_containers/pause:3.2
[root@server1 harbor]# docker pull registry.aliyuncs.com/google_containers/coredns:1.7.0
[root@server1 harbor]# docker pull registry.aliyuncs.com/google_containers/etcd:3.4.13-0
[root@server1 harbor]# docker pull quay.io/coreos/flannel:v0.12.0-amd64
[root@server1 harbor]# docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.20.2
[root@server1 harbor]# docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.20.2
[root@server1 harbor]# docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.20.2
[root@server1 harbor]# docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.20.2
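
The pulled images then have to be re-tagged into the k8s project of the local harbor registry (reg.westos.org/k8s) and pushed there, so that the imageRepository set in kubeadm-init.yaml can resolve them. A sketch for one image; the rest follow the same pattern:

[root@server1 harbor]# docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.20.2 reg.westos.org/k8s/kube-apiserver:v1.20.2
[root@server1 harbor]# docker push reg.westos.org/k8s/kube-apiserver:v1.20.2
## repeat for kube-controller-manager, kube-scheduler, kube-proxy, pause, etcd, coredns and flannel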

3.1.4 Initialize k8s
[root@server2 ~]# kubeadm init   --config kubeadm-init.yaml --upload-certs ## initialize; the output contains two join commands: one with --control-plane for joining as an additional master, and one for joining as a worker. All three masters must share the certificates, hence --upload-certs.

[root@server2 ~]# cp /etc/kubernetes/admin.conf .kube/config 
cp: overwrite ‘.kube/config’? y
[root@server2 ~]# kubectl get node
[root@server2 ~]# kubectl get pod -n kube-system
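
The token and certificate key printed by kubeadm init expire (24 hours and 2 hours respectively by default). If they have expired before the other masters join, they can be regenerated:

[root@server2 ~]# kubeadm token create --print-join-command        ## prints a fresh worker join command
[root@server2 ~]# kubeadm init phase upload-certs --upload-certs   ## prints a fresh certificate key for --control-plane joins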

[root@server3 ~]# cd /etc/docker/
[root@server3 docker]# cat daemon.json 
{
  "registry-mirrors": ["https://reg.westos.org"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}

3.1.5 Node scale-out (join the other masters)
[root@server3 docker]# kubeadm join 192.168.0.101:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:fb02d570e5d0b6ada19eac1376361d4535787d37a23d1221d637a27f41adb7a9 \
>     --control-plane --certificate-key 8e115cf3911bf4c3f6369b4a4a57bd4f00e2d328995f580e40ca1b1bf6d20a03

[root@server4 docker]# kubeadm join 192.168.0.101:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:fb02d570e5d0b6ada19eac1376361d4535787d37a23d1221d637a27f41adb7a9 \
>     --control-plane --certificate-key 8e115cf3911bf4c3f6369b4a4a57bd4f00e2d328995f580e40ca1b1bf6d20a03

[root@server2 ~]# kubectl -n kube-system get node

3.1.6 Install the network add-on (flannel)
[root@server2 ~]# vim kube-flannel.yml 
[root@server2 ~]# cat kube-flannel.yml   ## this file can be downloaded from the internet; just change the image path to the local registry
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "host-gw"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: reg.westos.org/k8s/flannel:v0.12.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: reg.westos.org/k8s/flannel:v0.12.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - ppc64le
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-s390x
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - s390x
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
[root@server2 ~]# kubectl apply -f kube-flannel.yml   ## create the flannel resources
[root@server2 ~]# kubectl -n kube-system get pod
[root@server2 ~]# kubectl -n kube-system get pod | grep coredns | awk '{system("kubectl -n kube-system delete pod "$1"")}'   ## delete the coredns pods so they are recreated (their scheduling can be slow)
[root@server2 ~]# kubectl -n kube-system delete pod coredns-58796ff66-2czq5 coredns-58796ff66-r5mvx --force
## if they will not terminate, force-delete them; new pods are created automatically
[root@server2 ~]# kubectl -n kube-system get pod

3.2 k8s configuration (worker node: both docker and k8s need to be configured)
3.2.1 Install docker and copy over the required files
[root@server7 ~]# vim /etc/hosts
192.168.0.1 server1 reg.westos.org
[root@server4 yum.repos.d]# scp docker-ce.repo k8s.repo server7:/etc/yum.repos.d
[root@server7 yum.repos.d]# yum install docker-ce -y
[root@server7 yum.repos.d]# yum install -y kubeadm-1.20.2-0.x86_64 kubelet-1.20.2-0.x86_64
[root@server7 yum.repos.d]# systemctl enable --now docker   ## start docker and enable it at boot
[root@server7 yum.repos.d]# systemctl enable --now kubelet ## start the kubelet service

[root@server4 sysctl.d]# scp /etc/sysctl.d/docker.conf server7:/etc/sysctl.d
[root@server7 yum.repos.d]# sysctl --system  ## apply the sysctl settings
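
The docker.conf copied above holds the usual bridge/netfilter settings that kube-proxy needs; its expected contents (not shown in the original transcript) are roughly:

net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1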

[root@server4 etc]# cd /etc/docker/
[root@server4 docker]# scp -r certs.d/ daemon.json server7:/etc/docker/   ## send the docker configuration and registry certificates

[root@server7 docker]# ls
certs.d  daemon.json  key.json
[root@server7 docker]# systemctl restart docker
[root@server7 docker]# docker info

3.2.2 Install ipvsadm and load the ipvs modules
[root@server7 ~]# yum install -y ipvsadm
[root@server7 ~]# modprobe ip_vs_rr
[root@server7 ~]# modprobe ip_vs_sh
[root@server7 ~]# modprobe ip_vs_wrr
[root@server7 ~]# lsmod | grep ip_vs
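
modprobe only loads the modules for the current boot. To make them persist across reboots, they can be listed in a modules-load.d file, for example (the file name is arbitrary):

[root@server7 ~]# cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_sh
ip_vs_wrr
EOF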

3.2.3 Node configuration for kubeadm, kubectl, kubelet
## disable the swap partition
[root@server7 ~]# swapoff -a 
[root@server7 ~]# vim /etc/fstab 
[root@server7 ~]# cat /etc/fstab 
#
# /etc/fstab
# Created by anaconda on Tue Feb  2 09:03:35 2021
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/rhel-root   /                       xfs     defaults        0 0
UUID=dc5cf3ea-2aca-4a0d-90b1-791ffa119129 /boot                   xfs     defaults        0 0
#/dev/mapper/rhel-swap   swap                    swap    defaults        0 0

3.2.4 Node scale-out (join server7 as a worker)
[root@server7 ~]# kubeadm join 192.168.0.101:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:fb02d570e5d0b6ada19eac1376361d4535787d37a23d1221d637a27f41adb7a9
[root@server2 ~]# kubectl get node
[root@server2 ~]# kubectl -n kube-system get pod
[root@server2 ~]# kubectl run demo --image=myapp:v1
pod/demo created
[root@server2 ~]# kubectl get pod -o wide
NAME   READY   STATUS    RESTARTS   AGE   IP           NODE      NOMINATED NODE   READINESS GATES
demo   1/1     Running   0          16s   10.244.3.2   server7   <none>           <none>

3.2.5 Test
[root@server2 ~]# curl 10.244.3.2
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

4. Configure fencing (fence_xvm / stonith)

[root@server5 ~]# yum install -y fence-virt
[root@server6 ~]# yum install -y fence-virt
[root@server5 ~]# stonith_admin -I
[root@server6 ~]# stonith_admin -I
 fence_xvm
 fence_virt
2 devices found
[root@server5 ~]# mkdir /etc/cluster
[root@server6 ~]# mkdir /etc/cluster
[root@foundation50 cluster]# scp fence_xvm.key root@192.168.0.5:/etc/cluster/
[root@foundation50 cluster]# scp fence_xvm.key root@192.168.0.6:/etc/cluster/
[root@foundation50 cluster]# netstat -anulp | grep :1229
[root@foundation50 cluster]# systemctl start fence_virtd
[root@foundation50 cluster]# netstat -anulp | grep :1229
udp        0      0 0.0.0.0:1229            0.0.0.0:*                           13309/fence_virtd 
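
The fence_xvm.key distributed to the cluster nodes lives on the virtualization host (foundation50). If it does not exist yet, it is typically generated and fence_virtd configured along these lines (a sketch; fence_virtd -c runs an interactive setup choosing the multicast listener and libvirt backend):

[root@foundation50 ~]# mkdir -p /etc/cluster
[root@foundation50 ~]# dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=128 count=1   ## create the shared key
[root@foundation50 ~]# fence_virtd -c               ## interactive configuration
[root@foundation50 ~]# systemctl restart fence_virtd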

[root@server5 cluster]# pcs stonith create vmfence fence_xvm pcmk_host_map="server5:vm5;server6:vm6" op monitor interval=60s ## pcmk_host_map is cluster-hostname:VM-domain-name
[root@server5 cluster]# pcs property set stonith-enabled=true   ## re-enable fencing now that a fence device exists
[root@server5 cluster]# pcs status

[root@server5 cluster]# echo c > /proc/sysrq-trigger    ## crash server5's kernel; vmfence should fence (reboot) it
[root@server6 cluster]# pcs status
