k8s高可用

1.主机初始化

#1.添加hosts
cat >> /etc/hosts <<'EOF'
192.168.6.63  k8s1
192.168.6.64  k8s2
192.168.6.65  k8s3
192.168.6.66  k8s4
192.168.6.67  k8s5
192.168.6.68  k8s6
192.168.6.80  master-lb
EOF

#2.临时关闭swap
swapoff -a 
sysctl -w vm.swappiness=0

#3.永久关闭(修改配置文件)
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
echo vm.swappiness=0 >>/etc/sysctl.conf
sysctl -p

#4.允许 iptables 检查桥接流量
cat <<EOF | tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system

#5.配置国内yum源
yum clean all
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
sed -i '/aliyuncs/d' /etc/yum.repos.d/CentOS-Base.repo
sed -i '/aliyuncs/d' /etc/yum.repos.d/epel.repo

#6.升级内核并重启
yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm -y
yum --enablerepo=elrepo-kernel install kernel-ml -y
grub2-set-default 0
grub2-mkconfig -o /boot/grub2/grub.cfg
reboot


#7.配置docker源
curl -o /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# 查看版本
yum list docker-ce --showduplicates

#8. Install docker. For k8s 1.22 the latest docker-ce is fine; run ONE of the
#   two commands below — running both would upgrade the pinned 19.03.15
#   install to the latest version.
yum -y install docker-ce
# Pinned-version alternative (use instead of the line above, not in addition):
# yum -y install docker-ce-19.03.15 docker-ce-cli-19.03.15

yum -y install bash-completion
source /usr/share/bash-completion/bash_completion


#9.配置docker加速器
# 配置cgroupdriver为systemd(1.22版本)

mkdir -pv /etc/docker && cat <<EOF | sudo tee /etc/docker/daemon.json
{
  "registry-mirrors": ["https://6wdq83of.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

systemctl daemon-reload
systemctl enable --now docker

#10.配置 kubernetes yum源配置文件
cat  > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
EOF
#安装kubeadm,kubelet,kubectl(版本需要一致)
#11.可查看版本
yum -y list kubeadm --showduplicates | sort -r
# 指定版本安装
yum -y install kubeadm-1.22.15-0 kubelet-1.22.15-0 kubectl-1.22.15-0
#### Do NOT start kubelet at this point — `kubeadm init` (section 5) configures
#### and starts it; it is enabled after initialization (see step 5.2).
# systemctl enable --now kubelet
# systemctl status kubelet

2.高可用组件部署(master节点部署)

  • 部署nginx在所有节点
#1. 下载
mkdir -p /server/tools
cd /server/tools
wget http://nginx.org/download/nginx-1.22.1.tar.gz

#2. 安装依赖环境
yum install pcre-devel openssl-devel -y

#3. 解压编译安装 ngx
tar xf nginx-1.22.1.tar.gz
cd nginx-1.22.1

./configure --prefix=/usr/local/nginx \
--with-pcre \
--with-http_ssl_module \
--with-http_stub_status_module \
--with-stream \
--with-http_gzip_static_module

make -j2 && make install

#4. 修改 nginx 配置文件
cat >/usr/local/nginx/conf/nginx.conf <<'EOF'
user nginx;
worker_processes  auto;

events {
    worker_connections  10240;
}
error_log /usr/local/nginx/logs/error.log info;

stream {
    upstream kube-apiservers {
        hash $remote_addr consistent;
        server k8s1:6443 weight=5 max_fails=1 fail_timeout=3s;
        server k8s2:6443 weight=5 max_fails=1 fail_timeout=3s;
        server k8s3:6443 weight=5 max_fails=1 fail_timeout=3s;
    }
    server {
        listen 8443 reuseport;
        proxy_connect_timeout 3s;
        proxy_timeout 3000s;
        proxy_pass kube-apiservers;
    }
}
EOF

#5. 创建ngx用户
useradd nginx -s /sbin/nologin -M

#6. 编写启动文件
cat >/usr/lib/systemd/system/nginx.service <<'EOF'
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network-online.target remote-fs.target nss-lookup.target
Wants=network-online.target

[Service]
Type=forking
ExecStartPre=/usr/local/nginx/sbin/nginx -t -c /usr/local/nginx/conf/nginx.conf
ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
ExecReload=/usr/local/nginx/sbin/nginx -s reload
ExecStop=/usr/local/nginx/sbin/nginx -s stop
TimeoutStopSec=5
Restart=on-failure
RestartSec=42s

[Install]
WantedBy=multi-user.target
EOF

#7. 启动服务,并检查
systemctl daemon-reload
systemctl start nginx
systemctl enable nginx

netstat -lntup|grep nginx
  • 部署keepalived
#1. 安装 keepalived
yum install keepalived -y

#2. Write the keepalived config (adjust router_id / mcast_src_ip / interface
#   per host). Fix: mcast_src_ip must be an IP ADDRESS owned by this host,
#   not a NIC name — keepalived does not accept an interface name here.
cp /etc/keepalived/keepalived.conf{,.bak}
cat >/etc/keepalived/keepalived.conf <<'EOF'
! Configuration File for keepalived

global_defs {
    # unique per machine: use this host's own IP
    router_id  192.168.6.63
    enable_script_security
}
vrrp_script check_k8s {
    script "/server/scripts/check-k8s.sh"
    interval 2
    weight 2
    fall 3
    rise 2
}
vrrp_instance k8s-master {
    # the other two masters use BACKUP
    state MASTER
    # NIC name of this host (check with `ip addr`, e.g. eth0 / ens192)
    interface ens192
    virtual_router_id 51
    # the other two masters use lower, mutually different priorities
    priority 100
    advert_int 1
    # IP address of this host's NIC (NOT the NIC name); different on every machine
    mcast_src_ip 192.168.6.63
    authentication {
        auth_type PASS
        # NOTE: keepalived only uses the first 8 characters of auth_pass
        auth_pass k8s-master-ha
    }
    track_script {
        check_k8s
    }
    virtual_ipaddress {
        192.168.6.80/24
    }
}
EOF

#3. Write the health-check script.
#   Fix: the original retried while the apiserver was UP (sleeping 5s per
#   healthy check) and stopped keepalived on the FIRST failed pgrep, so one
#   transient miss caused a failover. Correct behavior is the opposite:
#   retry while the process is MISSING, succeed immediately once found.
#   Also avoid the numeric test on pgrep output, which can be multi-line.
mkdir -p /server/scripts/
cat >/server/scripts/check-k8s.sh <<'EOF'
#!/bin/bash
# keepalived health check: verify the local kube-apiserver process exists.
# Up to 5 attempts, 1s apart, while the process is missing; exit 0 as soon
# as it is found.

alive=0
for _ in {1..5}
do
    if pgrep kube-apiserver >/dev/null
    then
        alive=1
        break
    fi
    sleep 1
done

if [[ $alive -eq 0 ]]
then
    # apiserver is down: stop keepalived so the VIP fails over to another node
    systemctl stop keepalived
    exit 1
fi
exit 0
EOF

chmod +x /server/scripts/check-k8s.sh

#4. 启动服务
systemctl start keepalived
systemctl enable keepalived


# 示例: 当检查脚本检测到 apiserver 宕机并停止 keepalived 时, audit 日志中会出现如下记录:
type=SERVICE_STOP msg=audit(1693210185.280:2116): pid=1 uid=0 auid=4294967295 ses=4294967295 subj=kernel msg='unit=keepalived comm="systemd" exe="/usr/lib/systemd/systemd" hostname=? addr=? terminal=? res=success'

3.初始化master节点

#1. 生成一个 预处理文件
 mkdir -p /data/k8s/master
 cd /data/k8s/master
kubeadm config print init-defaults > kubeadm-init.yml

#2. 修改初始化文件

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # master IP地址
  advertiseAddress: 192.168.6.63
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  imagePullPolicy: IfNotPresent
  name: k8s1
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
# 修改镜像源
#imageRepository: k8s.gcr.io
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.22.0
# 负载地址和端口
controlPlaneEndpoint: 192.168.6.80:8443
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  # pod子网
  podSubnet: 10.244.0.0/16
scheduler: {}


# 3.检查配置文件是否有错
kubeadm init --config kubeadm-init.yml --dry-run

4. 预拉取镜像

kubeadm config images list --config kubeadm-init.yml

registry.aliyuncs.com/google_containers/kube-apiserver:v1.22.0
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.22.0
registry.aliyuncs.com/google_containers/kube-scheduler:v1.22.0
registry.aliyuncs.com/google_containers/kube-proxy:v1.22.0
registry.aliyuncs.com/google_containers/pause:3.5
registry.aliyuncs.com/google_containers/etcd:3.5.0-0
registry.aliyuncs.com/google_containers/coredns:v1.8.4

5.基于 kubeadm 配置文件初始化集群

#1.初始化
kubeadm init --config kubeadm-init.yml --upload-certs
#2.启动kubelet
systemctl enable --now kubelet
systemctl status kubelet

#3.复制 kubectl 的 kubeconfig ,便于集群管理
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

#4.查看configmap资源,初始化的配置文件
kubectl -n kube-system get cm kubeadm-config -o yaml
###node节点加入集群
kubeadm join 192.168.6.80:8443 --token rklw7e.jg7gojsohytm856r --discovery-token-ca-cert-hash sha256:874d0e3c953d00474c3739c38eb4b27191696607cab2a06a7efcdb7aeb065050

#5.扩展:重置,初始化
kubeadm reset 
rm -fr ~/.kube/  /etc/kubernetes/*  /var/lib/etcd/*

6.master节点加入集群

#1. List current tokens (fix: the binary is `kubeadm`, not `kubeam`)
kubeadm token list
#2.创建一个永不过期的 key
kubeadm token create --print-join-command --ttl 0
#3.新生成的 token
kubeadm join 192.168.6.80:8443 --token rklw7e.jg7gojsohytm856r --discovery-token-ca-cert-hash sha256:874d0e3c953d00474c3739c38eb4b27191696607cab2a06a7efcdb7aeb065050

#4.kubeadm 生成的token数据存储在k8s集群
kubectl -n kube-system get secret
kubectl -n kube-system get secret bootstrap-token-rklw7e -o yaml

#5.将 master 证书上传到kubeadm的证书文件中(将控制面板证书文件上传到kubeadm证书)
kubeadm init phase upload-certs --upload-certs

#6.上传至kubeadm 证书后,会生成一个 key
39221397d694956cfd73e0102421a8877fdb8a46c25cadebc07738cc72aef6f2

#7.将其他master节点加入集群
kubeadm join 192.168.6.80:8443 --token rklw7e.jg7gojsohytm856r --discovery-token-ca-cert-hash sha256:874d0e3c953d00474c3739c38eb4b27191696607cab2a06a7efcdb7aeb065050 \
--control-plane  --certificate-key 39221397d694956cfd73e0102421a8877fdb8a46c25cadebc07738cc72aef6f2

#8.复制 kubectl 的 kubeconfig
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config



#9.拓展
#master 上创建 token,生成加入 集群的命令
date +%s | md5sum | cut -c 5-20
kubeadm token create diypas.69ad50b9bfcc3a52 --ttl 0 --print-join-command
# 删除token
kubeadm token delete diypas.69ad50b9bfcc3a52 

7.安装网络插件

#安装flannel.yml
cat > flannel.yml <<'EOF'
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  seLinux:
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      imagePullSecrets:
      - name: harbor-key
      initContainers:
      - name: install-cni-plugin
        imagePullPolicy: Always
        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        imagePullPolicy: Always
        image: rancher/mirrored-flannelcni-flannel:v0.18.1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        imagePullPolicy: Always
        image: rancher/mirrored-flannelcni-flannel:v0.18.1 
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
EOF

kubectl apply -f flannel.yml

8.kube-proxy 修改为 ipvs 模式

# 01. 安装软件包(所有节点安装)
yum -y install conntrack-tools ipvsadm

# 02. Load IPVS kernel modules.
#     Fix: step 6 upgraded the host to kernel-ml (5.x); since kernel 4.19
#     `nf_conntrack_ipv4` was merged into `nf_conntrack`, so modprobe of the
#     old name fails on the upgraded kernel.
cat > /etc/sysconfig/modules/ipvs.modules <<'EOF'
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
# kernels >= 4.19: nf_conntrack_ipv4 was renamed to nf_conntrack
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack

# 03. 修改 mode 为 ipvs (master节点)
kubectl -n kube-system edit cm kube-proxy

# 04. 查看
kubectl -n kube-system describe cm kube-proxy | grep mode

# 05. 删除旧的 kube-proxy
kubectl get pods -A | grep kube-proxy | awk '{print $2}' | xargs kubectl -n kube-system delete pods 

# 06. 查看日志验证
kubectl -n kube-system logs -f kube-proxy-xxx

9.面板

#安装
kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
kubectl get -n kuboard po
http://192.168.6.63:30080/sso/auth/default?req=rxk5odwrmhw53tydq6gzh4mpv

用户名: admin
密码: Kuboard123
#卸载方法
kubectl delete -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值