Kubernetes 1.28 Deployment Plan

1. Pre-deployment Preparation

1.1 Server Environment

Virtualization: Oracle VM VirtualBox

OS: ubuntu-22.04.2-live-server-amd64.iso

Minimal installation, firewall not enabled

/data mounted on a dedicated data disk

Passwordless sudo enabled

Network environment:
    NAT network:
        10.0.0.0/8
    host subnet:
        10.0.0.0/16
    k8s pod subnet:
        10.1.0.0/16
    k8s service subnet:
        10.2.0.0/16
    k8s apiserver VIP:
        10.0.1.1:16443

    Etcd:
        k8s-lo-vm-etcd-1: 10.0.2.1
        k8s-lo-vm-etcd-2: 10.0.2.2
        k8s-lo-vm-etcd-3: 10.0.2.3
    LoadBalancer:
        k8s-lo-vm-proxy-1: 10.0.2.4
        k8s-lo-vm-proxy-2: 10.0.2.5
        k8s-lo-vm-proxy-3: 10.0.2.6
    k8sMaster:
        k8s-lo-vm-master-1: 10.0.2.7
        k8s-lo-vm-master-2: 10.0.2.8
        k8s-lo-vm-master-3: 10.0.2.9
    k8sNode:
        k8s-lo-vm-node-1: 10.0.2.10
        k8s-lo-vm-node-2: 10.0.2.11
        k8s-lo-vm-node-3: 10.0.2.12
# The Etcd, LoadBalancer, and k8sMaster roles can be combined onto the same servers; they are deployed separately here for demonstration.
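If the VMs were cloned from a single template, a minimal sketch for naming each host to match the plan above (assumes hostnamectl, as on Ubuntu 22.04; run the matching line on each server):
# example: on 10.0.2.1
sudo hostnamectl set-hostname k8s-lo-vm-etcd-1
# example: on 10.0.2.7
sudo hostnamectl set-hostname k8s-lo-vm-master-1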

2. Etcd Cluster Deployment

2.1 Preparation

2.1.1 Server list
echo '10.0.2.1 k8s-lo-vm-etcd-1
10.0.2.2 k8s-lo-vm-etcd-2
10.0.2.3 k8s-lo-vm-etcd-3' | sudo tee -a /etc/hosts
2.1.2 Download the cfssl binaries
# If the server cannot reach GitHub directly, download these in a browser and upload them to the server
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssl_1.6.4_linux_amd64 

wget https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssljson_1.6.4_linux_amd64 

wget https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssl-certinfo_1.6.4_linux_amd64

sudo mv cfssl_1.6.4_linux_amd64 /usr/local/bin/cfssl

sudo mv cfssljson_1.6.4_linux_amd64 /usr/local/bin/cfssljson

sudo mv cfssl-certinfo_1.6.4_linux_amd64 /usr/bin/cfssl-certinfo

sudo chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/bin/cfssl-certinfo
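A quick sanity check that the tool is on PATH and executable:
cfssl version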
2.1.3 Download the etcd release
# If the server cannot reach GitHub directly, download this in a browser and upload it to the server
wget https://github.com/etcd-io/etcd/releases/download/v3.5.10/etcd-v3.5.10-linux-amd64.tar.gz

2.2 etcd Deployment

2.2.1 Generate the CA and server certificates
mkdir -p /tmp/etcd/tls

echo '{
    "signing": {
        "default": {
            "expiry": "87600h"
        },
        "profiles": {
            "www": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}' |tee -a /tmp/etcd/tls/ca-config.json

echo '{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}' |tee -a /tmp/etcd/tls/ca-csr.json

echo '{
    "CN": "etcd",
    "hosts": [
        "10.0.2.1",
        "10.0.2.2",
        "10.0.2.3"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}' |tee -a /tmp/etcd/tls/server-csr.json

cd /tmp/etcd/tls
cfssl gencert -initca /tmp/etcd/tls/ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=/tmp/etcd/tls/ca.pem -ca-key=/tmp/etcd/tls/ca-key.pem -config=/tmp/etcd/tls/ca-config.json -profile=www /tmp/etcd/tls/server-csr.json | cfssljson -bare server
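Optionally, inspect the generated server certificate (hosts/SANs and expiry) with cfssl-certinfo before distributing it:
cfssl-certinfo -cert /tmp/etcd/tls/server.pem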
2.2.2 Install etcd (pick any one server to start with)
sudo mkdir -p /data/lib/etcd/default.etcd
sudo mkdir -p /opt/module/etcd/{bin,cfg,ssl} 
sudo tar -zxvf ~/etcd-v3.5.10-linux-amd64.tar.gz -C ~/
sudo mv -f ~/etcd-v3.5.10-linux-amd64/{etcd,etcdctl,etcdutl} /opt/module/etcd/bin/
sudo mv -f /tmp/etcd/tls/ca*pem /tmp/etcd/tls/server*pem /opt/module/etcd/ssl/
rm -rf /tmp/etcd/tls
echo '#[Member]
ETCD_NAME="k8s-lo-vm-etcd-1"
ETCD_DATA_DIR="/data/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.2.1:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.2.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.2.1:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.2.1:2379"
ETCD_INITIAL_CLUSTER="k8s-lo-vm-etcd-1=https://10.0.2.1:2380,k8s-lo-vm-etcd-2=https://10.0.2.2:2380,k8s-lo-vm-etcd-3=https://10.0.2.3:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"' | sudo tee /opt/module/etcd/cfg/etcd.conf
echo '[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/module/etcd/cfg/etcd.conf
ExecStart=/opt/module/etcd/bin/etcd \
    --cert-file=/opt/module/etcd/ssl/server.pem \
    --key-file=/opt/module/etcd/ssl/server-key.pem \
    --peer-cert-file=/opt/module/etcd/ssl/server.pem \
    --peer-key-file=/opt/module/etcd/ssl/server-key.pem \
    --trusted-ca-file=/opt/module/etcd/ssl/ca.pem \
    --peer-trusted-ca-file=/opt/module/etcd/ssl/ca.pem \
    --logger=zap
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target' | sudo tee /usr/lib/systemd/system/etcd.service
2.2.3 Other servers
Perform the steps above on one server, then sync the binaries, certificates, configuration, and unit file to the other two servers. In etcd.conf, update the member-specific parameters on each host: ETCD_NAME, ETCD_LISTEN_PEER_URLS, ETCD_LISTEN_CLIENT_URLS, ETCD_INITIAL_ADVERTISE_PEER_URLS, and ETCD_ADVERTISE_CLIENT_URLS (see the sketch below).
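A minimal sync sketch, assuming SSH access as the ubuntu user and the directory layout above; shown for k8s-lo-vm-etcd-2, repeat with the matching name and IP for k8s-lo-vm-etcd-3:
# On k8s-lo-vm-etcd-1: copy the installation (binaries, certificates, config) and the unit file
sudo scp -r /opt/module/etcd ubuntu@10.0.2.2:/tmp/etcd-dist
sudo scp /usr/lib/systemd/system/etcd.service ubuntu@10.0.2.2:/tmp/etcd.service

# On k8s-lo-vm-etcd-2: move the files into place and create the data directory
sudo mkdir -p /opt/module /data/lib/etcd/default.etcd
sudo mv /tmp/etcd-dist /opt/module/etcd
sudo mv /tmp/etcd.service /usr/lib/systemd/system/etcd.service

# Rewrite only the member-specific keys; ETCD_INITIAL_CLUSTER keeps all three members unchanged
sudo sed -i \
    -e 's|^ETCD_NAME=.*|ETCD_NAME="k8s-lo-vm-etcd-2"|' \
    -e 's|^ETCD_LISTEN_PEER_URLS=.*|ETCD_LISTEN_PEER_URLS="https://10.0.2.2:2380"|' \
    -e 's|^ETCD_LISTEN_CLIENT_URLS=.*|ETCD_LISTEN_CLIENT_URLS="https://10.0.2.2:2379"|' \
    -e 's|^ETCD_INITIAL_ADVERTISE_PEER_URLS=.*|ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.2.2:2380"|' \
    -e 's|^ETCD_ADVERTISE_CLIENT_URLS=.*|ETCD_ADVERTISE_CLIENT_URLS="https://10.0.2.2:2379"|' \
    /opt/module/etcd/cfg/etcd.conf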
2.2.4 Start
# etcd needs a quorum, so start at least two members before the cluster reports healthy
sudo systemctl start etcd
sudo systemctl status etcd
sudo systemctl enable etcd

# Check cluster health
sudo ETCDCTL_API=3 /opt/module/etcd/bin/etcdctl --cacert=/opt/module/etcd/ssl/ca.pem --cert=/opt/module/etcd/ssl/server.pem --key=/opt/module/etcd/ssl/server-key.pem --endpoints="https://10.0.2.1:2379,https://10.0.2.2:2379,https://10.0.2.3:2379" endpoint health
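Optionally, show per-member status (leader, DB size, raft term) in a table:
sudo ETCDCTL_API=3 /opt/module/etcd/bin/etcdctl --cacert=/opt/module/etcd/ssl/ca.pem --cert=/opt/module/etcd/ssl/server.pem --key=/opt/module/etcd/ssl/server-key.pem --endpoints="https://10.0.2.1:2379,https://10.0.2.2:2379,https://10.0.2.3:2379" endpoint status --write-out=table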

2.3 Backup

A simple snapshot script, to be run on each etcd member:

#!/bin/bash
# Resolve this member's IP by matching the local interface address against /etc/hosts
innerip=`grep "$(ip a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"|awk -F '/' '{print $1}')" /etc/hosts|awk '{print $1}'`
backupDataDir="/data/lib/etcd/backup"
# Create the backup directory if it does not exist
if [[ ! -d $backupDataDir ]];then mkdir -p $backupDataDir; fi
# Save a snapshot of the local member, named with the current Unix timestamp
ETCDCTL_API=3 /opt/module/etcd/bin/etcdctl --endpoints="https://${innerip}:2379" --cert=/opt/module/etcd/ssl/server.pem --key=/opt/module/etcd/ssl/server-key.pem --cacert=/opt/module/etcd/ssl/ca.pem snapshot save /data/lib/etcd/backup/snapshot.`date +%s`.db
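For reference, a hedged restore sketch using etcdutl from the same release; the snapshot filename and restore directory are examples. Stop etcd on all members, restore on each member with its own name and peer URL, point ETCD_DATA_DIR at the restored directory (or move it into place), then start etcd again:
sudo systemctl stop etcd
sudo /opt/module/etcd/bin/etcdutl snapshot restore /data/lib/etcd/backup/snapshot.<timestamp>.db \
    --name k8s-lo-vm-etcd-1 \
    --initial-cluster "k8s-lo-vm-etcd-1=https://10.0.2.1:2380,k8s-lo-vm-etcd-2=https://10.0.2.2:2380,k8s-lo-vm-etcd-3=https://10.0.2.3:2380" \
    --initial-advertise-peer-urls https://10.0.2.1:2380 \
    --data-dir /data/lib/etcd/restored.etcd

To schedule the backup, an example cron entry (assumes the script above is saved as /opt/module/etcd/bin/etcd-backup.sh, a hypothetical path):
echo '0 2 * * * root /bin/bash /opt/module/etcd/bin/etcd-backup.sh' | sudo tee /etc/cron.d/etcd-backup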

3. Load Balancer

3.1 Server list

echo '10.0.2.4 k8s-lo-vm-proxy-1
10.0.2.5 k8s-lo-vm-proxy-2
10.0.2.6 k8s-lo-vm-proxy-3
10.0.2.7 k8s-lo-vm-master-1
10.0.2.8 k8s-lo-vm-master-2
10.0.2.9 k8s-lo-vm-master-3
10.0.2.10 k8s-lo-vm-node-1
10.0.2.11 k8s-lo-vm-node-2
10.0.2.12 k8s-lo-vm-node-3' | sudo tee -a /etc/hosts

3.2 Deployment

Reference: https://github.com/kubernetes/kubeadm/blob/main/docs/ha-considerations.md#options-for-software-load-balancing
sudo apt install -y haproxy keepalived

sudo mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak

echo 'global
    log /dev/log local0
    log /dev/log local1 notice
    daemon

defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 1
    timeout http-request    10s
    timeout queue           20s
    timeout connect         5s
    timeout client          20s
    timeout server          20s
    timeout http-keep-alive 10s
    timeout check           10s

frontend apiserver
    bind *:16443
    mode tcp
    option tcplog
    default_backend apiserverbackend

backend apiserverbackend
    option httpchk GET /healthz
    http-check expect status 200
    mode tcp
    option ssl-hello-chk
    balance     roundrobin
        server      k8s-lo-vm-master-1   10.0.2.7:6443 check
        server      k8s-lo-vm-master-2   10.0.2.8:6443 check
        server      k8s-lo-vm-master-3   10.0.2.9:6443 check' |sudo tee /etc/haproxy/haproxy.cfg

echo 'global_defs {
    router_id LVS_DEVEL
}
vrrp_script check_apiserver {
  script "/etc/keepalived/check_apiserver.sh"
  interval 3
  weight -2
  fall 10
  rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface enp0s8
    virtual_router_id 51
    priority 100
    authentication {
        auth_type PASS
        auth_pass 42
    }
    virtual_ipaddress {
        10.0.1.1
    }
    track_script {
        check_apiserver
    }
}' |sudo tee /etc/keepalived/keepalived.conf

echo '#!/bin/sh

errorExit() {
    echo "*** $*" 1>&2
    exit 1
}

curl --silent --max-time 2 --insecure https://localhost:16443/ -o /dev/null || errorExit "Error GET https://localhost:16443/"
if ip addr | grep -q 10.0.1.1; then
    curl --silent --max-time 2 --insecure https://10.0.1.1:16443/ -o /dev/null || errorExit "Error GET https://10.0.1.1:16443/"
fi' |sudo tee /etc/keepalived/check_apiserver.sh
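The health-check script must be executable for keepalived's vrrp_script to run it (tee does not set the execute bit):
sudo chmod +x /etc/keepalived/check_apiserver.sh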

3.3 Start

sudo systemctl enable haproxy --now
sudo systemctl enable keepalived --now

# Check
sudo systemctl status haproxy
sudo systemctl status keepalived

ip a|grep 10.0.1.1
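Optionally confirm that haproxy is listening on the apiserver frontend port; note the backend servers will be reported DOWN until the control plane is deployed in section 5:
sudo ss -lntp | grep 16443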

4. Container Runtime Deployment

https://kubernetes.io/zh-cn/docs/setup/production-environment/container-runtimes/

4.1 Configure system prerequisites

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# Set the required sysctl parameters; they persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

sudo sysctl --system

lsmod | grep br_netfilter
lsmod | grep overlay

# Disable swap
sudo sed -i 's|/swap.img|#/swap.img|g' /etc/fstab
sudo swapoff -a
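Confirm swap is fully disabled (both commands should show no active swap):
swapon --show
free -h | grep -i swap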

4.2 Deploy Docker

4.2.1 Install

# https://docs.docker.com/engine/install/ubuntu/
for pkg in docker.io docker-doc docker-compose docker-compose-v2 podman-docker containerd runc; do sudo apt-get remove $pkg; done

# Add Docker's official GPG key:
sudo apt-get update
sudo apt-get install ca-certificates curl gnupg
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg

# Add the repository to Apt sources:
echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
  $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
  sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

sudo apt-get update

sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

4.2.2 Uninstall (for reference)

sudo apt-get purge docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras
sudo rm -rf /var/lib/docker
sudo rm -rf /var/lib/containerd

4.2.3 Configure Docker

echo '{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "200m"
    },
    "storage-driver": "overlay2",
    "registry-mirrors": [
        "https://docker.mirrors.ustc.edu.cn",
        "https://hub.docker.com",
        "https://mirror.baidubce.com",
        "http://hub-mirror.c.163.com"
    ],
    "data-root": "/data/lib/docker"
}' |sudo tee /etc/docker/daemon.json

4.2.4 Start Docker

# Enable and start the Docker daemon
sudo systemctl enable docker --now
sudo systemctl status docker
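With the daemon.json from 4.2.3 in place, confirm Docker picked up the systemd cgroup driver and the /data data root:
sudo docker info --format '{{.CgroupDriver}} {{.DockerRootDir}}'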

4.3 Deploy cri-dockerd

Since Kubernetes 1.24, dockershim has been removed and the kubelet can no longer talk to Docker directly, so the cri-dockerd adapter must be installed.
# If the server cannot reach GitHub directly, download this in a browser and upload it to the server
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd_0.3.4.3-0.ubuntu-jammy_amd64.deb -O cri-dockerd_0.3.4.3-0.ubuntu-jammy_amd64.deb

sudo chown root:root ./cri-dockerd_0.3.4.3-0.ubuntu-jammy_amd64.deb
sudo apt install -y ./cri-dockerd_0.3.4.3-0.ubuntu-jammy_amd64.deb
sudo systemctl daemon-reload
sudo systemctl enable --now cri-docker.socket
sudo systemctl status cri-docker

5. Install Kubernetes

5.1 Install the packages

sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl gpg
curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main' | sudo tee /etc/apt/sources.list.d/kubernetes.list

sudo apt-get update
sudo apt-get install -y kubelet=1.28.2-00 kubeadm=1.28.2-00 kubectl=1.28.2-00
# sudo apt-mark unhold kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
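Verify the pinned versions:
kubeadm version -o short
kubelet --version
kubectl version --client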

5.2 Configuration file (place it on k8s-lo-vm-master-1)

# ~/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
  - token: "9a08jv.c0izixklcxtmnze7"
    description: "kubeadm bootstrap token"
    ttl: "8760h0m0s"
  - token: "783bde.3f89s0fje9f38fhf"
    description: "another bootstrap token"
    ttl: "8760h0m0s"
    usages:
      - signing
      - authentication
    groups:
      - system:bootstrappers:kubeadm:default-node-token
certificateKey: 07ef165f6723337d68b0eca1c6a29222a44aecadabbbbb79016cd160a397782c
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.0.2.7
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-lo-vm-master-1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta3
apiServer:
  timeoutForControlPlane: 4m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 10.0.1.1:16443
controllerManager: {}
dns:
  imageRepository: registry.aliyuncs.com/google_containers
etcd:
  external:
    endpoints:
        - https://10.0.2.1:2379
        - https://10.0.2.2:2379
        - https://10.0.2.3:2379
    caFile: /etc/kubernetes/pki/etcd/ca.crt
    certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt
    keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.28.2
networking:
  dnsDomain: k8s.pangfaheng.com
  podSubnet: 10.1.0.0/16
  serviceSubnet: 10.2.0.0/16
scheduler: {}
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd

5.3 Prepare the etcd certificates

sudo mkdir -p /etc/kubernetes/pki
sudo mkdir -p /etc/kubernetes/pki/etcd

# Copy the etcd certificates from an etcd server to the Kubernetes control-plane nodes (the scp commands run on an etcd server; one target host is shown as an example)
sudo scp -P 36000 /opt/module/etcd/ssl/ca.pem ubuntu@10.0.2.11:~/
sudo scp -P 36000 /opt/module/etcd/ssl/server.pem ubuntu@10.0.2.11:~/
sudo scp -P 36000 /opt/module/etcd/ssl/server-key.pem ubuntu@10.0.2.11:~/

sudo mv ~/ca.pem /etc/kubernetes/pki/etcd/ca.crt
sudo mv ~/server.pem /etc/kubernetes/pki/apiserver-etcd-client.crt
sudo mv ~/server-key.pem /etc/kubernetes/pki/apiserver-etcd-client.key

sudo chown root:root /etc/kubernetes/pki/etcd/ca.crt /etc/kubernetes/pki/apiserver-etcd-client.crt /etc/kubernetes/pki/apiserver-etcd-client.key
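Optionally verify that the control-plane node can reach the external etcd cluster with the copied certificates (etcd exposes a /health endpoint):
sudo curl --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/apiserver-etcd-client.crt --key /etc/kubernetes/pki/apiserver-etcd-client.key https://10.0.2.1:2379/health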

5.4 Pull images

sudo kubeadm config images list
sudo docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.28.2
sudo docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.28.2
sudo docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.28.2
sudo docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.28.2
sudo docker pull registry.aliyuncs.com/google_containers/coredns:v1.10.1
sudo docker pull registry.aliyuncs.com/google_containers/pause:3.6
sudo docker tag registry.aliyuncs.com/google_containers/pause:3.6 registry.k8s.io/pause:3.6
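Before the real init in 5.5, kubeadm can optionally be run in dry-run mode; it validates ~/kubeadm-config.yaml and prints the manifests it would generate without changing the node (assumes the config from 5.2 and the certificates from 5.3 are in place):
sudo kubeadm init --config ~/kubeadm-config.yaml --dry-run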

5.5 Initialize the first control-plane node

sudo kubeadm init --config ~/kubeadm-config.yaml --upload-certs

5.6 Join the remaining nodes using the commands printed by kubeadm init

# master
sudo kubeadm join 10.0.1.1:16443 --token 9a08jv.c0izixklcxtmnze7 \
    --discovery-token-ca-cert-hash sha256:c87a00fa4447f3ea4c210a28d7600e006a57c96e49ad3e4e364eaa9dfb746ae3 \
    --control-plane --certificate-key 07ef165f6723337d68b0eca1c6a29222a44aecadabbbbb79016cd160a397782c \
    --cri-socket unix:///var/run/cri-dockerd.sock \
    --node-name k8s-lo-vm-master-3
# node
sudo kubeadm join 10.0.1.1:16443 --token 9a08jv.c0izixklcxtmnze7 \
    --discovery-token-ca-cert-hash sha256:c87a00fa4447f3ea4c210a28d7600e006a57c96e49ad3e4e364eaa9dfb746ae3 \
    --cri-socket unix:///var/run/cri-dockerd.sock \
    --node-name k8s-lo-vm-node-3

# Check
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl get nodes
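CoreDNS pods stay Pending and the nodes report NotReady until the network plugin is installed in 5.7; the current state can be checked with:
kubectl get pods -n kube-system -o wide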

5.7 Install the network plugin (flannel)

sudo docker pull rancher/mirrored-flannelcni-flannel-cni-plugin:v1.2.0

wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml -O kube-flannel.yml

sed -i 's|docker.io/flannel/flannel-cni-plugin:v1.2.0|ccr.ccs.tencentyun.com/google_cn/mirrored-flannelcni-flannel-cni-plugin:v1.1.0|g' kube-flannel.yml

sed -i 's|docker.io/flannel/flannel:v0.22.3|ccr.ccs.tencentyun.com/google_cn/mirrored-flannelcni-flannel:v0.18.1|g' kube-flannel.yml

sed -i 's|10.244.0.0/16|10.1.0.0/16|' kube-flannel.yml

kubectl apply -f kube-flannel.yml
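Verify the flannel DaemonSet comes up and the nodes turn Ready (the kube-flannel namespace name comes from the downloaded manifest):
kubectl get pods -n kube-flannel -o wide
kubectl get nodes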