Deploying a Kubernetes v1.31 Cluster with kubeadm (Guaranteed to Work)

This article follows the official Kubernetes documentation: https://kubernetes.io/zh-cn/docs/setup/production-environment/tools/kubeadm/


Prerequisites

Prepare 3 machines (1 master, 2 workers)

| OS | HostName | IP | Architecture | Memory | CPU | Disk | Role |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Ubuntu 20.04.5 LTS | ubuntu1 | 192.168.17.142 | aarch64 | 2G | 2C | 50G | master |
| Ubuntu 20.04.5 LTS | ubuntu2 | 192.168.17.8 | aarch64 | 2G | 2C | 50G | worker |
| Ubuntu 20.04.5 LTS | ubuntu3 | 192.168.17.239 | aarch64 | 2G | 2C | 50G | worker |

Run the following steps on all 3 machines.

  • Disable swap
# Temporarily disable (lost after a reboot)
sudo swapoff -a

# Permanently disable (recommended)
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab  # comment out the swap entry in /etc/fstab
sudo swapon --show # prints nothing once swap is fully off
  • Enable IP forwarding and bridged traffic filtering
# Temporarily effective
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.bridge.bridge-nf-call-ip6tables=1
sysctl -w net.bridge.bridge-nf-call-iptables=1
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables # check that the 3 settings are in effect

# Permanently effective (recommended)
sudo vim /etc/sysctl.conf
>> add to the file:
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
>> save, exit, and run:
sudo sysctl -p # reloads all parameter settings from /etc/sysctl.conf

Note:
If the net.bridge parameters fail to apply, your system is probably missing the br_netfilter kernel module. Load it with:
sudo modprobe br_netfilter
Then add it to /etc/modules so it is loaded on every boot:
echo "br_netfilter" | sudo tee -a /etc/modules
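The official kubeadm prerequisites use /etc/modules-load.d and /etc/sysctl.d instead of /etc/modules and /etc/sysctl.conf; a minimal sketch of that variant (the k8s.conf file names are conventional, not mandatory):

# Load the required module on every boot
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
sudo modprobe br_netfilter

# Persist the sysctl settings
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sudo sysctl --system   # apply settings from all sysctl config directories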

Deploying the container runtime (containerd)

Run the following steps on all 3 machines.

  • Download and install
https://github.com/containerd/containerd/blob/main/docs/getting-started.md

Follow Option 1, Step 1 → Step 2 → Step 3 in the document above, but make sure the binaries you download match your OS architecture. My machines run Ubuntu on aarch64, so I picked the aarch64 builds.


  • Configure containerd

If /etc/containerd/config.toml does not exist on your machine, create it and copy the content below into it.

By default, containerd does not seem to create this file on startup. To see the default configuration, run: containerd config default. The command prints the following:

disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2

[cgroup]
  path = ""

[debug]
  address = ""
  format = ""
  gid = 0
  level = ""
  uid = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_ca = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

  [plugins."io.containerd.gc.v1.scheduler"]
    deletion_threshold = 0
    mutation_threshold = 100
    pause_threshold = 0.02
    schedule_delay = "0s"
    startup_delay = "100ms"

  [plugins."io.containerd.grpc.v1.cri"]
    cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"]
    device_ownership_from_security_context = false
    disable_apparmor = false
    disable_cgroup = false
    disable_hugetlb_controller = true
    disable_proc_mount = false
    disable_tcp_service = true
    drain_exec_sync_io_timeout = "0s"
    enable_cdi = false
    enable_selinux = false
    enable_tls_streaming = false
    enable_unprivileged_icmp = false
    enable_unprivileged_ports = false
    ignore_deprecation_warnings = []
    ignore_image_defined_volumes = false
    image_pull_progress_timeout = "5m0s"
    image_pull_with_sync_fs = false
    max_concurrent_downloads = 3
    max_container_log_line_size = 16384
    netns_mounts_under_state_dir = false
    restrict_oom_score_adj = false
    sandbox_image = "registry.k8s.io/pause:3.10"
    selinux_category_range = 1024
    stats_collect_period = 10
    stream_idle_timeout = "4h0m0s"
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    systemd_cgroup = false
    tolerate_missing_hugetlb_controller = true
    unset_seccomp_profile = ""

    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = ""
      ip_pref = ""
      max_conf_num = 1
      setup_serially = false

    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "runc"
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      ignore_blockio_not_enabled_errors = false
      ignore_rdt_not_enabled_errors = false
      no_pivot = false
      snapshotter = "overlayfs"

      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        privileged_without_host_devices_all_devices_allowed = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""
        sandbox_mode = ""
        snapshotter = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]

        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          base_runtime_spec = ""
          cni_conf_dir = ""
          cni_max_conf_num = 0
          container_annotations = []
          pod_annotations = []
          privileged_without_host_devices = false
          privileged_without_host_devices_all_devices_allowed = false
          runtime_engine = ""
          runtime_path = ""
          runtime_root = ""
          runtime_type = "io.containerd.runc.v2"
          sandbox_mode = "podsandbox"
          snapshotter = ""

          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = ""
            CriuImagePath = ""
            CriuPath = ""
            CriuWorkPath = ""
            IoGid = 0
            IoUid = 0
            NoNewKeyring = false
            NoPivotRoot = false
            Root = ""
            ShimCgroup = ""
            # NOTE: this must match the cgroup driver used by kubelet. This guide uses the systemd
            # cgroup driver, so it is set to true; if your kubelet uses the cgroupfs driver, leave it false.
            # See: https://kubernetes.io/zh-cn/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-runtime
            SystemdCgroup = true

      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        privileged_without_host_devices_all_devices_allowed = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""
        sandbox_mode = ""
        snapshotter = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = "node"

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = ""

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]

    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""

  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"

  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"

  [plugins."io.containerd.internal.v1.tracing"]

  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"

  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false

  [plugins."io.containerd.nri.v1.nri"]
    disable = true
    disable_connections = false
    plugin_config_path = "/etc/nri/conf.d"
    plugin_path = "/opt/nri/plugins"
    plugin_registration_timeout = "5s"
    plugin_request_timeout = "2s"
    socket_path = "/var/run/nri/nri.sock"

  [plugins."io.containerd.runtime.v1.linux"]
    no_shim = false
    runtime = "runc"
    runtime_root = ""
    shim = "containerd-shim"
    shim_debug = false

  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/arm64/v8"]
    sched_core = false

  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]

  [plugins."io.containerd.service.v1.tasks-service"]
    blockio_config_file = ""
    rdt_config_file = ""

  [plugins."io.containerd.snapshotter.v1.aufs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.blockfile"]
    fs_type = ""
    mount_options = []
    root_path = ""
    scratch_file = ""

  [plugins."io.containerd.snapshotter.v1.btrfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    async_remove = false
    base_image_size = ""
    discard_blocks = false
    fs_options = ""
    fs_type = ""
    pool_name = ""
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.native"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.overlayfs"]
    mount_options = []
    root_path = ""
    sync_remove = false
    upperdir_label = false

  [plugins."io.containerd.snapshotter.v1.zfs"]
    root_path = ""

  [plugins."io.containerd.tracing.processor.v1.otlp"]

  [plugins."io.containerd.transfer.v1.local"]
    config_path = ""
    max_concurrent_downloads = 3
    max_concurrent_uploaded_layers = 3

    [[plugins."io.containerd.transfer.v1.local".unpack_config]]
      differ = ""
      platform = "linux/arm64/v8"
      snapshotter = "overlayfs"

[proxy_plugins]

[stream_processors]

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar"

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts]
  "io.containerd.timeout.bolt.open" = "0s"
  "io.containerd.timeout.metrics.shimstats" = "2s"
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[ttrpc]
  address = ""
  gid = 0
  uid = 0

You can also run: containerd config default > config.toml, and copy that file straight to /etc/containerd/config.toml instead of pasting the content by hand.
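Putting that together, a minimal sketch of generating the file, enabling the systemd cgroup driver, and restarting containerd (the sed pattern assumes the default config layout shown above):

sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml > /dev/null

# Flip SystemdCgroup to true in the runc runtime options
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml

# Restart containerd so the new configuration takes effect
sudo systemctl restart containerd
sudo systemctl enable containerd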

  • Check that containerd started successfully
systemctl status containerd.service
# or run
journalctl -u containerd -f
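Since later steps use crictl against containerd, it may also help to point crictl at the containerd socket (crictl itself is installed later, together with kubeadm, via the cri-tools package); a sketch using the standard /etc/crictl.yaml configuration:

cat <<EOF | sudo tee /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
EOF

# Should print runtime status information without errors
crictl info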


Make sure containerd is up and running on all 3 machines.

Installing kubeadm, kubectl, and kubelet

Run the following steps on all 3 machines.

Reference: https://kubernetes.io/zh-cn/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-runtime:~:text=cri%2Ddockerd.sock-,%E5%AE%89%E8%A3%85%20kubeadm%E3%80%81kubelet%20%E5%92%8C%20kubectl,-%E4%BD%A0%E9%9C%80%E8%A6%81%E5%9C%A8


Follow the instructions for your operating system in the link above to install kubeadm, kubectl, and kubelet.
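For Ubuntu/Debian the official instructions boil down to roughly the following; this is a sketch based on the Kubernetes apt repository for v1.31, so double-check it against the page linked above:

sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl gpg

# On Ubuntu 20.04 the keyrings directory may not exist yet
sudo mkdir -p -m 755 /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key | \
  sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.31/deb/ /' | \
  sudo tee /etc/apt/sources.list.d/kubernetes.list

sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl   # prevent unintended upgrades
sudo systemctl enable --now kubelet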

Preparing the base images

Run the following steps on all 3 machines.

The built-in Kubernetes images live on registry.k8s.io, which usually requires a proxy to reach, so you can pull them on a machine that has such access, re-tag them, and push them to your own registry. Then, on each of the 3 machines, pull them from your registry and re-tag them back to the official names.

  • How to know which images to prepare (if you are also deploying v1.31, you can pull my images below directly; a loop-based version of these steps is sketched after the image list below)
root@ubuntu1:~/kubernetes/deploy/calico# kubeadm config images list

# Output: 7 images that need to be prepared in advance
registry.k8s.io/kube-apiserver:v1.31.0
registry.k8s.io/kube-controller-manager:v1.31.0
registry.k8s.io/kube-scheduler:v1.31.0
registry.k8s.io/kube-proxy:v1.31.0
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.10
registry.k8s.io/etcd:3.5.15-0

# Re-tag with your own registry name: 10 images in total (7 here plus 3 Calico images below)
docker tag registry.k8s.io/kube-apiserver:v1.31.0 registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-apiserver:v1.31.0
docker tag registry.k8s.io/kube-controller-manager:v1.31.0 registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-controller-manager:v1.31.0
docker tag registry.k8s.io/kube-scheduler:v1.31.0 registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-scheduler:v1.31.0
docker tag registry.k8s.io/kube-proxy:v1.31.0 registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-proxy:v1.31.0
docker tag registry.k8s.io/coredns/coredns:v1.11.3 registry.cn-hangzhou.aliyuncs.com/shouzhi/coredns:v1.11.3
docker tag registry.k8s.io/pause:3.10 registry.cn-hangzhou.aliyuncs.com/shouzhi/pause:3.10
docker tag registry.k8s.io/etcd:3.5.15-0 registry.cn-hangzhou.aliyuncs.com/shouzhi/etcd:3.5.15-0
# These 3 images are not part of the core k8s cluster, but the cluster will need them later, so prepare them in advance too
docker tag docker.io/calico/cni:v3.26.0 registry.cn-hangzhou.aliyuncs.com/shouzhi/calico_cni:v3.26.0
docker tag docker.io/calico/node:v3.26.0 registry.cn-hangzhou.aliyuncs.com/shouzhi/calico_node:v3.26.0
docker tag docker.io/calico/kube-controllers:v3.26.0 registry.cn-hangzhou.aliyuncs.com/shouzhi/calico_kube-controllers:v3.26.0

# Pull the images with crictl (10 images)
crictl pull registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-apiserver:v1.31.0
crictl pull registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-controller-manager:v1.31.0
crictl pull registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-scheduler:v1.31.0
crictl pull registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-proxy:v1.31.0
crictl pull registry.cn-hangzhou.aliyuncs.com/shouzhi/coredns:v1.11.3
crictl pull registry.cn-hangzhou.aliyuncs.com/shouzhi/pause:3.10
crictl pull registry.cn-hangzhou.aliyuncs.com/shouzhi/etcd:3.5.15-0
crictl pull registry.cn-hangzhou.aliyuncs.com/shouzhi/calico_cni:v3.26.0
crictl pull registry.cn-hangzhou.aliyuncs.com/shouzhi/calico_node:v3.26.0
crictl pull registry.cn-hangzhou.aliyuncs.com/shouzhi/calico_kube-controllers:v3.26.0

# Re-tag back to the official image names (10 images)
ctr --namespace k8s.io image tag  registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-apiserver:v1.31.0 registry.k8s.io/kube-apiserver:v1.31.0
ctr --namespace k8s.io image tag  registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-controller-manager:v1.31.0 registry.k8s.io/kube-controller-manager:v1.31.0
ctr --namespace k8s.io image tag  registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-scheduler:v1.31.0 registry.k8s.io/kube-scheduler:v1.31.0
ctr --namespace k8s.io image tag  registry.cn-hangzhou.aliyuncs.com/shouzhi/kube-proxy:v1.31.0 registry.k8s.io/kube-proxy:v1.31.0
ctr --namespace k8s.io image tag  registry.cn-hangzhou.aliyuncs.com/shouzhi/coredns:v1.11.3 registry.k8s.io/coredns/coredns:v1.11.3
ctr --namespace k8s.io image tag  registry.cn-hangzhou.aliyuncs.com/shouzhi/pause:3.10 registry.k8s.io/pause:3.10
ctr --namespace k8s.io image tag  registry.cn-hangzhou.aliyuncs.com/shouzhi/etcd:3.5.15-0 registry.k8s.io/etcd:3.5.15-0
ctr --namespace k8s.io image tag  registry.cn-hangzhou.aliyuncs.com/shouzhi/calico_cni:v3.26.0 docker.io/calico/cni:v3.26.0
ctr --namespace k8s.io image tag  registry.cn-hangzhou.aliyuncs.com/shouzhi/calico_node:v3.26.0 docker.io/calico/node:v3.26.0
ctr --namespace k8s.io image tag  registry.cn-hangzhou.aliyuncs.com/shouzhi/calico_kube-controllers:v3.26.0 docker.io/calico/kube-controllers:v3.26.0

# Image list required to start the minimal cluster (10 images)
docker.io/calico/cni                                                v3.26.0             54cd67220700c       85.5MB
docker.io/calico/kube-controllers                                   v3.26.0             aebf438b736fc       29.2MB
docker.io/calico/node                                               v3.26.0             0259a80e0f442       84.6MB
registry.k8s.io/coredns/coredns                                     v1.11.3             2f6c962e7b831       16.9MB
registry.k8s.io/etcd                                                3.5.15-0            27e3830e14027       66.4MB
registry.k8s.io/kube-apiserver                                      v1.31.0             cd0f0ae0ec9e0       25.6MB
registry.k8s.io/kube-controller-manager                             v1.31.0             fcb0683e6bdbd       23.9MB
registry.k8s.io/kube-proxy                                          v1.31.0             71d55d66fd4ee       26.8MB
registry.k8s.io/kube-scheduler                                      v1.31.0             fbbbd428abb4d       18.4MB
registry.k8s.io/pause                                               3.10                afb61768ce381       266kB
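If typing the pull and re-tag commands one by one feels tedious, the mapping can also be driven by a small loop; a sketch assuming the same mirror registry prefix used above:

#!/usr/bin/env bash
MIRROR=registry.cn-hangzhou.aliyuncs.com/shouzhi

# "<mirror image name>  <official image name>" pairs
IMAGES="
kube-apiserver:v1.31.0            registry.k8s.io/kube-apiserver:v1.31.0
kube-controller-manager:v1.31.0   registry.k8s.io/kube-controller-manager:v1.31.0
kube-scheduler:v1.31.0            registry.k8s.io/kube-scheduler:v1.31.0
kube-proxy:v1.31.0                registry.k8s.io/kube-proxy:v1.31.0
coredns:v1.11.3                   registry.k8s.io/coredns/coredns:v1.11.3
pause:3.10                        registry.k8s.io/pause:3.10
etcd:3.5.15-0                     registry.k8s.io/etcd:3.5.15-0
calico_cni:v3.26.0                docker.io/calico/cni:v3.26.0
calico_node:v3.26.0               docker.io/calico/node:v3.26.0
calico_kube-controllers:v3.26.0   docker.io/calico/kube-controllers:v3.26.0
"

echo "$IMAGES" | while read -r mirror official; do
  [ -z "$mirror" ] && continue
  crictl pull "$MIRROR/$mirror"                                    # pull from the mirror registry
  ctr --namespace k8s.io image tag "$MIRROR/$mirror" "$official"   # restore the official name
done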

  • View the local images
root@ubuntu1:~/kubernetes/deploy/calico# crictl images
IMAGE                                                               TAG                 IMAGE ID            SIZE
docker.io/calico/cni                                                v3.26.0             54cd67220700c       85.5MB
docker.io/calico/kube-controllers                                   v3.26.0             aebf438b736fc       29.2MB
docker.io/calico/node                                               v3.26.0             0259a80e0f442       84.6MB
registry.k8s.io/coredns/coredns                                     v1.11.3             2f6c962e7b831       16.9MB
registry.k8s.io/etcd                                                3.5.15-0            27e3830e14027       66.4MB
registry.k8s.io/kube-apiserver                                      v1.31.0             cd0f0ae0ec9e0       25.6MB
registry.k8s.io/kube-controller-manager                             v1.31.0             fcb0683e6bdbd       23.9MB
registry.k8s.io/kube-proxy                                          v1.31.0             71d55d66fd4ee       26.8MB
registry.k8s.io/kube-scheduler                                      v1.31.0             fbbbd428abb4d       18.4MB
registry.k8s.io/pause                                               3.10                afb61768ce381       266kB

Check that none of the images above is missing; otherwise the cluster will fail to start.
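A quick sketch to verify that all required images are present (uses crictl inspecti, which exits non-zero when an image is missing):

for img in \
  registry.k8s.io/kube-apiserver:v1.31.0 \
  registry.k8s.io/kube-controller-manager:v1.31.0 \
  registry.k8s.io/kube-scheduler:v1.31.0 \
  registry.k8s.io/kube-proxy:v1.31.0 \
  registry.k8s.io/coredns/coredns:v1.11.3 \
  registry.k8s.io/pause:3.10 \
  registry.k8s.io/etcd:3.5.15-0 \
  docker.io/calico/cni:v3.26.0 \
  docker.io/calico/node:v3.26.0 \
  docker.io/calico/kube-controllers:v3.26.0
do
  crictl inspecti "$img" > /dev/null 2>&1 && echo "OK       $img" || echo "MISSING  $img"
done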

Starting the deployment

Prepare kubeadm-config.yaml

Note: run this only on your master server.

  • Print the default kubeadm configuration by running: kubeadm config print init-defaults

Create the file in a directory you maintain and copy the output below into kubeadm-config.yaml. After editing, you can optionally validate it as sketched after the YAML.

# The actual content should follow whatever your own (kubeadm config print init-defaults) command outputs.

apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # IP of your master server (change this)
  advertiseAddress: 192.168.17.142
  # api-server port; leave it unless the port is already in use on this host
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: node
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
# directory where the cluster (including etcd) certificates are stored; no need to change
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
# Kubernetes image registry; no need to change
imageRepository: registry.k8s.io
kind: ClusterConfiguration
# Kubernetes version; set it to the version you are deploying
kubernetesVersion: 1.31.0
networking:
  dnsDomain: cluster.local
  # virtual IP range for Services; no need to change
  serviceSubnet: 10.96.0.0/12
proxy: {}
scheduler: {}

# The following section must be appended
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# set the kubelet cgroup driver
cgroupDriver: systemd
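Before running the real init, you can let kubeadm check the file; a sketch (kubeadm config validate exists in recent kubeadm releases, and --dry-run walks through the whole init without changing the host):

# Validate the fields of the config file
kubeadm config validate --config kubeadm-config.yaml

# Optionally simulate the full init without applying anything
kubeadm init --config=kubeadm-config.yaml --dry-run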

Initializing the master

Note: run this only on your master server.


# Initialize the master: go to the directory containing the kubeadm-config.yaml file you created above and run
kubeadm init --config=kubeadm-config.yaml

# You can also print the image list defined by kubeadm-config.yaml
kubeadm config images list --config=kubeadm-config.yaml
# When you see the output below, your master has been initialized successfully

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

# This means the cluster still needs a pod network plugin to be fully up; we use calico here
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

# Remember this command: it is what the other worker nodes run to join the cluster, used later in the "Joining the worker nodes" section
kubeadm join 192.168.17.142:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:8bcc34481b37c8325791bc0d275bf7aab6b1c9222c4ea23f5dfa4988d3f21f60

After the success message above, continue with:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf

At this point, the master node initialization is complete.
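You can already query the cluster from the master; a quick check (the node will report NotReady until the Calico network plugin is installed in a later step):

kubectl get nodes
kubectl get pods -n kube-system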

Joining the worker nodes

Note: run this only on the other worker servers.

# This is the exact command printed by your own kubeadm init --config=kubeadm-config.yaml run
kubeadm join 192.168.17.142:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:8bcc34481b37c8325791bc0d275bf7aab6b1c9222c4ea23f5dfa4988d3f21f60
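If you did not save the command, or the token has expired (the default TTL is 24h), a fresh join command can be generated on the master:

# Run on the master: creates a new token and prints the full join command
kubeadm token create --print-join-command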

Installing Calico

To install the Calico network plugin on Kubernetes v1.31, follow these steps:

  • Preparation

Make sure the Kubernetes cluster is deployed, all nodes can reach each other, and the following requirements are met:

  1. Every node has working network connectivity, especially between the control plane and the worker nodes.
  2. kubectl works and the cluster is up and running.
  • Download the Calico manifest

Calico provides an official installation manifest; download it with:

# Downloads to: calico.yaml
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/calico.yaml

# Install calico
kubectl apply -f calico.yaml
  • Check the Calico pod status

After applying the manifest, the Calico pods start in the kube-system namespace. Check their status with:

kubectl get pods -n kube-system

All Calico-related pods should be in the Running state; they usually include the following components (see the rollout commands after this list):

  • calico-node
  • calico-kube-controllers
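To wait for the rollout instead of polling manually, something like the following should work (the daemonset and deployment names match the ones defined in calico.yaml):

# Wait until the calico-node daemonset is rolled out on every node
kubectl rollout status daemonset/calico-node -n kube-system

# Wait for the calico-kube-controllers deployment
kubectl rollout status deployment/calico-kube-controllers -n kube-system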


Once all the Calico pods show Running, the Calico deployment is complete.

Confirming the cluster status

  • Check the status of all nodes in the cluster
root@ubuntu1:~/kubernetes/deploy# kubectl get node -o wide
NAME      STATUS   ROLES           AGE    VERSION   INTERNAL-IP      EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME
node      Ready    control-plane   163m   v1.31.1   192.168.17.142   <none>        Ubuntu 20.04.5 LTS   5.4.0-196-generic   containerd://1.7.22
ubuntu2   Ready    <none>          155m   v1.31.1   192.168.17.8     <none>        Ubuntu 20.04.5 LTS   5.4.0-196-generic   containerd://1.7.22
ubuntu3   Ready    <none>          154m   v1.31.1   192.168.17.239   <none>        Ubuntu 20.04.5 LTS   5.4.0-196-generic   containerd://1.7.22
  • Check all pods in the cluster

The following is the complete set of pods of a minimal k8s cluster:

root@ubuntu1:~/kubernetes/deploy# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-7f764f4f68-tgwzt   1/1     Running   0          151m
kube-system   calico-node-4h285                          1/1     Running   0          151m
kube-system   calico-node-rkl7l                          1/1     Running   0          151m
kube-system   calico-node-xqpq8                          1/1     Running   0          151m
kube-system   coredns-7c65d6cfc9-68fsq                   1/1     Running   0          166m
kube-system   coredns-7c65d6cfc9-nh9b2                   1/1     Running   0          166m
kube-system   etcd-node                                  1/1     Running   0          166m
kube-system   kube-apiserver-node                        1/1     Running   0          166m
kube-system   kube-controller-manager-node               1/1     Running   6          166m
kube-system   kube-proxy-bm7rx                           1/1     Running   0          157m
kube-system   kube-proxy-r97ql                           1/1     Running   0          158m
kube-system   kube-proxy-z9d2j                           1/1     Running   0          166m
kube-system   kube-scheduler-node                        1/1     Running   6          166m

At this point, the cluster with 1 master and 2 workers has been deployed successfully.
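As a final sanity check, you can run a small test workload; a sketch assuming the nodes can pull the nginx image from Docker Hub (substitute an image from your own registry if they cannot):

# Create a test deployment and expose it inside the cluster
kubectl create deployment nginx-test --image=nginx --replicas=2
kubectl expose deployment nginx-test --port=80

# Pods should be scheduled onto the worker nodes and reach Running
kubectl get pods -o wide -l app=nginx-test

# Clean up
kubectl delete service nginx-test
kubectl delete deployment nginx-test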

Looking ahead

This guide only deploys a minimal cluster, which is enough for learning and development. A production-grade setup would build on this with multiple masters for high availability. I will write a more complete high-availability deployment guide in a follow-up post.
