K8S 1.22.2 Cluster Setup

IP              Hostname
192.168.8.5     master-1
192.168.8.8     master-2
192.168.8.9     master-3
192.168.8.7     node-1
192.168.8.25    node-2
192.168.8.100   VIP (keepalived virtual IP)

Set the hostname (run on all hosts)

hostnamectl set-hostname <hostname>

Disable the firewall and SELinux (run on all hosts)

[root@master-1 ~]#systemctl stop firewalld
[root@master-1 ~]#systemctl disable firewalld
[root@master-1 ~]#sed -i 's/enforcing/disabled/' /etc/selinux/config
[root@master-1 ~]#setenforce 0
setenforce: SELinux is disabled

Disable swap (run on all hosts)

swapoff -a  # temporary, for the current boot
sed -ri 's/.*swap.*/#&/' /etc/fstab  # permanent, across reboots
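
A quick sanity check (optional): after the two commands above, swap should be fully off.

free -m | grep -i swap     # the Swap line should show 0 total and 0 used
swapon --show              # prints nothing when no swap device is active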

Write the cluster host entries to /etc/hosts (run on all hosts)

cat >> /etc/hosts << EOF
192.168.8.5 master-1
192.168.8.8 master-2
192.168.8.9 master-3
192.168.8.7 node-1
192.168.8.25 node-2
EOF

Pass bridged IPv4 traffic to iptables chains (run on all hosts)

#Before configuring, confirm that the br_netfilter module is loaded
lsmod | grep br_netfilter
modprobe br_netfilter
#Then apply the required sysctl settings
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl --system
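
Optionally verify the three settings took effect (each should print "= 1"):

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward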

Time synchronization (run on all hosts)

yum -y install chrony
systemctl enable --now chronyd
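
An optional check that time synchronization is actually working:

chronyc sources     # at least one source should be marked with '^*'
chronyc tracking    # "Leap status" should read "Normal"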

Install Docker (run on all hosts)

wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
yum -y install docker-ce

Configure a registry mirror (image pull accelerator)
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://cynsyplj.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
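
To confirm Docker is running and picked up the mirror (optional):

systemctl is-active docker                   # should print "active"
docker info | grep -A 1 'Registry Mirrors'   # should list the mirror configured above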

Add the Aliyun Kubernetes yum repository for kubeadm (run on all hosts)

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install kubeadm, kubelet, and kubectl (run on all hosts)

yum install -y kubelet-1.22.2 kubeadm-1.22.2 kubectl-1.22.2
[root@master ~ ]# docker info |grep 'Cgroup Driver' |awk '{print $3}'
cgroupfs

vim /etc/sysconfig/kubelet  # configure extra kubelet startup arguments
KUBELET_EXTRA_ARGS="--cgroup-driver=cgroupfs --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.5"

systemctl  daemon-reload


[root@master ~ ]# cat /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/sysconfig/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS


systemctl enable --now kubelet
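
At this point kubelet will keep restarting because there is no cluster configuration yet; that is expected and clears up after kubeadm init/join. A quick way to see its current state:

systemctl status kubelet --no-pager | grep Active
journalctl -u kubelet --no-pager | tail -n 20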

Set up high availability on all master nodes

Master HA really just means putting a reverse proxy in front of all the kube-apiserver instances; an SLB or a separate proxy VM would also work. In this example, nginx (stream upstream) + keepalived are deployed on every master node to reverse proxy kube-apiserver.

Enable IPVS for kube-proxy

#IPVS stands for IP Virtual Server
#1. Run the following commands on all master nodes
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
#2. Check that the IPVS modules were loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
#ip_vs, ip_vs_rr, ip_vs_wrr, ip_vs_sh and nf_conntrack_ipv4 should all be listed
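
A rough way to double-check IPVS later, once kube-proxy is running (assumes the ipvsadm package, which is not installed elsewhere in this guide):

yum -y install ipvsadm
ipvsadm -Ln     # should list virtual servers once kube-proxy is up in ipvs mode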

Install nginx and keepalived on all master nodes

yum -y install nginx keepalived
systemctl enable --now keepalived 
systemctl enable --now nginx
yum install -y nginx-mod-stream --skip-broken

Configure the nginx upstream reverse proxy

#Append the following to the end of /etc/nginx/nginx.conf
stream {
        include /etc/nginx/tcpconf/*.conf;
}

mkdir /etc/nginx/tcpconf 

cat > /etc/nginx/tcpconf/k8s_lb.conf << EOF
upstream kubernetes_lb {
    server 192.168.8.5:6443 weight=5 max_fails=3 fail_timeout=30s;
    server 192.168.8.8:6443 weight=5 max_fails=3 fail_timeout=30s;
    server 192.168.8.9:6443 weight=5 max_fails=3 fail_timeout=30s;
}
server {
    listen 7443;
    proxy_connect_timeout 30s;
    proxy_timeout 30s;
    proxy_pass kubernetes_lb;
}
EOF
nginx -t
nginx -s reload
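
Optionally confirm the stream proxy is listening on port 7443:

ss -lntp | grep 7443     # should show nginx bound to 0.0.0.0:7443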

Configure keepalived

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_script chk_nginx {
    script "/etc/keepalived/nginx_check.sh"   # path of the nginx health-check script
    interval 2                                # check interval in seconds
    weight -20                                # subtract 20 from priority when the check fails
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100                              # set 99 and 98 on the other masters
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        chk_nginx                             # run the nginx health check
    }
    virtual_ipaddress {
        192.168.8.100                         # virtual IP (VIP)
    }
}
EOF

cat > /etc/keepalived/nginx_check.sh << 'EOF'
#!/bin/bash
export LANG="en_US.UTF-8"
# If nginx is not running, try to restart it once; if that still fails,
# kill keepalived so the VIP fails over to another master.
if [ ! -f "/run/nginx.pid" ]; then
    /usr/bin/systemctl restart nginx
    sleep 2
    if [ ! -f "/run/nginx.pid" ]; then
       /bin/kill -9 $(head -n 1 /var/run/keepalived.pid)
    fi
fi
EOF

chmod a+x /etc/keepalived/nginx_check.sh
#Copy the keepalived config and check script to the other master nodes
scp /etc/keepalived/* 192.168.8.9:/etc/keepalived/
scp /etc/keepalived/* 192.168.8.8:/etc/keepalived/

Restart keepalived on all master nodes
systemctl restart keepalived

From any node on the same network, verify that the VIP is reachable
ping 192.168.8.100
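
An optional failover test (a rough sketch, assuming the VIP interface is ens33 as configured above): stop keepalived on the node that currently holds the VIP, and the VIP should move to another master within a few seconds.

ip addr show ens33 | grep 192.168.8.100     # find the node that currently holds the VIP
systemctl stop keepalived                   # run on that node
ip addr show ens33 | grep 192.168.8.100     # run on another master; the VIP should now appear here
systemctl start keepalived                  # restore the stopped node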

Run kubeadm init on master-1 to initialize the cluster

[root@master-1 ~]#kubeadm config print init-defaults > kubeadm-init.yaml
[root@master-1 ~]#cat >  kubeadm-init.yaml  << EOF
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.8.5   # this node's IP address
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  imagePullPolicy: IfNotPresent
  name: master-1
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
controlPlaneEndpoint: "192.168.8.100:7443"   # cluster endpoint: the VIP plus the nginx proxy port
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers  # k8s.gcr.io is hard to reach, so use the Aliyun mirror
kind: ClusterConfiguration
kubernetesVersion: 1.22.2
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16                   # pod network CIDR (matches the flannel default)
scheduler: {}
---                                                   # kube-proxy configuration: use IPVS mode
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"

EOF

#Pull the images ahead of time
[root@master-1 ~]#kubeadm config images pull --config kubeadm-init.yaml
[root@master-1 ~]#kubeadm init --config kubeadm-init.yaml
[init] Using Kubernetes version: v1.22.2
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master-1] and IPs [10.96.0.1 192.168.8.5 192.168.8.100]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master-1] and IPs [192.168.8.5 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master-1] and IPs [192.168.8.5 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 33.961498 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.22" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master-1 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node master-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 192.168.8.100:7443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:891c840ee3b30071451cd2ba36fb430dc5513c3aacdb6d2229a04578c7607c93 \
	--control-plane 

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.8.100:7443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:891c840ee3b30071451cd2ba36fb430dc5513c3aacdb6d2229a04578c7607c93 
	
[root@master-1 ~]#  mkdir -p $HOME/.kube
[root@master-1 ~]#  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master-1 ~]#  sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master-1 ~]#  export KUBECONFIG=/etc/kubernetes/admin.conf
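
Optionally verify the control plane is reachable through the VIP and the nginx proxy (assumes the default anonymous access to /version is still enabled):

kubectl get node
curl -k https://192.168.8.100:7443/version     # should return the server version JSON via the proxy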

[root@master-1 ~]# scp /etc/kubernetes/pki/ca.* 192.168.8.9:/etc/kubernetes/pki/
[root@master-1 ~]# scp -r /etc/kubernetes/pki/sa.* 192.168.8.9:/etc/kubernetes/pki/
[root@master-1 ~]# scp -r /etc/kubernetes/pki/front-proxy-ca.* 192.168.8.9:/etc/kubernetes/pki/
[root@master-1 ~]# scp -r /etc/kubernetes/pki/etcd/ca.* 192.168.8.9:/etc/kubernetes/pki/etcd/
[root@master-1 ~]# scp -r /etc/kubernetes/admin.conf 192.168.8.9:/etc/kubernetes/

[root@master-1 ~]# scp /etc/kubernetes/pki/ca.* 192.168.8.8:/etc/kubernetes/pki/
[root@master-1 ~]# scp -r /etc/kubernetes/pki/sa.* 192.168.8.8:/etc/kubernetes/pki/
[root@master-1 ~]# scp -r /etc/kubernetes/pki/front-proxy-ca.* 192.168.8.8:/etc/kubernetes/pki/
[root@master-1 ~]# scp -r /etc/kubernetes/pki/etcd/ca.* 192.168.8.8:/etc/kubernetes/pki/etcd/
[root@master-1 ~]# scp -r /etc/kubernetes/admin.conf 192.168.8.8:/etc/kubernetes/

Run on the other master nodes (master-2 and master-3)

kubeadm join 192.168.8.100:7443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:891c840ee3b30071451cd2ba36fb430dc5513c3aacdb6d2229a04578c7607c93 \
	--control-plane 

Run on the worker nodes

kubeadm join 192.168.8.100:7443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:891c840ee3b30071451cd2ba36fb430dc5513c3aacdb6d2229a04578c7607c93 

Label the worker nodes so the ROLES column is populated

kubectl label nodes node-1 node-role.kubernetes.io/node=node
kubectl label nodes node-2 node-role.kubernetes.io/node=node
kubectl get node

Deploy the flannel network add-on

wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

kubectl apply -f kube-flannel.yml
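
To confirm flannel came up (the namespace differs by manifest version: kube-system in older manifests, kube-flannel in newer ones):

kubectl get pods -A | grep flannel     # all flannel pods should reach Running
kubectl get node                       # nodes should move from NotReady to Ready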

Install kubectl command auto-completion

yum -y install bash-completion
source /usr/share/bash-completion/bash_completion 
source <(kubectl completion bash)
echo 'source <(kubectl completion bash)' >> /root/.bashrc 
[root@master-1 ayq]#kubectl get node
NAME       STATUS   ROLES                  AGE   VERSION
master-1   Ready    control-plane,master   69m   v1.22.2
master-2   Ready    control-plane,master   36m   v1.22.2
master-3   Ready    control-plane,master   16m   v1.22.2
node-1     Ready    node                   38m   v1.22.2
node-2     Ready    node                   37m   v1.22.2
