kubeadm 部署 K8S 集群
初始化设置
# --- Initial preparation: run on ALL nodes (master01, node01, node02) ---
# Stop and permanently disable the firewall so node/pod traffic is not blocked.
systemctl stop firewalld
systemctl disable firewalld
# Switch SELinux to permissive immediately, and disable it across reboots.
setenforce 0
sed -i 's/enforcing/disabled/' /etc/selinux/config
# Flush all iptables rules (filter, nat, mangle) and delete user-defined chains.
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# Kubernetes requires swap off: disable it now and comment out every
# swap entry in /etc/fstab so it stays off after reboot.
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
启动ip_vs模块
# Load every IPVS kernel module shipped with the running kernel so that
# kube-proxy can run in IPVS mode.
# Fixed: the original parsed `ls` output and left every expansion
# unquoted (ShellCheck SC2045/SC2086); glob the module files directly
# and quote all variables instead.
for mod_file in /usr/lib/modules/"$(uname -r)"/kernel/net/netfilter/ipvs/*; do
  mod_name=$(basename "$mod_file")
  mod_name=${mod_name%%.*}   # strip the .ko / .ko.xz suffix
  echo "$mod_name"
  /sbin/modinfo -F filename "$mod_name" >/dev/null 2>&1 && /sbin/modprobe "$mod_name"
done
修改主机名字
# Set each machine's hostname — run ONE of these per node, on that node:
hostnamectl set-hostname master01
hostnamectl set-hostname node01
hostnamectl set-hostname node02
master01 ip是192.168.20.71
node01 是 192.168.20.72
node02 是 192.168.20.73
所有节点修改hosts文件做映射
# Edit /etc/hosts on ALL nodes; the three lines below are the file
# content to append (host name resolution for the cluster), not commands.
vim /etc/hosts
192.168.20.71 master01
192.168.20.72 node01
192.168.20.73 node02
调整内核参数
# Kernel parameters required by Kubernetes networking.
# Fixed: the net.bridge.bridge-nf-call-* sysctls only exist once the
# br_netfilter module is loaded, so load it first — otherwise
# `sysctl --system` reports "No such file or directory" and the
# settings are silently skipped.
modprobe br_netfilter
cat > /etc/sysctl.d/kubernetes.conf << EOF
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.ipv6.conf.all.disable_ipv6=1
net.ipv4.ip_forward=1
EOF
# Apply every sysctl configuration file, including the one just written.
sysctl --system
将所有服务器安装Docker
# --- Install Docker on ALL nodes ---
# Prerequisites for yum repo management and devicemapper storage.
yum install -y yum-utils device-mapper-persistent-data lvm2
# Use the Aliyun mirror of the docker-ce repository.
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io
# Configure the Docker daemon: registry mirror, systemd cgroup driver
# (must match the kubelet's cgroup driver), and bounded json-file logs.
# Fixed: /etc/docker may not exist before the first daemon start, and —
# critically — the original heredoc was missing its closing EOF, so the
# shell would swallow all following commands into daemon.json.
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["你的镜像加速器"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF
systemctl daemon-reload
systemctl restart docker.service
systemctl enable docker.service
# Verify the cgroup driver is now "systemd".
docker info | grep "Cgroup Driver"
所有节点安装 kubeadm、kubelet 和 kubectl
定义kubernetes源
# Define the Kubernetes yum repository (Aliyun mirror).
# gpgcheck/repo_gpgcheck are disabled here for convenience; the gpgkey
# URLs are kept so checking can be re-enabled by flipping the two flags.
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum 安装 kubelet kubeadm kubectl
# Install a matching version triplet — kubelet/kubeadm/kubectl must
# stay in sync with the target cluster version (1.20.11 here).
yum install -y kubelet-1.20.11 kubeadm-1.20.11 kubectl-1.20.11
# Fixed: the kubelet was never enabled. Enable it so it starts on boot;
# `kubeadm init`/`join` will start it when the node is bootstrapped.
systemctl enable kubelet.service
在 master 节点上传apiserver.tar 、controller-manager.tar 、 coredns.tar 、 etcd.tar 、 pause.tar proxy.tar 、 scheduler.tar
# Load every image tarball in the current directory into Docker.
# Fixed: the original iterated over `$(ls *.tar)` with an unquoted
# variable (breaks on spaces, ShellCheck SC2045/SC2086); glob directly
# and quote the expansion.
for tarball in *.tar; do
  docker load -i "$tarball"
done
载入镜像
复制镜像和脚本到 node 节点,并在 node 节点上执行脚本加载镜像文件
# Copy the image tarballs and load script from the master to each worker
# node, then run the load script there.
scp -r /opt/k8s root@node01:/opt
scp -r /opt/k8s root@node02:/opt
初始化kubeadm
# Dump the default init configuration so it can be edited before use
# (advertise address, image repository, pod/service subnets, etc.).
kubeadm config print init-defaults > /opt/kubeadm-config.yaml
cd /opt/
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
# --experimental-upload-certs distributes certificates automatically when
# nodes join later; since K8S v1.16 the flag was renamed to --upload-certs.
# tee kubeadm-init.log saves the init output (including the join command).
设定kubectl
# Give root a working kubectl by installing the admin kubeconfig.
mkdir -p /root/.kube
# Fixed: the destination was the relative path "root/.kube/config",
# which drops the file under the current directory instead of the
# /root/.kube directory created above.
cp -i /etc/kubernetes/admin.conf /root/.kube/config
chown root:root /root/.kube/config
修改
# Edit the static-pod manifests for the scheduler and controller-manager.
# NOTE(review): the source does not show the edit itself — presumably this
# comments out the `--port=0` line (and/or fixes the probe address) so that
# `kubectl get cs` below reports Healthy; confirm against the manifests.
vim /etc/kubernetes/manifests/kube-scheduler.yaml
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
重启kubelet
# Restart kubelet so it re-reads the static-pod manifests edited above.
systemctl restart kubelet
# Check component statuses (scheduler / controller-manager / etcd).
kubectl get cs
节点正常
部署 flannel 网络
在所有节点上上传flannel镜像和cni镜像,及其配置文件
# Load the flannel CNI plugin and flannel daemon images on ALL nodes.
docker load -i flannel-cni-v1.2.0.tar
docker load -i flannel-v0.22.2.tar
在kube-flannel.yml 配置文件里修改版本
# Deploy flannel (image tags in kube-flannel.yml must match the
# versions loaded above), then confirm nodes go Ready.
kubectl apply -f kube-flannel.yml
kubectl get node
# Re-read the saved init log to retrieve the `kubeadm join` command.
# Fixed: a stray space split the path into two arguments
# ("/opt/" and "kubeadm-init.log").
cat /opt/kubeadm-init.log
在 node 节点上执行
# Join each worker to the control plane at master01 (192.168.20.71).
# Use the token and CA cert hash printed by `kubeadm init` (see the log
# above); tokens expire after 24h — regenerate with
# `kubeadm token create --print-join-command` if needed.
kubeadm join 192.168.20.71:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:e230a7d348eb444baac7b7991174d62e43fb4d28f8d693d760ba7ec454815e94
测试 pod 资源创建
# Smoke test: create an nginx deployment and watch the pod get scheduled
# onto a worker node (-o wide shows node and pod IP).
kubectl create deployment nginx --image=nginx
kubectl get pods -o wide
创建成功了