实验之前确保环境纯净,关闭swarm的所有东西,最好用新开的虚拟机
[root@server2 compose]# vim /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
[root@server2 ~]# systemctl daemon-reload
[root@server2 ~]# systemctl restart docker
[root@server2 ~]# scp /etc/docker/daemon.json server3:/etc/docker/daemon.json
[root@server2 ~]# scp /etc/docker/daemon.json server4:/etc/docker/daemon.json
daemon.json 传输给 server3、4 之后,也需要在其上执行 systemctl daemon-reload 并 systemctl restart docker 使配置生效
[root@server2 ~]# docker info
此处如果报错排不了错就重装docker-ce,新虚拟机一般不会有问题
禁用 swap:注释掉 /etc/fstab 中的 swap 挂载行(通常在文件末尾,注意以实际内容为准),server3、4 也需要做
[root@server2 ~]# swapoff -a
[root@server2 ~]# vim /etc/fstab
[root@server2 ~]# swapon -s
确保 yum 源环境纯净,在 server2 上配置好 k8s 源并安装完成后,将 k8s.repo 文件传给 server3、4,在其上同样安装 kubelet、kubeadm、kubectl 并启动 kubelet
[root@server2 ~]# cd /etc/yum.repos.d/
[root@server2 yum.repos.d]# vim k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
[root@server2 yum.repos.d]# yum install -y kubelet kubeadm kubectl
[root@server2 yum.repos.d]# yum repolist
[root@server2 yum.repos.d]# systemctl enable --now kubelet
查看默认配置
kubeadm config print init-defaults
列出所需镜像
[root@server2 yum.repos.d]# kubeadm config images list --image-repository registry.aliyuncs.com/google_containers
拉取镜像
[root@server2 yum.repos.d]# kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers
初始化集群
[root@server2 yum.repos.d]# kubeadm init --pod-network-cidr=10.244.0.0/16 --image-repository registry.aliyuncs.com/google_containers
初始化成功后,输出末尾会打印一条 kubeadm join 命令(含 token 和 ca-cert-hash,即截图中红线部分),务必保存,用于后面 server3、4 加入 server2
[root@server2 ~]# mkdir -p $HOME/.kube
[root@server2 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@server2 ~]# kubectl get ns
查看节点
[root@server2 ~]# kubectl get node
配置 kubectl 命令补全;写入 ~/.bashrc 后需断开连接重连(或执行 source ~/.bashrc)才会生效
[root@server2 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc
[root@server2 ~]# kubectl get pod --namespace kube-system
[root@server2 ~]# vim kube-flannel.yml
本地文件太长了这里不写了
也可以尝试直接在线应用 flannel 网络组件(注意:coreos/flannel 仓库现已迁移为 flannel-io/flannel,旧地址依赖 GitHub 重定向)
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[root@server2 ~]# kubectl apply -f kube-flannel.yml
[root@server2 ~]# kubectl get pod --namespace kube-system
导出节点所需镜像并传输给server3、4
[root@server2 ~]# docker save quay.io/coreos/flannel:v0.12.0-amd64 registry.aliyuncs.com/google_containers/pause:3.2 registry.aliyuncs.com/google_containers/coredns:1.7.0 registry.aliyuncs.com/google_containers/kube-proxy:v1.20.2 > node.tar
[root@server2 ~]# scp node.tar server3:
[root@server2 ~]# scp node.tar server4:
server3、4导入本地镜像
[root@server3 ~]# docker load -i node.tar
[root@server4 ~]# docker load -i node.tar
server3、4节点加入
[root@server3 ~]# kubeadm join 192.168.3.202:6443 --token 0wbzv7.44zylrhhpmlbohzh \
> --discovery-token-ca-cert-hash sha256:f0986d8964f4f21996da9d741e285f6ec4ff32cd91f4a0a314eeb43be8d2cf94
[root@server4 ~]# kubeadm join 192.168.3.202:6443 --token 0wbzv7.44zylrhhpmlbohzh \
> --discovery-token-ca-cert-hash sha256:f0986d8964f4f21996da9d741e285f6ec4ff32cd91f4a0a314eeb43be8d2cf94
server2查看,部署完成
[root@server2 ~]# kubectl get nodes
[root@server2 ~]# kubectl get pod -n kube-system