一、kube-proxy开启ipvs的前置条件
三台机器同时执行
$ modprobe br_netfilter
写一个脚本
# Generate a boot-time script that loads the kernel modules kube-proxy needs
# for IPVS mode (ip_vs core + rr/wrr/sh schedulers + conntrack).
# NOTE(review): nf_conntrack_ipv4 exists on kernels < 4.19 (CentOS 7 ships
# 3.10, so this is fine); on 4.19+ the module was renamed to nf_conntrack —
# confirm the target kernel before reusing this snippet.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
$ chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
二、安装 Docker 软件
三台机器同时执行
$ yum install -y yum-utils device-mapper-persistent-data lvm2
$ yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
$ yum install -y docker-ce
# 创建 /etc/docker 目录
$ mkdir /etc/docker
# 配置 daemon.
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"insecure-registries": ["harbor.hongfu.com"],
"registry-mirrors": ["https://kfp63jaj.mirror.aliyuncs.com"]
}
EOF
$ systemctl enable docker
$ reboot
$ systemctl restart docker
$ systemctl status docker
三、关闭 NetworkManager
因为所选网络是calico,所以需要关闭
三台机器同时执行
$ systemctl disable NetworkManager
$ systemctl stop NetworkManager
四、安装 Kubeadm (主从配置)
三台机器同时执行
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
$ yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
$ systemctl enable kubelet.service
启动原理-流程
五、初始化主节点
master端
$ kubeadm config print init-defaults > kubeadm-config.yaml
$ vim kubeadm-config.yaml
# localAPIEndpoint 下,改:
advertiseAddress: 192.168.46.101
kubernetesVersion: v1.15.1
# networking下,在serviceSubnet下添加:
podSubnet: "172.100.0.0/16"
#紧接着上面添加↓(指定负载调度的底层),#下面的这个---一定要写上
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
SupportIPVSProxyMode: true
mode: ipvs
导入镜像
三台机器都将此包拉入
# 将包kubeadm-basic.images.tar.gz拉入
$ tar -zxf kubeadm-basic.images.tar.gz
编写导入镜像的脚本(三台机器都需要)
$ vim load-images.sh
#!/bin/bash
# Import every pre-pulled kubeadm component image tarball under
# /root/kubeadm-basic.images/ into the local Docker daemon.
#
# Fixes over the original: iterate the directory with a quoted glob instead
# of parsing `ls` output through an unquoted $(cat …) loop — the old form
# word-splits on whitespace in filenames and leaves /tmp/images.cache behind
# if the loop is interrupted. No temp file is needed at all.
image_dir=/root/kubeadm-basic.images
for image in "$image_dir"/*; do
  [ -e "$image" ] || continue  # directory empty: glob stays literal, skip it
  docker load -i "$image"
done
$ chmod +x load-images.sh
$ ./load-images.sh
然后master端初始化
$ kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
六、加入主节点以及其余工作节点: maste端
###执行安装日志中的加入命令即可
# master端 粘贴以下命令
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# 然后node01和02节点粘贴以下命令
kubeadm join 192.168.46.101:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:aabcd7ee4a7fb8bc5ffb6169cac8b233915291933f10fdb081c5bf9e25ac4037
然后master查看
[root@k8s-master01 ~]# kubectl get node
七、部署网络
将压缩包 calico-k8s-1.15.1.tar.gz 拉入
## master 端 ##
$ mkdir /usr/local/kubernetes
$ mkdir /usr/local/kubernetes/install
$ mv kubeadm-config.yaml kubeadm-init.log /usr/local/kubernetes/install/
$ mkdir cni
$ mv calico-k8s-1.15.1.tar.gz cni/
$ cd cni
$ tar -zxf calico-k8s-1.15.1.tar.gz
$ cd calico-k8s-1.15.1
将镜像导入
$ docker load -i calico-cni-3.3.7.tar
$ docker load -i calico-node-3.3.7.tar
分享给另外两个node节点
$ scp calico-cni-3.3.7.tar calico-node-3.3.7.tar root@n1:/root/
$ scp calico-cni-3.3.7.tar calico-node-3.3.7.tar root@n2:/root/
# node1和node2也将镜像导入
$ docker load -i calico-cni-3.3.7.tar
$ docker load -i calico-node-3.3.7.tar
创建资源
[root@k8s-master01 calico-k8s-1.15.1]# kubectl create -f rbac-kdd.yaml
[root@k8s-master01 calico-k8s-1.15.1]# kubectl create -f calico.yaml
[root@k8s-master01 calico-k8s-1.15.1]# kubectl get pod -A
#get 获取;#pod最小单位;#-A显示当前所有名字空间(可理解为班级)
kubectl get node 再查看已经部署好
官网下载:
calico 官方网站:Project Calico Documentation
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml