1. Environment
IP | Purpose | OS
--- | --- | ---
192.168.181.36 | k8s-master1 | CentOS 7.6
192.168.181.37 | k8s-master2 | CentOS 7.6
192.168.181.38 | k8s-master3 | CentOS 7.6
192.168.181.39 | k8s-worker1 | CentOS 7.6
192.168.181.40 | k8s-worker2 | CentOS 7.6
192.168.181.41 | k8s-worker3 | CentOS 7.6
192.168.181.42 | VIP (keepalived virtual IP) | -
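Optionally, make the node hostnames resolvable on every server; a sketch of matching /etc/hosts entries (inferred from the table above, not part of the original notes):
cat >> /etc/hosts <<EOF
192.168.181.36 k8s-master1
192.168.181.37 k8s-master2
192.168.181.38 k8s-master3
192.168.181.39 k8s-worker1
192.168.181.40 k8s-worker2
192.168.181.41 k8s-worker3
EOF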
2. Pre-installation Preparation
1. Upgrade the operating system kernel (one common approach is sketched below).
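A common way to do this on CentOS 7.6 is the ELRepo long-term-support kernel; the repository and package choice below are assumptions, not from the original notes:
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-lt
grub2-set-default 0 && grub2-mkconfig -o /boot/grub2/grub.cfg
reboot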
2. Install Docker (a reference installation is sketched below).
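For reference, a typical Docker CE installation; the Aliyun mirror repo is an assumption, chosen to match the Aliyun Kubernetes repo used later:
yum install -y yum-utils
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce
systemctl enable --now docker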
3. Install and configure keepalived; the configuration file is as follows:
[root@app18137 home]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        simon.li@sinoeyes.com
    }
    notification_email_from paas-support@sinoeyes.com
    smtp_server smtp.exmail.qq.com
    smtp_connect_timeout 30
    router_id app18137
    vrrp_skip_check_adv_addr
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}
vrrp_instance VI_1 {
    state BACKUP
    ## Network interface name (change to match the host)
    interface eno1
    virtual_router_id 233
    priority 90                   ## Priority; use a different value on each node (change as needed)
    nopreempt                     ## nopreempt keeps a recovered node from taking the VIP back
    advert_int 1                  ## Liveness-check (advertisement) interval between the servers, in seconds
    authentication {
        auth_type PASS
        auth_pass 18142
    }
    unicast_src_ip 192.168.181.37 # Unicast source address, i.e. this node's own IP
    unicast_peer {
        192.168.181.38            # Unicast peer address(es); with several backup nodes, list one per line
    }
    virtual_ipaddress {
        ## Virtual IP on the same subnet (change as needed)
        192.168.181.42/24
    }
}
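After deploying the configuration to each master (adjusting interface, priority, unicast_src_ip and unicast_peer per node), start keepalived and confirm that exactly one node holds the VIP:
systemctl enable --now keepalived
ip addr show eno1 | grep 192.168.181.42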
4. Install and configure haproxy; the configuration file is as follows:
[root@app18137 config]# cat haproxy.cfg
global
    maxconn 10000                 # At most 10000 concurrent connections
    daemon                        # Run in the background as a daemon
defaults
    log 127.0.0.1 local0 debug    # Log level: [emerg, alert, crit, err, warning, notice, info, debug]
    # mode http                   # Default mode { tcp|http|health }: tcp is layer 4, http is layer 7, health only returns OK
    retries 3                     # Retries after a failed backend connection; after 3 failures the server is marked unavailable
    timeout client 1h             # Client-side timeout: 1 hour
    timeout server 1h             # Server-side timeout: 1 hour
    timeout connect 1h            # Timeout for connecting to a backend server: 1 hour
    timeout check 10s             # Timeout for backend health checks: 10 seconds
listen stats                      # Statistics page
    mode http
    bind *:1080                   # Port 1080 inside the container
    stats refresh 5s              # Refresh the statistics every 5 seconds
    stats uri /stats              # URI of the statistics page
    stats realm HAProxy\ Stats    # Authentication realm for the statistics page
    stats auth admin:654321       # Username and password for the statistics page
frontend tidb_front
    mode tcp
    bind *:16443                  # Port 16443 inside the container; it must be reachable on the host as port 16444, the controlPlaneEndpoint port used below
    default_backend tidb_back
backend tidb_back
    mode tcp
    option tcp-check
    balance roundrobin
    server k8s-master1 192.168.181.36:6443 check inter 10s rise 5 fall 3 weight 1
    server k8s-master2 192.168.181.37:6443 check inter 10s rise 5 fall 3 weight 1
    server k8s-master3 192.168.181.38:6443 check inter 10s rise 5 fall 3 weight 1
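The comments above assume haproxy runs as a Docker container. A minimal sketch of starting it with the official haproxy image (the image tag, the config path /home/config/haproxy.cfg, and the 16444->16443 port mapping are assumptions; the mapping is what makes the frontend reachable on the 16444 port used as controlPlaneEndpoint below):
docker run -d --name haproxy --restart=always \
    -p 16444:16443 -p 1080:1080 \
    -v /home/config/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
    haproxy:2.4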
3. Installation Steps
Disable the firewall (run on all servers):
systemctl stop firewalld && systemctl disable firewalld
Disable SELinux (run on all servers):
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config && setenforce 0
Disable swap (run on all servers):
swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
Configure kernel parameters so that bridged IPv4 traffic is passed to iptables chains (run on all servers):
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
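Note: on CentOS 7 these two bridge sysctls only exist while the br_netfilter kernel module is loaded, so sysctl -p can fail with "No such file or directory" on a fresh boot. Loading the module first (an extra step, not in the original notes) avoids this:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl -p /etc/sysctl.d/k8s.conf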
Add the Aliyun Kubernetes YUM repository (run on all servers):
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Install the packages (run on all servers):
yum install -y kubelet-1.20.15 kubeadm-1.20.15 kubectl-1.20.15
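Enable the kubelet service as well; kubeadm starts it during init/join, and enabling it keeps the node working across reboots:
systemctl enable --now kubelet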
Initialize the cluster (run on k8s-master1):
cat > kubeadm-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kubernetesVersion: 1.20.15
apiServer:
  certSANs:
  - "192.168.181.42"
controlPlaneEndpoint: "192.168.181.42:16444"
networking:
  podSubnet: "10.244.0.0/16"
EOF
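Optionally pre-pull the control-plane images so the init step does not stall on downloads, then run the init:
kubeadm config images pull --config=kubeadm-config.yaml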
kubeadm init --config=kubeadm-config.yaml
Wait a few minutes; output like the following indicates success:
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.181.42:16444 --token na5fnw.ed4h3gmp7hnikqnz \
--discovery-token-ca-cert-hash sha256:bee38a46333661d40ae10c6d9e9e2b3d5d69b900b600a2c7e8821dd62049eab7 \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.181.42:16444 --token na5fnw.ed4h3gmp7hnikqnz \
--discovery-token-ca-cert-hash sha256:bee38a46333661d40ae10c6d9e9e2b3d5d69b900b600a2c7e8821dd62049eab7
Run the commands suggested in the output (on k8s-master1):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
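The control plane should now answer through the VIP; a quick sanity check:
kubectl get nodes      # k8s-master1 is listed, NotReady until a pod network is installed
kubectl cluster-info   # should report the API server at https://192.168.181.42:16444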
Join the other masters (run on k8s-master2 and k8s-master3). The --certificate-key value comes from uploading the control-plane certificates on k8s-master1 with kubeadm init phase upload-certs --upload-certs (see "Regenerating the join command" below):
kubeadm join 192.168.181.42:16444 --token na5fnw.ed4h3gmp7hnikqnz \
    --discovery-token-ca-cert-hash sha256:bee38a46333661d40ae10c6d9e9e2b3d5d69b900b600a2c7e8821dd62049eab7 \
    --control-plane --certificate-key 4729e9bb3d8b19b810fcbd3ea91e791cb4479b84175307cbdf6cc5128a28da80
Join the worker nodes (run on k8s-worker1, k8s-worker2 and k8s-worker3):
kubeadm join 192.168.181.42:16444 --token na5fnw.ed4h3gmp7hnikqnz \
    --discovery-token-ca-cert-hash sha256:bee38a46333661d40ae10c6d9e9e2b3d5d69b900b600a2c7e8821dd62049eab7
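To use kubectl on the new masters as well, copy the kubeconfig there too (the same mkdir/cp/chown commands as above). Then verify the membership from any master:
kubectl get nodes -o wide   # all six nodes should be listed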
Regenerating the join command
- Run kubeadm token create --print-join-command to regenerate the basic join command (sufficient for worker nodes; adding a master also needs a fresh certificate-key, see the next step).
- Run kubeadm init phase upload-certs --upload-certs to re-upload the control-plane certificates and print a new certificate-key.
To add a master node, take the join command from step 1 and append --control-plane --certificate-key with the value from step 2:
kubeadm join 192.168.181.42:16444 --token na5fnw.ed4h3gmp7hnikqnz \
    --discovery-token-ca-cert-hash sha256:bee38a46333661d40ae10c6d9e9e2b3d5d69b900b600a2c7e8821dd62049eab7 \
    --control-plane --certificate-key 4729e9bb3d8b19b810fcbd3ea91e791cb4479b84175307cbdf6cc5128a28da80
Install the flannel network
Open https://github.com/flannel-io/flannel/blob/master/Documentation/kube-flannel.yml, copy its contents into a local file (e.g. kube-flannel.yml), then run:
kubectl apply -f kube-flannel.yml
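Once the flannel pods are running, all six nodes should report Ready:
kubectl get pods -A | grep flannel
kubectl get nodes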