Prepare four nodes
Perform the following steps on each of the four machines.
1. Set the hostname
hostnamectl set-hostname master1
hostnamectl set-hostname master2
hostnamectl set-hostname node1
hostnamectl set-hostname k8s-vip
2. Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
3. Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
4. Disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
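To confirm swap is fully disabled, a quick sanity check (swapon --show prints nothing when no swap is active):
# both commands should report no active swap
swapon --show
free -h | grep -i swap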
5. Add hosts entries on the master nodes
[root@master1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.30.59.231 master.k8s.io k8s-vip
10.30.59.212 master01.k8s.io master1
10.30.59.221 master02.k8s.io master2
10.30.59.223 node01.k8s.io node1
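The other nodes need the same entries, since the join commands below reach the apiserver via master.k8s.io. One way to distribute the file from master1, assuming root SSH access between the nodes, is a simple loop (IPs taken from the table above):
# push /etc/hosts from master1 to master2 and node1
for ip in 10.30.59.221 10.30.59.223; do
  scp /etc/hosts root@${ip}:/etc/hosts
done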
6. Pass bridged IPv4 traffic to the iptables chains
[root@master1 ~]# cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
Apply the settings:
[root@master1 ~]# sysctl --system
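To verify the keys took effect (the net.bridge.* keys exist only while the br_netfilter module is loaded; run modprobe br_netfilter first if they are missing):
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables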
Synchronize the time
[root@master1 ~]# yum install ntpdate -y
[root@master1 ~]# ntpdate time.windows.com
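ntpdate is a one-shot sync; to keep the clocks aligned you could schedule it periodically, for example every 30 minutes (a sketch; the interval is arbitrary):
# append a cron entry without clobbering any existing crontab
(crontab -l 2>/dev/null; echo '*/30 * * * * /usr/sbin/ntpdate time.windows.com') | crontab -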
Deploy keepalived on the master nodes
Install the dependencies and keepalived
[root@master1 ~]# yum install -y conntrack-tools libseccomp libtool-ltdl
[root@master1 ~]# yum install -y keepalived
Configure the master1 node
[root@master1 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id k8s
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 51
    priority 250
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        10.30.59.231
    }
    track_script {
        check_haproxy
    }
}
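The check_haproxy script uses killall -0, which sends signal 0 to test whether a haproxy process exists: exit status 0 if one is running, non-zero otherwise. When the check fails, weight -2 lowers this node's priority so the VIP can fail over. The check can be exercised by hand:
# exit status 0 while haproxy is running, non-zero after it stops
killall -0 haproxy; echo $?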
Configure the master2 node
[root@master2 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id k8s
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    virtual_router_id 51
    priority 200
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        10.30.59.231
    }
    track_script {
        check_haproxy
    }
}
Start keepalived and enable it at boot; do this on both master nodes
systemctl start keepalived.service
systemctl enable keepalived.service
Check the service status
[root@master2 ~]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2021-06-15 10:52:05 CST; 23h ago
Main PID: 9972 (keepalived)
Tasks: 3
Memory: 0B
CGroup: /system.slice/keepalived.service
├─9972 /usr/sbin/keepalived -D
├─9973 /usr/sbin/keepalived -D
└─9974 /usr/sbin/keepalived -D
Jun 16 10:30:32 master2 Keepalived_vrrp[9974]: VRRP_Instan...
Jun 16 10:30:33 master2 Keepalived_vrrp[9974]: (VI_1): ip ...
Jun 16 10:30:33 master2 Keepalived_vrrp[9974]: bogus VRRP ...
Jun 16 10:30:33 master2 Keepalived_vrrp[9974]: VRRP_Instan...
Jun 16 10:30:34 master2 Keepalived_vrrp[9974]: (VI_1): ip ...
Jun 16 10:30:34 master2 Keepalived_vrrp[9974]: bogus VRRP ...
Jun 16 10:30:34 master2 Keepalived_vrrp[9974]: VRRP_Instan...
Jun 16 10:30:35 master2 Keepalived_vrrp[9974]: (VI_1): ip ...
Jun 16 10:30:35 master2 Keepalived_vrrp[9974]: bogus VRRP ...
Jun 16 10:30:35 master2 Keepalived_vrrp[9974]: VRRP_Instan...
Hint: Some lines were ellipsized, use -l to show in full.
Check the NIC information on master1
ip a s ens192
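The VIP 10.30.59.231 should be attached to ens192 on whichever node is currently MASTER. A simple failover test, if a brief interruption of the VIP is acceptable:
# on master1: release the VIP
systemctl stop keepalived
# on master2: the VIP should appear within a few seconds
ip a s ens192 | grep 10.30.59.231
# on master1: restart keepalived; with priority 250 it reclaims MASTER
systemctl start keepalived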
Deploy haproxy
On both master nodes
Install haproxy
yum install -y haproxy
Configuration
The configuration is identical on the two master nodes. It declares the two master API servers as proxied backends and binds haproxy to port 16443, so port 16443 becomes the entry point of the cluster.
[root@master1 ~]# cat /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    # 1) configure syslog to accept network log events. This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #    file. A line like the following can be added to
    #    /etc/sysconfig/syslog
    #
    #    local2.*    /var/log/haproxy.log
    #
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option                  http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxys to the backends
#---------------------------------------------------------------------
frontend kubernetes-apiserver
    mode            tcp
    bind            *:16443
    option          tcplog
    default_backend kubernetes-apiserver
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode    tcp
    balance roundrobin
    server  master01.k8s.io 10.30.59.212:6443 check
    server  master02.k8s.io 10.30.59.221:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind          *:1080
    stats auth    admin:awesomePassword
    stats refresh 5s
    stats realm   HAProxy\ Statistics
    stats uri     /admin?stats
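Before starting the service, the file can be syntax-checked:
haproxy -c -f /etc/haproxy/haproxy.cfg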
Start and verify
Run on both master nodes.
Enable at boot:
systemctl enable haproxy
Start haproxy:
systemctl start haproxy
Check the startup status:
systemctl status haproxy
Check the ports:
[root@master1 ~]# netstat -lntup|grep haproxy
tcp 0 0 0.0.0.0:1080 0.0.0.0:* LISTEN 11783/haproxy
tcp 0 0 0.0.0.0:16443 0.0.0.0:* LISTEN 11783/haproxy
udp 0 0 0.0.0.0:59833 0.0.0.0:* 11782/haproxy
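The stats page defined in the listen stats block offers a quick view of backend health (the two master backends will show as DOWN until the apiservers are deployed below). Credentials come from the config above:
curl -u admin:awesomePassword 'http://127.0.0.1:1080/admin?stats'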
Install Docker/kubeadm/kubelet on all nodes
Install Docker
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum -y install docker-ce-18.06.1.ce-3.el7
systemctl enable docker && systemctl start docker
docker --version
Docker version 18.06.1-ce, build e68fc7a
[root@master2 ~]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
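Restart Docker so the registry mirror setting takes effect:
systemctl restart docker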
Add the Aliyun YUM repository
[root@master2 ~]# cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
Install kubeadm, kubelet, and kubectl
yum install -y kubelet-1.16.3 kubeadm-1.16.3 kubectl-1.16.3
systemctl enable kubelet
Deploy the Kubernetes masters
Create the kubeadm configuration file
On the master1 node
mkdir /usr/local/kubernetes/manifests -p
cd /usr/local/kubernetes/manifests/
vi kubeadm-config.yaml
apiServer:
  certSANs:
    - master1
    - master2
    - master.k8s.io
    - 10.30.59.231
    - 10.30.59.212
    - 10.30.59.221
    - 127.0.0.1
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "master.k8s.io:16443"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.16.3
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.1.0.0/16
scheduler: {}
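Optionally, pre-pull the required images so the init step does not stall on downloads:
kubeadm config images pull --config kubeadm-config.yaml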
Run the initialization:
kubeadm init --config kubeadm-config.yaml
Following the prompts in the output, configure the environment so the kubectl tool can be used:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes
kubectl get pods -n kube-system
Save the following join command, printed by kubeadm init, for later use:
kubeadm join master.k8s.io:16443 --token u96wvh.rua3gk470xnp5562 \
--discovery-token-ca-cert-hash sha256:ea8960d2a558b3d908ae34b963de087dd76a809fc3b669d343c5613820e58143 \
--control-plane
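Tokens expire after 24 hours by default; if the saved command stops working, generate a fresh worker join command on master1 (append --control-plane for a control-plane join):
kubeadm token create --print-join-command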
Check the cluster status
[root@master1 ~]# kubectl get cs
NAME AGE
controller-manager <unknown>
scheduler <unknown>
etcd-0 <unknown>
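kubectl get cs is deprecated, and in v1.16 its table output is known to render as <unknown>. A more direct health check is to query the apiserver through the VIP; the /healthz endpoint is readable anonymously in a default install:
curl -k https://master.k8s.io:16443/healthz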
Install the cluster network
Run on master1
mkdir flannel
cd flannel
wget -c https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Install the flannel network and verify it
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system
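To block until the flannel pods are ready (assuming the upstream manifest's app=flannel label and kube-system namespace):
kubectl -n kube-system wait --for=condition=Ready pod -l app=flannel --timeout=180s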
Join master2 to the cluster
Copy the certificates and related files from master1
# ssh root@10.30.59.221 mkdir -p /etc/kubernetes/pki/etcd
# scp /etc/kubernetes/admin.conf root@10.30.59.221:/etc/kubernetes
# scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@10.30.59.221:/etc/kubernetes/pki
# scp /etc/kubernetes/pki/etcd/ca.* root@10.30.59.221:/etc/kubernetes/pki/etcd
Run the control-plane join on master2
kubeadm join master.k8s.io:16443 --token u96wvh.rua3gk470xnp5562 \
--discovery-token-ca-cert-hash sha256:ea8960d2a558b3d908ae34b963de087dd76a809fc3b669d343c5613820e58143 \
--control-plane
Check the status
[root@master2 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master1 Ready master 15h v1.16.3
node1 Ready <none> 2m57s v1.16.3
[root@master2 ~]# kubectl get pods --all-namespaces
Join a Kubernetes worker node
Run on node1
To add a new worker node, run the kubeadm join command printed by kubeadm init, without the --control-plane flag:
kubeadm join master.k8s.io:16443 --token u96wvh.rua3gk470xnp5562 \
    --discovery-token-ca-cert-hash sha256:ea8960d2a558b3d908ae34b963de087dd76a809fc3b669d343c5613820e58143
Check the status (from a master node):
kubectl get node
kubectl get pods --all-namespaces
Test the Kubernetes cluster
Create a pod in the cluster and verify that it runs correctly:
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc
Access URL: http://NodeIP:Port
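For example, to look up the assigned NodePort and test it against node1 (IP taken from the hosts table; any node IP works):
NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl -I http://10.30.59.223:${NODE_PORT}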