Kubernetes实录 - Part 1 - Cluster Deployment and Configuration (7): Setting up an HA-mode Kubernetes cluster with kubeadm

For the complete table of contents of the Kubernetes实录 series, see: Kubernetes实录-目录

Related links:

This post records the process of setting up an HA Kubernetes cluster with kubeadm, mainly following the official documentation.

Hostname             IP address     Storage IP     OS              Role           Software                                                                    Notes
ejucsmaster-shqs-1   10.99.12.201   10.99.13.201   CentOS 7.8 min  proxy, master  keepalived 2.0.20, haproxy 1.8.25, docker-ce 19.03.12, kubernetes 1.18.5   VIP=10.99.12.200
ejucsmaster-shqs-2   10.99.12.202   10.99.13.202   CentOS 7.8 min  proxy, master  keepalived 2.0.20, haproxy 1.8.25, docker-ce 19.03.12, kubernetes 1.18.5   VIP=10.99.12.200
ejucsmaster-shqs-3   10.99.12.203   10.99.13.203   CentOS 7.8 min  proxy, master  keepalived 2.0.20, haproxy 1.8.25, docker-ce 19.03.12, kubernetes 1.18.5   VIP=10.99.12.200
ejucsnode-shqs-1     10.99.12.204   10.99.13.204   CentOS 7.8 min  worker         docker-ce 19.03.12, kubernetes 1.18.5
ejucsnode-shqs-2     10.99.12.205   10.99.13.205   CentOS 7.8 min  worker         docker-ce 19.03.12, kubernetes 1.18.5
ejucsnode-shqs-3     10.99.12.206   10.99.13.206   CentOS 7.8 min  worker         docker-ce 19.03.12, kubernetes 1.18.5

Note: the 10.99.13.x addresses are dedicated to storage (GlusterFS); see the separate post.

I. Update and initialize the operating system

1. [All nodes] Network configuration

Bond    Mode     IP range            Member ports   Used for
bond0   mode=1   10.99.12.201-206    em1, em3       kubernetes
bond1   mode=1   10.99.13.201-206    em2, em4       GlusterFS (storage)

# bond0 (example from ejucsmaster-shqs-1; set IPADDR to each node's own address)
vi /etc/sysconfig/network-scripts/ifcfg-bond0
NAME=bond0
DEVICE=bond0
TYPE=Bond
ONBOOT=yes
NETBOOT=yes
BOOTPROTO=static
NM_CONTROLLED=no
BONDING_OPTS="miimon=100 mode=1"
IPADDR=10.99.12.201
PREFIX=24
GATEWAY=10.99.12.254

# bond1
vi /etc/sysconfig/network-scripts/ifcfg-bond1
NAME=bond1
DEVICE=bond1
TYPE=Bond
ONBOOT=yes
NETBOOT=yes
BOOTPROTO=static
NM_CONTROLLED=no
BONDING_OPTS="miimon=100 mode=1"
IPADDR=10.99.13.201
PREFIX=24

# em1 (em3 uses the same configuration; just set NAME and DEVICE to em3)
vi /etc/sysconfig/network-scripts/ifcfg-em1
NAME=em1
DEVICE=em1
TYPE=Ethernet
ONBOOT=yes
NETBOOT=yes
BOOTPROTO=none
NM_CONTROLLED=no
MASTER=bond0
SLAVE=yes

# em2 (em4 uses the same configuration; just set NAME and DEVICE to em4)
vi /etc/sysconfig/network-scripts/ifcfg-em2
NAME=em2
DEVICE=em2
TYPE=Ethernet
ONBOOT=yes
NETBOOT=yes
BOOTPROTO=none
NM_CONTROLLED=no
MASTER=bond1
SLAVE=yes
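
A quick way to confirm the bonding came up as intended (these verification commands are not in the original notes; they are standard CentOS 7 tooling):

systemctl restart network        # reload the network-scripts configuration
cat /proc/net/bonding/bond0      # should show mode 1 (active-backup) and both slave interfaces up
cat /proc/net/bonding/bond1
ip -br addr                      # confirm the 10.99.12.x and 10.99.13.x addresses on bond0/bond1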

2. Hostname configuration

# Run the matching command on each host
hostnamectl set-hostname ejucsmaster-shqs-1
hostnamectl set-hostname ejucsmaster-shqs-2
hostnamectl set-hostname ejucsmaster-shqs-3
hostnamectl set-hostname ejucsnode-shqs-1
hostnamectl set-hostname ejucsnode-shqs-2
hostnamectl set-hostname ejucsnode-shqs-3

3. /etc/hosts

# vi /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.99.12.200 csapi.ejuops.com
10.99.12.201 ejucsmaster-shqs-1
10.99.12.202 ejucsmaster-shqs-2
10.99.12.203 ejucsmaster-shqs-3
10.99.12.204 ejucsnode-shqs-1
10.99.12.205 ejucsnode-shqs-2
10.99.12.206 ejucsnode-shqs-3

4. DNS resolver configuration

The nameservers below are the company's internal DNS servers.

cat <<EOF >  /etc/resolv.conf
nameserver 10.99.73.5
nameserver 10.99.73.6
EOF

5. NTP configuration (chronyd)

The servers below are the company's internal NTP servers.

yum install chrony -y

vi /etc/chrony.conf
... 
server 10.99.73.5 iburst
server 10.99.73.6 iburst


systemctl restart chronyd.service
systemctl enable chronyd.service
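
Optionally confirm that chronyd is actually using the internal servers (standard chrony commands, not part of the original steps):

chronyc sources -v    # 10.99.73.5/6 should be listed, one of them marked with '*'
chronyc tracking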

timedatectl 
      Local time: Wed 2020-07-08 15:36:37 CST
  Universal time: Wed 2020-07-08 07:36:37 UTC
        RTC time: Wed 2020-07-08 07:36:37
       Time zone: Asia/Shanghai (CST, +0800)
     NTP enabled: yes
NTP synchronized: yes
 RTC in local TZ: no
      DST active: n/a

6. Disable the firewall

yum install firewalld -y
systemctl stop firewalld.service
systemctl disable firewalld.service

7. Disable SELinux (the config change takes effect after a reboot; setenforce 0 disables it immediately for the current boot)

sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
setenforce 0

8. Disable swap

swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab

free should now show swap as 0.

9. Kernel parameter tuning

Note: the net.bridge.* keys only exist once the br_netfilter module is loaded (see step 12), so load it first or re-run sysctl -p afterwards.

vi /etc/sysctl.conf
...
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1

sysctl -p

10. ulimit settings

cat /proc/sys/fs/file-max
13062674

cat <<EOF > /etc/security/limits.d/90-nproc.conf
*          soft    nproc     50000
*          hard    nproc     60000
*          soft    nofile    1024000
*          hard    nofile    1024000
root       soft    nproc     unlimited
EOF
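
These limits only apply to new login sessions. After logging in again, a quick check (not in the original notes):

ulimit -n    # nofile; should report 1024000 per the file above
ulimit -u    # nproc; 50000 soft for ordinary users, unlimited for root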

11. Install common tools and development packages

yum -y install epel-release
yum -y install bridge-utils tcpdump screen lrzsz net-tools python-devel gcc wget curl zip unzip nc telnet vim bind-utils openssl-devel
yum -y install yum-plugin-priorities tcpdump screen net-tools gcc wget curl lrzsz zip unzip nc telnet bind-utils rsync vim traceroute sysstat perf iotop iftop strace dstat htop pciutils mtr tree git lsof nmap sudo ntpdate bzip2 gzip xz cmake autoconf automake pcre pcre-devel zlib zlib-devel libselinux-python python-simplejson nethogs nload iptraf multitail tmux atop saidar bmon libcurl-devel libattr-devel python-devel openssl-devel openldap-devel readline-devel gmp-devel libmcrypt-devel mhash-devel libxslt-devel libjpeg-devel freetype-devel libxml2-devel zlib-devel glib2-devel bzip2-devel ncurses-devel e2fsprogs-devel krb5-devel libidn-devel libffi-devel

12. Load kernel modules

yum install bridge-utils -y
modprobe br_netfilter
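
modprobe only loads br_netfilter for the current boot. A minimal sketch to make it persistent, assuming the standard systemd modules-load.d mechanism on CentOS 7 (the file name k8s.conf is just a convention):

cat <<EOF > /etc/modules-load.d/k8s.conf
br_netfilter
EOF

lsmod | grep br_netfilter    # verify the module is loaded (also needed for the net.bridge.* sysctls in step 9)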

II. Configure HAProxy + keepalived

An HA Kubernetes cluster needs a load balancer in front of the multiple apiserver instances (other approaches are possible as well). Here a TCP load balancer is built with HAProxy + keepalived, running on the three master nodes.

Host                 IP address     OS          Components             VIP
ejucsmaster-shqs-1   10.99.12.201   CentOS 7.8  haproxy + keepalived   10.99.12.200
ejucsmaster-shqs-2   10.99.12.202   CentOS 7.8  haproxy + keepalived   10.99.12.200
ejucsmaster-shqs-3   10.99.12.203   CentOS 7.8  haproxy + keepalived   10.99.12.200

For installing newer versions of keepalived and haproxy by compiling from source, see the separate posts: 编译安装keepalived 2.0.20 and 编译安装haproxy 1.8.25.

  • Install and configure haproxy
[All master nodes] Install keepalived and haproxy:
yum install keepalived haproxy -y

[haproxy configuration, identical on all master nodes]
vi /etc/haproxy/haproxy.cfg
global
    daemon
    nbproc    4
    user      haproxy
    group     haproxy
    maxconn   50000
    pidfile   /var/run/haproxy.pid
    log       127.0.0.1   local0
    chroot    /var/lib/haproxy

defaults
    log       global
    log       127.0.0.1   local0
    maxconn   50000
    retries   3
    balance   roundrobin
    option    httplog
    option    dontlognull
    option    httpclose
    option    abortonclose
    timeout   http-request 10s
    timeout   connect 10s
    timeout   server 1m
    timeout   client 1m
    timeout   queue 1m
    timeout   check 5s

listen stats :1234
    stats     enable
    mode      http
    option    httplog
    log       global
    maxconn   10
    stats     refresh 30s
    stats     uri /
    stats     hide-version
    stats     realm HAproxy
    stats     auth admin:xxxxxxxxxxx
    stats     admin if TRUE

listen kube-api-lb
    bind      0.0.0.0:8443
    balance   roundrobin
    mode      tcp
    option    tcplog
    server    ejucsmaster-shqs-1 10.99.12.201:6443 weight 1 maxconn 10000 check inter 10s rise 2 fall 3
    server    ejucsmaster-shqs-2 10.99.12.202:6443 weight 1 maxconn 10000 check inter 10s rise 2 fall 3
    server    ejucsmaster-shqs-3 10.99.12.203:6443 weight 1 maxconn 10000 check inter 10s rise 2 fall 3
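
Before enabling the service, the configuration file can be validated with haproxy's built-in check mode (standard haproxy flag, not in the original steps):

haproxy -c -f /etc/haproxy/haproxy.cfg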

systemctl enable haproxy
systemctl start haproxy
  • Install and configure keepalived
[keepalived configuration on all master nodes; adjust state and priority per node as noted below]
vi /etc/keepalived/keepalived.conf
global_defs {
   router_id csapiserver
}
vrrp_script chk_haproxy {
    script "/opt/app/keepalived/etc/keepalived/chk_haproxy.sh"
    interval 2
    weight 2
}

vrrp_instance VI_1 {
    state MASTER # set to BACKUP on the other two masters
    interface bond0
    virtual_router_id 51
    priority 101  # set to 100 and 99 on the other two masters
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.99.12.200
    }
    track_script {
        chk_haproxy
    }
}
# note the quoted heredoc delimiter: it keeps $(...) from being expanded while the file is written
cat <<'EOF' > /opt/app/keepalived/etc/keepalived/chk_haproxy.sh
#!/bin/bash
if [ $(ps -C haproxy --no-header | wc -l) -eq 0 ]; then
        systemctl start haproxy
fi
sleep 3
if [ $(ps -C haproxy --no-header | wc -l) -eq 0 ]; then
        systemctl stop keepalived
fi
EOF

chmod +x /opt/app/keepalived/etc/keepalived/chk_haproxy.sh

systemctl enable keepalived.service
systemctl start keepalived.service
netstat -pltn 
...
tcp        0      0 0.0.0.0:8443            0.0.0.0:*               LISTEN      50670/haproxy 

#[master-1]
ip add
bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
        link/ether 14:18:77:64:11:a1 brd ff:ff:ff:ff:ff:ff
        inet 10.99.12.201/24 brd 10.99.12.255 scope global bond0
        inet 10.99.12.200/32 scope global bond0
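
A simple failover test (optional; not in the original notes, but standard keepalived behavior given the priorities configured above):

# on master-1: stop keepalived and the VIP should move to master-2 within a few seconds
systemctl stop keepalived.service
# on master-2: confirm the VIP arrived
ip addr show bond0 | grep 10.99.12.200
# start keepalived on master-1 again; with the higher priority (101) it preempts and takes the VIP back
systemctl start keepalived.service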

III. Install docker-ce

1. [All nodes] Install dependencies

yum -y install yum-utils device-mapper-persistent-data lvm2 conntrack-tools bridge-utils ipvsadm

2. Add the Docker yum repository (domestic mirror) [all nodes]

yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum list docker-ce --showduplicates|sort -r
	docker-ce.x86_64            3:19.03.12-3.el7                   docker-ce-stable  # use this version
	docker-ce.x86_64            3:19.03.11-3.el7                   docker-ce-stable 
	docker-ce.x86_64            3:19.03.10-3.el7                   docker-ce-stable 
	docker-ce.x86_64            3:19.03.9-3.el7                    docker-ce-stable 
	docker-ce.x86_64            3:19.03.8-3.el7                    docker-ce-stable 
	docker-ce.x86_64            3:19.03.7-3.el7                    docker-ce-stable 
	docker-ce.x86_64            3:19.03.6-3.el7                    docker-ce-stable 
	docker-ce.x86_64            3:19.03.5-3.el7                    docker-ce-stable 
	docker-ce.x86_64            3:19.03.4-3.el7                    docker-ce-stable 
	docker-ce.x86_64            3:19.03.3-3.el7                    docker-ce-stable 
	docker-ce.x86_64            3:19.03.2-3.el7                    docker-ce-stable 
	docker-ce.x86_64            3:19.03.1-3.el7                    docker-ce-stable 
	docker-ce.x86_64            3:19.03.0-3.el7                    docker-ce-stable 

3. Install the specified docker-ce version [all nodes]

yum -y install docker-ce-19.03.12

mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "1024m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF

4. Install the yum versionlock plugin to pin the Docker version [all nodes]

yum -y install yum-plugin-versionlock
yum versionlock docker-ce
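
The lock can be confirmed with the plugin's list subcommand:

yum versionlock list    # docker-ce should now appear in the lock list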

5. Enable Docker at boot [all nodes]

mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload
systemctl enable docker.service
systemctl start docker.service
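
A quick check that Docker picked up the daemon.json settings (expected values follow from the configuration above):

docker info | grep -E 'Server Version|Storage Driver|Cgroup Driver'
# Server Version: 19.03.12
# Storage Driver: overlay2
# Cgroup Driver: systemd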

IV. [All nodes] Preparation before building the cluster with kubeadm

1. Yum repository setup

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
        http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum clean all && yum makecache

yum list kubelet --showduplicates|sort -r
	* kubelet.x86_64                       1.18.5-0                         kubernetes   # use this version
	kubelet.x86_64                       1.18.4-1                         kubernetes
	kubelet.x86_64                       1.18.4-0                         kubernetes
	kubelet.x86_64                       1.18.3-0                         kubernetes
	kubelet.x86_64                       1.18.2-0                         kubernetes
	kubelet.x86_64                       1.18.1-0                         kubernetes
	kubelet.x86_64                       1.18.0-0                         kubernetes

2. Install kubelet, kubeadm, and kubectl on all nodes

# 1.18.5 is the latest version at the time of writing; a specific version can also be pinned explicitly
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

systemctl enable --now kubelet.service
systemctl status kubelet.service # an error/restarting state here is expected; kubelet waits for kubeadm to generate its configuration

3. Pull the Kubernetes images on all nodes

When kubeadm sets up the cluster it pulls images from Google's official registry (k8s.gcr.io), which is not reachable from mainland China, so the images are pulled from a mirror registry first and then re-tagged. Some of the images are only needed on the master nodes, but for simplicity they are pulled on every node here.

# 查看需要的镜像
kubeadm config images list
	k8s.gcr.io/kube-apiserver:v1.18.5
	k8s.gcr.io/kube-controller-manager:v1.18.5
	k8s.gcr.io/kube-scheduler:v1.18.5
	k8s.gcr.io/kube-proxy:v1.18.5
	k8s.gcr.io/pause:3.2
	k8s.gcr.io/etcd:3.4.3-0
	k8s.gcr.io/coredns:1.6.7

docker pull daocloud.io/daocloud/kube-apiserver:v1.18.5
docker pull daocloud.io/daocloud/kube-controller-manager:v1.18.5
docker pull daocloud.io/daocloud/kube-scheduler:v1.18.5
docker pull daocloud.io/daocloud/kube-proxy:v1.18.5
docker pull daocloud.io/daocloud/pause:3.2
docker pull daocloud.io/daocloud/etcd:3.4.3-0
docker pull daocloud.io/daocloud/coredns:1.6.7

docker tag daocloud.io/daocloud/kube-apiserver:v1.18.5 k8s.gcr.io/kube-apiserver:v1.18.5
docker tag daocloud.io/daocloud/kube-controller-manager:v1.18.5 k8s.gcr.io/kube-controller-manager:v1.18.5
docker tag daocloud.io/daocloud/kube-scheduler:v1.18.5 k8s.gcr.io/kube-scheduler:v1.18.5
docker tag daocloud.io/daocloud/kube-proxy:v1.18.5 k8s.gcr.io/kube-proxy:v1.18.5
docker tag daocloud.io/daocloud/pause:3.2  k8s.gcr.io/pause:3.2
docker tag daocloud.io/daocloud/etcd:3.4.3-0  k8s.gcr.io/etcd:3.4.3-0
docker tag daocloud.io/daocloud/coredns:1.6.7  k8s.gcr.io/coredns:1.6.7

docker rmi daocloud.io/daocloud/kube-apiserver:v1.18.5       
docker rmi daocloud.io/daocloud/kube-controller-manager:v1.18.5
docker rmi daocloud.io/daocloud/kube-scheduler:v1.18.5
docker rmi daocloud.io/daocloud/kube-proxy:v1.18.5      
docker rmi daocloud.io/daocloud/pause:3.2       
docker rmi daocloud.io/daocloud/etcd:3.4.3-0           
docker rmi daocloud.io/daocloud/coredns:1.6.7 

# Note: you can also pull the images on a single node, save them to tar files, copy them to the other nodes and load them there; this is usually faster
docker save -o kube-apiserver_v1.18.5.tar k8s.gcr.io/kube-apiserver:v1.18.5
docker save -o kube-controller-manager_v1.18.5.tar k8s.gcr.io/kube-controller-manager:v1.18.5
docker save -o kube-scheduler_v1.18.5.tar k8s.gcr.io/kube-scheduler:v1.18.5
docker save -o kube-proxy_v1.18.5.tar k8s.gcr.io/kube-proxy:v1.18.5
docker save -o coredns_1.6.7.tar k8s.gcr.io/coredns:1.6.7
docker save -o etcd_3.4.3-0.tar k8s.gcr.io/etcd:3.4.3-0
docker save -o pause_3.2.tar k8s.gcr.io/pause:3.2

# scp the tar files to the other nodes, then load them there:

for var in $(ls);do docker load < $var;done


docker images
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-proxy                v1.18.5             a1daed4e2b60        12 days ago         117MB
k8s.gcr.io/kube-apiserver            v1.18.5             08ca24f16874        12 days ago         173MB
k8s.gcr.io/kube-controller-manager   v1.18.5             8d69eaf196dc        12 days ago         162MB
k8s.gcr.io/kube-scheduler            v1.18.5             39d887c6621d        12 days ago         95.3MB
k8s.gcr.io/pause                     3.2                 80d28bedfe5d        4 months ago        683kB
k8s.gcr.io/coredns                   1.6.7               67da37a9a360        5 months ago        43.8MB
k8s.gcr.io/etcd                      3.4.3-0             303ce5db0e90        8 months ago        288MB

V. Deploy Kubernetes on multiple master nodes (HA)

Run kubeadm init on the first master node, then join the other two masters.

vi kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.5
imageRepository: k8s.gcr.io
clusterName: ejucs-qiushi
certificatesDir: /etc/kubernetes/pki
apiServer:
  timeoutForControlPlane: 4m0s
  certSANs:
  - "csapi.ejuops.com"
  - 10.99.12.200
  - 10.99.12.201
  - 10.99.12.202
  - 10.99.12.203
  - ejucsmaster-shqs-1
  - ejucsmaster-shqs-2
  - ejucsmaster-shqs-3
controlPlaneEndpoint: "csapi.ejuops.com:8443"
dns:
  type: CoreDNS
networking:
  dnsDomain: cluster.local
  podSubnet: "192.168.0.0/16"
  serviceSubnet: 10.96.0.0/12

kubeadm init --config kubeadm-config.yaml --upload-certs

Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

[other master node]
kubeadm join csapi.ejuops.com:8443 --token fjojag.9rpzhxhj0ur0ng3i \
    --discovery-token-ca-cert-hash sha256:4ee64fd8046d5ff478d07e1e2d72efe38efd5bfe984c0eade248733159703d1a \
    --control-plane --certificate-key f8d5fe845b7826297c76d756510844cb257caf695b5355d9d2709e5fe38e7b9f

[worker node]
kubeadm join csapi.ejuops.com:8443 --token fjojag.9rpzhxhj0ur0ng3i \
    --discovery-token-ca-cert-hash sha256:4ee64fd8046d5ff478d07e1e2d72efe38efd5bfe984c0eade248733159703d1a 

Note: because --upload-certs was used here, step "2. Sync certificates to the other master nodes" below is no longer needed. On each master node that joins, set up kubectl access:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Note: if more nodes need to be added later and the kubeadm join parameters have been lost, they can be regenerated with:

kubeadm token create --print-join-command
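
That command prints the worker join line only, and the certificates uploaded with --upload-certs are only kept for about two hours. To join an additional control-plane node later, re-upload the certificates and use the freshly printed key:

kubeadm init phase upload-certs --upload-certs
# append the printed key to the join command as --control-plane --certificate-key <key>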

2. Sync certificates to the other master nodes

This step is only needed if --upload-certs was not used with kubeadm init; otherwise it can be skipped.

# on master1
USER=root
CONTROL_PLANE_IPS="10.99.12.202 10.99.12.203"
for host in ${CONTROL_PLANE_IPS}; do
    scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt
    scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key
    scp /etc/kubernetes/admin.conf "${USER}"@$host:
done

# on master2,3
USER=root
mkdir -p /etc/kubernetes/pki/etcd
mv /${USER}/ca.crt /etc/kubernetes/pki/
mv /${USER}/ca.key /etc/kubernetes/pki/
mv /${USER}/sa.pub /etc/kubernetes/pki/
mv /${USER}/sa.key /etc/kubernetes/pki/
mv /${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
mv /${USER}/admin.conf /etc/kubernetes/admin.conf

# Run on master 2 and master 3 to join them to the cluster
kubeadm join csapi.ejuops.com:8443 --token fjojag.9rpzhxhj0ur0ng3i \
    --discovery-token-ca-cert-hash sha256:4ee64fd8046d5ff478d07e1e2d72efe38efd5bfe984c0eade248733159703d1a \
    --control-plane --certificate-key f8d5fe845b7826297c76d756510844cb257caf695b5355d9d2709e5fe38e7b9f

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config


# kubectl get nodes
NAME                 STATUS   ROLES    AGE     VERSION
ejucsmaster-shqs-1   Ready    master   15m     v1.18.5
ejucsmaster-shqs-2   Ready    master   4m31s   v1.18.5
ejucsmaster-shqs-3   Ready    master   3m13s   v1.18.5

VI. Install the Calico network plugin [run on one master node]

Pick a Calico release to use; the latest at the time of writing, 3.15.1, is used here.
[Any one master node]
kubectl apply -f  https://docs.projectcalico.org/v3.15/manifests/calico.yaml
# kubectl get pod -n kube-system 
NAME                                         READY   STATUS    RESTARTS   AGE
calico-kube-controllers-578894d4cd-xclh6     1/1     Running   0          9m46s
calico-node-2swcp                            1/1     Running   0          9m46s
calico-node-8z74g                            1/1     Running   0          6m58s
calico-node-hdsjc                            1/1     Running   0          8m16s
coredns-66bff467f8-4hh52                     1/1     Running   0          18m
coredns-66bff467f8-xn6c2                     1/1     Running   0          18m
etcd-ejucsmaster-shqs-1                      1/1     Running   0          18m
etcd-ejucsmaster-shqs-2                      1/1     Running   0          8m12s
etcd-ejucsmaster-shqs-3                      1/1     Running   0          6m56s
kube-apiserver-ejucsmaster-shqs-1            1/1     Running   0          18m
kube-apiserver-ejucsmaster-shqs-2            1/1     Running   0          8m12s
kube-apiserver-ejucsmaster-shqs-3            1/1     Running   0          6m56s
kube-controller-manager-ejucsmaster-shqs-1   1/1     Running   1          18m
kube-controller-manager-ejucsmaster-shqs-2   1/1     Running   0          8m12s
kube-controller-manager-ejucsmaster-shqs-3   1/1     Running   0          6m56s
kube-proxy-8t2n7                             1/1     Running   0          6m58s
kube-proxy-d4bh5                             1/1     Running   0          18m
kube-proxy-mvt49                             1/1     Running   0          8m16s
kube-scheduler-ejucsmaster-shqs-1            1/1     Running   1          18m
kube-scheduler-ejucsmaster-shqs-2            1/1     Running   0          8m12s
kube-scheduler-ejucsmaster-shqs-3            1/1     Running   0          6m56s

VII. Join worker nodes to the cluster

Join all worker nodes to the cluster:

# [Run on all worker nodes]
kubeadm join csapi.ejuops.com:8443 --token fjojag.9rpzhxhj0ur0ng3i \
    --discovery-token-ca-cert-hash sha256:4ee64fd8046d5ff478d07e1e2d72efe38efd5bfe984c0eade248733159703d1a  

This node has joined the cluster:
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# kubectl get nodes
NAME                 STATUS   ROLES    AGE     VERSION
ejucsmaster-shqs-1   Ready    master   15m     v1.18.5
ejucsmaster-shqs-2   Ready    master   4m31s   v1.18.5
ejucsmaster-shqs-3   Ready    master   3m13s   v1.18.5
ejucsnode-shqs-1     Ready    <none>   104s    v1.18.5
ejucsnode-shqs-2     Ready    <none>   61s     v1.18.5
ejucsnode-shqs-3     Ready    <none>   27s     v1.18.5

[master]
kubectl label node ejucsnode-shqs-1 node-role.kubernetes.io/node=
kubectl label node ejucsnode-shqs-2 node-role.kubernetes.io/node=
kubectl label node ejucsnode-shqs-3 node-role.kubernetes.io/node=

# kubectl get nodes
NAME                 STATUS   ROLES    AGE     VERSION
ejucsmaster-shqs-1   Ready    master   17m     v1.18.5
ejucsmaster-shqs-2   Ready    master   6m57s   v1.18.5
ejucsmaster-shqs-3   Ready    master   5m39s   v1.18.5
ejucsnode-shqs-1     Ready    node     4m10s   v1.18.5
ejucsnode-shqs-2     Ready    node     3m27s   v1.18.5
ejucsnode-shqs-3     Ready    node     2m53s   v1.18.5

# kubectl get nodes --show-labels
NAME                 STATUS   ROLES    AGE   VERSION   LABELS
ejucsmaster-shqs-1   Ready    master   9h    v1.18.5   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=ejucsmaster-shqs-1,kubernetes.io/os=linux,node-role.kubernetes.io/master=
ejucsmaster-shqs-2   Ready    master   8h    v1.18.5   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=ejucsmaster-shqs-2,kubernetes.io/os=linux,node-role.kubernetes.io/master=
ejucsmaster-shqs-3   Ready    master   8h    v1.18.5   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=ejucsmaster-shqs-3,kubernetes.io/os=linux,node-role.kubernetes.io/master=
ejucsnode-shqs-1     Ready    node     8h    v1.18.5   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=ejucsnode-shqs-1,kubernetes.io/os=linux,node-role.kubernetes.io/node=
ejucsnode-shqs-2     Ready    node     8h    v1.18.5   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=ejucsnode-shqs-2,kubernetes.io/os=linux,node-role.kubernetes.io/node=
ejucsnode-shqs-3     Ready    node     8h    v1.18.5   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=ejucsnode-shqs-3,kubernetes.io/os=linux,node-role.kubernetes.io/node=

VIII. Demo application

This example comes from the internet (apparently from a Kubernetes book) and is used here purely as a smoke test: a Tomcat application plus a MySQL database.

# cat myweb.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myweb
spec:
  replicas: 2
  selector:
    matchLabels:
      app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
      - name: myweb
        image: kubeguide/tomcat-app:v1
        ports:
        - containerPort: 8080
        env:
        - name: MYSQL_SERVICE_HOST
          value: 'mysql'
        - name: MYSQL_SERVICE_PORT
          value: '3306'

---
apiVersion: v1
kind: Service
metadata:
  name: myweb
spec:
  type: NodePort
  ports:
  - port: 8080
    nodePort: 30001
  selector:
    app: myweb

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - name: mysql
        image: mysql:5.7
        ports:
        - containerPort: 3306 
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "123456"

---
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
  - port: 3306
  selector:
    app: mysql

kubectl apply -f myweb.yml

# kubectl get deployment -n default
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
mysql   1/1     1            1           3m
myweb   2/2     2            2           3m

# kubectl get pods -n default
NAME                     READY   STATUS    RESTARTS   AGE
mysql-76999dd7c8-l96w9   1/1     Running   0          3m
myweb-76c7df5b6c-hjw7p   1/1     Running   0          3m
myweb-76c7df5b6c-zdzbb   1/1     Running   0          3m

# kubectl get svc -n default
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP          2h
mysql        ClusterIP   10.109.235.163   <none>        3306/TCP         4m
myweb        NodePort    10.96.26.71      <none>        8080:30001/TCP   4m

Access the demo application via NodePort 30001 on any node.
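
A quick check from the command line (the node IP here is just an example; any node address works because the Service is a NodePort):

curl -I http://10.99.12.204:30001/
# an HTTP response from Tomcat indicates the Service and NodePort are working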

(Screenshot of the demo page omitted.)

IX. Continuing

At this point the Kubernetes HA cluster is deployed and basically usable. For production use, however, quite a few pieces are still missing, such as shared storage volumes, monitoring, metrics collection, and a logging solution.
