Learning k8s Together, Part 39: Building a Highly Available Kubernetes Cluster with kubeadm

A few words about kubeadm

kubeadm makes deployment easy, but it comes from upstream: the control-plane components run as static pods (containers), and the images live in the gcr.io registry.
Many Docker users don't even realize that registries have domains. A Docker image name has the form domain/repo/img_name:tag; on Docker Hub you can pull with just repo/img_name:tag because the registry domain defaults to docker.io, so everything you see on Docker Hub belongs to that registry.
Common registries abroad include gcr.io and quay.io; in China there are Alibaba's (registry.cn-hangzhou.aliyuncs.com, with shenzhen and other regions besides hangzhou), daocloud.io, and so on. gcr.io is hosted overseas and often cannot be reached from China, but Alibaba's registry mirrors the gcr.io/google_containers images.
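
For example, the two pulls below fetch exactly the same image, because the registry defaults to docker.io and the namespace for official images defaults to library. The third line just shows the fully qualified form; the Aliyun image is illustrative and assumes that mirror hosts the tag.

# these two commands pull the same image: registry docker.io, repository library, image nginx, tag latest
docker pull nginx
docker pull docker.io/library/nginx:latest
# a fully qualified name spells out the registry, e.g. an image mirrored on Aliyun
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1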

Some people still feel that running the control plane in containers with kubeadm is less trustworthy than running the binaries directly. A container is just an isolated, resource-restricted process, and the control-plane components are stateless, yet the uneasiness persists.
In fact every component except the kubelet can run in a container. In short: the cluster state is stored in the etcd database, the apiserver talks to etcd, all other components talk to the apiserver, the kubelet drives Docker through its API, and some components also adjust system settings on each node.
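
A quick way to see this in practice once a master has been initialized: kubeadm writes the control-plane components as static pod manifests under /etc/kubernetes/manifests, and the kubelet simply runs whatever it finds there (a minimal check, not needed yet at this point in the guide):

# the static pod manifests kubeadm writes on a master
ls /etc/kubernetes/manifests/
# etcd.yaml  kube-apiserver.yaml  kube-controller-manager.yaml  kube-scheduler.yaml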

Environment preparation

Run on all nodes

Host layout

Use CentOS 7.6 or later and avoid CentOS 7.5 and below. Container technology depends heavily on the kernel, and low-version systems tend to run into a lot of problems during deployment and operation.
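
A quick sanity check of the release and kernel on every node (assuming CentOS; adjust for your distribution):

cat /etc/redhat-release
uname -r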

cat /etc/hosts

127.0.0.1 apiserver.k8s.local
192.168.33.101 master01
192.168.33.102 master02
192.168.33.103 master03
192.168.33.201 node01
192.168.33.202 node02
192.168.33.203 node03
IP               Hostname   Kernel        CPU   Memory
192.168.33.101   master01   3.10.0-1062   2     4G
192.168.33.102   master02   3.10.0-1062   2     4G
192.168.33.103   master03   3.10.0-1062   2     4G
192.168.33.201   node01     3.10.0-1062   2     4G
192.168.33.202   node02     3.10.0-1062   2     4G
192.168.33.203   node03     3.10.0-1062   2     4G
  • kubeadm's preflight checks require a minimum of roughly 2 CPUs and 2 GB of memory per node
  • Do everything as root, and make sure the root filesystem is large: once images fill the disk past about 85%, the kubelet's image GC starts reclaiming them (see the quick checks after this list)
  • For high availability an odd number of masters (3 or more) is recommended; we use 3 masters here
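
A few quick checks against the points above (a minimal sketch; the 85% figure corresponds to the kubelet's default imageGCHighThresholdPercent):

# CPU and memory per node; kubeadm's preflight checks want at least 2 CPUs and about 2 GB on masters
nproc
free -h
# free space on the root filesystem, where /var/lib/docker and /var/lib/kubelet will live
df -h /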

Disable the firewall, swap, and SELinux

# disable the firewall and NetworkManager
systemctl disable --now firewalld NetworkManager

# disable swap
swapoff -a
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

# disable SELinux
setenforce 0
sed -ri '/^[^#]*SELINUX=/s#=.+$#=disabled#' /etc/selinux/config

yum preparation

yum install epel-release -y
yum install -y \
    curl \
    wget \
    git \
    conntrack-tools \
    psmisc \
    nfs-utils \
    jq \
    tree \
    socat \
    bash-completion \
    ipset \
    ipvsadm \
    conntrack \
    libseccomp \
    net-tools \
    crontabs \
    sysstat \
    unzip \
    iftop \
    nload \
    strace \
    bind-utils \
    tcpdump \
    telnet \
    lsof \
    htop

Load the IPVS kernel modules

:> /etc/modules-load.d/ipvs.conf
module=(
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
br_netfilter
  )
for kernel_module in ${module[@]};do
    /sbin/modinfo -F filename $kernel_module |& grep -qv ERROR && echo $kernel_module >> /etc/modules-load.d/ipvs.conf || :
done
systemctl daemon-reload
systemctl enable --now systemd-modules-load.service
$ lsmod | grep ip_vs
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  11 
ip_vs                 145497  17 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          133095  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c              12644  3 ip_vs,nf_nat,nf_conntrack

Kubernetes kernel parameters (sysctl)

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
vm.overcommit_memory=1
vm.panic_on_oom=0
vm.swappiness = 0
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
EOF

sysctl --system

File descriptor and process limits

cat>/etc/security/limits.d/kubernetes.conf<<EOF
*       soft    nproc   131072
*       hard    nproc   131072
*       soft    nofile  131072
*       hard    nofile  131072
root    soft    nproc   131072
root    hard    nproc   131072
root    soft    nofile  131072
root    hard    nofile  131072
EOF

Docker installation

Docker yum repository
cd /etc/yum.repos.d/  &&  wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Official kernel check script

Docker's official kernel check script recommends: RHEL7/CentOS7: User namespaces disabled; add 'user_namespace.enable=1' to boot command line.

grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"

# then reboot
reboot
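
If you want to run the check yourself, and to confirm the boot parameter after the reboot, a minimal sketch (the script URL is assumed from the moby/moby repository layout):

# optional: Docker's kernel config check script
curl -fsSL https://raw.githubusercontent.com/moby/moby/master/contrib/check-config.sh | bash
# after the reboot, confirm the kernel argument is active
grep -o 'user_namespace.enable=1' /proc/cmdline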
Install Docker
yum install docker-ce -y
Configure Docker
cp /usr/share/bash-completion/completions/docker /etc/bash_completion.d/
mkdir -p /etc/docker/
cat>/etc/docker/daemon.json<<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://2lefsjdg.mirror.aliyuncs.com"],
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  }
}
EOF
Start Docker
systemctl enable --now docker
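
The kubelet will be configured with cgroupDriver: systemd later, so it is worth confirming Docker really picked up the systemd cgroup driver from daemon.json:

# should print: Cgroup Driver: systemd
docker info 2>/dev/null | grep -i 'cgroup driver'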

kubeadm deployment

kubeadm yum repository

Run on all nodes

cat <<EOF >/etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF

Install on the master nodes

yum install -y \
    kubeadm-1.16.4 \
    kubectl-1.16.4 \
    kubelet-1.16.4 \
    --disableexcludes=kubernetes && \
    systemctl enable kubelet

Install on the worker nodes

yum install -y \
    kubeadm-1.16.4 \
    kubelet-1.16.4 \
    --disableexcludes=kubernetes && \
    systemctl enable kubelet

Master high availability

Deploy an nginx container on every node to load-balance the apiservers

mkdir -p /etc/kubernetes

cat > /etc/kubernetes/nginx.conf << 'EOF'
user nginx nginx;
worker_processes auto;
events {
    worker_connections  20240;
    use epoll;
}
error_log /var/log/nginx_error.log info;

stream {
    upstream kube-servers {
        hash $remote_addr consistent;
        server master01:6443 weight=5 max_fails=1 fail_timeout=3s;
        server master02:6443 weight=5 max_fails=1 fail_timeout=3s;
        server master03:6443 weight=5 max_fails=1 fail_timeout=3s;
    }

    server {
        listen 8443 reuseport;
        proxy_connect_timeout 3s;
        proxy_timeout 3000s;
        proxy_pass kube-servers;
    }
}
EOF
docker run --restart=always \
    -v /etc/kubernetes/nginx.conf:/etc/nginx/nginx.conf \
    -v /etc/localtime:/etc/localtime:ro \
    --name k8sHA \
    --net host \
    -d \
    nginx
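
A quick check on each node that the local load balancer is running and listening on 8443 (end-to-end connections will only succeed once the first apiserver is up, which is expected at this stage):

docker ps | grep k8sHA
ss -tlnp | grep 8443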

kubeadm configuration file

Run on master01

cat > /root/initconfig.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
imageRepository: gcr.azk8s.cn/google_containers
kubernetesVersion: v1.16.4
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
networking: 
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
controlPlaneEndpoint: apiserver.k8s.local:8443
apiServer:
  timeoutForControlPlane: 4m0s
  extraArgs:
    authorization-mode: "Node,RBAC"
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,Priority,PodPreset"
    runtime-config: api/all,settings.k8s.io/v1alpha1=true
    storage-backend: etcd3
    etcd-servers: https://192.168.33.101:2379,https://192.168.33.102:2379,https://192.168.33.103:2379
  certSANs:
  - 10.96.0.1
  - 127.0.0.1
  - localhost
  - apiserver.k8s.local
  - 192.168.33.101
  - 192.168.33.102
  - 192.168.33.103
  - master01
  - master02
  - master03
  - master
  - kubernetes
  - kubernetes.default 
  - kubernetes.default.svc 
  - kubernetes.default.svc.cluster.local
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    readOnly: true
controllerManager:
  extraArgs:
    bind-address: "0.0.0.0"
    experimental-cluster-signing-duration: 867000h
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    readOnly: true
scheduler: 
  extraArgs:
    bind-address: "0.0.0.0"
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    readOnly: true
dns:
  type: CoreDNS
  imageRepository: dockerhub.azk8s.cn/coredns
  imageTag: 1.6.3
etcd:
  local:
    imageRepository: quay.azk8s.cn/coreos
    imageTag: v3.3.17
    dataDir: /var/lib/etcd
    serverCertSANs:
    - master
    - 192.168.33.101
    - 192.168.33.102
    - 192.168.33.103
    - master01
    - master02
    - master03
    peerCertSANs:
    - master
    - 192.168.33.101
    - 192.168.33.102
    - 192.168.33.103
    - master01
    - master02
    - master03
    extraArgs:
      auto-compaction-retention: "1h"
      max-request-bytes: "33554432"
      quota-backend-bytes: "8589934592"
      enable-v2: "false"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
ipvs:
  excludeCIDRs: null
  minSyncPeriod: 0s
  scheduler: "rr"
  strictARP: false
  syncPeriod: 15s
iptables:
  masqueradeAll: true
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
failSwapOn: true
EOF

Check the config for mistakes and ignore warnings; real problems are raised as errors. If everything is fine, the output ends with something containing the string kubeadm join xxx.

kubeadm init --config /root/initconfig.yaml --dry-run

Pre-pull the images

kubeadm config images pull --config /root/initconfig.yaml

Deploy the masters

Run on master01

kubeadm init --config /root/initconfig.yaml --upload-certs

...
...
...
You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join apiserver.k8s.local:8443 --token 8lmdqu.cqe8r0rxa0056vmm \
    --discovery-token-ca-cert-hash sha256:5ca87fff6b414a0872ab5452972d7e36e4bad7ab3a0bc385abe0138ce671eabb \
    --control-plane --certificate-key 7a1d432b2834464a82fd7cba0e9e5d8409c492cf9a4ee6328fb4f84b6a78934a

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use 
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join apiserver.k8s.local:8443 --token 8lmdqu.cqe8r0rxa0056vmm \
    --discovery-token-ca-cert-hash sha256:5ca87fff6b414a0872ab5452972d7e36e4bad7ab3a0bc385abe0138ce671eabb 

Copy the kubeconfig for kubectl; kubectl's default kubeconfig path is ~/.kube/config

mkdir -p $HOME/.kube
sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

The init YAML is actually stored in a ConfigMap in the cluster, so it can be inspected at any time; it is also used when the other masters and nodes join.

kubectl -n kube-system get cm kubeadm-config -o yaml
Set up RBAC for the health endpoint

kube-apiserver's HTTP health-check route is protected by authorization; we open it up so it can be used for monitoring or for an SLB health check.

cat > /root/healthz-rbac.yml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: healthz-reader
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: healthz-reader
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:authenticated
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:unauthenticated
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: healthz-reader
rules:
- nonResourceURLs: ["/healthz", "/healthz/*"]
  verbs: ["get", "post"]
EOF
kubectl apply -f /root/healthz-rbac.yml 
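
With the binding applied, an unauthenticated request to /healthz through the load-balanced endpoint should return ok, which is exactly what a monitoring probe or SLB health check will do (-k because such probes usually don't carry the cluster CA):

curl -k https://apiserver.k8s.local:8443/healthz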
Copy the certificates and config to the other masters
for node in 192.168.33.102 192.168.33.103;do
    ssh $node 'mkdir -p /etc/kubernetes/pki/etcd'
    scp -r /root/initconfig.yaml $node:/root/initconfig.yaml
    scp -r /etc/kubernetes/pki/ca.* $node:/etc/kubernetes/pki/
    scp -r /etc/kubernetes/pki/sa.* $node:/etc/kubernetes/pki/
    scp -r /etc/kubernetes/pki/front-proxy-ca.* $node:/etc/kubernetes/pki/
    scp -r /etc/kubernetes/pki/etcd/ca.* $node:/etc/kubernetes/pki/etcd/
done
Join the other masters
kubeadm config images pull --config /root/initconfig.yaml
kubeadm join apiserver.k8s.local:8443 --token 8lmdqu.cqe8r0rxa0056vmm \
    --discovery-token-ca-cert-hash sha256:5ca87fff6b414a0872ab5452972d7e36e4bad7ab3a0bc385abe0138ce671eabb \
    --control-plane --certificate-key 7a1d432b2834464a82fd7cba0e9e5d8409c492cf9a4ee6328fb4f84b6a78934a
Configure kubectl on all masters
Prepare the kubectl kubeconfig
mkdir -p $HOME/.kube
sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Set up kubectl bash completion
kubectl completion bash > /etc/bash_completion.d/kubectl
source  /etc/bash_completion.d/kubectl
Configure etcdctl on all masters

Copy etcdctl out of the etcd container

docker cp `docker ps -a | awk '/k8s_etcd/{print $1}'`:/usr/local/bin/etcdctl /usr/local/bin/etcdctl
cat >/etc/profile.d/etcd.sh<<'EOF'
ETCD_CERT_DIR=/etc/kubernetes/pki/etcd/
ETCD_CA_FILE=ca.crt
ETCD_KEY_FILE=healthcheck-client.key
ETCD_CERT_FILE=healthcheck-client.crt
ETCD_EP=https://192.168.33.101:2379,https://192.168.33.102:2379,https://192.168.33.103:2379

alias etcd_v3="ETCDCTL_API=3 \
    etcdctl   \
   --cert ${ETCD_CERT_DIR}/${ETCD_CERT_FILE} \
   --key ${ETCD_CERT_DIR}/${ETCD_KEY_FILE} \
   --cacert ${ETCD_CERT_DIR}/${ETCD_CA_FILE} \
   --endpoints $ETCD_EP"
EOF
source  /etc/profile.d/etcd.sh 
etcd_v3 endpoint status --write-out=table

+-----------------------------+------------------+---------+---------+-----------+-----------+------------+
|          ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+-----------------------------+------------------+---------+---------+-----------+-----------+------------+
| https://192.168.33.101:2379 | c724c500884441af |  3.3.17 |  1.6 MB |      true |         7 |       1865 |
| https://192.168.33.102:2379 | 3dcceec24ad5c5d4 |  3.3.17 |  1.6 MB |     false |         7 |       1865 |
| https://192.168.33.103:2379 | bc21062efb4a5d4c |  3.3.17 |  1.5 MB |     false |         7 |       1865 |
+-----------------------------+------------------+---------+---------+-----------+-----------+------------+

etcd_v3 endpoint health --write-out=table
+-----------------------------+--------+-------------+-------+
|          ENDPOINT           | HEALTH |    TOOK     | ERROR |
+-----------------------------+--------+-------------+-------+
| https://192.168.33.103:2379 |   true | 19.288026ms |       |
| https://192.168.33.102:2379 |   true |   19.2603ms |       |
| https://192.168.33.101:2379 |   true | 22.490443ms |       |
+-----------------------------+--------+-------------+-------+

Configure an etcd backup script

mkdir -p /opt/etcd
cat>/opt/etcd/etcd_cron.sh<<'EOF'
#!/bin/bash
set -e

export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin

:  ${bak_dir:=/root/} # default backup directory; change it to an existing directory if needed
:  ${cert_dir:=/etc/kubernetes/pki/etcd/}
:  ${endpoints:=https://192.168.33.101:2379,https://192.168.33.102:2379,https://192.168.33.103:2379}

bak_prefix='etcd-'
cmd_suffix='date +%Y-%m-%d-%H:%M'
bak_suffix='.db'

# assign the normalized command-line options to the positional parameters ($1, $2, ...)
temp=`getopt -n $0 -o c:d: -u -- "$@"`

[ $? != 0 ] && {
    echo '
Examples:
  # just save once
  bash $0 /tmp/etcd.db
  # save from crontab and keep 5 copies
  bash $0 -c 5
    '
    exit 1
    }
set -- $temp


# -c  number of backup copies to keep
# -d  directory to store the backups in

while true;do
    case "$1" in
        -c)
            [ -z "$bak_count" ] && bak_count=$2
            printf -v null %d "$bak_count" &>/dev/null || \
                { echo 'the value of the -c must be number';exit 1; }
            shift 2
            ;;
        -d)
            [ ! -d "$2" ] && mkdir -p $2
            bak_dir=$2
            shift 2
            ;;
         *)
            [[ -z "$1" || "$1" == '--' ]] && { shift;break; }
            echo "Internal error!"
            exit 1
            ;;
    esac
done



function etcd_v3(){

    ETCDCTL_API=3 etcdctl   \
       --cert $cert_dir/healthcheck-client.crt \
       --key  $cert_dir/healthcheck-client.key \
       --cacert $cert_dir/ca.crt \
       --endpoints $endpoints $@
}

etcd::cron::save(){
    cd $bak_dir/
    etcd_v3 snapshot save  $bak_prefix$($cmd_suffix)$bak_suffix
    rm_files=`ls -t $bak_prefix*$bak_suffix | tail -n +$[bak_count+1]`
    if [ -n "$rm_files" ];then
        rm -f $rm_files
    fi
}

main(){
    [ -n "$bak_count" ] && etcd::cron::save || etcd_v3 snapshot save $@
}

main $@
EOF

Add the following via crontab -e to automatically keep four backup copies

0 0 * * * bash /opt/etcd/etcd_cron.sh  -c 4 -d /opt/etcd/ &>/dev/null
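
To confirm a saved snapshot is usable, etcdctl can report its hash, revision and size; a minimal sketch (the file name below is illustrative):

source /etc/profile.d/etcd.sh
etcd_v3 snapshot status /opt/etcd/etcd-2020-01-01-00:00.db --write-out=table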

Deploy the nodes

Run on the worker nodes

Same as joining a master: prepare the environment and Docker beforehand, then run the join command without --control-plane.

kubeadm join apiserver.k8s.local:8443 --token 8lmdqu.cqe8r0rxa0056vmm \
    --discovery-token-ca-cert-hash sha256:5ca87fff6b414a0872ab5452972d7e36e4bad7ab3a0bc385abe0138ce671eabb

Label the nodes

The role shown in ROLES is just a label; you can label nodes with node-role.kubernetes.io/xxxx to display whatever you want there.

[root@master01 ~]# kubectl get nodes
NAME       STATUS     ROLES    AGE   VERSION
master01   NotReady   master   17m   v1.16.4
master02   NotReady   master   14m   v1.16.4
master03   NotReady   master   13m   v1.16.4
node01     NotReady   <none>   24s   v1.16.4
node02     NotReady   <none>   18s   v1.16.4
node03     NotReady   <none>   11s   v1.16.4
[root@master01 ~]# kubectl label node node01 node-role.kubernetes.io/node=""
node/node01 labeled
[root@master01 ~]# kubectl label node node02 node-role.kubernetes.io/node=""
node/node02 labeled
[root@master01 ~]# kubectl label node node03 node-role.kubernetes.io/node=""
node/node03 labeled

[root@master01 ~]# kubectl get nodes 
NAME       STATUS     ROLES    AGE     VERSION
master01   NotReady   master   25m     v1.16.4
master02   NotReady   master   22m     v1.16.4
master03   NotReady   master   21m     v1.16.4
node01     NotReady   node     8m      v1.16.4
node02     NotReady   node     7m54s   v1.16.4
node03     NotReady   node     7m47s   v1.16.4

Deploy the Calico network plugin

Without a network plugin every node stays NotReady.

Run on master01

curl https://docs.projectcalico.org/v3.11/manifests/calico.yaml -O
sed -i -e "s?192.168.0.0/16?10.244.0.0/16?g" calico.yaml
kubectl apply -f calico.yaml

Testing

Verify cluster availability

The most basic 3-master, 3-node cluster is now built. It must be running:

  • 3 kube-apiserver
  • 3 kube-controller-manager
  • 3 kube-scheduler
  • 6 kube-proxy
  • 6 calico-node
  • 1 calico-kube-controllers
  • 2 coredns
[root@master01 ~]# kubectl get pods --all-namespaces 
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-648f4868b8-6pcqf   1/1     Running   0          2m10s
kube-system   calico-node-d4hqw                          1/1     Running   0          2m10s
kube-system   calico-node-glmcl                          1/1     Running   0          2m10s
kube-system   calico-node-qm8zz                          1/1     Running   0          2m10s
kube-system   calico-node-s64r9                          1/1     Running   0          2m10s
kube-system   calico-node-shxhv                          1/1     Running   0          2m10s
kube-system   calico-node-zx7nw                          1/1     Running   0          2m10s
kube-system   coredns-7b8f8b6cf6-kh22h                   1/1     Running   0          14m
kube-system   coredns-7b8f8b6cf6-vp9x6                   1/1     Running   0          14m
kube-system   etcd-master01                              1/1     Running   0          35m
kube-system   etcd-master02                              1/1     Running   0          33m
kube-system   etcd-master03                              1/1     Running   0          32m
kube-system   kube-apiserver-master01                    1/1     Running   0          35m
kube-system   kube-apiserver-master02                    1/1     Running   0          33m
kube-system   kube-apiserver-master03                    1/1     Running   0          31m
kube-system   kube-controller-manager-master01           1/1     Running   1          34m
kube-system   kube-controller-manager-master02           1/1     Running   0          33m
kube-system   kube-controller-manager-master03           1/1     Running   0          31m
kube-system   kube-proxy-2zbx4                           1/1     Running   0          32m
kube-system   kube-proxy-bbvqk                           1/1     Running   0          19m
kube-system   kube-proxy-j8899                           1/1     Running   0          33m
kube-system   kube-proxy-khrw5                           1/1     Running   0          19m
kube-system   kube-proxy-srpz9                           1/1     Running   0          19m
kube-system   kube-proxy-tz24q                           1/1     Running   0          36m
kube-system   kube-scheduler-master01                    1/1     Running   1          35m
kube-system   kube-scheduler-master02                    1/1     Running   0          33m
kube-system   kube-scheduler-master03                    1/1     Running   0          31m

[root@master01 ~]# kubectl get  nodes
NAME       STATUS   ROLES    AGE   VERSION
master01   Ready    master   37m   v1.16.4
master02   Ready    master   34m   v1.16.4
master03   Ready    master   33m   v1.16.4
node01     Ready    node     19m   v1.16.4
node02     Ready    node     19m   v1.16.4
node03     Ready    node     19m   v1.16.4

demo

cat<<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx:alpine
        name: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  selector:
    app: nginx
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
[root@master01 ~]# kubectl get all  -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP               NODE     NOMINATED NODE   READINESS GATES
pod/busybox                  1/1     Running   0          73s   10.244.186.194   node03   <none>           <none>
pod/nginx-5c559d5697-24zck   1/1     Running   0          73s   10.244.186.193   node03   <none>           <none>

NAME                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE   SELECTOR
service/kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP   42m   <none>
service/nginx        ClusterIP   10.111.219.3   <none>        80/TCP    73s   app=nginx

NAME                    READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES         SELECTOR
deployment.apps/nginx   1/1     1            1           73s   nginx        nginx:alpine   app=nginx

NAME                               DESIRED   CURRENT   READY   AGE   CONTAINERS   IMAGES         SELECTOR
replicaset.apps/nginx-5c559d5697   1         1         1       73s   nginx        nginx:alpine   app=nginx,pod-template-hash=5c559d5697
Verify cluster DNS
[root@master01 ~]# kubectl exec -ti busybox -- nslookup kubernetes
Server:		10.96.0.10
Address:	10.96.0.10#53

Name:	kubernetes.default.svc.cluster.local
Address: 10.96.0.1
Test that nginx is reachable

On a master, curl the nginx pod IP; getting the nginx index page back means the cluster is working.

[root@master01 ~]# curl 10.244.186.193
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

On a master, curl the nginx Service IP; getting the nginx index page back means the cluster is working.

[root@master01 ~]# curl 10.111.219.3
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

From inside a pod, verify that the cluster DNS name resolves and routes to the pod correctly.

[root@master01 ~]# kubectl exec -ti busybox -- curl nginx
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

[root@master01 ~]# kubectl exec -ti busybox -- nslookup nginx
Server:		10.96.0.10
Address:	10.96.0.10#53

Name:	nginx.default.svc.cluster.local
Address: 10.111.219.3
Verify IPVS
[root@node01 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 192.168.33.101:6443          Masq    1      1          0         
  -> 192.168.33.102:6443          Masq    1      0          0         
  -> 192.168.33.103:6443          Masq    1      1          0         
TCP  10.96.0.10:53 rr
  -> 10.244.140.65:53             Masq    1      0          0         
  -> 10.244.140.67:53             Masq    1      0          0         
TCP  10.96.0.10:9153 rr
  -> 10.244.140.65:9153           Masq    1      0          0         
  -> 10.244.140.67:9153           Masq    1      0          0         
TCP  10.111.219.3:80 rr
  -> 10.244.186.193:80            Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 10.244.140.65:53             Masq    1      0          0         
  -> 10.244.140.67:53             Masq    1      0          0         