Deploying a Kubernetes Cluster with kubeadm

 

I. Environment Preparation

1) Host list

Hostname     | IPv4         | Role(s)                | OS version | Disk | Data partition
k8s-master1  | 172.18.3.200 | master/haproxy         | CentOS 7.6 | 50GB | /data
k8s-master2  | 172.18.3.201 | master/haproxy         | CentOS 7.6 | 50GB | /data
k8s-master3  | 172.18.3.207 | master                 | CentOS 7.6 | 50GB | /data
k8s-node1    | 172.18.3.202 | node/etcd              | CentOS 7.6 | 50GB | /data
k8s-node2    | 172.18.3.203 | node/etcd              | CentOS 7.6 | 50GB | /data
k8s-node3    | 172.18.3.204 | node/etcd              | CentOS 7.6 | 50GB | /data
k8s-ingress  | 172.18.3.205 | master/node/ingress    | CentOS 7.6 | 50GB | /data
-            | 172.18.3.206 | haproxy/keepalived/VIP | CentOS 7.6 | -    | -
2) Host initialization

2.1 Configure the hosts file

# cat /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

172.18.3.200 k8s-master1

172.18.3.201 k8s-master2

172.18.3.207 k8s-master3

172.18.3.202 k8s-node1

172.18.3.203 k8s-node2

172.18.3.204 k8s-node3

172.18.3.205 k8s-ingress

2.2 Disable the firewall

# systemctl disable firewalld

Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.

Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

2.3 Disable SELinux

setenforce 0

sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config

2.4 Disable swap

swapoff -a

yes | cp /etc/fstab /etc/fstab_bak

cat /etc/fstab_bak |grep -v swap > /etc/fstab

2.5 Time synchronization

systemctl enable ntpdate.service

echo '*/30 * * * * /usr/sbin/ntpdate time7.aliyun.com >/dev/null 2>&1' > /tmp/crontab2.tmp

crontab /tmp/crontab2.tmp

systemctl start ntpdate.service

 

ntpdate -u ntp.api.bz

2.6 File descriptor and process limits

echo "* soft nofile 65536" >> /etc/security/limits.conf

echo "* hard nofile 65536" >> /etc/security/limits.conf

echo "* soft nproc 65536"  >> /etc/security/limits.conf

echo "* hard nproc 65536"  >> /etc/security/limits.conf

echo "* soft  memlock  unlimited"  >> /etc/security/limits.conf

echo "* hard memlock  unlimited"  >> /etc/security/limits.conf

2.7 Kernel parameters

[root@localhost ~]# cat /etc/sysctl.conf

net.ipv6.conf.all.disable_ipv6 = 1

net.ipv6.conf.default.disable_ipv6 = 1

net.ipv6.conf.lo.disable_ipv6 = 1

vm.swappiness = 0

net.ipv4.neigh.default.gc_stale_time=120

net.ipv4.ip_forward = 1

# see details in https://help.aliyun.com/knowledge_detail/39428.html

net.ipv4.conf.all.rp_filter=0

net.ipv4.conf.default.rp_filter=0

net.ipv4.conf.default.arp_announce = 2

net.ipv4.conf.lo.arp_announce=2

net.ipv4.conf.all.arp_announce=2

# see details in https://help.aliyun.com/knowledge_detail/41334.html

net.ipv4.tcp_max_tw_buckets = 5000

net.ipv4.tcp_syncookies = 1

net.ipv4.tcp_max_syn_backlog = 1024

net.ipv4.tcp_synack_retries = 2

kernel.sysrq = 1

# make bridged traffic visible to iptables

net.bridge.bridge-nf-call-ip6tables = 1

net.bridge.bridge-nf-call-iptables = 1

net.bridge.bridge-nf-call-arptables = 1

 

 

modprobe br_netfilter

sysctl -p
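To make sure br_netfilter is still loaded after a reboot, it can also be registered with systemd's module loader; a minimal sketch:

echo br_netfilter > /etc/modules-load.d/br_netfilter.conf    # systemd-modules-load loads it at boot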

2.8 Configure the Kubernetes yum repository

cat <<EOF > /etc/yum.repos.d/kubernetes.repo

[kubernetes]

name=Kubernetes

baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/

enabled=1

gpgcheck=1

repo_gpgcheck=1

gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

EOF
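To confirm the repository is reachable and to see which versions it offers, something along these lines can be used:

yum makecache
yum list kubelet --showduplicates | tail -n 5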

2.9 Configure the Docker yum repository

cd /etc/yum.repos.d

wget https://download.docker.com/linux/centos/docker-ce.repo

2.10 Configure passwordless SSH between nodes

ssh-keygen

ssh-copy-id -i ~/.ssh/id_rsa.pub username@192.168.x.xxx
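To push the key to every host in the table above in one pass, a small loop such as the following works (host names come from the hosts file; root is assumed here, matching the later scp steps):

for host in k8s-master1 k8s-master2 k8s-master3 k8s-node1 k8s-node2 k8s-node3 k8s-ingress; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub root@$host
done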

2.11 Install dependencies

yum install -y epel-release

yum install -y yum-utils device-mapper-persistent-data lvm2 net-tools conntrack-tools wget vim ntpdate libseccomp libtool-ltdl lrzsz

2.12 Configure IPVS kernel modules

cat > /etc/sysconfig/modules/ipvs.modules <<EOF

#!/bin/bash

modprobe -- ip_vs

modprobe -- ip_vs_rr

modprobe -- ip_vs_wrr

modprobe -- ip_vs_sh

modprobe -- nf_conntrack_ipv4

EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

 

yum install ipset ipvsadm -y

2.13 Install kubelet, kubeadm and kubectl on all master and node hosts

yum install -y kubelet kubeadm kubectl 

systemctl enable kubelet
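The command above installs whatever version is newest in the repository. Since the kubeadm-init.yaml used later targets kubernetesVersion v1.15.2, pinning the packages to that version avoids a mismatch; for example:

yum install -y kubelet-1.15.2 kubeadm-1.15.2 kubectl-1.15.2
systemctl enable kubelet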

II. Install Docker

1) Install docker

yum install -y docker-ce

2) Enable Docker at boot and start it

systemctl daemon-reload

systemctl enable docker

systemctl start docker
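Optionally, Docker's daemon settings can be aligned with the kubelet's cgroup driver (cgroupfs, as set in kubeadm-init.yaml below) and given a log-size limit; a sketch of /etc/docker/daemon.json:

cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=cgroupfs"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m" }
}
EOF
systemctl restart docker
docker info | grep -i cgroup    # should report: Cgroup Driver: cgroupfs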

III. Install Kubernetes

1) Create the haproxy configuration file (required on every master node)

1.1 haproxy configuration for the masters

cat >/etc/haproxy/haproxy.cfg<<EOF

global

  log 127.0.0.1 local0 err

  maxconn 5000

  uid 99

  gid 99

  #daemon

  nbproc 1

  pidfile haproxy.pid

 

defaults

  mode http

  log 127.0.0.1 local0 err

  maxconn 5000

  retries 3

  timeout connect 5s

  timeout client 30s

  timeout server 30s

  timeout check 2s

 

listen admin_stats

  mode http

  bind 0.0.0.0:1080

  log 127.0.0.1 local0 err

  stats refresh 30s

  stats uri     /haproxy-status

  stats realm   Haproxy\ Statistics

  stats auth    will:will

  stats hide-version

  stats admin if TRUE

 

frontend k8s-https

  bind 0.0.0.0:8443

  mode tcp

  #maxconn 50000

  default_backend k8s-https

 

backend k8s-https

  mode tcp

  balance roundrobin

  server k8s-master1 172.18.3.200:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3

  server k8s-master2 172.18.3.201:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3

  server k8s-master3 172.18.3.207:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3

 

EOF

1.2 Install haproxy and write the configuration file (required on every master node)

yum install -y haproxy

 

 

# cat /etc/haproxy/haproxy.cfg    # should show the configuration created in section 1.1
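Before starting the service, haproxy can validate the configuration file for syntax errors:

haproxy -c -f /etc/haproxy/haproxy.cfg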

2) Install and configure Keepalived (required on every master node)

2.1 Install keepalived

yum install -y keepalived

2.2 Write the keepalived configuration file

Note: set priority to 100 on master1 and 90 on master2 (use a lower value again, e.g. 80, on master3); the node with the highest priority holds the VIP.

# cat /etc/keepalived/keepalived.conf

global_defs {

   notification_email_from Alexandre.Cassen@firewall.loc

   smtp_server 127.0.0.1

   smtp_connect_timeout 30

   router_id LVS_1

}

 

vrrp_instance VI_1 {

    state MASTER         

    interface eth0

    lvs_sync_daemon_interface eth0

    virtual_router_id 88

    advert_int 1

    priority 100        

    authentication {

        auth_type PASS

        auth_pass 1111

    }

    virtual_ipaddress {

      172.18.3.206/24

    }

}
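On master2 (and any additional master) the same file is used with a lower priority; a sketch of the master2 variant, assuming state BACKUP and the priority 90 mentioned in the note above:

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    lvs_sync_daemon_interface eth0
    virtual_router_id 88
    advert_int 1
    priority 90
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
      172.18.3.206/24
    }
}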

2.3 Start haproxy and keepalived

systemctl enable keepalived && systemctl start keepalived

systemctl enable haproxy && systemctl start haproxy
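To verify that the load-balancer stack is up, check that the VIP is bound on exactly one master and that haproxy is listening on its frontend and stats ports:

ip addr show eth0 | grep 172.18.3.206    # VIP present on the current MASTER node only
ss -lntp | grep -E ':8443|:1080'         # haproxy frontend (8443) and stats page (1080)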

3) Initialize the cluster

3.1 Cluster initialization

kubeadm init --config kubeadm-init.yaml

 

 

 

 

# cat kubeadm-init.yaml

apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
bootstrapTokens:
- token: "sh111a.vn43bivdu1tgpc9n"
  description: "kubeadm bootstrap token"
  ttl: "24h"
- token: "sns23j.9zvpnzyera5qdpoh"
  description: "another bootstrap token"
  usages:
  - authentication
  - signing
  groups:
  - system:bootstrappers:kubeadm:default-node-token
nodeRegistration:
  name: "k8s-master1"
  criSocket: "/var/run/dockershim.sock"
  taints:
  - key: "kubeadmNode"
    value: "master"
    effect: "NoSchedule"
  kubeletExtraArgs:
    cgroup-driver: "cgroupfs"
  ignorePreflightErrors:
  - IsPrivilegedUser
localAPIEndpoint:
  advertiseAddress: "172.18.3.200"
  bindPort: 6443
certificateKey: "5adtboomz5m8rm8lz44wzaxuathaq84oi7hlyp68kqxfzr30y4todwf1abxsei5e"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
etcd:
  # only one of local or external may be set; this deployment uses stacked (local) etcd
  local:
    imageRepository: "k8s.gcr.io"
    imageTag: "3.2.24"
    dataDir: "/var/lib/etcd"
  # external:
  #   endpoints:
  #   - "https://172.18.3.200:2379"
  #   - "https://172.18.3.201:2379"
  #   - "https://172.18.3.207:2379"
  #   caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt"
  #   certFile: "/etcd/kubernetes/pki/etcd/etcd.crt"
  #   keyFile: "/etcd/kubernetes/pki/etcd/etcd.key"
networking:
  podSubnet: "10.20.0.0/24"
  dnsDomain: "cluster.local"
kubernetesVersion: "v1.15.2"
controlPlaneEndpoint: "172.18.3.206:8443"
apiServer:
  extraArgs:
    authorization-mode: "Node,RBAC"
  certSANs:
  - '127.0.0.1'
  - localhost
  - '172.18.3.200'
  - '172.18.3.201'
  - '172.18.3.202'
  - '172.18.3.203'
  - '172.18.3.204'
  - '172.18.3.205'
  - '172.18.3.206'
  - '172.18.3.207'
  timeoutForControlPlane: 4m0s
controllerManager:
  extraArgs:
    address: "0.0.0.0"
scheduler:
  extraArgs:
    address: "0.0.0.0"
certificatesDir: "/etc/kubernetes/pki"
imageRepository: "k8s.gcr.io"
useHyperKubeImage: false
clusterName: "k8s-cluster"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"

 

 

 

 

Useful information from the init output that should be kept:

  mkdir -p $HOME/.kube

  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

  sudo chown $(id -u):$(id -g) $HOME/.kube/config

 

 

 

You can now join any number of control-plane nodes by copying certificate authorities

and service account keys on each node and then running the following as root:

 

  kubeadm join 172.18.3.206:8443 --token sh111a.vn43bivdu1tgpc9n \

    --discovery-token-ca-cert-hash sha256:767aa9e99b5737a46493288d79f6f381e8bfde903fd354a768dd312d8cebc92a \

    --control-plane --certificate-key 5adtboomz5m8rm8lz44wzaxuathaq84oi7hlyp68kqxfzr30y4todwf1abxsei5e   

 

Then you can join any number of worker nodes by running the following on each as root:

 

kubeadm join 172.18.3.206:8443 --token sh111a.vn43bivdu1tgpc9n \

    --discovery-token-ca-cert-hash sha256:767aa9e99b5737a46493288d79f6f381e8bfde903fd354a768dd312d8cebc92a

3.2 What does kubeadm init do?

kubeadm init performs the following main steps:

 

[init]: initialize the cluster with the specified version.
[preflight]: run pre-flight checks and pull the required Docker images.
[kubelet-start]: generate the kubelet configuration file /var/lib/kubelet/config.yaml; the kubelet cannot start without it, which is why the kubelet fails until initialization runs.
[certificates]: generate the certificates used by Kubernetes and store them under /etc/kubernetes/pki.
[kubeconfig]: generate the kubeconfig files under /etc/kubernetes; the components need these files to communicate with each other.
[control-plane]: install the master components from the static Pod manifests under /etc/kubernetes/manifests.
[etcd]: install etcd from /etc/kubernetes/manifests/etcd.yaml.
[wait-control-plane]: wait for the control-plane components to come up.
[apiclient]: check the health of the master components.
[uploadconfig]: upload the cluster configuration.
[kubelet]: configure the kubelet via a ConfigMap.
[patchnode]: record CNI information on the Node object through annotations.
[mark-control-plane]: label the current node with the master role and taint it as unschedulable, so ordinary Pods are not scheduled onto master nodes by default.
[bootstrap-token]: generate the bootstrap token; record it, it is needed later when adding nodes with kubeadm join.
[addons]: install the CoreDNS and kube-proxy add-ons.

3.3 Prepare the kubectl config file

mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

sudo chown $(id -u):$(id -g) $HOME/.kube/config
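With the kubeconfig in place, a quick sanity check confirms that kubectl reaches the API server through the VIP:

kubectl cluster-info
kubectl get componentstatuses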

3.4 Copy the certificates and kubeconfig to the other master nodes

USER=root

CONTROL_PLANE_IPS="k8s-master2 k8s-master3"

for host in ${CONTROL_PLANE_IPS}; do

    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"

    scp /etc/kubernetes/pki/ca.* "${USER}"@$host:/etc/kubernetes/pki/

    scp /etc/kubernetes/pki/sa.* "${USER}"@$host:/etc/kubernetes/pki/

    scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}"@$host:/etc/kubernetes/pki/

    scp /etc/kubernetes/pki/etcd/ca.* "${USER}"@$host:/etc/kubernetes/pki/etcd/

    scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/

done

3.5 Join the other master nodes to the cluster

kubeadm join 172.18.3.206:8443 --token sh111a.vn43bivdu1tgpc9n \
    --discovery-token-ca-cert-hash sha256:767aa9e99b5737a46493288d79f6f381e8bfde903fd354a768dd312d8cebc92a \
    --experimental-control-plane

 

 

 

    mkdir -p $HOME/.kube

    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

    sudo chown $(id -u):$(id -g) $HOME/.kube/config

3.6 Check the nodes

# kubectl get node

NAME          STATUS     ROLES    AGE   VERSION

k8s-master1   NotReady   master   48m   v1.15.2

k8s-master2   NotReady   master   26m   v1.15.2

k8s-master3   NotReady   master   24m   v1.15.2

 

At this point the nodes show NotReady because no network plugin has been installed yet.


4) Install the Calico network plugin

4.1 Deploy the RBAC manifest

kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml

4.2 Download the network plugin YAML file

wget https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml

4.3 Modify the YAML file

vim calico.yaml

 

1) Disable IPIP mode and set typha_service_name

 

- name: CALICO_IPV4POOL_IPIP
  value: "off"

 

typha_service_name: "calico-typha"

 

By default Calico runs in IPIP mode: a tunl0 interface is created on every node host and the tunnel connects the container networks of all nodes (the official docs recommend it when hosts sit in different IP subnets, e.g. machines in different AWS regions).

Here it is switched to BGP mode: Calico is installed as a DaemonSet on every node, and each host runs bird (a BGP client) that advertises the IP ranges assigned to the nodes in the Calico network to the other hosts in the cluster; traffic is then forwarded directly through the host NIC (eth0 or ens33).

 

2) Set replicas

 

  replicas: 1

  revisionHistoryLimit: 2

 

3) Set the Pod network CIDR (CALICO_IPV4POOL_CIDR)

 

- name: CALICO_IPV4POOL_CIDR
  value: "10.20.0.0/16"

4) If you want to pull the images manually, check the image versions referenced in calico.yaml; otherwise they are downloaded automatically when the manifest is applied.

5) Deploy Calico

kubectl apply -f calico.yaml

 

6) Check the Pods

kubectl get po --all-namespaces

At this point some Pods will be Pending, because the node hosts have not joined yet and do not have the required components.

7) Verify that BGP mode is in use
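A quick way to check (calicoctl is only available if you have installed it separately; the ip commands alone are usually enough): with IPIP disabled, the routes to the other nodes' Pod subnets are installed by bird and go out through the physical interface rather than a tunl0 tunnel.

ip route | grep bird          # Pod routes learned over BGP, via eth0
ip -d link show tunl0         # should be absent or unused when IPIP is off
calicoctl node status         # BGP peer sessions should show "Established"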

4.4 Join the worker nodes to the cluster

kubeadm join 172.18.3.206:8443 --token sh111a.vn43bivdu1tgpc9n \

    --discovery-token-ca-cert-hash sha256:767aa9e99b5737a46493288d79f6f381e8bfde903fd354a768dd312d8cebc92a

4.5 Check the nodes and Pods

# kubectl get po -n kube-system -o wide

NAME                                  READY   STATUS    RESTARTS   AGE     IP             NODE          NOMINATED NODE   READINESS GATES

calico-node-5zs2f                     2/2     Running   2          62m     172.18.3.204   k8s-node3     <none>           <none>

calico-node-bh96n                     2/2     Running   2          2d15h   172.18.3.200   k8s-master1   <none>           <none>

calico-node-q84gk                     2/2     Running   2          2d15h   172.18.3.207   k8s-master3   <none>           <none>

calico-node-rd9xr                     2/2     Running   6          2d15h   172.18.3.202   k8s-node1     <none>           <none>

calico-node-rzcxm                     2/2     Running   2          2d15h   172.18.3.205   k8s-ingress   <none>           <none>

calico-node-skdnj                     2/2     Running   2          2d15h   172.18.3.203   k8s-node2     <none>           <none>

calico-node-w47sj                     2/2     Running   2          2d15h   172.18.3.201   k8s-master2   <none>           <none>

calico-typha-7f88c47797-bbkcj         1/1     Running   3          2d15h   172.18.3.202   k8s-node1     <none>           <none>

coredns-5c98db65d4-lsqr4              1/1     Running   4          2d16h   10.244.1.7     k8s-master2   <none>           <none>

coredns-5c98db65d4-mk96v              1/1     Running   4          2d16h   10.244.1.6     k8s-master2   <none>           <none>

etcd-k8s-master1                      1/1     Running   1          2d16h   172.18.3.200   k8s-master1   <none>           <none>

etcd-k8s-master2                      1/1     Running   1          2d16h   172.18.3.201   k8s-master2   <none>           <none>

etcd-k8s-master3                      1/1     Running   1          2d16h   172.18.3.207   k8s-master3   <none>           <none>

kube-apiserver-k8s-master1            1/1     Running   1          2d16h   172.18.3.200   k8s-master1   <none>           <none>

kube-apiserver-k8s-master2            1/1     Running   1          2d16h   172.18.3.201   k8s-master2   <none>           <none>

kube-apiserver-k8s-master3            1/1     Running   1          2d16h   172.18.3.207   k8s-master3   <none>           <none>

kube-controller-manager-k8s-master1   1/1     Running   2          2d16h   172.18.3.200   k8s-master1   <none>           <none>

kube-controller-manager-k8s-master2   1/1     Running   1          2d16h   172.18.3.201   k8s-master2   <none>           <none>

kube-controller-manager-k8s-master3   1/1     Running   1          2d16h   172.18.3.207   k8s-master3   <none>           <none>

kube-proxy-bx9bl                      1/1     Running   0          2m5s    172.18.3.203   k8s-node2     <none>           <none>

kube-proxy-gj6l4                      1/1     Running   0          115s    172.18.3.200   k8s-master1   <none>           <none>

kube-proxy-glf69                      1/1     Running   0          119s    172.18.3.205   k8s-ingress   <none>           <none>

kube-proxy-m7j2h                      1/1     Running   0          2m2s    172.18.3.207   k8s-master3   <none>           <none>

kube-proxy-sxzb2                      1/1     Running   0          2m16s   172.18.3.204   k8s-node3     <none>           <none>

kube-proxy-x9ch4                      1/1     Running   0          2m1s    172.18.3.202   k8s-node1     <none>           <none>

kube-proxy-zx4lx                      1/1     Running   0          2m18s   172.18.3.201   k8s-master2   <none>           <none>

kube-scheduler-k8s-master1            1/1     Running   2          2d16h   172.18.3.200   k8s-master1   <none>           <none>

kube-scheduler-k8s-master2            1/1     Running   1          2d16h   172.18.3.201   k8s-master2   <none>           <none>

kube-scheduler-k8s-master3            1/1     Running   1          2d16h   172.18.3.207   k8s-master3   <none>           <none>

 

# kubectl get node -o wide

NAME          STATUS   ROLES    AGE     VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION          CONTAINER-RUNTIME

k8s-ingress   Ready    <none>   2d16h   v1.15.2   172.18.3.205   <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.1

k8s-master1   Ready    master   2d17h   v1.15.2   172.18.3.200   <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.1

k8s-master2   Ready    master   2d17h   v1.15.2   172.18.3.201   <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.1

k8s-master3   Ready    master   2d17h   v1.15.2   172.18.3.207   <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.1

k8s-node1     Ready    <none>   2d16h   v1.15.2   172.18.3.202   <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.1

k8s-node2     Ready    <none>   2d16h   v1.15.2   172.18.3.203   <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.1

k8s-node3     Ready    <none>   92m     v1.15.2   172.18.3.204   <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   docker://19.3.1
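Because kube-proxy was configured with mode: "ipvs" in kubeadm-init.yaml, the Service virtual servers can be inspected directly on any node:

ipvsadm -Ln | head -n 20      # the kubernetes Service cluster IP should appear as an IPVS virtual server
kubectl get svc kubernetes    # its ClusterIP should match one of the entries above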

 

 

Reposted from: https://my.oschina.net/54188zz/blog/3096415
