使用kubeadm部署多master的k8s集群

对kubernetes感兴趣的可以加群885763297,一起玩转kubernetes

机器初始化,相关参数

节点规划(keepalived 提供 k8s apiserver 的虚拟 IP;角色 → 主机名):

  角色                    主机名
  master1 + etcd          master1
  master2 + etcd + node   master2
  master3 + etcd + node   master3
服务版本
etcd: etcd-v3.2.12
k8s.gcr.io/kube-apiserver-amd64: v1.10.5
k8s.gcr.io/kube-controller-manager-amd64: v1.10.5
k8s.gcr.io/kube-scheduler-amd64: v1.10.5
k8s.gcr.io/kube-proxy-amd64: v1.10.5
quay.io/coreos/flannel: v0.10.0-amd64
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64: 1.14.8
k8s.gcr.io/k8s-dns-sidecar-amd64: 1.14.8
k8s.gcr.io/k8s-dns-kube-dns-amd64: 1.14.8
k8s.gcr.io/pause-amd64: 3.1
# Set a unique static hostname on EACH node (run ONE of these per machine):
#   k8s-master1 / k8s-slave1 / k8s-slave2
# Fixed: the original used "set-hostanme" (typo, rejected by hostnamectl) and
# joined all three names with slashes into a single bogus argument.
hostnamectl --static set-hostname k8s-master1   # use k8s-slave1 / k8s-slave2 on the other nodes

# Rewrite /etc/hosts so every node resolves the cluster hostnames.
cat <<EOF > /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.30.94 k8s-master1
192.168.30.182 k8s-slave1
192.168.30.187 k8s-slave2
EOF

# Passwordless SSH from master1 to the other nodes (press Enter through all prompts).
ssh-keygen
ssh-copy-id  k8s-slave1
ssh-copy-id  k8s-slave2

# Disable the firewall, SELinux and swap (kubelet refuses to run with swap on);
# the sed lines make the changes persistent, the last two apply them immediately.
yum -y remove firewalld
sed -i  's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
sed -i "s/PasswordAuthentication no/PasswordAuthentication yes/g" /etc/ssh/sshd_config
sed -i 's/.*swap.*/#&/' /etc/fstab

setenforce 0
swapoff -a

yum -y install yum-utils ntpdate
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Use '>' (not '>>') so re-running this script does not append duplicate
# [kubernetes] sections to the repo file.
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

# Kernel parameters required by kube-proxy/flannel: bridged traffic must
# traverse iptables, IP forwarding must be on, swapping discouraged.
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness = 0
net.ipv4.ip_forward= 1
EOF
# Fixed: the original wrote the sysctl file but never applied it. The
# bridge-nf-call-* keys only exist once br_netfilter is loaded, so load the
# module first, then apply all sysctl drop-ins now instead of at next boot.
modprobe br_netfilter
sysctl --system

# Raise per-user resource limits for etcd/kubelet workloads.
echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf
echo "* soft nproc 65536"  >> /etc/security/limits.conf
echo "* hard nproc 65536"  >> /etc/security/limits.conf
echo "* soft  memlock  unlimited"  >> /etc/security/limits.conf
echo "* hard memlock  unlimited"  >> /etc/security/limits.conf


yum makecache fast
# Docker 17.03 is the version validated for Kubernetes 1.10.
yum install -y --setopt=obsoletes=0 docker-ce-17.03.2.ce-1.el7.centos docker-ce-selinux-17.03.2.ce-1.el7.centos
systemctl enable docker && systemctl start docker

# Pin kubeadm/kubelet/kubectl to 1.10.5 so they match the image versions above.
yum --showduplicates list kubeadm
yum install -y --setopt=obsoletes=0 kubeadm-1.10.5-0.x86_64 kubelet-1.10.5-0.x86_64 kubectl-1.10.5-0.x86_64

# Fixed: the original ran an additional unpinned
#   yum install -y kubelet kubeadm kubectl
# immediately after the pinned install, which would upgrade all three packages
# to the latest release and break the 1.10.5 pin.

# Docker 17.03 uses the cgroupfs driver; make kubelet agree or it will not start.
sed -i "s/cgroup-driver=systemd/cgroup-driver=cgroupfs/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
systemctl daemon-reload
systemctl enable kubelet && systemctl start kubelet

# One-shot clock sync (certificates are time-sensitive).
ntpdate time.pool.aliyun.com

# Park the docker-ce repo so later yum runs cannot pull a newer docker.
mv /etc/yum.repos.d/docker-ce.repo /etc/yum.repos.d/docker-ce.repo.bak

在master机器上安装cfssl工具

# Fetch the cfssl tool chain (cfssl, cfssljson, cfssl-certinfo) from the
# official R1.2 release and install each binary into /usr/local/bin.
# Same downloads, modes and destinations as the original three stanzas.
for tool in cfssl cfssljson cfssl-certinfo; do
  wget "https://pkg.cfssl.org/R1.2/${tool}_linux-amd64"
  chmod +x "${tool}_linux-amd64"
  sudo mv "${tool}_linux-amd64" "/usr/local/bin/${tool}"
done

# Make sure the freshly installed binaries win over anything earlier in PATH.
export PATH=/usr/local/bin:$PATH

配置ca根证书

# Write the CA signing policy for cfssl. Three profiles, all valid 43800h (5y):
#   server - serving certs (lists both "server auth" and "client auth")
#   client - client-only certs (for etcd clients such as kube-apiserver)
#   peer   - etcd member-to-member certs (mutual TLS, so both usages)
cat > ca-config.json << EOF
 {
     "signing": {
         "default": {
             "expiry": "43800h"
         },
         "profiles": {
             "server": {
                 "expiry": "43800h",
                 "usages": [
                     "signing",
                     "key encipherment",
                     "server auth",
                     "client auth"
                 ]
             },
             "client": {
                 "expiry": "43800h",
                 "usages": [
                     "signing",
                     "key encipherment",
                     "client auth"
                 ]
             },
             "peer": {
                 "expiry": "43800h",
                 "usages": [
                     "signing",
                     "key encipherment",
                     "server auth",
                     "client auth"
                 ]
             }
         }
     }
 }
EOF

#创建ca签名请求(CSR)配置文件
# CSR for the self-signed CA itself: CN "etcd", 2048-bit RSA key.
cat > ca-csr.json << EOF
 {
     "CN": "etcd",
     "key": {
         "algo": "rsa",
         "size": 2048
     },
    "names": [
      {
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "k8s",
        "OU": "System"
      }
    ]
 }
EOF

# Generate the self-signed CA: produces ca.pem, ca-key.pem and ca.csr.
cfssl gencert -initca ca-csr.json | cfssljson -bare ca

生成client,客户端证书

 # Client certificate request (ECDSA P-256). CN "client" is the identity
 # presented by etcd clients such as the kube-apiserver.
 cat >client.json <<EOF
 {
     "CN": "client",
     "key": {
         "algo": "ecdsa",
         "size": 256
     }
 }
EOF
# Sign with the CA above using the "client" profile -> client.pem / client-key.pem.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client.json | cfssljson -bare client

生成peer 和server的证书(etcd使用)

# Every etcd node needs its own copy of config.json (CN and hosts differ per
# machine). print-defaults emits a template CSR to edit.
cfssl print-defaults csr > config.json

编辑 config.json 如下(注意:JSON 不支持 # 注释,保存前必须去掉所有注释;"CN" 填本机的机器名,"hosts" 填本机的 IP 和主机名):

{
    "CN": "master1",
    "hosts": [
        "10.10.0.220",
        "master1"
    ],
    "key": {
        "algo": "ecdsa",
        "size": 256
    },
    "names": [
        {
            "C": "US",
            "L": "CA",
            "ST": "San Francisco"
        }
    ]
}


 # Sign config.json with the "server" and "peer" profiles, producing
 # server.pem/server-key.pem and peer.pem/peer-key.pem (run per etcd node,
 # each with that node's own config.json).
 cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server config.json | cfssljson -bare server
 cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer config.json | cfssljson -bare peer

证书相关注意事项

  • etcd使用的证书,可以都放到/etc/kubernetes/etcd/pki/下面
  • kubernetes集群使用的相关证书,可以都放到/etc/kubernetes/pki/下面
  • 如果reset集群以后,只会清除/etc/kubernetes/pki/下面的内容

systemd 方式运行 etcd

# Write the etcd systemd unit. The heredoc delimiter is quoted ('EOF') so the
# shell writes the trailing backslashes and the {{...}} placeholders literally
# (an unquoted delimiter treats backslash-newline as a line continuation and
# collapses the ExecStart block onto one line). Replace every {{...}}
# placeholder with this node's name/IP before starting the service.
# Fixed: the original declared Restart/RestartSec/LimitNOFILE twice with
# conflicting values; systemd applies the last occurrence, so the effective
# values (on-failure / 5 / 65536) are kept and the earlier duplicates dropped.
cat >/etc/systemd/system/etcd.service <<'EOF'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
TimeoutStartSec=0
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --name {{ETCD_NODE1_NAME}} \
  --cert-file={{PATH}}/server.pem \
  --key-file={{PATH}}/server-key.pem \
  --peer-cert-file={{PATH}}/peer.pem \
  --peer-key-file={{PATH}}/peer-key.pem \
  --trusted-ca-file={{PATH}}/ca.pem \
  --peer-trusted-ca-file={{PATH}}/ca.pem \
  --initial-advertise-peer-urls https://{{ETCD_NODE1_IP}}:2380 \
  --listen-peer-urls https://{{ETCD_NODE1_IP}}:2380 \
  --listen-client-urls https://{{ETCD_NODE1_IP}}:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://{{ETCD_NODE1_IP}}:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster {{ETCD_NODE1_NAME}}=https://{{ETCD_NODE1_IP}}:2380,{{ETCD_NODE2_NAME}}=https://{{ETCD_NODE2_IP}}:2380,{{ETCD_NODE3_NAME}}=https://{{ETCD_NODE3_IP}}:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd

[Install]
WantedBy=multi-user.target
EOF


# Start etcd
systemctl daemon-reload
systemctl start etcd
systemctl status etcd

# Verify the cluster is reachable (etcdctl v3-style TLS flags)
## NOTE: if the TLS flags do not work, fall back to: etcdctl --prefix --keys-only=true get /
etcdctl  --cacert=/etc/kubernetes/pki/etcd/ca.pem --cert=/etc/kubernetes/pki/etcd/server.pem --key=/etc/kubernetes/pki/etcd/server-key.pem --endpoints="https://192.168.30.182:2379" --prefix --keys-only=true get /

# Or check member health (etcdctl v2-style flags: --ca-file/--cert-file/--key-file)
## NOTE: if the TLS flags do not work, fall back to: etcdctl cluster-health
etcdctl --endpoints=https://192.168.150.181:2379,https://192.168.150.182:2379,https://192.168.150.183:2379 \
  --ca-file=/etc/kubernetes/pki/ca.pem \
  --cert-file=/etc/kubernetes/pki/etcd/server.pem \
  --key-file=/etc/kubernetes/pki/etcd/server-key.pem  cluster-health

# To wipe all etcd data:
## NOTE: if the TLS flags do not work, use plain: etcdctl del /

配置kubelet

# Make kubelet's cgroup driver match Docker's (cgroupfs for docker 17.03).
sed -i "s/cgroup-driver=systemd/cgroup-driver=cgroupfs/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# Optional: enable webhook auth on the kubelet API so Prometheus can later
# scrape kubelet metrics (create the matching ClusterRole, then query the API).
# Environment="KUBELET_EXTRA_ARGS=--authentication-token-webhook"
systemctl daemon-reload
systemctl enable kubelet && systemctl start kubelet

初始化节点master1

# kubeadm config for the FIRST master (v1alpha1 schema, k8s 1.10.x):
# - api.advertiseAddress: this node's own IP
# - etcd: the external etcd cluster built above, accessed with the client TLS certs
# - networking.podSubnet: must match the flannel network (10.244.0.0/16)
# - apiServerCertSANs: every name/IP that may reach the apiserver, including
#   the keepalived/SLB virtual IP 192.168.30.189
cat <<EOF > /etc/kubernetes/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
kubernetesVersion: 1.10.5
api:
  advertiseAddress: "192.168.30.94"   #本节点ip
etcd:
  endpoints:
  #etcd地址
  - https://192.168.30.94:2379
  - https://192.168.30.182:2379
  - https://192.168.30.187:2379
  caFile: /etc/kubernetes/pki/etcd/ca.pem
  certFile: /etc/kubernetes/pki/etcd/client.pem
  keyFile: /etc/kubernetes/pki/etcd/client-key.pem
networking:
  #pod网段
  podSubnet: 10.244.0.0/16
apiServerCertSANs:
  #允许访问apiserver的地址
- 192.168.30.189   #apiserver的负载均衡ip,或者是slb的ip
- k8s-master1
- k8s-slave1
- k8s-slave2
- 192.168.30.94
- 192.168.30.182
- 192.168.30.187
#apiServerExtraArgs:
#  apiserver-count: "3"
#  endpoint-reconciler-type: lease
EOF

安装kubernetes集群

# Bootstrap the first master. Fixed: the config was written to
# /etc/kubernetes/kubeadm-config.yaml, but the original passed a relative
# path that only works if run from that directory.
kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml


# If the init fails, reset before retrying:
kubeadm reset
# or clean up manually. Stop kubelet FIRST so it does not recreate the static
# pods while we are deleting them.
systemctl  stop kubelet
rm -rf /etc/kubernetes/*.conf
rm -rf /etc/kubernetes/manifests/*.yaml
# Fixed: `docker ps -a | awk '{print $1}'` also emitted the header word
# "CONTAINER", which `docker rm` then tried (and failed) to remove.
# -q prints only IDs; xargs -r skips the run when there is nothing to remove.
docker ps -aq | xargs -r docker rm -f

注意初始化集群完后的操作:

初始化完成以后,需要把新生成的证书拷贝到其他节点上

安装flannel网络

# Deploy the flannel CNI DaemonSet; its default network 10.244.0.0/16 matches
# the podSubnet configured for kubeadm above.
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl create -f  kube-flannel.yml


[root@k8s-master1 kubernetes]# kubectl get pod -n kube-system  -o wide
NAME                                  READY     STATUS    RESTARTS   AGE       IP               NODE
kube-apiserver-k8s-master1            1/1       Running   0          19h       192.168.30.94    k8s-master1
kube-controller-manager-k8s-master1   1/1       Running   0          19h       192.168.30.94    k8s-master1
kube-dns-86f4d74b45-bwm2h             3/3       Running   0          19h       10.244.0.13      k8s-master1
kube-flannel-ds-rrwrg                 1/1       Running   0          18h       192.168.30.94    k8s-master1
kube-proxy-4zsnt                      1/1       Running   0          19h       192.168.30.94    k8s-master1
kube-scheduler-k8s-master1            1/1       Running   0          19h       192.168.30.94    k8s-master1
[root@k8s-master1 kubernetes]# kubectl get node
NAME          STATUS    ROLES     AGE       VERSION
k8s-master1   Ready     master    8m        v1.10.5

初始化节点master节点[2,3]

# kubeadm config for the ADDITIONAL masters (run on master2/master3, changing
# api.advertiseAddress to the local node's IP). token is the bootstrap token
# printed by `kubeadm init` on master1, so all masters share one token;
# tokenTTL "0s" keeps it from expiring. Everything else mirrors master1.
cat <<EOF > /etc/kubernetes/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
kubernetesVersion: 1.10.5
api:
  advertiseAddress: "192.168.30.182"
etcd:
  endpoints:
  - https://192.168.30.94:2379
  - https://192.168.30.182:2379
  - https://192.168.30.187:2379
  caFile: /etc/kubernetes/pki/etcd/ca.pem
  certFile: /etc/kubernetes/pki/etcd/client.pem
  keyFile: /etc/kubernetes/pki/etcd/client-key.pem
networking:
  podSubnet: 10.244.0.0/16
#下面的token是master1节点初始化完成后,join得到的token值
token: "1gddb4.cs1chtdrk5r9aa0i"
tokenTTL: "0s"
apiServerCertSANs:
- 192.168.30.189
- k8s-master1
- k8s-slave1
- k8s-slave2
- 192.168.30.94
- 192.168.30.182
- 192.168.30.187
#apiServerExtraArgs:
#  apiserver-count: "3"
#  endpoint-reconciler-type: lease
EOF

使用keepalived,或者使用slb代理6443 api-server的端口

  • 如果用nginx做load balance,6443端口的TLS使用api的ca.crt和ca.key(位于/etc/kubernetes/pki下面)。主服务器挂掉后,虚拟IP会自动漂移到备服务器

主keepalived

# keepalived on the MASTER node: owns VIP 192.168.30.189 at priority 100.
global_defs {
   router_id LVS_k8s
}

# Health probe: hit the apiserver through the VIP every 3s (9s timeout);
# 2 consecutive failures mark the node faulty, 2 successes restore it.
vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.30.189:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 61
    priority 100
    advert_int 1
    mcast_src_ip 192.168.30.94
    # NOTE(review): per the keepalived documentation, nopreempt only takes
    # effect when the initial state is BACKUP - confirm MASTER+nopreempt is intended.
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    # VRRP advertisements sent by unicast to the two backup nodes.
    unicast_peer {
        192.168.30.182
        192.168.30.187
    }
    virtual_ipaddress {
        192.168.30.189/24
    }
    track_script {
        CheckK8sMaster
    }

}

从 [1,2] keepalived配置

# keepalived on the BACKUP nodes. Fixed: the original repeated the
# global_defs block twice; a single copy is kept.
global_defs {
   router_id LVS_k8s
}

# Same apiserver health probe as on the master node.
vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.30.189:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface eno16777984  # NIC name on this node
    virtual_router_id 61
    priority 90    # backup1: 90, backup2: 80
    advert_int 1
    mcast_src_ip 192.168.30.182  # this node's IP
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        192.168.30.94  # the other nodes' IPs
        192.168.30.187
    }
    virtual_ipaddress {
        192.168.30.189/24  # virtual IP
    }
    track_script {
        CheckK8sMaster
    }

}

对kubernetes感兴趣的可以加群885763297,一起玩转kubernetes

  • 1
    点赞
  • 17
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值