Deploying a Highly Available Kubernetes Cluster from Binaries

IP address plan

k8s-master1 192.168.2.190   also runs etcd (the primary etcd node)
k8s-master2 192.168.2.191
k8s-node1   192.168.2.192   also runs etcd (secondary etcd node 1)
k8s-node2   192.168.2.193   also runs etcd (secondary etcd node 2)
k8s-node3   192.168.2.194   also runs etcd (secondary etcd node 3)
k8s-LB01    192.168.2.195
k8s-LB02    192.168.2.196
VIP: 192.168.2.197   all nodes connect to Kubernetes through this VIP

#Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

#Disable SELinux
sed -i '/^SELINUX=/c SELINUX=disabled' /etc/selinux/config  
setenforce 0

#Disable swap
#Method 1
#Turn swap off and lower swappiness so the kernel avoids it
#(lowering swappiness alone does not disable swap; the kubelet expects swap to be off)
#Temporary
swapoff -a
echo 1 >/proc/sys/vm/swappiness
#Permanent (append, do not overwrite sysctl.conf)
echo vm.swappiness=1 >>/etc/sysctl.conf
sysctl -p
------------------------------------------
#Method 2
vim /etc/fstab
#Comment out the swap partition entry so it is not mounted at boot
#UUID=f0731248-c6fc-492c-aa36-15865053d61e swap    swap    defaults  0 0
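
#A non-interactive equivalent of method 2 (a sketch; it assumes the active swap line in /etc/fstab contains the word "swap" and is not already commented out)
sed -ri.bak 's@^([^#].*[[:space:]]swap[[:space:]].*)$@#\1@' /etc/fstab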

#Set the hostname on each node; use the names exactly as listed below (e.g. k8s-node1), without appending anything
hostnamectl set-hostname k8s-master1
hostnamectl set-hostname k8s-master2
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
hostnamectl set-hostname k8s-node3
hostnamectl set-hostname k8s-LB01
hostnamectl set-hostname k8s-LB02

#Add hosts entries
cat >/etc/hosts<<'EOF'
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.2.190 k8s-master1
192.168.2.191 k8s-master2
192.168.2.192 k8s-node1
192.168.2.193 k8s-node2
192.168.2.194 k8s-node3
192.168.2.195 k8s-LB01  
192.168.2.196 k8s-LB02   
EOF

#Configure time synchronization
ntpdate time.windows.com
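
#ntpdate performs a one-shot sync; to keep clocks aligned over time, a cron entry can repeat it (a sketch, assuming crond is running)
echo '*/30 * * * * /usr/sbin/ntpdate time.windows.com >/dev/null 2>&1' >>/var/spool/cron/root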

Deploying etcd and its SSL certificates

#Generate the etcd certificates; both etcd and the apiserver need certificates issued. etcd is a key-value store used for service discovery and shared configuration that other services read
#Upload TLS.tar.gz, the package used to generate the certificates
cd /tools/

#Extract
tar -xf TLS.tar.gz
cd TLS/

#Prepare the cfssl tools (this script installs the binaries used to generate keys and CA certificates)
./cfssl.sh
cd etcd/

#Verify
[root@k8s-master1 etcd]# ls -l /usr/local/bin/
total 18808
-rwxr-xr-x 1 root root 10376657 Oct 27 04:04 cfssl
-rwxr-xr-x 1 root root  6595195 Oct 27 04:04 cfssl-certinfo
-rwxr-xr-x 1 root root  2277873 Oct 27 04:04 cfssljson


#Edit the hosts field of the request file so it contains all etcd node IPs:
[root@k8s-master1 etcd]# cat server-csr.json 
{
    "CN": "etcd",
    "hosts": [
        "192.168.2.190",
        "192.168.2.192",
        "192.168.2.193",
        "192.168.2.194"
        ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}

#Run the generation script
[root@k8s-master1 etcd]# ./generate_etcd_cert.sh 
2019/10/27 04:10:38 [INFO] generating a new CA key and certificate from CSR
2019/10/27 04:10:38 [INFO] generate received request
2019/10/27 04:10:38 [INFO] received CSR
2019/10/27 04:10:38 [INFO] generating key: rsa-2048
2019/10/27 04:10:40 [INFO] encoded CSR
2019/10/27 04:10:40 [INFO] signed certificate with serial number 339948947871364669983845782291267268650852852919
2019/10/27 04:10:40 [INFO] generate received request
2019/10/27 04:10:40 [INFO] received CSR
2019/10/27 04:10:40 [INFO] generating key: rsa-2048
2019/10/27 04:10:40 [INFO] encoded CSR
2019/10/27 04:10:40 [INFO] signed certificate with serial number 55310426155568260615251969567183056374134291850
2019/10/27 04:10:40 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

#Verify
[root@k8s-master1 etcd]# ls *pem
ca-key.pem  ca.pem  server-key.pem  server.pem


#Deploy etcd on the 4 nodes
#Upload the etcd.tar.gz package on the master
tar -xf etcd.tar.gz
cd etcd/
#Copy the etcd certificates into the package's ssl directory
/bin/cp /tools/TLS/etcd/{ca,server,server-key}.pem ssl

#Deploy to the 3 worker nodes
cd /tools/
scp -r etcd root@192.168.2.192:/opt
scp -r etcd root@192.168.2.193:/opt
scp -r etcd root@192.168.2.194:/opt
#Copy the systemd unit file
cd /tools/
scp -r etcd.service root@192.168.2.192:/usr/lib/systemd/system
scp -r etcd.service root@192.168.2.193:/usr/lib/systemd/system
scp -r etcd.service root@192.168.2.194:/usr/lib/systemd/system
#Deploy on this machine (master1) as well
cp -r etcd /opt/
cp etcd.service /usr/lib/systemd/system

#Configure the primary node (etcd-1, on master1)
cat >/opt/etcd/cfg/etcd.conf<<'EOF'
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.2.190:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.2.190:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.2.190:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.2.190:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.2.190:2380,etcd-2=https://192.168.2.192:2380,etcd-3=https://192.168.2.193:2380,etcd-4=https://192.168.2.194:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

#Configure secondary node 1 (etcd-2, on k8s-node1)
cat >/opt/etcd/cfg/etcd.conf<<'EOF'
#[Member]
ETCD_NAME="etcd-2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.2.192:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.2.192:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.2.192:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.2.192:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.2.190:2380,etcd-2=https://192.168.2.192:2380,etcd-3=https://192.168.2.193:2380,etcd-4=https://192.168.2.194:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

#Configure secondary node 2 (etcd-3, on k8s-node2)
cat >/opt/etcd/cfg/etcd.conf<<'EOF'
#[Member]
ETCD_NAME="etcd-3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.2.193:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.2.193:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.2.193:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.2.193:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.2.190:2380,etcd-2=https://192.168.2.192:2380,etcd-3=https://192.168.2.193:2380,etcd-4=https://192.168.2.194:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

#Configure secondary node 3 (etcd-4, on k8s-node3)
cat >/opt/etcd/cfg/etcd.conf<<'EOF'
#[Member]
ETCD_NAME="etcd-4"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.2.194:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.2.194:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.2.194:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.2.194:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.2.190:2380,etcd-2=https://192.168.2.192:2380,etcd-3=https://192.168.2.193:2380,etcd-4=https://192.168.2.194:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
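
#The four etcd.conf files above differ only in ETCD_NAME and the member IP. A sketch that writes each one over ssh from master1 (assumes passwordless root ssh to every member, including master1 itself):
CLUSTER="etcd-1=https://192.168.2.190:2380,etcd-2=https://192.168.2.192:2380,etcd-3=https://192.168.2.193:2380,etcd-4=https://192.168.2.194:2380"
for pair in etcd-1:192.168.2.190 etcd-2:192.168.2.192 etcd-3:192.168.2.193 etcd-4:192.168.2.194; do
  name=${pair%%:*}; ip=${pair##*:}
  # the unquoted heredoc expands $name/$ip/$CLUSTER locally before sending
  ssh root@$ip "cat >/opt/etcd/cfg/etcd.conf" <<EOF
#[Member]
ETCD_NAME="$name"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://$ip:2380"
ETCD_LISTEN_CLIENT_URLS="https://$ip:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://$ip:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://$ip:2379"
ETCD_INITIAL_CLUSTER="$CLUSTER"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
done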


#Start (on every etcd node)
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
#If it fails to start, reload the unit files and retry
systemctl daemon-reload


#View the etcd logs
cat /var/log/messages

#Check cluster health
/opt/etcd/bin/etcdctl \
--ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
--endpoints="https://192.168.2.190:2379,https://192.168.2.192:2379,https://192.168.2.193:2379,https://192.168.2.194:2379" \
cluster-health
#Output
[root@k8s-master1 tools]# /opt/etcd/bin/etcdctl \
> --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
> --endpoints="https://192.168.2.190:2379,https://192.168.2.192:2379,https://192.168.2.193:2379,https://192.168.2.194:2379" \
> cluster-health
---------------
member 2778bc0f105c127b is healthy: got healthy result from https://192.168.2.192:2379
member ccea55b4954c56f7 is healthy: got healthy result from https://192.168.2.193:2379
member eb4241134de2fa54 is healthy: got healthy result from https://192.168.2.194:2379
member ff375cef98fa136e is healthy: got healthy result from https://192.168.2.190:2379
cluster is healthy
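
#Beyond cluster-health, a quick write/read smoke test using the same v2 etcdctl flags (a sketch; /test is an arbitrary key)
/opt/etcd/bin/etcdctl \
--ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
--endpoints="https://192.168.2.190:2379" \
set /test ok
/opt/etcd/bin/etcdctl \
--ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
--endpoints="https://192.168.2.190:2379" \
get /test
#both commands should print "ok"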

Configuring the apiserver certificates

#Generate the apiserver certificates
cd /tools/TLS/k8s

#Edit the hosts field of the request file to include every IP that needs to reach the apiserver; extra IPs can be listed now to ease future expansion
#Per the architecture, every IP that accesses the apiserver must be listed (both masters, both LB nodes and the VIP); worker node IPs are not needed
#Listing an IP here makes it trusted by the certificate (mind the punctuation: no comma after the last entry)
cat >server-csr.json<<'EOF'
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local",
      "192.168.2.190",
      "192.168.2.194",
      "192.168.2.195",
      "192.168.2.196"  #此处注意标点符号
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

#Run the script to generate the certificates
[root@k8s-master1 k8s]# ./generate_k8s_cert.sh 
2019/10/27 06:26:58 [INFO] generating a new CA key and certificate from CSR
2019/10/27 06:26:58 [INFO] generate received request
2019/10/27 06:26:58 [INFO] received CSR
2019/10/27 06:26:58 [INFO] generating key: rsa-2048
2019/10/27 06:26:59 [INFO] encoded CSR
2019/10/27 06:26:59 [INFO] signed certificate with serial number 54070594359511304821353690512799317132742042139
2019/10/27 06:26:59 [INFO] generate received request
2019/10/27 06:26:59 [INFO] received CSR
2019/10/27 06:26:59 [INFO] generating key: rsa-2048
2019/10/27 06:26:59 [INFO] encoded CSR
2019/10/27 06:26:59 [INFO] signed certificate with serial number 134268687628265938266510311981160449965866401005
2019/10/27 06:26:59 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2019/10/27 06:26:59 [INFO] generate received request
2019/10/27 06:26:59 [INFO] received CSR
2019/10/27 06:26:59 [INFO] generating key: rsa-2048
2019/10/27 06:27:00 [INFO] encoded CSR
2019/10/27 06:27:00 [INFO] signed certificate with serial number 291116848489049490408931444417414321070104977111
2019/10/27 06:27:00 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

#Verify
[root@k8s-master1 k8s]# ls *pem
ca-key.pem  ca.pem  kube-proxy-key.pem  kube-proxy.pem  server-key.pem  server.pem

Deploying the apiserver, controller-manager and scheduler

#Deploy the apiserver, controller-manager and scheduler
#Binary download: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md#v1161
#Upload the master package k8s-master.tar.gz
#Extract it
tar -xf k8s-master.tar.gz

#Copy the apiserver certificates into the package directory
cd /tools/kubernetes/ssl/
rm -f *
cp /tools/TLS/k8s/*.pem /tools/kubernetes/ssl
#Copy the software to /opt
cp -rf /tools/kubernetes /opt

#Copy the systemd unit files
cd /tools
cp kube-apiserver.service kube-controller-manager.service kube-scheduler.service /usr/lib/systemd/system

#Configure the IPs in kube-apiserver.conf: the etcd node IPs and this master's own IP
cat >/opt/kubernetes/cfg/kube-apiserver.conf<<'EOF'
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--etcd-servers=https://192.168.2.190:2379,https://192.168.2.192:2379,https://192.168.2.193:2379,https://192.168.2.194:2379 \
--bind-address=192.168.2.190 \
--secure-port=6443 \
--advertise-address=192.168.2.190 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
EOF

#Start and enable
systemctl start kube-apiserver
systemctl start kube-controller-manager
systemctl start kube-scheduler
systemctl enable kube-apiserver
systemctl enable kube-controller-manager
systemctl enable kube-scheduler

#Verify startup
[root@k8s-master1 ssl]# ls -l /opt/kubernetes/logs/
total 196
lrwxrwxrwx 1 root root    62 Oct 27 07:02 kube-apiserver.ERROR -> kube-apiserver.k8s-master1.root.log.ERROR.20191027-070201.2751  (error log)
lrwxrwxrwx 1 root root    61 Oct 27 07:01 kube-apiserver.INFO -> kube-apiserver.k8s-master1.root.log.INFO.20191027-070153.2751   (info log)
-rw-r--r-- 1 root root   415 Oct 27 07:02 kube-apiserver.k8s-master1.root.log.ERROR.20191027-070201.2751
-rw-r--r-- 1 root root 91395 Oct 27 07:02 kube-apiserver.k8s-master1.root.log.INFO.20191027-070153.2751
-rw-r--r-- 1 root root  1357 Oct 27 07:02 kube-apiserver.k8s-master1.root.log.WARNING.20191027-070156.2751
lrwxrwxrwx 1 root root    64 Oct 27 07:01 kube-apiserver.WARNING -> kube-apiserver.k8s-master1.root.log.WARNING.20191027-070156.2751 (warning log)

#Check the logs for errors
#apiserver log
cat /opt/kubernetes/logs/kube-apiserver.INFO
#Logs of the other components
tail -F /opt/kubernetes/logs/kube-controller-manager.INFO


#Move kubectl into the PATH for convenience
mv /opt/kubernetes/bin/kubectl /usr/local/bin/

#Check component status; if a component failed to run, the command errors out (the <unknown> values below are a known kubectl v1.16 display quirk, not a failure)
[root@k8s-master1 ssl]# kubectl get cs
NAME                 AGE
scheduler            <unknown>
controller-manager   <unknown>
etcd-3               <unknown>
etcd-1               <unknown>
etcd-2               <unknown>
etcd-0               <unknown>

#If it fails outright, the command returns nothing at all
[root@centos7 tools]# kubectl get cs  
#This error means kubectl cannot reach an apiserver: with no kubeconfig it falls back to localhost:8080, so check that kube-apiserver is running (it is not a certificate problem)
The connection to the server localhost:8080 was refused - did you specify the right host or port?


#Check the processes
[root@k8s-master1 ssl]# ps -ef|grep kube
root       2751      1  4 07:01 ?        00:00:24 /opt/kubernetes/bin/kube-apiserver --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --etcd-servers=https://192.168.2.190:2379,https://192.168.2.192:2379,https://192.168.2.193:2379,https://192.168.2.194:2379 --bind-address=192.168.2.190 --secure-port=6443 --advertise-address=192.168.2.190 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --enable-bootstrap-token-auth=true --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-32767 --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/opt/kubernetes/logs/k8s-audit.log
root       2773      1  1 07:02 ?        00:00:06 /opt/kubernetes/bin/kube-controller-manager --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect=true --master=127.0.0.1:8080 --address=127.0.0.1 --allocate-node-cidrs=true --cluster-cidr=10.244.0.0/16 --service-cluster-ip-range=10.0.0.0/24 --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s
root       2784      1  0 07:02 ?        00:00:04 /opt/kubernetes/bin/kube-scheduler --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect --master=127.0.0.1:8080 --address=127.0.0.1
root       2939   1967  0 07:10 pts/0    00:00:00 grep --color=auto kube

#Enable TLS Bootstrapping to issue kubelet certificates
Authorize kubelet TLS Bootstrapping:
# cat /opt/kubernetes/cfg/token.csv 
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
"system:node-bootstrapper" is what lets nodes use the bootstrap token to request certificates
Format: token,user,uid,group

#Grant the kubelet-bootstrap user the role:
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

The token can also be regenerated and replaced:
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
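
#A sketch tying the two together: write a fresh token into token.csv (afterwards restart kube-apiserver and put the same token into each node's bootstrap.kubeconfig)
TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat >/opt/kubernetes/cfg/token.csv <<EOF
${TOKEN},kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
systemctl restart kube-apiserver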

#Master configuration is complete

Deploying docker and the kubelet from binaries

Note: the --pod-infra-container-image=lizhenliang/pause-amd64:3.0 setting in the kubelet config on each node should be changed;
this image name must not be used in production.

After deploying the network, check the kubelet log for errors:
tail /opt/kubernetes/logs/kubelet.INFO -F

#On node1
#Upload the k8s-node.tar.gz package
#Extract
tar zxvf k8s-node.tar.gz
tar zxvf docker-18.09.6.tgz


#Move the docker binaries into /usr/bin so they are on the PATH
mv /tools/docker/* /usr/bin
mkdir /etc/docker -p
#Configure the registry mirror addresses
cat >/etc/docker/daemon.json<<'EOF'
{
"registry-mirrors": ["https://x4h14tji.mirror.aliyuncs.com","https://registry.docker-cn.com"],
"insecure-registries": ["192.168.2.194"]
}
EOF
mv docker.service /usr/lib/systemd/system
systemctl start docker
systemctl enable docker
systemctl restart docker.service


Installing the kubelet
#Move the configuration to /opt
cd /tools/
mv kubernetes/ /opt/
mv *service /usr/lib/systemd/system

#Install the kubelet
#Edit these 3 files: /opt/kubernetes/cfg/bootstrap.kubeconfig, /opt/kubernetes/cfg/kube-proxy.conf, /opt/kubernetes/cfg/kube-proxy.kubeconfig
#Change the IP in them to your own master's IP
#Files that need the change:
cd /opt/kubernetes/cfg
grep 192 *
[root@k8s-node1 cfg]# grep 192 *
bootstrap.kubeconfig:    server: https://192.168.31.61:6443
kube-proxy.kubeconfig:    server: https://192.168.31.61:6443
#Apply the change
sed -i.bak 's@https://192.168.31.61:6443@https://192.168.2.190:6443@g' /opt/kubernetes/cfg/bootstrap.kubeconfig
sed -i.bak 's@https://192.168.31.61:6443@https://192.168.2.190:6443@g' /opt/kubernetes/cfg/kube-proxy.kubeconfig

#Copy the certificates from the master to node1 (run on the master)
cd /tools/TLS/k8s/
scp ca.pem kube-proxy-key.pem kube-proxy.pem root@192.168.2.192:/opt/kubernetes/ssl

#Find which files contain the hostname to change
cd /opt/kubernetes/cfg/
grep hostname *
[root@k8s-node1 cfg]# grep hostname *
kubelet.conf:--hostname-override=k8s-node1 \
kube-proxy-config.yml:hostnameOverride: k8s-node1

#On nodes other than node1, change the node name in /opt/kubernetes/cfg/kubelet.conf and /opt/kubernetes/cfg/kube-proxy-config.yml
#Note: the name must match the hostname exactly; e.g. if the hostname is k8s-node2, this must be --hostname-override=k8s-node2
sed -i.bak 's@--hostname-override=k8s-node1@--hostname-override=k8s-node2@g' /opt/kubernetes/cfg/kubelet.conf
sed -i.bak 's@hostnameOverride: k8s-node1@hostnameOverride: k8s-node2@g' /opt/kubernetes/cfg/kube-proxy-config.yml

#Start and enable at boot
systemctl start kubelet
systemctl start kube-proxy
systemctl enable kubelet
systemctl enable kube-proxy
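
#The node2 and node3 sections below repeat the same edits. A sketch that parameterizes them (node_setup.sh is a hypothetical helper, not part of the packages; run it on each node with that node's hostname, e.g. bash node_setup.sh k8s-node2):
#!/bin/bash
# node_setup.sh <node-name>
NODE_NAME=$1
MASTER_URL=https://192.168.2.190:6443
# point the bootstrap and kube-proxy kubeconfigs at the master
sed -i.bak "s@https://192.168.31.61:6443@${MASTER_URL}@g" \
    /opt/kubernetes/cfg/bootstrap.kubeconfig /opt/kubernetes/cfg/kube-proxy.kubeconfig
# the override name must match this node's hostname
sed -i.bak "s@--hostname-override=k8s-node1@--hostname-override=${NODE_NAME}@g" /opt/kubernetes/cfg/kubelet.conf
sed -i.bak "s@hostnameOverride: k8s-node1@hostnameOverride: ${NODE_NAME}@g" /opt/kubernetes/cfg/kube-proxy-config.yml
systemctl enable kubelet kube-proxy
systemctl restart kubelet kube-proxy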


#On the master, approve the node's certificate request
[root@k8s-master1 k8s]#  kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-AZUSq4O0PlmrqFsVyHbaQqWEN4bHGuMnGmv5z0hewvw   5m28s   kubelet-bootstrap   Pending

#Approve using the CSR name found above: node-csr-AZUSq4O0PlmrqFsVyHbaQqWEN4bHGuMnGmv5z0hewvw
#Run:
[root@k8s-master1 k8s]# kubectl certificate approve node-csr-AZUSq4O0PlmrqFsVyHbaQqWEN4bHGuMnGmv5z0hewvw
certificatesigningrequest.certificates.k8s.io/node-csr-AZUSq4O0PlmrqFsVyHbaQqWEN4bHGuMnGmv5z0hewvw approved

#Check the result
[root@k8s-master1 k8s]#  kubectl get node
NAME        STATUS     ROLES    AGE   VERSION
k8s-node1   NotReady   <none>   4s    v1.16.0


#For reference, once all three nodes have joined and been approved, the CSRs and nodes look like this:
[root@k8s-master1 k8s]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-AZUSq4O0PlmrqFsVyHbaQqWEN4bHGuMnGmv5z0hewvw   22m     kubelet-bootstrap   Approved,Issued
node-csr-cETf-81MfYQgWJgQ2Rqz9ujCNioBk0cp-KiEkt07mSY   106s    kubelet-bootstrap   Approved,Issued
node-csr-gbYBNe7RxO9TNF1650mzVNcteA2SAqMFmOQHxsIsRB4   7m22s   kubelet-bootstrap   Approved,Issued


[root@k8s-master cfg]# kubectl get node
NAME        STATUS     ROLES    AGE   VERSION
k8s-node1   NotReady   <none>   43s   v1.16.0
k8s-node2   NotReady   <none>   44s   v1.16.0
k8s-node3   NotReady   <none>   44s   v1.16.0
[root@k8s-master cfg]#  kubectl get csr
NAME                                                   AGE    REQUESTOR           CONDITION
node-csr-1MOOuOzHlWXsg3Iz4gF05IyDR8dJWbeCDiKPHt5NzO8   115s   kubelet-bootstrap   Approved,Issued
node-csr-C1Md6d1DHqYjBONB6zEx2OnJIxJocMKqiGB55poeWCk   113s   kubelet-bootstrap   Approved,Issued
node-csr-PUXKAqFRHQCzz0nmDVZSBgPtdF2P30SqWN3bOhdU5hE   118s   kubelet-bootstrap   Approved,Issued
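
#When several nodes register at once, pending CSRs can also be approved in one pass instead of one by one (a sketch):
kubectl get csr | awk '/Pending/ {print $1}' | xargs -r kubectl certificate approve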

Installing on node2

#On node2
#Upload the k8s-node.tar.gz package
#Extract
tar zxvf k8s-node.tar.gz
tar zxvf docker-18.09.6.tgz

#Move the docker binaries into /usr/bin so they are on the PATH
mv /tools/docker/* /usr/bin
mkdir /etc/docker -p
#Configure the registry mirror addresses
cat >/etc/docker/daemon.json<<'EOF'
{
"registry-mirrors": ["https://x4h14tji.mirror.aliyuncs.com","https://registry.docker-cn.com"],
"insecure-registries": ["192.168.2.195"]
}
EOF
mv docker.service /usr/lib/systemd/system
systemctl start docker
systemctl enable docker
systemctl restart docker.service

#Install the kubelet
#Move the configuration to /opt
cd /tools/
mv kubernetes/ /opt/
mv *service /usr/lib/systemd/system

#Install the kubelet
#Edit these 3 files: /opt/kubernetes/cfg/bootstrap.kubeconfig, /opt/kubernetes/cfg/kube-proxy.conf, /opt/kubernetes/cfg/kube-proxy.kubeconfig
#Change the IP in them to your own master's IP
#Files that need the change:
cd /opt/kubernetes/cfg/
grep 192 *


#Apply the change
sed -i.bak 's@https://192.168.31.61:6443@https://192.168.2.190:6443@g' /opt/kubernetes/cfg/bootstrap.kubeconfig
sed -i.bak 's@https://192.168.31.61:6443@https://192.168.2.190:6443@g' /opt/kubernetes/cfg/kube-proxy.kubeconfig

#Copy the certificates from the master to node2 (run on the master)
cd /tools/TLS/k8s/
scp ca.pem kube-proxy-key.pem kube-proxy.pem root@192.168.2.193:/opt/kubernetes/ssl

#Find which files contain the hostname to change
cd /opt/kubernetes/cfg/
grep hostname *


#Change the node name in /opt/kubernetes/cfg/kubelet.conf and /opt/kubernetes/cfg/kube-proxy-config.yml to this node's name
sed -i.bak 's@--hostname-override=k8s-node1@--hostname-override=k8s-node2@g' /opt/kubernetes/cfg/kubelet.conf
sed -i.bak 's@hostnameOverride: k8s-node1@hostnameOverride: k8s-node2@g' /opt/kubernetes/cfg/kube-proxy-config.yml

#Start and enable at boot
systemctl start kubelet
systemctl start kube-proxy
systemctl enable kubelet
systemctl enable kube-proxy


#On the master, approve the node's certificate request
[root@k8s-master1 k8s]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-AZUSq4O0PlmrqFsVyHbaQqWEN4bHGuMnGmv5z0hewvw   15m   kubelet-bootstrap   Approved,Issued
node-csr-gbYBNe7RxO9TNF1650mzVNcteA2SAqMFmOQHxsIsRB4   6s    kubelet-bootstrap   Pending   (the new node2)


#Approve using the CSR name found above: node-csr-gbYBNe7RxO9TNF1650mzVNcteA2SAqMFmOQHxsIsRB4
#Run:
[root@k8s-master1 k8s]# kubectl certificate approve node-csr-gbYBNe7RxO9TNF1650mzVNcteA2SAqMFmOQHxsIsRB4
certificatesigningrequest.certificates.k8s.io/node-csr-gbYBNe7RxO9TNF1650mzVNcteA2SAqMFmOQHxsIsRB4 approved

#Check the result
[root@k8s-master1 k8s]# kubectl get node
NAME        STATUS     ROLES    AGE     VERSION
k8s-node1   NotReady   <none>   6m54s   v1.16.0
k8s-node2   NotReady   <none>   3s      v1.16.0

#Verify after approval
[root@k8s-master1 k8s]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-AZUSq4O0PlmrqFsVyHbaQqWEN4bHGuMnGmv5z0hewvw   22m     kubelet-bootstrap   Approved,Issued
node-csr-cETf-81MfYQgWJgQ2Rqz9ujCNioBk0cp-KiEkt07mSY   106s    kubelet-bootstrap   Approved,Issued
node-csr-gbYBNe7RxO9TNF1650mzVNcteA2SAqMFmOQHxsIsRB4   7m22s   kubelet-bootstrap   Approved,Issued

Installing on node3

#On node3
#Upload the k8s-node.tar.gz package
#Extract
tar zxvf k8s-node.tar.gz
tar zxvf docker-18.09.6.tgz


#Move the docker binaries into /usr/bin so they are on the PATH
mv /tools/docker/* /usr/bin
mkdir /etc/docker -p
#Configure the registry mirror addresses
cat >/etc/docker/daemon.json<<'EOF'
{
"registry-mirrors": ["https://x4h14tji.mirror.aliyuncs.com","https://registry.docker-cn.com"],
"insecure-registries": ["192.168.2.110"]
}
EOF
mv docker.service /usr/lib/systemd/system
systemctl start docker
systemctl enable docker
systemctl restart docker.service

#Install the kubelet
#Move the configuration to /opt
cd /tools/
mv kubernetes/ /opt/
mv *service /usr/lib/systemd/system

#Install the kubelet
#Edit these 3 files: /opt/kubernetes/cfg/bootstrap.kubeconfig, /opt/kubernetes/cfg/kube-proxy.conf, /opt/kubernetes/cfg/kube-proxy.kubeconfig
#Change the IP in them to your own master's IP
#Files that need the change:
cd /opt/kubernetes/cfg/
grep 192 *


#Apply the change
sed -i.bak 's@https://192.168.31.61:6443@https://192.168.2.190:6443@g' /opt/kubernetes/cfg/bootstrap.kubeconfig
sed -i.bak 's@https://192.168.31.61:6443@https://192.168.2.190:6443@g' /opt/kubernetes/cfg/kube-proxy.kubeconfig

#Copy the certificates from the master to node3 (run on the master)
cd /tools/TLS/k8s/
scp ca.pem kube-proxy-key.pem kube-proxy.pem root@192.168.2.194:/opt/kubernetes/ssl

#Find which files contain the hostname to change
cd /opt/kubernetes/cfg/
grep hostname *


#Change the node name in /opt/kubernetes/cfg/kubelet.conf and /opt/kubernetes/cfg/kube-proxy-config.yml to this node's name
sed -i.bak 's@--hostname-override=k8s-node1@--hostname-override=k8s-node3@g' /opt/kubernetes/cfg/kubelet.conf
sed -i.bak 's@hostnameOverride: k8s-node1@hostnameOverride: k8s-node3@g' /opt/kubernetes/cfg/kube-proxy-config.yml

#Start and enable at boot
systemctl restart kubelet
systemctl restart kube-proxy
systemctl enable kubelet
systemctl enable kube-proxy


#On the master, approve the node's certificate request
[root@k8s-master1 k8s]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-AZUSq4O0PlmrqFsVyHbaQqWEN4bHGuMnGmv5z0hewvw   20m     kubelet-bootstrap   Approved,Issued
node-csr-cETf-81MfYQgWJgQ2Rqz9ujCNioBk0cp-KiEkt07mSY   2s      kubelet-bootstrap   Pending   (the new node3)
node-csr-gbYBNe7RxO9TNF1650mzVNcteA2SAqMFmOQHxsIsRB4   5m38s   kubelet-bootstrap   Approved,Issued


#Approve using the CSR name found above: node-csr-cETf-81MfYQgWJgQ2Rqz9ujCNioBk0cp-KiEkt07mSY
#Run:
[root@k8s-master1 k8s]# kubectl certificate approve node-csr-cETf-81MfYQgWJgQ2Rqz9ujCNioBk0cp-KiEkt07mSY
certificatesigningrequest.certificates.k8s.io/node-csr-cETf-81MfYQgWJgQ2Rqz9ujCNioBk0cp-KiEkt07mSY approved



#Check the result
[root@k8s-master1 k8s]# kubectl get node
NAME        STATUS     ROLES    AGE     VERSION
k8s-node1   NotReady   <none>   13m     v1.16.0
k8s-node2   NotReady   <none>   6m38s   v1.16.0
k8s-node3   NotReady   <none>   22s     v1.16.0

#Verify after approval
[root@k8s-master1 k8s]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-AZUSq4O0PlmrqFsVyHbaQqWEN4bHGuMnGmv5z0hewvw   22m     kubelet-bootstrap   Approved,Issued
node-csr-cETf-81MfYQgWJgQ2Rqz9ujCNioBk0cp-KiEkt07mSY   106s    kubelet-bootstrap   Approved,Issued
node-csr-gbYBNe7RxO9TNF1650mzVNcteA2SAqMFmOQHxsIsRB4   7m22s   kubelet-bootstrap   Approved,Issued

Installing the flannel network for Kubernetes

This step brings the nodes, currently NotReady on the master, to Ready
[root@k8s-master1 k8s]# kubectl get node
NAME        STATUS     ROLES    AGE     VERSION
k8s-node1   NotReady   <none>   13m     v1.16.0
k8s-node2   NotReady   <none>   6m38s   v1.16.0
k8s-node3   NotReady   <none>   22s     v1.16.0


#Create the directories on node1, node2 and node3
mkdir /opt/cni/bin /etc/cni/net.d -p

#Extract on node1, node2 and node3
cd /tools/
tar zxvf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin


#Make sure CNI is enabled (--network-plugin=cni)
[root@k8s-node1 tools]# cat /opt/kubernetes/cfg/kubelet.conf
KUBELET_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--hostname-override=k8s-node3 \
--network-plugin=cni \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet-config.yml \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"

#On the master, create a directory to hold the yaml files
mkdir /yaml -p
cd /yaml

#On the master
#Upload kube-flannel.yaml
rz -y kube-flannel.yaml

#If the kube-flannel.yaml file does not work, Calico can be used instead
kubectl apply -f https://docs.projectcalico.org/v3.9/manifests/calico.yaml

#Apply the yaml to create the pods
[root@k8s-master1 yaml]# kubectl apply -f kube-flannel.yaml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created

#Check, and wait for the images to finish downloading
[root@k8s-master1 yaml]# kubectl get pods -n kube-system
NAME                          READY   STATUS     RESTARTS   AGE
kube-flannel-ds-amd64-8956m   0/1     Init:0/1   0          29s
kube-flannel-ds-amd64-cdv4t   0/1     Init:0/1   0          29s
kube-flannel-ds-amd64-cp5nz   0/1     Init:0/1   0          29s


#Once the images are pulled
[root@k8s-master1 yaml]# kubectl get pods -n kube-system
NAME                          READY   STATUS    RESTARTS   AGE
kube-flannel-ds-amd64-8956m   1/1     Running   0          107s
kube-flannel-ds-amd64-cdv4t   1/1     Running   0          107s
kube-flannel-ds-amd64-cp5nz   1/1     Running   0          107s

#Next, upload apiserver-to-kubelet-rbac.yaml
#This authorizes the apiserver to access the kubelet; for security, the kubelet rejects anonymous access, so this authorization is required.
#Apply it to create the RBAC objects
[root@k8s-master1 yaml]# kubectl apply -f apiserver-to-kubelet-rbac.yaml
clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet created
clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver created


The nodes are now Ready:
[root@k8s-master1 yaml]# kubectl get node
NAME        STATUS   ROLES    AGE    VERSION
k8s-node1   Ready    <none>   106m   v1.16.0
k8s-node2   Ready    <none>   99m    v1.16.0
k8s-node3   Ready    <none>   93m    v1.16.0

#Create a test deployment
[root@k8s-master1 yaml]# kubectl create deployment web --image=nginx
deployment.apps/web created

#Check
[root@k8s-master1 yaml]# kubectl get pods -o wide
NAME                  READY   STATUS    RESTARTS   AGE   IP           NODE        NOMINATED NODE   READINESS GATES
web-d86c95cc9-pmtxr   1/1     Running   0          97s   10.244.2.2   k8s-node3   <none>           <none>

#Create a test service exposing port 80 on a random NodePort
[root@k8s-master1 yaml]# kubectl expose deployment web --port=80 --type=NodePort
service/web exposed

#How to view a flannel pod's logs
kubectl logs kube-flannel-ds-amd64-kjbl6 -n kube-system

#Check
[root@k8s-master1 yaml]# kubectl get pods,svc
NAME                      READY   STATUS    RESTARTS   AGE
pod/web-d86c95cc9-pmtxr   1/1     Running   0          4m5s

NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP        34h
service/web          NodePort    10.0.0.252   <none>        80:32533/TCP   63s
The exposed node port is 32533
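
#The NodePort can be verified with curl from any host that can reach a node (a sketch; the node IP and port 32533 are from this run and will differ per cluster)
curl -I http://192.168.2.192:32533
#expect HTTP/1.1 200 OK from nginx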

Browser test: open http://<node IP>:32533 in a browser; the nginx welcome page should appear (screenshots omitted)

Installing the Kubernetes dashboard UI

#Upload dashboard-adminuser.yaml and dashboard.yaml to deploy the UI
[root@k8s-master1 yaml]# ls
dashboard-adminuser.yaml  dashboard.yaml


#Deploy
[root@k8s-master1 yaml]# kubectl apply -f dashboard.yaml 
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

#Check
[root@k8s-master1 yaml]# kubectl get pods -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-566cddb686-nw7w2   1/1     Running   0          61s
kubernetes-dashboard-7b5bf5d559-qrc8w        1/1     Running   0          61s

#Confirm the access port: 30001
[root@k8s-master1 yaml]# kubectl get pods,svc -n kubernetes-dashboard
NAME                                             READY   STATUS    RESTARTS   AGE
pod/dashboard-metrics-scraper-566cddb686-nw7w2   1/1     Running   0          81s
pod/kubernetes-dashboard-7b5bf5d559-qrc8w        1/1     Running   0          81s

NAME                                TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
service/dashboard-metrics-scraper   ClusterIP   10.0.0.106   <none>        8000/TCP        81s
service/kubernetes-dashboard        NodePort    10.0.0.250   <none>        443:30001/TCP   82s


#Create the admin service account used to log in
[root@k8s-master1 yaml]# kubectl apply -f dashboard-adminuser.yaml 
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created

#Get the token
[root@k8s-master1 yaml]# kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-67kmg
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 70ad7177-f7f4-4c4d-97ee-72c6b3ba5864

Type:  kubernetes.io/service-account-token

Data
====
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjZ0YWhBLUhLZVpQal9YRDBYUUlXcjZsbk9MNlVzdmVIbi1HTENkSzN1djQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTY3a21nIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI3MGFkNzE3Ny1mN2Y0LTRjNGQtOTdlZS03MmM2YjNiYTU4NjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.D7K85SgCRTjRH9S6WVAWc9-zIGQ5j-kEXnI28aUpmgvBMQOVcbyTuqHFcpGNE1tm-HdYGL2uojkc4PcEEfzvkFd0dbdJr7Gwt7uy_XwSAh4IncYZAYzoVb7id_o6BIj9XtWuSRICbvhhbla1Ow8yoES09jq0nCVN8Ofs8OyrBtBxBo9VkZIjmukq13EBgud43uZ404X66r323oxUb6_GTX8V5kq1ztn_WyEdywKsnX5n59yI14tm528mFKRA_NxHSS4_dyfQeNObeVWc29LUd1cHzflZioMuOc47r045CnKHvlJvUL2clRj3sRNbX_CkFCd8m-7wzTtPaMg2KBSs5w
ca.crt:     1359 bytes


Access:
https://192.168.2.192:30001/#/login

Installing DNS

CoreDNS provides DNS resolution for services; once it is deployed, a service (and the pods behind it) can be reached by the service's name.
CoreDNS is the default DNS for current Kubernetes releases.
#To deploy DNS, upload bs.yaml and coredns.yaml
[root@k8s-master1 yaml]# ls
bs.yaml  coredns.yaml 

#Deploy
[root@k8s-master1 yaml]# kubectl apply -f coredns.yaml 
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created

#Check
[root@k8s-master1 yaml]# kubectl get pods -n kube-system
NAME                          READY   STATUS              RESTARTS   AGE
coredns-6d8cfdd59d-87b7p      0/1     ContainerCreating   0          40s
kube-flannel-ds-amd64-4cg4t   1/1     Running             0          16h
kube-flannel-ds-amd64-8phq6   1/1     Running             0          16h
kube-flannel-ds-amd64-r7lld   1/1     Running             0          16h
Still pulling the image

#Download complete
[root@k8s-master1 yaml]# kubectl get pods -n kube-system
NAME                          READY   STATUS    RESTARTS   AGE
coredns-6d8cfdd59d-7dfjz      1/1     Running   0          3m44s
kube-flannel-ds-amd64-4cg4t   1/1     Running   0          17h
kube-flannel-ds-amd64-8phq6   1/1     Running   0          17h
kube-flannel-ds-amd64-r7lld   1/1     Running   0          17h


#Create a test pod
[root@k8s-master1 yaml]# kubectl apply -f bs.yaml 
pod/busybox created

#Check
[root@k8s-master1 yaml]# kubectl get pods -o wide
NAME                  READY   STATUS    RESTARTS   AGE     IP           NODE        NOMINATED NODE   READINESS GATES
busybox               1/1     Running   0          5m59s   10.244.2.6   k8s-node3   <none>           <none>
web-d86c95cc9-pmtxr   1/1     Running   0          17h     10.244.2.2   k8s-node3   <none>           <none>


#With the DNS add-on installed, the web service can be pinged by name from inside a container
[root@k8s-master1 yaml]# kubectl exec -it busybox sh
/ # ping web
PING web (10.0.0.252): 56 data bytes
64 bytes from 10.0.0.252: seq=0 ttl=64 time=0.135 ms
64 bytes from 10.0.0.252: seq=1 ttl=64 time=0.158 ms
64 bytes from 10.0.0.252: seq=2 ttl=64 time=0.236 ms
64 bytes from 10.0.0.252: seq=3 ttl=64 time=0.127 ms
Pod-to-service connectivity by name works
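
#Name resolution itself can be checked the same way (a sketch; web and kubernetes are the existing service names here)
kubectl exec -it busybox -- nslookup web
kubectl exec -it busybox -- nslookup kubernetes
#both should resolve to the services' cluster IPs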

#The following error means hosts resolution is broken; reconfigure the hosts entries
Error from server: error dialing backend: dial tcp: lookup
#Add hosts entries
cat >/etc/hosts<<'EOF'
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.2.190 k8s-master1
192.168.2.191 k8s-master2
192.168.2.192 k8s-node1
192.168.2.193 k8s-node2
192.168.2.194 k8s-node3
192.168.2.195 k8s-LB01  
192.168.2.196 k8s-LB02   
EOF

Deploying the highly available k8s-master2

#Building the highly available k8s-master2
#Create the etcd directory on master2
mkdir -p /opt/etcd

#On master1, copy kubernetes to master2
scp -r /opt/kubernetes root@192.168.2.191:/opt
#On master1, copy the etcd certificates to master2
scp -r /opt/etcd/ssl root@192.168.2.191:/opt/etcd
#On master1, copy the systemd unit files
scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@192.168.2.191:/usr/lib/systemd/system
#On master1, copy the kubectl command
scp -r /usr/local/bin/kubectl root@192.168.2.191:/usr/local/bin/kubectl


#On master2, change only --bind-address and --advertise-address to master2's own IP; nothing else needs to change
cat >/opt/kubernetes/cfg/kube-apiserver.conf<<'EOF'
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--etcd-servers=https://192.168.2.190:2379,https://192.168.2.192:2379,https://192.168.2.193:2379,https://192.168.2.194:2379 \
--bind-address=192.168.2.191 \
--secure-port=6443 \
--advertise-address=192.168.2.191 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
EOF

#Start and enable at boot
systemctl start kube-apiserver
systemctl start kube-controller-manager
systemctl start kube-scheduler
systemctl enable kube-apiserver
systemctl enable kube-controller-manager
systemctl enable kube-scheduler

#Check
[root@k8s-master2 ~]# ps -ef|grep kube
root       8183      1 13 15:06 ?        00:00:02 /opt/kubernetes/bin/kube-apiserver --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --etcd-servers=https://192.168.2.191:2379,https://192.168.2.192:2379,https://192.168.2.193:2379,https://192.168.2.194:2379 --bind-address=192.168.2.191 --secure-port=6443 --advertise-address=192.168.2.191 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --enable-bootstrap-token-auth=true --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-32767 --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/opt/kubernetes/logs/k8s-audit.log
root       8194      1 11 15:06 ?        00:00:02 /opt/kubernetes/bin/kube-controller-manager --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect=true --master=127.0.0.1:8080 --address=127.0.0.1 --allocate-node-cidrs=true --cluster-cidr=10.244.0.0/16 --service-cluster-ip-range=10.0.0.0/24 --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s
root       8204      1  9 15:06 ?        00:00:02 /opt/kubernetes/bin/kube-scheduler --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect --master=127.0.0.1:8080 --address=127.0.0.1
root       8278   8043  0 15:06 pts/0    00:00:00 grep --color=auto kube

#Check the nodes
[root@k8s-master2 cfg]# kubectl get node
NAME        STATUS   ROLES    AGE   VERSION
k8s-node1   Ready    <none>   23h   v1.16.0
k8s-node2   Ready    <none>   23h   v1.16.0
k8s-node3   Ready    <none>   23h   v1.16.0
#Check the pods
[root@k8s-master2 cfg]# kubectl get pods
NAME                  READY   STATUS    RESTARTS   AGE
busybox               1/1     Running   4          4h48m
web-d86c95cc9-pmtxr   1/1     Running   0          21h

#Restart all Kubernetes components
systemctl restart kube-apiserver
systemctl restart kube-controller-manager
systemctl restart kube-scheduler

#Stop all Kubernetes components
systemctl stop kube-apiserver
systemctl stop kube-controller-manager
systemctl stop kube-scheduler

Deploying the load balancer (LB) nodes

#Install nginx (on both LB nodes)
rpm -vih http://nginx.org/packages/rhel/7/x86_64/RPMS/nginx-1.16.0-1.el7.ngx.x86_64.rpm

#Configure nginx.conf
cat >/etc/nginx/nginx.conf<<'EOF'
user  nginx;
worker_processes  4;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}

stream {

    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';

    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
                server 192.168.2.190:6443;
                server 192.168.2.191:6443;
            }
    
    server {
       listen 6443;
       proxy_pass k8s-apiserver;
    }
}


http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    include /etc/nginx/conf.d/*.conf;
}
EOF
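
#Before starting, the configuration can be syntax-checked (the stream block requires nginx built with the stream module, which the nginx.org package is expected to include)
nginx -t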

#Start nginx
systemctl start nginx
systemctl enable nginx


#Install keepalived
yum install keepalived -y

#Configure keepalived on the master LB (k8s-LB01)
cat >/etc/keepalived/keepalived.conf<<'EOF'
global_defs { 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER
} 

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state MASTER 
    interface ens192
    virtual_router_id 51 # VRRP router ID; must be unique per instance 
    priority 100    # priority; set to 90 on the backup server 
    advert_int 1    # VRRP heartbeat advertisement interval, default 1 second 
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        192.168.2.197/24
    } 
    track_script {
        check_nginx
    } 
}
EOF

#Configure keepalived on the backup LB (k8s-LB02)
cat >/etc/keepalived/keepalived.conf<<'EOF'
global_defs { 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_BACKUP
} 

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state BACKUP 
    interface ens192
    virtual_router_id 51 # VRRP router ID; must be unique per instance 
    priority 90    # priority; the backup uses 90 
    advert_int 1    # VRRP heartbeat advertisement interval, default 1 second 
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        192.168.2.197/24
    } 
    track_script {
        check_nginx
    } 
}
EOF

#Health-check script referenced by both keepalived configs
cat >/etc/keepalived/check_nginx.sh<<'EOF'
#!/bin/bash
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")

if [ "$count" -eq 0 ];then
    exit 1
else
    exit 0
fi
EOF
chmod +x /etc/keepalived/check_nginx.sh
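
#The check script can be exercised by hand before relying on keepalived (a sketch; with nginx running the exit code should be 0, with it stopped 1)
bash /etc/keepalived/check_nginx.sh; echo $?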


#Start keepalived and enable at boot
systemctl start keepalived
systemctl enable keepalived

#Verify the VIP is on the master LB
[root@k8s-lb01 ~]# ip a|grep ens192
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    inet 192.168.2.195/24 brd 192.168.2.255 scope global noprefixroute ens192
    inet 192.168.2.197/24 scope global secondary ens192
#The backup node
[root@k8s-lb02 keepalived]# ip a|grep ens192
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    inet 192.168.2.196/24 brd 192.168.2.255 scope global noprefixroute ens192


#Stop nginx on the master LB
systemctl stop nginx

#Check the master LB
[root@k8s-lb01 keepalived]# ip a|grep ens192
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    inet 192.168.2.195/24 brd 192.168.2.255 scope global noprefixroute ens192

#The VIP has successfully failed over to the backup node
[root@k8s-lb02 keepalived]# ip a|grep ens192
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    inet 192.168.2.196/24 brd 192.168.2.255 scope global noprefixroute ens192
    inet 192.168.2.197/24 scope global secondary ens192

#Restore nginx
[root@k8s-lb01 keepalived]# systemctl start nginx
#Check the VIP (it returns to LB01 because of its higher priority)
[root@k8s-lb01 keepalived]# ip a|grep ens192
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    inet 192.168.2.195/24 brd 192.168.2.255 scope global noprefixroute ens192
    inet 192.168.2.197/24 scope global secondary ens192

#Finally, change the address the nodes connect to from master1's IP to the VIP to complete master HA
cd /opt/kubernetes/cfg
grep 192 *
sed -i 's#192.168.2.190#192.168.2.197#g' *
#Result
node1
[root@k8s-node1 cfg]# grep 192 *
bootstrap.kubeconfig:    server: https://192.168.2.197:6443
kubelet.kubeconfig:    server: https://192.168.2.197:6443
kube-proxy.kubeconfig:    server: https://192.168.2.197:6443
node2
[root@k8s-node2 cfg]# grep 192 *
bootstrap.kubeconfig:    server: https://192.168.2.197:6443
kubelet.kubeconfig:    server: https://192.168.2.197:6443
kube-proxy.kubeconfig:    server: https://192.168.2.197:6443
node3
[root@k8s-node3 cfg]# grep 192 *
bootstrap.kubeconfig:    server: https://192.168.2.197:6443
kubelet.kubeconfig:    server: https://192.168.2.197:6443
kube-proxy.kubeconfig:    server: https://192.168.2.197:6443


#Restart on all nodes
systemctl restart kube-proxy
systemctl restart kubelet

#On LB01, watch the nginx log; 6 entries from the 3 nodes means it is working
[root@k8s-lb01 keepalived]# tail -F /var/log/nginx/k8s-access.log 
192.168.2.192 192.168.2.190:6443 - [29/Oct/2019:17:22:13 +0800] 200 1160
192.168.2.192 192.168.2.191:6443 - [29/Oct/2019:17:22:13 +0800] 200 1161
192.168.2.193 192.168.2.191:6443 - [29/Oct/2019:17:22:17 +0800] 200 1159
192.168.2.193 192.168.2.191:6443 - [29/Oct/2019:17:22:17 +0800] 200 1160
192.168.2.194 192.168.2.190:6443 - [29/Oct/2019:17:22:20 +0800] 200 1160
192.168.2.194 192.168.2.191:6443 - [29/Oct/2019:17:22:20 +0800] 200 1160

#Check from k8s-master1 and k8s-master2
[root@k8s-master1 ssl]# kubectl get node
NAME        STATUS     ROLES    AGE   VERSION
k8s-node1   NotReady   <none>   25h   v1.16.0
k8s-node2   Ready      <none>   25h   v1.16.0
k8s-node3   Ready      <none>   25h   v1.16.0

Where to find the token used below:
[root@k8s-master1 ssl]# cat /opt/kubernetes/cfg/token.csv 
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"

#Finally, test API access through the VIP
[root@k8s-node1 cfg]# curl -k --header "Authorization: Bearer c47ffb939f5ca36231d9e3121a252940" https://192.168.2.197:6443/version
{
  "major": "1",
  "minor": "16",
  "gitVersion": "v1.16.0",
  "gitCommit": "2bd9643cee5b3b3a5ecbd3af49d09018f0773c77",
  "gitTreeState": "clean",
  "buildDate": "2019-09-18T14:27:17Z",
  "goVersion": "go1.12.9",
  "compiler": "gc",
  "platform": "linux/amd64"
}

The nginx log keeps rolling, which confirms the access is going through the LB:
[root@k8s-lb01 keepalived]# tail -F /var/log/nginx/k8s-access.log 
192.168.2.192 192.168.2.191:6443 - [29/Oct/2019:17:49:21 +0800] 200 476
192.168.2.192 192.168.2.191:6443 - [29/Oct/2019:17:50:22 +0800] 200 476
192.168.2.192 192.168.2.191:6443 - [29/Oct/2019:17:50:22 +0800] 200 476
192.168.2.192 192.168.2.190:6443 - [29/Oct/2019:17:50:23 +0800] 200 476
192.168.2.192 192.168.2.190:6443 - [29/Oct/2019:17:50:18 +0800] 200 476
192.168.2.192 192.168.2.190:6443 - [29/Oct/2019:17:50:19 +0800] 200 476

Note: k8s-node1 shows NotReady above; this still needs to be resolved.

YYQ运维技术博客 (YYQ Ops Tech Blog): https://www.yeyouqing.top