Reprinted from: https://blog.csdn.net/qq_37950254/article/details/95204011
1. Installation environment preparation
Deployment node overview:
IP address | Hostname | CPU | Memory | Disk
---|---|---|---|---
192.168.1.108 | qas-k8s-master01 | 2 | 2G | 30G
192.168.1.165 | qas-k8s-node01 | 2 | 2G | 30G
192.168.1.215 | qas-k8s-node02 | 2 | 2G | 30G
2. Download the v1.15 packages
Release notes and download links: https://kubernetes.io/docs/setup/release/notes/
Alternatively, download from the Baidu Netdisk link (extraction code: op7d).
2.1 Download the tar.gz archive to the server and extract it into /root on the master node
```
[root@qas-k8s-master01 bin]# pwd
/root/kubernetes/server/bin
[root@qas-k8s-master01 bin]# ll
total 1570456
-rwxr-xr-x 1 root root  42793600 Apr  9 01:51 apiextensions-apiserver
-rwxr-xr-x 1 root root 100402880 Apr  9 01:51 cloud-controller-manager
-rw-r--r-- 1 root root         8 Apr  9 01:47 cloud-controller-manager.docker_tag
-rw-r--r-- 1 root root 144294400 Apr  9 01:47 cloud-controller-manager.tar
-rwxr-xr-x 1 root root 211183808 Apr  9 01:51 hyperkube
-rwxr-xr-x 1 root root  39587104 Apr  9 01:51 kubeadm
-rwxr-xr-x 1 root root 167554400 Apr  9 01:51 kube-apiserver
-rw-r--r-- 1 root root         8 Apr  9 01:47 kube-apiserver.docker_tag
-rw-r--r-- 1 root root 211445760 Apr  9 01:47 kube-apiserver.tar
-rwxr-xr-x 1 root root 115579424 Apr  9 01:51 kube-controller-manager
-rw-r--r-- 1 root root         8 Apr  9 01:47 kube-controller-manager.docker_tag
-rw-r--r-- 1 root root 159471104 Apr  9 01:47 kube-controller-manager.tar
-rwxr-xr-x 1 root root  43115328 Apr  9 01:51 kubectl
-rwxr-xr-x 1 root root 127940544 Apr  9 01:51 kubelet
-rwxr-xr-x 1 root root  36685440 Apr  9 01:51 kube-proxy
-rw-r--r-- 1 root root         8 Apr  9 01:47 kube-proxy.docker_tag
-rw-r--r-- 1 root root  83982848 Apr  9 01:47 kube-proxy.tar
-rwxr-xr-x 1 root root  39258304 Apr  9 01:51 kube-scheduler
-rw-r--r-- 1 root root         8 Apr  9 01:47 kube-scheduler.docker_tag
-rw-r--r-- 1 root root  83149824 Apr  9 01:47 kube-scheduler.tar
-rwxr-xr-x 1 root root   1648224 Apr  9 01:51 mounter
[root@qas-k8s-master01 bin]#
```
II. Kubernetes installation and configuration
1. Initialize the environment
1.1 Disable the firewall and SELinux
```
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
vi /etc/selinux/config
SELINUX=disabled
```
1.2 Disable swap
```
swapoff -a && sysctl -w vm.swappiness=0
vi /etc/fstab
#UUID=7bff6243-324c-4587-b550-55dc34018ebf swap swap defaults 0 0
```
1.3 Set the kernel parameters Docker requires
```
cat << EOF | tee /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
```
If sysctl reports that the bridge parameters do not exist, load the kernel module first:
```
modprobe br_netfilter
```
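The modprobe above only lasts until the next reboot. A minimal sketch for loading the module persistently, assuming a systemd-based CentOS host (the file name is arbitrary):
```
# Load br_netfilter automatically at boot via systemd-modules-load
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
```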
1.4 Install Docker
```
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
mv docker-ce.repo /etc/yum.repos.d/
yum install docker-ce -y
systemctl start docker && systemctl enable docker
```
1.5 Create the installation directories
```
mkdir -p /k8s/etcd/{bin,cfg,ssl}
mkdir -p /k8s/kubernetes/{bin,cfg,ssl}
```
1.6 Install and configure CFSSL
```
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
```
1.7 Create the certificates
Create the etcd CA signing configuration (ca-config.json):
```
cat << EOF | tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
```
Create the etcd CA CSR file (ca-csr.json):
```
cat << EOF | tee ca-csr.json
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shenzhen",
      "ST": "Shenzhen"
    }
  ]
}
EOF
```
Create the etcd server CSR (server-csr.json):
```
cat << EOF | tee server-csr.json
{
  "CN": "etcd",
  "hosts": [
    "10.0.0.1",
    "192.168.1.108",
    "192.168.1.165",
    "192.168.1.215"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shenzhen",
      "ST": "Shenzhen"
    }
  ]
}
EOF
```
Generate the etcd CA certificate, private key, and server certificate:
```
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
```
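As an optional sanity check, not part of the original steps, cfssl-certinfo (installed in step 1.6) can print the hosts and validity of the certificate just issued:
```
# Inspect the generated etcd server certificate (hosts, expiry, issuer)
cfssl-certinfo -cert server.pem
```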
Create the Kubernetes CA certificate
```
cat << EOF | tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF

cat << EOF | tee ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shenzhen",
      "ST": "Shenzhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
```
Generate the CA certificate:
```
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
```
Generate the API server certificate
```
cat << EOF | tee server-csr.json
{
  "CN": "kubernetes",
  "hosts": [
    "192.168.1.108",
    "192.168.1.165",
    "192.168.1.215",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shenzhen",
      "ST": "Shenzhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
```
Generate the certificate:
```
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
```
Create the Kubernetes kube-proxy certificate
```
cat << EOF | tee kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shenzhen",
      "ST": "Shenzhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
```
1.8 SSH key authentication
```
# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:FQjjiRDp8IKGT+UDM+GbQLBzF3DqDJ+pKnMIcHGyO/o root@qas-k8s-master01
The key's randomart image is:
+---[RSA 2048]----+
|o.==o o. ..      |
|ooB+o+ o.  .     |
|B++@o o .        |
|=X**o .          |
|o=O. . S         |
|..+              |
|oo .             |
|* .              |
|o+E              |
+----[SHA256]-----+
# ssh-copy-id 192.168.1.165
# ssh-copy-id 192.168.1.215
```
2. Deploy etcd
Package download address:
Extract the installation files:
```
tar -xvf etcd-v3.3.10-linux-amd64.tar.gz
cd etcd-v3.3.10-linux-amd64/
cp etcd etcdctl /k8s/etcd/bin/
```
Create the etcd configuration file:
```
[root@qas-k8s-master01 ssl]# cat /k8s/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.1.108:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.108:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.108:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.108:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.1.108:2380,etcd02=https://192.168.1.165:2380,etcd03=https://192.168.1.215:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
```
Create the etcd systemd unit file:
```
[root@qas-k8s-master01 ssl]# cat /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/k8s/etcd/cfg/etcd
ExecStart=/k8s/etcd/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/k8s/etcd/ssl/server.pem \
--key-file=/k8s/etcd/ssl/server-key.pem \
--peer-cert-file=/k8s/etcd/ssl/server.pem \
--peer-key-file=/k8s/etcd/ssl/server-key.pem \
--trusted-ca-file=/k8s/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/k8s/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
```
Copy the certificate files:
```
cp ca*pem server*pem /k8s/etcd/ssl
```
Start the etcd service:
```
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
```
Check the etcd status:
```
[root@qas-k8s-master01 ssl]# systemctl status etcd
● etcd.service - Etcd Server
   Loaded: loaded (/usr/lib/systemd/system/etcd.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2019-07-09 15:55:58 CST; 1h 12min ago
 Main PID: 29830 (etcd)
    Tasks: 12
   Memory: 66.7M
   CGroup: /system.slice/etcd.service
           └─29830 /k8s/etcd/bin/etcd --name=etcd01 --data-dir=/var/lib/etcd/default.etcd --listen-peer-urls=https://192.168.1.10...

Jul 09 17:07:39 qas-k8s-master01 etcd[29830]: the clock difference against peer 2369b1ebd5f356e7 is too high [1.340843978s ...SAGE")
Jul 09 17:07:39 qas-k8s-master01 etcd[29830]: the clock difference against peer 2369b1ebd5f356e7 is too high [1.343013157s ...SHOT")
Jul 09 17:08:04 qas-k8s-master01 etcd[29830]: the clock difference against peer 67ad1178efd89256 is too high [1.287315582s ...SHOT")
Jul 09 17:08:04 qas-k8s-master01 etcd[29830]: the clock difference against peer 67ad1178efd89256 is too high [1.285244104s ...SAGE")
Jul 09 17:08:09 qas-k8s-master01 etcd[29830]: the clock difference against peer 2369b1ebd5f356e7 is too high [1.340602239s ...SAGE")
Jul 09 17:08:09 qas-k8s-master01 etcd[29830]: the clock difference against peer 2369b1ebd5f356e7 is too high [1.342891162s ...SHOT")
Jul 09 17:08:34 qas-k8s-master01 etcd[29830]: the clock difference against peer 67ad1178efd89256 is too high [1.285259365s ...SAGE")
Jul 09 17:08:34 qas-k8s-master01 etcd[29830]: the clock difference against peer 67ad1178efd89256 is too high [1.287337806s ...SHOT")
Jul 09 17:08:39 qas-k8s-master01 etcd[29830]: the clock difference against peer 2369b1ebd5f356e7 is too high [1.342999378s ...SHOT")
Jul 09 17:08:39 qas-k8s-master01 etcd[29830]: the clock difference against peer 2369b1ebd5f356e7 is too high [1.339802949s ...SAGE")
Hint: Some lines were ellipsized, use -l to show in full.
[root@qas-k8s-master01 ssl]#
```
Copy the unit file and configuration to node01 and node02:
```
cd /k8s/
scp -r etcd 192.168.1.165:/k8s/
scp -r etcd 192.168.1.215:/k8s/
scp /usr/lib/systemd/system/etcd.service 192.168.1.165:/usr/lib/systemd/system/etcd.service
scp /usr/lib/systemd/system/etcd.service 192.168.1.215:/usr/lib/systemd/system/etcd.service
```
Modify the configuration on node01:
```
[root@qas-k8s-node01 cfg]# cat /k8s/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.1.165:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.165:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.165:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.165:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.1.108:2380,etcd02=https://192.168.1.165:2380,etcd03=https://192.168.1.215:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
```
Modify the configuration on node02:
```
[root@qas-k8s-node02 bin]# cat /k8s/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.1.215:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.215:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.215:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.215:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.1.108:2380,etcd02=https://192.168.1.165:2380,etcd03=https://192.168.1.215:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
```
Verify that the cluster is running properly:
```
./etcdctl \
--ca-file=/k8s/etcd/ssl/ca.pem \
--cert-file=/k8s/etcd/ssl/server.pem \
--key-file=/k8s/etcd/ssl/server-key.pem \
--endpoints="https://192.168.1.108:2379,\
https://192.168.1.165:2379,\
https://192.168.1.215:2379" cluster-health
```
Note: when starting the etcd cluster, bring up at least two nodes at the same time; the cluster cannot start properly with only a single node.
Output like the following means the cluster is healthy:
```
member 5db3ea816863435 is healthy: got healthy result from https://192.168.1.108:2379
member 991b5845cecb31b is healthy: got healthy result from https://192.168.1.165:2379
member c67ee2780d64a0d4 is healthy: got healthy result from https://192.168.1.215:2379
cluster is healthy
```
3. Deploy the Flannel network
Write the cluster's Pod network segment into etcd:
```
cd /k8s/etcd/ssl/
/k8s/etcd/bin/etcdctl \
--ca-file=ca.pem --cert-file=server.pem \
--key-file=server-key.pem \
--endpoints="https://192.168.1.108:2379,\
https://192.168.1.165:2379,https://192.168.1.215:2379" \
set /coreos.com/network/config '{ "Network": "172.18.0.0/16", "Backend": {"Type": "vxlan"}}'
```
- the current flanneld version (v0.10.0) does not support the etcd v3 API, so the configuration key and network segment are written with the etcd v2 API;
- the Pod network segment written here (${CLUSTER_CIDR}) must be a /16 block and must match the --cluster-cidr parameter of kube-controller-manager; the write can be verified with the read-back check below.
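An optional read-back of the key just written, using the same etcd v2 API and TLS flags as above:
```
# Confirm the Flannel network config landed in etcd (v2 API)
/k8s/etcd/bin/etcdctl \
--ca-file=ca.pem --cert-file=server.pem \
--key-file=server-key.pem \
--endpoints="https://192.168.1.108:2379" \
get /coreos.com/network/config
```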
Extract and install:
```
tar -xvf flannel-v0.10.0-linux-amd64.tar.gz
mv flanneld mk-docker-opts.sh /k8s/kubernetes/bin/
```
Configure Flannel:
```
[root@qas-k8s-master01 ssl]# cat /k8s/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.1.108:2379,https://192.168.1.165:2379,https://192.168.1.215:2379 -etcd-cafile=/k8s/etcd/ssl/ca.pem -etcd-certfile=/k8s/etcd/ssl/server.pem -etcd-keyfile=/k8s/etcd/ssl/server-key.pem"
```
Create the flanneld systemd unit file:
```
[root@qas-k8s-master01 ssl]# cat /usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/k8s/kubernetes/cfg/flanneld
ExecStart=/k8s/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/k8s/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target
```
- the mk-docker-opts.sh script writes the Pod subnet assigned to flanneld into /run/flannel/subnet.env; when Docker starts later, it uses the environment variables in this file to configure the docker0 bridge (see the example content below);
- flanneld communicates with other nodes over the interface carrying the system's default route; on nodes with multiple interfaces (e.g. internal and public), the -iface parameter can specify the interface to use, for example -iface=eth0;
- flanneld requires root privileges to run.
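For reference, a sketch of what /run/flannel/subnet.env typically contains after flanneld and mk-docker-opts.sh have run; the subnet is assigned per node, and the values below are illustrative, borrowed from the node02 output later in this walkthrough:
```
# cat /run/flannel/subnet.env   (illustrative content)
DOCKER_NETWORK_OPTIONS=" --bip=172.18.88.1/24 --ip-masq=false --mtu=1450"
```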
Configure Docker to start on the Flannel-assigned subnet:
```
[root@qas-k8s-master01 ssl]# cat /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
```
Copy the flanneld systemd unit files and configuration to all nodes:
```
cd /k8s/
scp -r kubernetes 192.168.1.165:/k8s/
scp -r kubernetes 192.168.1.215:/k8s/
scp /k8s/kubernetes/cfg/flanneld 192.168.1.165:/k8s/kubernetes/cfg/flanneld
scp /k8s/kubernetes/cfg/flanneld 192.168.1.215:/k8s/kubernetes/cfg/flanneld
scp /usr/lib/systemd/system/docker.service 192.168.1.165:/usr/lib/systemd/system/docker.service
scp /usr/lib/systemd/system/docker.service 192.168.1.215:/usr/lib/systemd/system/docker.service
scp /usr/lib/systemd/system/flanneld.service 192.168.1.165:/usr/lib/systemd/system/flanneld.service
scp /usr/lib/systemd/system/flanneld.service 192.168.1.215:/usr/lib/systemd/system/flanneld.service
```
Start the services:
```
systemctl daemon-reload
systemctl start flanneld
systemctl enable flanneld
systemctl restart docker
```
Check the flanneld service on node01:
```
[root@qas-k8s-node01 cfg]# systemctl status flanneld
● flanneld.service - Flanneld overlay address etcd agent
   Loaded: loaded (/usr/lib/systemd/system/flanneld.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2019-07-09 14:51:52 +08; 2h 4min ago
 Main PID: 2191 (flanneld)
    Tasks: 10
   Memory: 14.8M
   CGroup: /system.slice/flanneld.service
           └─2191 /k8s/kubernetes/bin/flanneld --ip-masq --etcd-endpoints=https://192.168.1.108:2379,https://192.168.1.165:2379,h...

Jul 09 14:51:52 qas-k8s-node01 flanneld[2191]: I0709 14:51:52.577551  2191 iptables.go:167] Deleting iptables rule: ! -s ...RETURN
Jul 09 14:51:52 qas-k8s-node01 flanneld[2191]: I0709 14:51:52.578182  2191 iptables.go:167] Deleting iptables rule: ! -s ...UERADE
Jul 09 14:51:52 qas-k8s-node01 flanneld[2191]: I0709 14:51:52.578982  2191 iptables.go:167] Deleting iptables rule: -d 17...ACCEPT
Jul 09 14:51:52 qas-k8s-node01 flanneld[2191]: I0709 14:51:52.579680  2191 iptables.go:155] Adding iptables rule: -s 172....ACCEPT
Jul 09 14:51:52 qas-k8s-node01 flanneld[2191]: I0709 14:51:52.580578  2191 iptables.go:155] Adding iptables rule: -s 172....RETURN
Jul 09 14:51:52 qas-k8s-node01 flanneld[2191]: I0709 14:51:52.582778  2191 iptables.go:155] Adding iptables rule: -s 172....UERADE
Jul 09 14:51:52 qas-k8s-node01 flanneld[2191]: I0709 14:51:52.583542  2191 iptables.go:155] Adding iptables rule: -d 172....ACCEPT
Jul 09 14:51:52 qas-k8s-node01 flanneld[2191]: I0709 14:51:52.584722  2191 iptables.go:155] Adding iptables rule: ! -s 17...RETURN
Jul 09 14:51:52 qas-k8s-node01 flanneld[2191]: I0709 14:51:52.587207  2191 iptables.go:155] Adding iptables rule: ! -s 17...UERADE
Jul 09 14:51:52 qas-k8s-node01 systemd[1]: Started Flanneld overlay address etcd agent.
Hint: Some lines were ellipsized, use -l to show in full.
[root@qas-k8s-node01 cfg]#
```
Check the flanneld service on node02:
```
[root@qas-k8s-node02 bin]# systemctl status flanneld
● flanneld.service - Flanneld overlay address etcd agent
   Loaded: loaded (/usr/lib/systemd/system/flanneld.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2019-07-09 14:51:57 CST; 2h 5min ago
 Main PID: 12986 (flanneld)
    Tasks: 11
   Memory: 10.2M
   CGroup: /system.slice/flanneld.service
           └─12986 /k8s/kubernetes/bin/flanneld --ip-masq --etcd-endpoints=https://192.168.1.108:2379,https://192...

Jul 09 14:51:56 qas-k8s-node02 flanneld[12986]: I0709 14:51:56.992347 12986 iptables.go:167] Deleting iptab...CEPT
Jul 09 14:51:56 qas-k8s-node02 flanneld[12986]: I0709 14:51:56.992940 12986 iptables.go:155] Adding iptable...TURN
Jul 09 14:51:56 qas-k8s-node02 flanneld[12986]: I0709 14:51:56.994247 12986 iptables.go:155] Adding iptable...RADE
Jul 09 14:51:56 qas-k8s-node02 flanneld[12986]: I0709 14:51:56.995163 12986 iptables.go:167] Deleting iptab...CEPT
Jul 09 14:51:56 qas-k8s-node02 flanneld[12986]: I0709 14:51:56.995931 12986 main.go:429] Waiting for 22h59m...ease
Jul 09 14:51:57 qas-k8s-node02 flanneld[12986]: I0709 14:51:56.999655 12986 iptables.go:155] Adding iptable...TURN
Jul 09 14:51:57 qas-k8s-node02 flanneld[12986]: I0709 14:51:57.000668 12986 iptables.go:155] Adding iptable...CEPT
Jul 09 14:51:57 qas-k8s-node02 flanneld[12986]: I0709 14:51:57.004930 12986 iptables.go:155] Adding iptable...RADE
Jul 09 14:51:57 qas-k8s-node02 flanneld[12986]: I0709 14:51:57.005116 12986 iptables.go:155] Adding iptable...CEPT
Jul 09 14:51:57 qas-k8s-node02 systemd[1]: Started Flanneld overlay address etcd agent.
Hint: Some lines were ellipsized, use -l to show in full.
```
Check that the configuration took effect:
```
[root@qas-k8s-node02 bin]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 08:00:27:a0:69:83 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.215/24 brd 192.168.1.255 scope global noprefixroute enp0s3
       valid_lft forever preferred_lft forever
    inet6 fe80::1fd3:aade:96a5:e5b4/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:68:7c:ef:e8 brd ff:ff:ff:ff:ff:ff
    inet 172.18.88.1/24 brd 172.18.88.255 scope global docker0
       valid_lft forever preferred_lft forever
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
    link/ether ee:7c:89:a4:9e:20 brd ff:ff:ff:ff:ff:ff
    inet 172.18.88.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::ec7c:89ff:fea4:9e20/64 scope link
       valid_lft forever preferred_lft forever
[root@qas-k8s-node02 bin]#
```
4. Deploy the master node
The Kubernetes master node runs the following components:
- kube-apiserver
- kube-scheduler
- kube-controller-manager
kube-scheduler and kube-controller-manager can both run in cluster mode: leader election picks one working process while the other instances block on standby. Once the cluster is up, the current leader can be inspected as shown below.
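A hedged way to see which instance currently holds the leader lock; in v1.15 both components record it, by default, in the control-plane.alpha.kubernetes.io/leader annotation on an Endpoints object in kube-system:
```
# Inspect the leader-election records; look for the
# control-plane.alpha.kubernetes.io/leader annotation and its holderIdentity field
kubectl -n kube-system get endpoints kube-scheduler -o yaml
kubectl -n kube-system get endpoints kube-controller-manager -o yaml
```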
Extract and copy the binaries to the master node:
```
tar -xvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-scheduler kube-apiserver kube-controller-manager kubectl /k8s/kubernetes/bin/
```
Copy the certificates:
```
cp *pem /k8s/kubernetes/ssl/
```
Deploy the kube-apiserver component
Create the TLS bootstrapping token:
```
# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
2366a641f656a0a025abb4aabda4511b

vim /k8s/kubernetes/cfg/token.csv
2366a641f656a0a025abb4aabda4511b,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
```
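Equivalently, a small one-step sketch that generates the token and writes token.csv in the same format (token, user name, user ID, group):
```
# Generate a random bootstrap token and write the token file in one go
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /k8s/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
```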
Create the apiserver configuration file:
```
[root@qas-k8s-master01 ssl]# cat /k8s/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.1.108:2379,https://192.168.1.165:2379,https://192.168.1.215:2379 \
--bind-address=192.168.1.108 \
--secure-port=6443 \
--advertise-address=192.168.1.108 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/k8s/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/k8s/kubernetes/ssl/server.pem \
--tls-private-key-file=/k8s/kubernetes/ssl/server-key.pem \
--client-ca-file=/k8s/kubernetes/ssl/ca.pem \
--service-account-key-file=/k8s/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/k8s/etcd/ssl/ca.pem \
--etcd-certfile=/k8s/etcd/ssl/server.pem \
--etcd-keyfile=/k8s/etcd/ssl/server-key.pem"
```
Create the kube-apiserver systemd unit file:
```
[root@qas-k8s-master01 ssl]# cat /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/k8s/kubernetes/cfg/kube-apiserver
ExecStart=/k8s/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
```
Start the service:
```
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
```
Check whether the apiserver is running:
```
[root@qas-k8s-master01 ssl]# systemctl status kube-apiserver
● kube-apiserver.service - Kubernetes API Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2019-07-09 15:56:17 CST; 1h 5min ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 29887 (kube-apiserver)
    Tasks: 11
   Memory: 294.0M
   CGroup: /system.slice/kube-apiserver.service
           └─29887 /k8s/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.1.108:2379,https://...

Jul 09 17:01:24 qas-k8s-master01 kube-apiserver[29887]: I0709 17:01:24.393003 29887 wrap.go:47] GET /api/v1/namespaces/def...6070]
Jul 09 17:01:24 qas-k8s-master01 kube-apiserver[29887]: I0709 17:01:24.429849 29887 wrap.go:47] GET /api/v1/namespaces/kub...0182]
Jul 09 17:01:24 qas-k8s-master01 kube-apiserver[29887]: I0709 17:01:24.433622 29887 wrap.go:47] PUT /api/v1/namespaces/kub...0182]
Jul 09 17:01:25 qas-k8s-master01 kube-apiserver[29887]: I0709 17:01:25.392111 29887 wrap.go:47] GET /api/v1/namespaces/kub...1694]
Jul 09 17:01:25 qas-k8s-master01 kube-apiserver[29887]: I0709 17:01:25.399618 29887 wrap.go:47] PUT /api/v1/namespaces/kub...1694]
Jul 09 17:01:26 qas-k8s-master01 kube-apiserver[29887]: I0709 17:01:26.270688 29887 wrap.go:47] GET /apis/coordination.k8s...3914]
Jul 09 17:01:26 qas-k8s-master01 kube-apiserver[29887]: I0709 17:01:26.275799 29887 wrap.go:47] PUT /apis/coordination.k8s...3914]
Jul 09 17:01:26 qas-k8s-master01 kube-apiserver[29887]: I0709 17:01:26.283494 29887 wrap.go:47] GET /api/v1/nodes: (2.0970...0182]
Jul 09 17:01:26 qas-k8s-master01 kube-apiserver[29887]: I0709 17:01:26.461482 29887 wrap.go:47] GET /api/v1/namespaces/kub...0182]
Jul 09 17:01:26 qas-k8s-master01 kube-apiserver[29887]: I0709 17:01:26.466095 29887 wrap.go:47] PUT /api/v1/namespaces/kub...0182]
Hint: Some lines were ellipsized, use -l to show in full.
[root@qas-k8s-master01 ssl]#
```
Deploy kube-scheduler
Create the kube-scheduler configuration file:
```
[root@qas-k8s-master01 ssl]# cat /k8s/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect"
```
Create the kube-scheduler systemd unit file:
```
[root@qas-k8s-master01 ssl]# cat /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/k8s/kubernetes/cfg/kube-scheduler
ExecStart=/k8s/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
```
Start the service:
```
systemctl daemon-reload
systemctl enable kube-scheduler.service
systemctl restart kube-scheduler.service
```
Check whether kube-scheduler is running:
```
[root@qas-k8s-master01 ssl]# systemctl status kube-scheduler.service
● kube-scheduler.service - Kubernetes Scheduler
   Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2019-07-09 15:56:35 CST; 1h 7min ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 29993 (kube-scheduler)
    Tasks: 10
   Memory: 43.1M
   CGroup: /system.slice/kube-scheduler.service
           └─29993 /k8s/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect

Jul 09 16:56:04 qas-k8s-master01 kube-scheduler[29993]: I0709 16:56:04.018734 29993 reflector.go:385] k8s.io/client-go/inf...eived
Jul 09 16:56:39 qas-k8s-master01 kube-scheduler[29993]: I0709 16:56:39.010296 29993 reflector.go:385] k8s.io/client-go/inf...eived
Jul 09 16:58:58 qas-k8s-master01 kube-scheduler[29993]: I0709 16:58:58.041986 29993 reflector.go:385] k8s.io/client-go/inf...eived
Jul 09 16:59:43 qas-k8s-master01 kube-scheduler[29993]: I0709 16:59:43.994356 29993 reflector.go:385] k8s.io/client-go/inf...eived
Jul 09 17:00:24 qas-k8s-master01 kube-scheduler[29993]: I0709 17:00:24.022221 29993 reflector.go:385] k8s.io/client-go/inf...eived
Jul 09 17:00:26 qas-k8s-master01 kube-scheduler[29993]: I0709 17:00:26.001668 29993 reflector.go:385] k8s.io/client-go/inf...eived
Jul 09 17:00:45 qas-k8s-master01 kube-scheduler[29993]: I0709 17:00:45.012870 29993 reflector.go:385] k8s.io/client-go/inf...eived
Jul 09 17:02:49 qas-k8s-master01 kube-scheduler[29993]: I0709 17:02:49.017949 29993 reflector.go:385] k8s.io/client-go/inf...eived
Jul 09 17:03:00 qas-k8s-master01 kube-scheduler[29993]: I0709 17:03:00.021114 29993 reflector.go:385] k8s.io/kubernetes/cm...eived
Jul 09 17:03:20 qas-k8s-master01 kube-scheduler[29993]: I0709 17:03:20.002870 29993 reflector.go:385] k8s.io/client-go/inf...eived
Hint: Some lines were ellipsized, use -l to show in full.
[root@qas-k8s-master01 ssl]#
```
Deploy kube-controller-manager
Create the kube-controller-manager configuration file:
```
[root@qas-k8s-master01 ssl]# cat /k8s/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=127.0.0.1 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/k8s/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/k8s/kubernetes/ssl/ca-key.pem \
--root-ca-file=/k8s/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/k8s/kubernetes/ssl/ca-key.pem"
[root@qas-k8s-master01 ssl]#
```
Create the kube-controller-manager systemd unit file:
```
[root@qas-k8s-master01 ssl]# cat /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/k8s/kubernetes/cfg/kube-controller-manager
ExecStart=/k8s/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
```
Start the service:
```
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager
```
Check whether kube-controller-manager is running:
```
[root@qas-k8s-master01 ssl]# systemctl status kube-controller-manager
● kube-controller-manager.service - Kubernetes Controller Manager
   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2019-07-09 15:56:45 CST; 1h 8min ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 30021 (kube-controller)
    Tasks: 9
   Memory: 132.4M
   CGroup: /system.slice/kube-controller-manager.service
           └─30021 /k8s/kubernetes/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=tr...

Jul 09 17:05:04 qas-k8s-master01 kube-controller-manager[30021]: I0709 17:05:04.041136 30021 request.go:530] Throttling req...=32s
Jul 09 17:05:04 qas-k8s-master01 kube-controller-manager[30021]: I0709 17:05:04.085887 30021 request.go:530] Throttling req...=32s
Jul 09 17:05:04 qas-k8s-master01 kube-controller-manager[30021]: I0709 17:05:04.961864 30021 reflector.go:243] k8s.io/clien...sync
Jul 09 17:05:05 qas-k8s-master01 kube-controller-manager[30021]: I0709 17:05:05.038589 30021 reflector.go:243] k8s.io/clien...sync
Jul 09 17:05:05 qas-k8s-master01 kube-controller-manager[30021]: I0709 17:05:05.421412 30021 reflector.go:243] k8s.io/clien...sync
Jul 09 17:05:05 qas-k8s-master01 kube-controller-manager[30021]: I0709 17:05:05.564957 30021 pv_controller_base.go:419] res...ller
Jul 09 17:05:06 qas-k8s-master01 kube-controller-manager[30021]: I0709 17:05:06.380376 30021 gc_controller.go:144] GC'ing orphaned
Jul 09 17:05:06 qas-k8s-master01 kube-controller-manager[30021]: I0709 17:05:06.383448 30021 gc_controller.go:173] GC'ing u...ing.
Jul 09 17:05:07 qas-k8s-master01 kube-controller-manager[30021]: I0709 17:05:07.419696 30021 cronjob_controller.go:128] Fou...jobs
Jul 09 17:05:07 qas-k8s-master01 kube-controller-manager[30021]: I0709 17:05:07.419707 30021 cronjob_controller.go:134] Fou...oups
Hint: Some lines were ellipsized, use -l to show in full.
[root@qas-k8s-master01 ssl]#
```
Add the binary path /k8s/kubernetes/bin to the PATH variable:
```
vim /etc/profile
PATH=/k8s/kubernetes/bin:$PATH:$HOME/bin
source /etc/profile
```
Check the master cluster status:
```
[root@qas-k8s-master01 ssl]# kubectl get cs,nodes
NAME                                 STATUS    MESSAGE             ERROR
componentstatus/controller-manager   Healthy   ok
componentstatus/scheduler            Healthy   ok
componentstatus/etcd-2               Healthy   {"health": "true"}
componentstatus/etcd-1               Healthy   {"health": "true"}
componentstatus/etcd-0               Healthy   {"health": "true"}

[root@qas-k8s-master01 ssl]#
```
5. Deploy the node components
The Kubernetes worker nodes run the following components:
- docker (deployed earlier)
- kubelet
- kube-proxy
Deploy the kubelet component
- kubelet runs on every worker node; it receives requests from kube-apiserver, manages Pod containers, and executes interactive commands such as exec, run, and logs;
- on startup, kubelet automatically registers node information with kube-apiserver, and its built-in cAdvisor collects and monitors the node's resource usage;
- for security, this document only opens the secure HTTPS port, which authenticates and authorizes requests and rejects unauthorized access (e.g. from apiserver or heapster).
Copy the kubelet and kube-proxy binaries to the node nodes:
```
cp kubelet kube-proxy /k8s/kubernetes/bin/
scp kubelet kube-proxy 192.168.1.165:/k8s/kubernetes/bin/
scp kubelet kube-proxy 192.168.1.215:/k8s/kubernetes/bin/
```
Create the kubelet bootstrap kubeconfig file
Create this script under /k8s/kubernetes/ssl:
```
vim environment.sh

# Create the kubelet bootstrapping kubeconfig
BOOTSTRAP_TOKEN=2366a641f656a0a025abb4aabda4511b
KUBE_APISERVER="https://192.168.1.108:6443"

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=./ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig

# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig

# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

#----------------------
# Create the kube-proxy kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=./ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=./kube-proxy.pem \
  --client-key=./kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
```
Run the script:
```
[root@qas-k8s-master01 ssl]# pwd
/k8s/kubernetes/ssl
[root@qas-k8s-master01 ssl]# ./environment.sh
```
Copy the bootstrap.kubeconfig and kube-proxy.kubeconfig files to all node nodes:
```
cp bootstrap.kubeconfig kube-proxy.kubeconfig /k8s/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig 192.168.1.165:/k8s/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig 192.168.1.215:/k8s/kubernetes/cfg/
```
Create the kubelet parameter configuration file and copy it to all node nodes
Create the kubelet parameter configuration template:
```
[root@qas-k8s-master01 ssl]# cat /k8s/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.1.108
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["10.0.0.2"]
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
```
Create the kubelet configuration file:
```
[root@qas-k8s-master01 ssl]# cat /k8s/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.1.108 \
--kubeconfig=/k8s/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/k8s/kubernetes/cfg/bootstrap.kubeconfig \
--config=/k8s/kubernetes/cfg/kubelet.config \
--cert-dir=/k8s/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
```
Create the kubelet systemd unit file:
```
[root@qas-k8s-master01 ssl]# cat /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/k8s/kubernetes/cfg/kubelet
ExecStart=/k8s/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
```
Bind the kubelet-bootstrap user to the system cluster role:
```
kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap
```
Start the service:
```
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
```
Approve the kubelet CSR requests
CSR requests can be approved manually or automatically. The automatic approach is recommended because, starting with v1.8, the certificates generated from approved CSRs can be rotated automatically. A sketch of the automatic setup follows; this walkthrough then approves manually.
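A minimal sketch of automatic approval using the built-in CSR-approving ClusterRoles; these two bindings are my assumption of how it could be wired up here and are not a step from the original walkthrough:
```
# Auto-approve the bootstrap user's initial node-client CSRs
kubectl create clusterrolebinding auto-approve-csrs-for-bootstrap \
  --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient \
  --user=kubelet-bootstrap

# Auto-approve renewal CSRs submitted by already-registered nodes
kubectl create clusterrolebinding auto-approve-renewals-for-nodes \
  --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient \
  --group=system:nodes
```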
Manually approve the CSR requests
List the CSRs:
```
# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-An1VRgJ7FEMMF_uyy6iPjyF5ahuLx6tJMbk2SMthwLs   39m     kubelet-bootstrap   Pending
node-csr-dWPIyP_vD1w5gBS4iTZ6V5SJwbrdMx05YyybmbW3U5s   5m5s    kubelet-bootstrap   Pending
# kubectl certificate approve node-csr-An1VRgJ7FEMMF_uyy6iPjyF5ahuLx6tJMbk2SMthwLs
certificatesigningrequest.certificates.k8s.io/node-csr-An1VRgJ7FEMMF_uyy6iPjyF5ahuLx6tJMbk2SMthwLs approved
# kubectl certificate approve node-csr-dWPIyP_vD1w5gBS4iTZ6V5SJwbrdMx05YyybmbW3U5s
certificatesigningrequest.certificates.k8s.io/node-csr-dWPIyP_vD1w5gBS4iTZ6V5SJwbrdMx05YyybmbW3U5s approved
# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-An1VRgJ7FEMMF_uyy6iPjyF5ahuLx6tJMbk2SMthwLs   41m     kubelet-bootstrap   Approved,Issued
node-csr-dWPIyP_vD1w5gBS4iTZ6V5SJwbrdMx05YyybmbW3U5s   7m32s   kubelet-bootstrap   Approved,Issued
```
- Requesting User: the user that submitted the CSR; kube-apiserver authenticates and authorizes it;
- Subject: the certificate information being requested;
- the certificate's CN is system:node:kube-node2 and its Organization is system:nodes; kube-apiserver's Node authorization mode grants the certificate the corresponding permissions (the issued certificate can be inspected as sketched below).
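One optional way to confirm the subject of the client certificate the kubelet received; kubelet-client-current.pem is the symlink the TLS bootstrap conventionally creates under --cert-dir, so treat the exact file name as an assumption:
```
# Print the subject of the kubelet's issued client certificate
openssl x509 -in /k8s/kubernetes/ssl/kubelet-client-current.pem -noout -subject
```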
Check the cluster status:
```
[root@qas-k8s-master01 bin]# kubectl get nodes
NAME            STATUS   ROLES    AGE   VERSION
192.168.1.108   Ready    master   68m   v1.15.0
192.168.1.165   Ready    node     68m   v1.15.0
192.168.1.215   Ready    node     68m   v1.15.0
[root@qas-k8s-master01 bin]#
```
Deploy the kube-proxy component
kube-proxy runs on all node nodes. It watches the apiserver for changes to Services and Endpoints and creates routing rules to load-balance service traffic.
Create the kube-proxy configuration file:
```
[root@qas-k8s-master01 ssl]# cat /k8s/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.1.108 \
--cluster-cidr=10.0.0.0/24 \
--kubeconfig=/k8s/kubernetes/cfg/kube-proxy.kubeconfig"
```
- bindAddress: the listen address;
- clientConnection.kubeconfig: the kubeconfig file used to connect to the apiserver;
- clusterCIDR: kube-proxy uses --cluster-cidr to tell cluster-internal traffic apart from external traffic; kube-proxy only SNATs requests that reach Service IPs when --cluster-cidr or --masquerade-all is set;
- hostnameOverride: must match the kubelet's value, otherwise kube-proxy will not find this Node after it starts and will not create any ipvs rules;
- mode: use ipvs mode (the configuration above does not actually set it, so kube-proxy falls back to the iptables default; see the sketch below).
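A hedged sketch for actually enabling ipvs mode, assuming the ipvs kernel modules are available on the node (module names as on CentOS 7 kernels):
```
# Load the kernel modules ipvs mode depends on
modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4
# Then append to KUBE_PROXY_OPTS in /k8s/kubernetes/cfg/kube-proxy:
#   --proxy-mode=ipvs \
#   --ipvs-scheduler=rr
```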
Create the kube-proxy systemd unit file:
```
[root@qas-k8s-master01 ssl]# cat /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/k8s/kubernetes/cfg/kube-proxy
ExecStart=/k8s/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
```
Start the service:
```
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
```
Check the kube-proxy status:
```
[root@qas-k8s-master01 ssl]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Proxy
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2019-07-09 15:56:51 CST; 55min ago
 Main PID: 30062 (kube-proxy)
    Tasks: 0
   Memory: 43.0M
   CGroup: /system.slice/kube-proxy.service
           ‣ 30062 /k8s/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=192.168.1.108 --cluster-cidr=10.0....

Jul 09 16:51:58 qas-k8s-master01 kube-proxy[30062]: I0709 16:51:58.710766 30062 config.go:132] Calling handler.OnEndpointsUpdate
Jul 09 16:51:59 qas-k8s-master01 kube-proxy[30062]: I0709 16:51:59.594996 30062 config.go:132] Calling handler.OnEndpointsUpdate
Jul 09 16:52:00 qas-k8s-master01 kube-proxy[30062]: I0709 16:52:00.717813 30062 config.go:132] Calling handler.OnEndpointsUpdate
Jul 09 16:52:01 qas-k8s-master01 kube-proxy[30062]: I0709 16:52:01.608540 30062 config.go:132] Calling handler.OnEndpointsUpdate
Jul 09 16:52:02 qas-k8s-master01 kube-proxy[30062]: I0709 16:52:02.734118 30062 config.go:132] Calling handler.OnEndpointsUpdate
Jul 09 16:52:03 qas-k8s-master01 kube-proxy[30062]: I0709 16:52:03.629360 30062 config.go:132] Calling handler.OnEndpointsUpdate
Jul 09 16:52:04 qas-k8s-master01 kube-proxy[30062]: I0709 16:52:04.742294 30062 config.go:132] Calling handler.OnEndpointsUpdate
Jul 09 16:52:05 qas-k8s-master01 kube-proxy[30062]: I0709 16:52:05.636544 30062 config.go:132] Calling handler.OnEndpointsUpdate
Jul 09 16:52:06 qas-k8s-master01 kube-proxy[30062]: I0709 16:52:06.752374 30062 config.go:132] Calling handler.OnEndpointsUpdate
Jul 09 16:52:07 qas-k8s-master01 kube-proxy[30062]: I0709 16:52:07.646475 30062 config.go:132] Calling handler.OnEndpointsUpdate
[root@qas-k8s-master01 ssl]#
```
Cluster status
Label the master and node nodes:
```
kubectl label node 192.168.1.108 node-role.kubernetes.io/master='master'
kubectl label node 192.168.1.165 node-role.kubernetes.io/node='node'
kubectl label node 192.168.1.215 node-role.kubernetes.io/node='node'
```
```
[root@qas-k8s-master01 bin]# kubectl get node,cs
NAME                 STATUS   ROLES    AGE   VERSION
node/192.168.1.108   Ready    master   69m   v1.15.0
node/192.168.1.165   Ready    node     70m   v1.15.0
node/192.168.1.215   Ready    node     69m   v1.15.0

NAME                                 STATUS    MESSAGE             ERROR
componentstatus/scheduler            Healthy   ok
componentstatus/controller-manager   Healthy   ok
componentstatus/etcd-0               Healthy   {"health": "true"}
componentstatus/etcd-2               Healthy   {"health": "true"}
componentstatus/etcd-1               Healthy   {"health": "true"}
[root@qas-k8s-master01 bin]#
```