Original post: http://www.maogx.win/posts/35/

Introduction

This article walks through installing and configuring a Kubernetes (k8s) cluster from the official binary release.

Lab Environment

Lab architecture
lab1: master 11.11.11.111
lab2: node 11.11.11.112
lab3: node 11.11.11.113
The Vagrantfile used in this lab
# -*- mode: ruby -*-
# vi: set ft=ruby :

ENV["LC_ALL"] = "en_US.UTF-8"

Vagrant.configure("2") do |config|
   (1..3).each do |i|
     config.vm.define "lab#{i}" do |node|
       node.vm.box = "centos-7.4-docker-17"
       node.ssh.insert_key = false
       node.vm.hostname = "lab#{i}"
       node.vm.network "private_network", ip: "11.11.11.11#{i}"
       node.vm.provision "shell",
         inline: "echo hello from node #{i}"
       node.vm.provider "virtualbox" do |v|
         v.cpus = 2
         v.customize ["modifyvm", :id, "--name", "lab#{i}", "--memory", "2048"]
       end
     end
   end
end

Installation

Configure system parameters

Run the following on all nodes

# Disable SELinux: setenforce takes effect immediately;
# editing /etc/sysconfig/selinux disables it permanently
sed -i 's/SELINUX=permissive/SELINUX=disabled/' /etc/sysconfig/selinux
setenforce 0

# Turn off swap immediately; to disable it permanently,
# comment out the swap lines in /etc/fstab
swapoff -a

# Enable forwarding
# Docker changed its default firewall rules in 1.13:
# the FORWARD chain of the iptables filter table is set to DROP,
# which breaks cross-node pod communication in a Kubernetes cluster

iptables -P FORWARD ACCEPT

# Configure forwarding-related kernel parameters, otherwise errors may occur later
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF
sysctl --system

# Load the ipvs kernel modules
# (they must be reloaded after each reboot; see the persistence sketch below)
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
lsmod | grep ip_vs
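
To avoid reloading these modules by hand after every reboot, a minimal sketch (relying on systemd-modules-load, which is standard on CentOS 7) is to list them in /etc/modules-load.d:

cat > /etc/modules-load.d/ipvs.conf <<EOF
# kernel modules loaded at boot for kube-proxy ipvs mode
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF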
Configure hosts resolution

Run the following on all nodes

cat >>/etc/hosts<<EOF
11.11.11.111 lab1
11.11.11.112 lab2
11.11.11.113 lab3
EOF
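
A quick sanity check that each hostname resolves and the private network from the Vagrantfile is reachable:

for h in lab1 lab2 lab3; do ping -c 1 $h; done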
Install and configure Docker

Kubernetes v1.11.0 recommends Docker v17.03; Docker v1.11, v1.12, and v1.13
also work, but newer Docker releases may not. In testing, 17.09 did not work
correctly: resource limits (memory and CPU) could not be applied.

Run the following on all nodes

Install Docker
# Remove any existing docker-ce, then install the pinned version
yum remove -y docker-ce docker-ce-selinux container-selinux
yum install -y --setopt=obsoletes=0 \
docker-ce-17.03.1.ce-1.el7.centos \
docker-ce-selinux-17.03.1.ce-1.el7.centos
Start Docker
systemctl enable docker && systemctl restart docker
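
To confirm the pinned version is the one actually running:

docker version --format '{{.Server.Version}}'   # expect 17.03.1-ce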
Install CFSSL

Run only on the lab1 node

# Download
# Baidu Cloud mirror: https://pan.baidu.com/s/1kgV40nwHy1IKnnLD6zH4cQ  password: alyj
mkdir -pv /server/software/k8s
cd /server/software/k8s
yum install -y wget
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

# Install
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl*
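
cfssl has a version subcommand, which is a quick way to confirm the binaries are on the PATH and executable:

cfssl version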
Configure the CA

Run only on the lab1 node

This CA configuration is reused later when configuring both etcd and Kubernetes.

mkdir -pv $HOME/ssl && cd $HOME/ssl
cat >ca-config.json<<EOF
{
 "signing": {
   "default": {
     "expiry": "87600h"
   },
   "profiles": {
     "kubernetes": {
       "usages": [
           "signing",
           "key encipherment",
           "server auth",
           "client auth"
       ],
       "expiry": "87600h"
     }
   }
 }
}
EOF
Configure the etcd cluster
Generate the etcd CA and certificates

Run only on the lab1 node

# Write the CSR config
cat >etcd-ca-csr.json<<EOF
{
 "CN": "etcd",
 "key": {
   "algo": "rsa",
   "size": 2048
 },
 "names": [
   {
     "C": "CN",
     "ST": "BeiJing",
     "L": "BeiJing",
     "O": "etcd",
     "OU": "Etcd Security"
   }
 ]
}
EOF

# Generate the etcd root CA
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca

cat >etcd-csr.json<<EOF
{
   "CN": "etcd",
   "hosts": [
     "127.0.0.1",
     "11.11.11.111",
     "11.11.11.112",
     "11.11.11.113"
   ],
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "etcd",
           "OU": "Etcd Security"
       }
   ]
}
EOF

# Generate the etcd server/peer certificate
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
mkdir -pv /etc/etcd/ssl
cp etcd*.pem /etc/etcd/ssl
ls /etc/etcd/ssl/etcd*.pem

# Copy to the other nodes
cd /etc/etcd && tar cvzf etcd-ssl.tgz ssl/
scp /etc/etcd/etcd-ssl.tgz lab2:~/
scp /etc/etcd/etcd-ssl.tgz lab3:~/
ssh lab2 'mkdir -pv /etc/etcd && tar xf etcd-ssl.tgz -C /etc/etcd && ls -l /etc/etcd/ssl'
ssh lab3 'mkdir -pv /etc/etcd && tar xf etcd-ssl.tgz -C /etc/etcd && ls -l /etc/etcd/ssl'
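
It is worth verifying that all three node IPs made it into the certificate's SAN list; one way, using the stock openssl CLI:

openssl x509 -in /etc/etcd/ssl/etcd.pem -noout -text | grep -A1 'Subject Alternative Name'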
Install and start etcd

Run the following on all nodes

# Install
# Baidu Cloud mirror: https://pan.baidu.com/s/1IVHyMqiJrlq9gmbF49Ly3Q  password: w5nx
mkdir -pv /server/software/k8s
cd /server/software/k8s
yum install -y wget
wget https://github.com/coreos/etcd/releases/download/v3.2.18/etcd-v3.2.18-linux-amd64.tar.gz
tar xf etcd-v3.2.18-linux-amd64.tar.gz
mv etcd-v3.2.18-linux-amd64 /usr/local/etcd-v3.2.18
ln -sv /usr/local/etcd-v3.2.18 /usr/local/etcd
cd /usr/local/etcd && mkdir bin && mv etcd etcdctl bin
/usr/local/etcd/bin/etcd --version
cd $HOME

# Configure the systemd unit
export ETCD_NAME=$(hostname)
export INTERNAL_IP=$(hostname -i | awk '{print $NF}')
export ETCD_CLUSTER='lab1=https://11.11.11.111:2380,lab2=https://11.11.11.112:2380,lab3=https://11.11.11.113:2380'
mkdir -pv /data/etcd
cat > /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/data/etcd
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/local/etcd/bin/etcd \\
 --name ${ETCD_NAME} \\
 --cert-file=/etc/etcd/ssl/etcd.pem \\
 --key-file=/etc/etcd/ssl/etcd-key.pem \\
 --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
 --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
 --trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \\
 --peer-trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \\
 --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
 --listen-peer-urls https://${INTERNAL_IP}:2380 \\
 --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
 --advertise-client-urls https://${INTERNAL_IP}:2379 \\
 --initial-cluster-token my-etcd-token \\
 --initial-cluster $ETCD_CLUSTER \\
 --initial-cluster-state new \\
 --data-dir=/data/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Start and enable at boot
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
Check the etcd cluster status
/usr/local/etcd/bin/etcdctl --endpoints "https://127.0.0.1:2379" \
 --ca-file=/etc/etcd/ssl/etcd-ca.pem \
 --cert-file=/etc/etcd/ssl/etcd.pem \
 --key-file=/etc/etcd/ssl/etcd-key.pem \
 cluster-health
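
etcdctl's member list subcommand accepts the same TLS flags and also shows which member is currently the leader:

/usr/local/etcd/bin/etcdctl --endpoints "https://127.0.0.1:2379" \
 --ca-file=/etc/etcd/ssl/etcd-ca.pem \
 --cert-file=/etc/etcd/ssl/etcd.pem \
 --key-file=/etc/etcd/ssl/etcd-key.pem \
 member list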
Generate the CA and certificates for the k8s cluster
# Change into the working directory
cd $HOME/ssl

# Configure the root CA
cat >ca-csr.json<<EOF
{
 "CN": "kubernetes",
 "key": {
   "algo": "rsa",
   "size": 2048
 },
 "names": [
   {
     "C": "CN",
     "ST": "BeiJing",
     "L": "BeiJing",
     "O": "k8s",
     "OU": "System"
   }
 ],
 "ca": {
    "expiry": "87600h"
 }
}
EOF

# Generate the root CA
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls ca*.pem

# Configure the kube-apiserver certificate
# 10.96.0.1 is the first IP of the service-cluster-ip-range passed to kube-apiserver
cat >kube-apiserver-csr.json<<EOF
{
   "CN": "kube-apiserver",
   "hosts": [
     "127.0.0.1",
     "11.11.11.111",
     "11.11.11.112",
     "11.11.11.113",
     "10.96.0.1",
     "kubernetes",
     "kubernetes.default",
     "kubernetes.default.svc",
     "kubernetes.default.svc.cluster",
     "kubernetes.default.svc.cluster.local"
   ],
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "k8s",
           "OU": "System"
       }
   ]
}
EOF

# Generate the kube-apiserver certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
ls kube-apiserver*.pem

# Configure the kube-controller-manager certificate
cat >kube-controller-manager-csr.json<<EOF
{
   "CN": "system:kube-controller-manager",
   "hosts": [
     "127.0.0.1",
     "11.11.11.111",
     "11.11.11.112",
     "11.11.11.113"
   ],
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "system:kube-controller-manager",
           "OU": "System"
       }
   ]
}
EOF

# Generate the kube-controller-manager certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
ls kube-controller-manager*.pem

# Configure the kube-scheduler certificate
cat >kube-scheduler-csr.json<<EOF
{
   "CN": "system:kube-scheduler",
   "hosts": [
     "127.0.0.1",
     "11.11.11.111",
     "11.11.11.112",
     "11.11.11.113"
   ],
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "system:kube-scheduler",
           "OU": "System"
       }
   ]
}
EOF

# Generate the kube-scheduler certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
ls kube-scheduler*.pem

# Configure the kube-proxy certificate
cat >kube-proxy-csr.json<<EOF
{
   "CN": "system:kube-proxy",
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "system:kube-proxy",
           "OU": "System"
       }
   ]
}
EOF

# Generate the kube-proxy certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
ls kube-proxy*.pem

# Configure the admin certificate
cat >admin-csr.json<<EOF
{
   "CN": "admin",
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "system:masters",
           "OU": "System"
       }
   ]
}
EOF

# Generate the admin certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
ls admin*.pem

# Copy the generated certificates
mkdir -pv /etc/kubernetes/pki
cp ca*.pem admin*.pem kube-proxy*.pem kube-scheduler*.pem kube-controller-manager*.pem kube-apiserver*.pem /etc/kubernetes/pki
cd /etc/kubernetes && tar cvzf pki.tgz pki/
scp /etc/kubernetes/pki.tgz lab2:~/
scp /etc/kubernetes/pki.tgz lab3:~/
ssh lab2 'mkdir -pv /etc/kubernetes && tar xf pki.tgz -C /etc/kubernetes && ls -l /etc/kubernetes/pki'
ssh lab3 'mkdir -pv /etc/kubernetes && tar xf pki.tgz -C /etc/kubernetes && ls -l /etc/kubernetes/pki'
cd $HOME
Install the Kubernetes binaries
# Download the files
# dl.k8s.io may be unreachable from some networks; if so, use the mirror below
# Baidu Cloud link: https://pan.baidu.com/s/1OI9Q4BRp7jNJUmsA8IAkbA  password: tnx5
cd /server/software/k8s
wget https://dl.k8s.io/v1.11.0/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
mkdir -pv /usr/local/kubernetes-v1.11.0/bin
cp kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet kubectl /usr/local/kubernetes-v1.11.0/bin
ln -sv /usr/local/kubernetes-v1.11.0 /usr/local/kubernetes
cp /usr/local/kubernetes/bin/kubectl /usr/local/bin/kubectl
kubectl version
cd $HOME
Generate the kubeconfig files
# Use TLS bootstrapping
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /etc/kubernetes/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

# Create the kubelet bootstrapping kubeconfig
cd /etc/kubernetes
export KUBE_APISERVER="https://11.11.11.111:6443"
kubectl config set-cluster kubernetes \
 --certificate-authority=/etc/kubernetes/pki/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=kubelet-bootstrap.conf
kubectl config set-credentials kubelet-bootstrap \
 --token=${BOOTSTRAP_TOKEN} \
 --kubeconfig=kubelet-bootstrap.conf
kubectl config set-context default \
 --cluster=kubernetes \
 --user=kubelet-bootstrap \
 --kubeconfig=kubelet-bootstrap.conf
kubectl config use-context default --kubeconfig=kubelet-bootstrap.conf

# Create the kube-controller-manager kubeconfig
export KUBE_APISERVER="https://11.11.11.111:6443"
kubectl config set-cluster kubernetes \
 --certificate-authority=/etc/kubernetes/pki/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=kube-controller-manager.conf
kubectl config set-credentials kube-controller-manager \
 --client-certificate=/etc/kubernetes/pki/kube-controller-manager.pem \
 --client-key=/etc/kubernetes/pki/kube-controller-manager-key.pem \
 --embed-certs=true \
 --kubeconfig=kube-controller-manager.conf
kubectl config set-context default \
 --cluster=kubernetes \
 --user=kube-controller-manager \
 --kubeconfig=kube-controller-manager.conf
kubectl config use-context default --kubeconfig=kube-controller-manager.conf

# Create the kube-scheduler kubeconfig
export KUBE_APISERVER="https://11.11.11.111:6443"
kubectl config set-cluster kubernetes \
 --certificate-authority=/etc/kubernetes/pki/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=kube-scheduler.conf
kubectl config set-credentials kube-scheduler \
 --client-certificate=/etc/kubernetes/pki/kube-scheduler.pem \
 --client-key=/etc/kubernetes/pki/kube-scheduler-key.pem \
 --embed-certs=true \
 --kubeconfig=kube-scheduler.conf
kubectl config set-context default \
 --cluster=kubernetes \
 --user=kube-scheduler \
 --kubeconfig=kube-scheduler.conf
kubectl config use-context default --kubeconfig=kube-scheduler.conf

# Create the kube-proxy kubeconfig
export KUBE_APISERVER="https://11.11.11.111:6443"
kubectl config set-cluster kubernetes \
 --certificate-authority=/etc/kubernetes/pki/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=kube-proxy.conf
kubectl config set-credentials kube-proxy \
 --client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
 --client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
 --embed-certs=true \
 --kubeconfig=kube-proxy.conf
kubectl config set-context default \
 --cluster=kubernetes \
 --user=kube-proxy \
 --kubeconfig=kube-proxy.conf
kubectl config use-context default --kubeconfig=kube-proxy.conf

# Create the admin kubeconfig
export KUBE_APISERVER="https://11.11.11.111:6443"
kubectl config set-cluster kubernetes \
 --certificate-authority=/etc/kubernetes/pki/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=admin.conf
kubectl config set-credentials admin \
 --client-certificate=/etc/kubernetes/pki/admin.pem \
 --client-key=/etc/kubernetes/pki/admin-key.pem \
 --embed-certs=true \
 --kubeconfig=admin.conf
kubectl config set-context default \
 --cluster=kubernetes \
 --user=admin \
 --kubeconfig=admin.conf
kubectl config use-context default --kubeconfig=admin.conf

# Copy kubelet-bootstrap.conf and kube-proxy.conf to the other nodes
scp kubelet-bootstrap.conf kube-proxy.conf lab2:/etc/kubernetes
scp kubelet-bootstrap.conf kube-proxy.conf lab3:/etc/kubernetes
cd $HOME
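
kubectl can render any of the generated files, which is a quick way to confirm the cluster, user, and context entries line up (embedded certificate data is shown as REDACTED):

kubectl config view --kubeconfig=/etc/kubernetes/admin.conf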
Configure the master components

Run only on the lab1 node

Configure and start kube-apiserver
# Copy the etcd certificates
mkdir -pv /etc/kubernetes/pki/etcd
cd /etc/etcd/ssl
cp etcd-ca.pem etcd-key.pem etcd.pem /etc/kubernetes/pki/etcd

# Generate the service account keypair
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
ls /etc/kubernetes/pki/sa.*
cd $HOME

# systemd unit file
cat >/etc/systemd/system/kube-apiserver.service<<EOF
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/local/kubernetes/bin/kube-apiserver \\
   \$KUBE_LOGTOSTDERR \\
   \$KUBE_LOG_LEVEL \\
   \$KUBE_ETCD_ARGS \\
   \$KUBE_API_ADDRESS \\
   \$KUBE_SERVICE_ADDRESSES \\
   \$KUBE_ADMISSION_CONTROL \\
   \$KUBE_APISERVER_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# This config file is shared by kube-apiserver, kube-controller-manager,
# kube-scheduler, kubelet, and kube-proxy
cat >/etc/kubernetes/config<<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=2"
EOF

cat >/etc/kubernetes/apiserver<<EOF
KUBE_API_ADDRESS="--advertise-address=11.11.11.111"
KUBE_ETCD_ARGS="--etcd-servers=https://11.11.11.111:2379,https://11.11.11.112:2379,https://11.11.11.113:2379 --etcd-cafile=/etc/kubernetes/pki/etcd/etcd-ca.pem --etcd-certfile=/etc/kubernetes/pki/etcd/etcd.pem --etcd-keyfile=/etc/kubernetes/pki/etcd/etcd-key.pem"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.96.0.0/12"
KUBE_ADMISSION_CONTROL="--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
KUBE_APISERVER_ARGS="--allow-privileged=true --authorization-mode=Node,RBAC --enable-bootstrap-token-auth=true --token-auth-file=/etc/kubernetes/token.csv --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem --client-ca-file=/etc/kubernetes/pki/ca.pem --service-account-key-file=/etc/kubernetes/pki/sa.pub --enable-swagger-ui=true --secure-port=6443 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --anonymous-auth=false --kubelet-client-certificate=/etc/kubernetes/pki/admin.pem --kubelet-client-key=/etc/kubernetes/pki/admin-key.pem"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver

# Test access in a browser
https://11.11.11.111:6443/swaggerapi
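
Since the apiserver runs with --anonymous-auth=false, an unauthenticated request will be rejected; a certificate-authenticated check with curl, using the admin client certificate generated earlier, is more reliable:

curl --cacert /etc/kubernetes/pki/ca.pem \
 --cert /etc/kubernetes/pki/admin.pem \
 --key /etc/kubernetes/pki/admin-key.pem \
 https://11.11.11.111:6443/healthz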
Configure and start kube-controller-manager
# systemd unit file
cat >/etc/systemd/system/kube-controller-manager.service<<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/local/kubernetes/bin/kube-controller-manager \\
   \$KUBE_LOGTOSTDERR \\
   \$KUBE_LOG_LEVEL \\
   \$KUBECONFIG \\
   \$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

cat >/etc/kubernetes/controller-manager<<EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-controller-manager.conf"
KUBE_CONTROLLER_MANAGER_ARGS="--address=127.0.0.1 --cluster-cidr=10.244.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem --service-account-private-key-file=/etc/kubernetes/pki/sa.key --root-ca-file=/etc/kubernetes/pki/ca.pem --leader-elect=true --use-service-account-credentials=true --node-monitor-grace-period=10s --pod-eviction-timeout=10s --allocate-node-cidrs=true --controllers=*,bootstrapsigner,tokencleaner"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
Configure and start kube-scheduler
cat >/etc/systemd/system/kube-scheduler.service<<EOF
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/local/kubernetes/bin/kube-scheduler \\
           \$KUBE_LOGTOSTDERR \\
           \$KUBE_LOG_LEVEL \\
           \$KUBECONFIG \\
           \$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

cat >/etc/kubernetes/scheduler<<EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-scheduler.conf"
KUBE_SCHEDULER_ARGS="--leader-elect=true --address=127.0.0.1"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
Configure kubectl
rm -rf $HOME/.kube
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get no
Check component status
kubectl get componentstatuses
Configure kubelet TLS bootstrap
# Bind the kubelet-bootstrap user from the bootstrap token file to the system:node-bootstrapper cluster role
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
Configure the node components

Run the following on all nodes

Install the CNI plugins
# Install the CNI plugins
# Baidu Cloud mirror: https://pan.baidu.com/s/1-PputObLs5jouXLnuBCI6Q  password: tzqm
cd /server/software/k8s
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
mkdir -pv /opt/cni/bin
tar xf cni-plugins-amd64-v0.7.1.tgz -C /opt/cni/bin
ls -l /opt/cni/bin
cd $HOME
Configure and start kubelet
# systemd unit file
mkdir -pv /data/kubelet
cat >/etc/systemd/system/kubelet.service<<EOF
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/data/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/kubernetes/bin/kubelet \\
           \$KUBE_LOGTOSTDERR \\
           \$KUBE_LOG_LEVEL \\
           \$KUBELET_CONFIG \\
           \$KUBELET_HOSTNAME \\
           \$KUBELET_POD_INFRA_CONTAINER \\
           \$KUBELET_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

cat >/etc/kubernetes/config<<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=2"
EOF

# Note: use each node's own IP here
cat >/etc/kubernetes/kubelet<<EOF
KUBELET_HOSTNAME="--hostname-override=11.11.11.111"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
KUBELET_CONFIG="--config=/etc/kubernetes/kubelet-config.yml"
KUBELET_ARGS="--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cert-dir=/etc/kubernetes/pki --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d"
EOF

# Note: use each node's own IP here
# lab1, lab2, and lab3 each use their own address (see the templating sketch below)
cat >/etc/kubernetes/kubelet-config.yml<<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 11.11.11.111
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
 - 10.96.0.10
clusterDomain: cluster.local.
hairpinMode: promiscuous-bridge
serializeImagePulls: false
authentication:
 x509:
   clientCAFile: /etc/kubernetes/pki/ca.pem
EOF

# Start
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
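
Rather than editing the address by hand on each node, it can be derived the same way the etcd section does; a sketch, assuming as there that the last address printed by hostname -i is the private-network IP:

export INTERNAL_IP=$(hostname -i | awk '{print $NF}')
sed -i "s/11.11.11.111/${INTERNAL_IP}/g" /etc/kubernetes/kubelet /etc/kubernetes/kubelet-config.yml
systemctl restart kubelet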
Approve the certificate requests
# Run the following on the node where kubectl is configured

# List pending CSRs
kubectl get csr

# Approve (substitute the CSR name from the output above)
kubectl certificate approve node-csr-Yiiv675wUCvQl3HH11jDr0cC9p3kbrXWrxvG3EjWGoE

# List the nodes
# At this point the node status is NotReady
kubectl get nodes

# On each node, check the generated files
ls -l /etc/kubernetes/kubelet.conf
ls -l /etc/kubernetes/pki/kubelet*
Configure and start kube-proxy
# Install dependencies
yum install -y conntrack-tools

# systemd unit file
cat >/etc/systemd/system/kube-proxy.service<<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/local/kubernetes/bin/kube-proxy \\
   \$KUBE_LOGTOSTDERR \\
   \$KUBE_LOG_LEVEL \\
   \$KUBECONFIG \\
   \$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Note: use each node's own IP here
# lab1, lab2, and lab3 each use their own address
# kube-proxy 1.11.0 has a bug that breaks ipvs mode on CentOS 7,
# so this lab uses iptables mode;
# later releases can use ipvs mode
cat >/etc/kubernetes/proxy<<EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-proxy.conf"
KUBE_PROXY_ARGS="--bind-address=11.11.11.111 --proxy-mode=iptables --hostname-override=11.11.11.111 --cluster-cidr=10.244.0.0/16"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
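
In iptables mode kube-proxy programs the nat table; once it is running, the KUBE-SERVICES chain should exist and fill up as Services are created:

iptables -t nat -L KUBE-SERVICES -n | head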
Set the cluster roles
# Label lab1 as master
kubectl label nodes 11.11.11.111 node-role.kubernetes.io/master=

# Label lab2 and lab3 as nodes
kubectl label nodes 11.11.11.112 node-role.kubernetes.io/node=
kubectl label nodes 11.11.11.113 node-role.kubernetes.io/node=

# Taint the master so that it normally does not receive workloads
kubectl taint nodes 11.11.11.111 node-role.kubernetes.io/master=true:NoSchedule

# List the nodes
# At this point the node status is still NotReady
kubectl get no
Configure the flannel network

Run on lab1

# Download the manifest
mkdir flannel && cd flannel
wget https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml

# Edit the config
# The Network here must match the pod network CIDR used above,
# i.e. the --cluster-cidr (10.244.0.0/16) passed to kube-controller-manager and kube-proxy
 net-conf.json: |
   {
     "Network": "10.244.0.0/16",
     "Backend": {
       "Type": "vxlan"
     }
   }

# Change the image
image: registry.cn-shanghai.aliyuncs.com/gcr-k8s/flannel:v0.10.0-amd64

# If a node has more than one network interface, see kubernetes issue #39701:
# https://github.com/kubernetes/kubernetes/issues/39701
# For now you need to pass --iface in kube-flannel.yml to name the interface on the
# cluster's internal network, otherwise DNS resolution may fail and containers may be
# unable to communicate. Download kube-flannel.yml locally and add
# --iface=<iface-name> to the flanneld arguments:
   containers:
     - name: kube-flannel
       image: registry.cn-shanghai.aliyuncs.com/gcr-k8s/flannel:v0.10.0-amd64
       command:
       - /opt/bin/flanneld
       args:
       - --ip-masq
       - --kube-subnet-mgr
       - --iface=eth1

# Apply
kubectl apply -f kube-flannel.yml

# Check
kubectl get pods -n kube-system
kubectl get svc -n kube-system

# Check the node status
# Once all the flannel pods are running, the nodes become Ready
kubectl get no
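
With the vxlan backend, flanneld creates a flannel.1 interface on each node; inspecting it and the subnet lease file confirms the overlay came up and which pod subnet the node was allocated:

ip -d link show flannel.1
cat /run/flannel/subnet.env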
Configure CoreDNS

Run on lab1

# Install
# 10.96.0.10 is the cluster DNS address configured in kubelet
cd $HOME && mkdir coredns && cd coredns
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
chmod +x deploy.sh
./deploy.sh -i 10.96.0.10 > coredns.yml
kubectl apply -f coredns.yml

# Check
kubectl get pods -n kube-system
kubectl get svc -n kube-system

Testing

Start
kubectl run nginx --replicas=2 --image=nginx:alpine --port=80
kubectl expose deployment nginx --type=NodePort --name=example-service-nodeport
kubectl expose deployment nginx --name=example-service
kubectl scale --replicas=3 deployment/nginx
Check status
kubectl get deploy -o wide
kubectl get pods -o wide
kubectl get svc -o wide
kubectl describe svc example-service
DNS resolution
kubectl run curl --image=radial/busyboxplus:curl -i --tty
nslookup kubernetes
nslookup example-service
curl example-service
Access test
# 10.107.91.153 is the ClusterIP obtained from kubectl get svc
curl "10.107.91.153:80"

# 32223 is the NodePort obtained from kubectl get svc
http://11.11.11.111:32223/
http://11.11.11.112:32223/
http://11.11.11.113:32223/
Cleanup
kubectl delete svc example-service example-service-nodeport
kubectl delete deploy nginx curl

References