1、资源准备
master 192.168.187.164
work 192.168.187.165
1.1 允许使用root登陆
用 vim 修改 /etc/ssh/sshd_config 文件，将 PermitRootLogin 改为 yes
# PermitRootLogin prohibit-password
PermitRootLogin yes
然后重启ssh即可
root@ubuntu16server:/home/yufeiliu/work# vim /etc/ssh/sshd_config
root@ubuntu16server:/home/yufeiliu/work#
root@ubuntu16server:/home/yufeiliu/work# /etc/init.d/ssh restart
[ ok ] Restarting ssh (via systemctl): ssh.service.
1.2 互相免密登陆
在master结点操作:
root@ubuntu16server:/home/yufeiliu# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:4Lp1TB/wNgVgpAhcvc1CkJ2KgE8Vu6cbV6cywm5sX7Q root@ubuntu16server
The key's randomart image is:
+---[RSA 2048]----+
|...o=* o+.. |
|o o..o=o . |
| + .ooo+. . |
| o .oo.oo . |
| . o.S * |
| . + = * o |
| .* = E . |
| .+B = |
| o+.. |
+----[SHA256]-----+
root@ubuntu16server:/home/yufeiliu#
root@ubuntu16server:/home/yufeiliu# ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.187.165
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host '192.168.187.165 (192.168.187.165)' can't be established.
ECDSA key fingerprint is SHA256:JGf+Uo2Ap1BOugBmfdlj8uNnsJw4acQTJzOPRCxuWpM.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@192.168.187.165's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@192.168.187.165'"
and check to make sure that only the key(s) you wanted were added.
这样master再访问work就不用输密码了，在work结点重新操作一次，便可以双向免密登陆
1.3 关闭swap
root@ubuntu16server:/home/yufeiliu# swapoff -a
注意：swapoff -a 仅在本次开机内生效，重启后 swap 会重新挂载；如需永久关闭，还应注释掉 /etc/fstab 中的 swap 条目。
2、生成证书
# cfssl CA signing policy: default certificate lifetime is 87600h (10 years).
# The "kubernetes" profile issues certs valid for BOTH server auth and client
# auth, so one certificate can be reused by etcd/apiserver and their clients.
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
# CSR for the self-signed cluster root CA (CN=kubernetes, 2048-bit RSA).
cat > ca-csr.json <<EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# Generate the self-signed root CA: produces ca.pem, ca-key.pem and ca.csr.
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
# Server certificate CSR. "hosts" must list every IP / DNS name the apiserver
# and etcd will be reached at: 10.0.0.1 is the first address of the service
# CIDR (the in-cluster "kubernetes" service), 192.168.187.164/165 are the
# master and worker node IPs.
# NOTE(review): "BeiJing" here vs "Beijing" in ca-csr.json is inconsistent but
# harmless — it only affects the certificate subject fields.
cat > server-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"192.168.187.164",
"192.168.187.165",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# Sign with the CA under the "kubernetes" profile: produces server.pem /
# server-key.pem, used by etcd and kube-apiserver.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
# Admin client certificate. O=system:masters maps to the pre-bound
# cluster-admin RBAC group, so this certificate grants full cluster access.
cat > admin-csr.json <<EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
# Produces admin.pem / admin-key.pem for kubectl client authentication.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
# kube-proxy client certificate. CN=system:kube-proxy matches the user name
# expected by the built-in system:node-proxier RBAC binding.
cat > kube-proxy-csr.json <<EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# Produces kube-proxy.pem / kube-proxy-key.pem, embedded later into
# kube-proxy.kubeconfig.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
3、安装etcd集群
这里我们只部署一台机器就可以了,不再部署集群,如果需要部署集群,请看k8s搭建(失败)
安装包:
[root@ubuntu16server k8s-master /home/yufeiliu/work/etcd]
# pwd
/home/yufeiliu/work/etcd
[root@ubuntu16server k8s-master /home/yufeiliu/work/etcd]
# ls
etcd etcdctl
为了操作简单,我们全部采用命令行启动的方式,不再使用service方式启动
创建数据目录:
[root@ubuntu16server k8s-master /home/yufeiliu/work/etcd]
# mkdir /home/yufeiliu/work/etcd/xuanchi.etcd
[root@ubuntu16server k8s-master /home/yufeiliu/work/etcd]
# ls
etcd etcdctl start.sh xuanchi.etcd
启动脚本:
[root@ubuntu16server k8s-master /home/yufeiliu/work/etcd]
# cat start.sh
nohup /home/yufeiliu/work/etcd/etcd --data-dir /home/yufeiliu/work/etcd/xuanchi.etcd --listen-client-urls https://192.168.187.164:2379 --advertise-client-urls https://192.168.187.164:2379 --initial-cluster-state new --client-cert-auth --cert-file=/home/yufeiliu/work/ssl/server.pem --key-file=/home/yufeiliu/work/ssl/server-key.pem --peer-client-cert-auth --peer-trusted-ca-file=/home/yufeiliu/work/ssl/ca.pem --trusted-ca-file=/home/yufeiliu/work/ssl/ca.pem --peer-cert-file=/home/yufeiliu/work/ssl/server.pem --peer-key-file=/home/yufeiliu/work/ssl/server-key.pem &
测试:
[root@ubuntu16server k8s-master /home/yufeiliu/work/etcd]
# /home/yufeiliu/work/etcd/etcdctl --ca-file=/home/yufeiliu/work/ssl/ca.pem --cert-file=/home/yufeiliu/work/ssl/server.pem --key-file=/home/yufeiliu/work/ssl/server-key.pem --endpoints=https://192.168.187.164:2379 set key value
value
[root@ubuntu16server k8s-master /home/yufeiliu/work/etcd]
# /home/yufeiliu/work/etcd/etcdctl --ca-file=/home/yufeiliu/work/ssl/ca.pem --cert-file=/home/yufeiliu/work/ssl/server.pem --key-file=/home/yufeiliu/work/ssl/server-key.pem --endpoints=https://192.168.187.164:2379 get key
value
4、安装flannel
4.1 安装包
[root@ubuntu16server k8s-master /home/yufeiliu/work/flannel]
# ls
flanneld mk-docker-opts.sh
4.2 初始化网络
/home/yufeiliu/work/etcd/etcdctl --ca-file=/home/yufeiliu/work/ssl/ca.pem --cert-file=/home/yufeiliu/work/ssl/server.pem --key-file=/home/yufeiliu/work/ssl/server-key.pem --endpoints=https://192.168.187.164:2379 set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
4.3 启动脚本
[root@ubuntu16server k8s-master /home/yufeiliu/work/flannel]
# cat start.sh
nohup /home/yufeiliu/work/flannel/flanneld --ip-masq --etcd-endpoints=https://192.168.187.164:2379 -etcd-cafile=/home/yufeiliu/work/ssl/ca.pem -etcd-certfile=/home/yufeiliu/work/ssl/server.pem -etcd-keyfile=/home/yufeiliu/work/ssl/server-key.pem &
/home/yufeiliu/work/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /home/yufeiliu/work/flannel/subnet.env
4.4 验证：flannel 启动成功后，ifconfig 中会出现 flannel.1 网卡（见下一节的输出）
5、docker安装
nohup dockerd --bip=172.17.13.1/24 --ip-masq=false --mtu=1450 &
验证:
# ifconfig
docker0 Link encap:Ethernet HWaddr 02:42:4f:8d:92:80
inet addr:172.17.53.1 Bcast:172.17.53.255 Mask:255.255.255.0
UP BROADCAST MULTICAST MTU:1500 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
ens33 Link encap:Ethernet HWaddr 00:0c:29:cb:a1:8e
inet addr:192.168.187.165 Bcast:192.168.187.255 Mask:255.255.255.0
inet6 addr: fe80::20c:29ff:fecb:a18e/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:150597 errors:0 dropped:0 overruns:0 frame:0
TX packets:11801 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:214347704 (214.3 MB) TX bytes:1209026 (1.2 MB)
flannel.1 Link encap:Ethernet HWaddr a2:d3:28:7a:76:28
inet addr:172.17.53.0 Bcast:0.0.0.0 Mask:255.255.255.255
inet6 addr: fe80::a0d3:28ff:fe7a:7628/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:8 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:168 (168.0 B) TX bytes:168 (168.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:164 errors:0 dropped:0 overruns:0 frame:0
TX packets:164 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1
RX bytes:12136 (12.1 KB) TX bytes:12136 (12.1 KB)
6、安装master
6.1 安装包
6.2 生成token
# cat k8s-token.sh
token=$(head -c 10 /dev/urandom |od -An -t x|tr -d ' ')
cat <<EOF >/home/yufeiliu/work/k8s/token.csv
${token},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
执行脚本生成:
[root@ubuntu16server k8s-master /home/yufeiliu/work/k8s]
# ls
k8s-token.sh kubernetes token.csv
6.2 kube-apiserver
[root@ubuntu16server k8s-master /home/yufeiliu/work/k8s]
# cat k8s-apiserver.sh
nohup /home/yufeiliu/work/k8s/kubernetes/server/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.187.164:2379 --bind-address=192.168.187.164 --secure-port=6443 --advertise-address=192.168.187.164 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/home/yufeiliu/work/k8s/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/home/yufeiliu/work/ssl/server.pem --tls-private-key-file=/home/yufeiliu/work/ssl/server-key.pem --client-ca-file=/home/yufeiliu/work/ssl/ca.pem --service-account-key-file=/home/yufeiliu/work/ssl/ca-key.pem --etcd-cafile=/home/yufeiliu/work/ssl/ca.pem --etcd-certfile=/home/yufeiliu/work/ssl/server.pem --etcd-keyfile=/home/yufeiliu/work/ssl/server-key.pem &
6.3 kube-controller-manager
[root@ubuntu16server k8s-master /home/yufeiliu/work/k8s]
# ls
k8s-apiserver.sh k8s-controller-manger.sh k8s-token.sh kubernetes nohup.out token.csv
[root@ubuntu16server k8s-master /home/yufeiliu/work/k8s]
# cat k8s-controller-manger.sh
nohup /home/yufeiliu/work/k8s/kubernetes/server/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true --address=127.0.0.1 --service-cluster-ip-range=10.0.0.0/24 --cluster-name=kubernetes --cluster-signing-cert-file=/home/yufeiliu/work/ssl/ca.pem --cluster-signing-key-file=/home/yufeiliu/work/ssl/ca-key.pem --root-ca-file=/home/yufeiliu/work/ssl/ca.pem --service-account-private-key-file=/home/yufeiliu/work/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s &
6.4 kube-scheduler
[root@ubuntu16server k8s-master /home/yufeiliu/work/k8s]
# ls
k8s-apiserver.sh k8s-controller-manger.sh k8s-scheduler.sh k8s-token.sh kubernetes nohup.out token.csv
[root@ubuntu16server k8s-master /home/yufeiliu/work/k8s]
# cat k8s-scheduler.sh
nohup /home/yufeiliu/work/k8s/kubernetes/server/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect &
6.5 配置
# Remove any stale binding first so this sequence can be re-run (the delete
# fails harmlessly on the first run).
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl delete clusterrolebinding kubelet-bootstrap
# Allow the bootstrap-token user to submit node certificate signing requests.
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
# WARNING(review): this grants cluster-admin to unauthenticated (anonymous)
# users — acceptable only in a throwaway lab environment, never in production.
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
# Build bootstrap.kubeconfig, used by kubelet for TLS bootstrapping.
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl config set-cluster kubernetes --certificate-authority=/home/yufeiliu/work/ssl/ca.pem --embed-certs=true --server=https://192.168.187.164:6443 --kubeconfig=bootstrap.kubeconfig
# The token must be the value generated into token.csv
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl config set-credentials kubelet-bootstrap --token=8f2e917dbbecc54800006c2e --kubeconfig=bootstrap.kubeconfig
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# Build kube-proxy.kubeconfig, authenticating with the kube-proxy client cert
# generated earlier.
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl config set-cluster kubernetes --certificate-authority=/home/yufeiliu/work/ssl/ca.pem --embed-certs=true --server=https://192.168.187.164:6443 --kubeconfig=kube-proxy.kubeconfig
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl config set-credentials kube-proxy --client-certificate=/home/yufeiliu/work/ssl/kube-proxy.pem --client-key=/home/yufeiliu/work/ssl/kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
可以得到:
[root@k8s-master k8s-master /root]
# ls
bootstrap.kubeconfig kube-proxy.kubeconfig
需要将这两个配置文件传到work上
6.6 安装worker
安装包:
root@k8s-work:/home/yufeiliu/work/k8s# ls
bootstrap.kubeconfig kubelet kube-proxy kube-proxy.kubeconfig
6.6.1 kubelet
配置文件
root@k8s-work:/home/yufeiliu/work/k8s# cat kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.187.165
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local.
failSwapOn: false
readOnlyPort: 10255
authentication:
anonymous:
enabled: true
注意：上面配置中 authentication.anonymous.enabled 设为 true 仅为简化实验环境，生产环境应禁用匿名访问。
kubelet启动脚本:
root@k8s-work:/home/yufeiliu/work/k8s# cat kubelet.sh
nohup /home/yufeiliu/work/k8s/kubelet --logtostderr=true --v=4 --address=192.168.187.165 --hostname-override=192.168.187.165 --kubeconfig=/home/yufeiliu/work/k8s/kubelet.kubeconfig --experimental-bootstrap-kubeconfig=/home/yufeiliu/work/k8s/bootstrap.kubeconfig --config=/home/yufeiliu/work/k8s/kubelet.config --cert-dir=/home/yufeiliu/work/ssl --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 &
6.6.2 kube-proxy
root@k8s-work:/home/yufeiliu/work/k8s# cat kube-proxy.sh
nohup /home/yufeiliu/work/k8s/kube-proxy --logtostderr=true --v=4 --hostname-override=192.168.187.165 --cluster-cidr=10.0.0.0/24 --proxy-mode=ipvs --kubeconfig=/home/yufeiliu/work/k8s/kube-proxy.kubeconfig &
在work结点上启动了两个进程:
root@k8s-work:/home/yufeiliu/work/k8s# ps -aux | grep kube
root 3207 0.1 4.3 441704 62384 pts/0 Sl 18:30 0:00 /home/yufeiliu/work/k8s/kubelet --logtostderr=true --v=4 --address=192.168.187.165 --hostname-override=192.168.187.165 --kubeconfig=/home/yufeiliu/work/k8s/kubelet.kubeconfig --experimental-bootstrap-kubeconfig=/home/yufeiliu/work/k8s/bootstrap.kubeconfig --config=/home/yufeiliu/work/k8s/kubelet.config --cert-dir=/home/yufeiliu/work/ssl --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
root 3510 1.7 2.1 138988 31340 pts/0 Sl 18:34 0:00 /home/yufeiliu/work/k8s/kube-proxy --logtostderr=true --v=4 --hostname-override=192.168.187.165 --cluster-cidr=10.0.0.0/24 --proxy-mode=ipvs --kubeconfig=/home/yufeiliu/work/k8s/kube-proxy.kubeconfig
7、最终配置
在master结点上:
[root@k8s-master k8s-master /root]
# /home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-jLWlE-QtWWw-2S_qy21kf1d6o_BZVR9U2SU0at7wmDQ 7m33s kubelet-bootstrap Pending
这里面说明有一个结点需要加入集群,下面我们通过该请求:
[root@k8s-master k8s-master /root]
# kubectl certificate approve node-csr-jLWlE-QtWWw-2S_qy21kf1d6o_BZVR9U2SU0at7wmDQ
kubectl: command not found
[root@k8s-master k8s-master /root]
# /home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl certificate approve node-csr-jLWlE-QtWWw-2S_qy21kf1d6o_BZVR9U2SU0at7wmDQ
certificatesigningrequest.certificates.k8s.io/node-csr-jLWlE-QtWWw-2S_qy21kf1d6o_BZVR9U2SU0at7wmDQ approved
[root@k8s-master k8s-master /root]
# /home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-jLWlE-QtWWw-2S_qy21kf1d6o_BZVR9U2SU0at7wmDQ 9m15s kubelet-bootstrap Approved,Issued
[root@k8s-master k8s-master /root]
# /home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.187.165 Ready <none> 16s v1.14.3
8、使用k8s部署服务
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl run nginx --image=nginx
/home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl expose deployment nginx --port=80 --target-port=80 --name=nginx-svc
# /home/yufeiliu/work/k8s/kubernetes/server/bin/kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-7db9fccd9b-2vzm9 1/1 Running 0 76m