I. Cluster Machines
linux-node1: 192.168.56.11 --- master
Services deployed: etcd kube-apiserver kube-controller-manager kube-scheduler docker
linux-node2: 192.168.56.12 --- node
Services deployed: etcd kubelet kube-proxy docker
linux-node3: 192.168.56.13 --- node
Services deployed: etcd kubelet kube-proxy docker
II. Environment Preparation
1. Set the hostnames (run each command on the corresponding host)
hostnamectl set-hostname linux-node1
hostnamectl set-hostname linux-node2
hostnamectl set-hostname linux-node3
2. Set up passwordless SSH login from the deployment node to all other nodes (including itself)
[root@linux-node1 ~]# ssh-keygen -t rsa
[root@linux-node1 ~]# ssh-copy-id linux-node1
[root@linux-node1 ~]# ssh-copy-id linux-node2
[root@linux-node1 ~]# ssh-copy-id linux-node3
3. Bind the hostnames in /etc/hosts (on all nodes)
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.56.11 linux-node1
192.168.56.12 linux-node2
192.168.56.13 linux-node3
EOF
4. Disable the firewall and SELinux
systemctl disable firewalld
systemctl stop firewalld
# Disable SELinux (change only the SELINUX value; leave SELINUXTYPE at its default)
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
setenforce 0
5. Other configuration
yum install -y ntpdate wget lrzsz vim net-tools
# Add a crontab entry for time synchronization (crontab -e):
1 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1
# Optional highlighted prompt: add to /etc/profile
export PS1="\[\e]0;\a\]\n\[\e[1;32m\]\[\e[1;33m\]\H\[\e[1;35m\]<\$(date +\"%Y-%m-%d %T\")> \[\e[32m\]\w\[\e[0m\]\n\u>\\$ "
# Set the timezone
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
# Fix slow SSH logins
sed -i "s/#UseDNS yes/UseDNS no/" /etc/ssh/sshd_config
sed -i "s/GSSAPIAuthentication yes/GSSAPIAuthentication no/" /etc/ssh/sshd_config
systemctl restart sshd.service
III. Installation
1. Install Docker (on all three machines)
Step 1: Use a domestic (Aliyun) Docker repository
cd /etc/yum.repos.d/
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Step 2: Install Docker
yum install -y docker-ce
Step 3: Start the Docker daemon
[root@linux-node2 ~]# systemctl start docker
[root@linux-node2 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
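Before moving on, it may be worth confirming the daemon is actually up; a minimal optional check:
systemctl is-active docker                    # should print "active"
docker info --format '{{.ServerVersion}}'     # prints the Docker daemon version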
2. Install Kubernetes
1. Prepare the deployment directories
[root@linux-node1 ~]# mkdir -p /opt/kubernetes/{cfg,bin,ssl,log}
# Add to the environment PATH
[root@linux-node1 ~]# echo 'PATH=$PATH:/opt/kubernetes/bin' >> /etc/profile
[root@linux-node1 ~]# source /etc/profile
Or:
[root@linux-node1 ~]# vim .bash_profile
PATH=$PATH:$HOME/bin:/opt/kubernetes/bin
[root@linux-node1 ~]# source .bash_profile
2. Prepare the packages
# GitHub download link:
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#server-binaries
wget https://storage.googleapis.com/kubernetes-release/release/v1.10.8/kubernetes-server-linux-amd64.tar.gz
wget https://storage.googleapis.com/kubernetes-release/release/v1.10.8/kubernetes-client-linux-amd64.tar.gz
wget https://storage.googleapis.com/kubernetes-release/release/v1.10.8/kubernetes-node-linux-amd64.tar.gz
wget https://storage.googleapis.com/kubernetes-release/release/v1.10.8/kubernetes.tar.gz
3. Extract the packages
tar -zxvf kubernetes.tar.gz -C /usr/local/src/
tar -zxvf kubernetes-server-linux-amd64.tar.gz -C /usr/local/src/
tar -zxvf kubernetes-client-linux-amd64.tar.gz -C /usr/local/src/
tar -zxvf kubernetes-node-linux-amd64.tar.gz -C /usr/local/src/
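Optionally, confirm the extracted server binaries match the expected release before distributing them:
/usr/local/src/kubernetes/server/bin/kube-apiserver --version    # should print Kubernetes v1.10.8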
3. Manually Create the CA Certificates (master only)
1. Install CFSSL
[root@linux-node1 ~]# cd /usr/local/src
[root@linux-node1 src]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@linux-node1 src]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@linux-node1 src]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@linux-node1 src]# chmod +x cfssl*
[root@linux-node1 src]# mv cfssl-certinfo_linux-amd64 /opt/kubernetes/bin/cfssl-certinfo
[root@linux-node1 src]# mv cfssljson_linux-amd64 /opt/kubernetes/bin/cfssljson
[root@linux-node1 src]# mv cfssl_linux-amd64 /opt/kubernetes/bin/cfssl
# Copy the cfssl binaries to linux-node2 and linux-node3. With more nodes, copy to each of them.
[root@linux-node1 ~]# scp /opt/kubernetes/bin/cfssl* 192.168.56.12:/opt/kubernetes/bin
[root@linux-node1 ~]# scp /opt/kubernetes/bin/cfssl* 192.168.56.13:/opt/kubernetes/bin
2. Initialize cfssl
[root@linux-node1 src]# mkdir ssl && cd ssl
[root@linux-node1 ssl]# cfssl print-defaults config > config.json   # sample for ca-config.json (optional)
[root@linux-node1 ssl]# cfssl print-defaults csr > csr.json         # sample for ca-csr.json (optional)
3. Create the JSON config file used to generate the CA
[root@linux-node1 ssl]#
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
4. Create the JSON config file for the CA certificate signing request (CSR)
[root@linux-node1 ssl]#
cat > ca-csr.json <<EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
5. Generate the CA certificate (ca.pem) and key (ca-key.pem)
[root@linux-node1 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
# The command above generates the following three files:
ca.csr ca-key.pem ca.pem
[root@linux-node1 ssl]# ls -l ca*
-rw-r--r-- 1 root root 290 Mar 4 13:45 ca-config.json
-rw-r--r-- 1 root root 1001 Mar 4 14:09 ca.csr
-rw-r--r-- 1 root root 208 Mar 4 13:51 ca-csr.json
-rw------- 1 root root 1679 Mar 4 14:09 ca-key.pem
-rw-r--r-- 1 root root 1359 Mar 4 14:09 ca.pem
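Optionally, the cfssl-certinfo tool installed above can decode the new CA certificate to confirm its subject and validity window:
[root@linux-node1 ssl]# cfssl-certinfo -cert ca.pem    # prints subject, issuer and validity as JSON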
6. Distribute the certificates
[root@linux-node1 ssl]# cp ca.csr ca.pem ca-key.pem ca-config.json /opt/kubernetes/ssl
# scp the certificates to linux-node2 and linux-node3
[root@linux-node1 ssl]# scp ca.csr ca.pem ca-key.pem ca-config.json 192.168.56.12:/opt/kubernetes/ssl
[root@linux-node1 ssl]# scp ca.csr ca.pem ca-key.pem ca-config.json 192.168.56.13:/opt/kubernetes/ssl
4. Manually Deploy the etcd Cluster
0. Prepare the etcd package
wget https://github.com/coreos/etcd/releases/download/v3.2.18/etcd-v3.2.18-linux-amd64.tar.gz
[root@linux-node1 src]# tar zxf etcd-v3.2.18-linux-amd64.tar.gz
[root@linux-node1 src]# cd etcd-v3.2.18-linux-amd64
[root@linux-node1 etcd-v3.2.18-linux-amd64]# cp etcd etcdctl /opt/kubernetes/bin/
[root@linux-node1 etcd-v3.2.18-linux-amd64]# scp etcd etcdctl 192.168.56.12:/opt/kubernetes/bin/
[root@linux-node1 etcd-v3.2.18-linux-amd64]# scp etcd etcdctl 192.168.56.13:/opt/kubernetes/bin/
1. Create the etcd certificate signing request:
[root@linux-node1 ~]# cd /usr/local/src/ssl
cat > etcd-csr.json <<EOF
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"192.168.56.11",
"192.168.56.12",
"192.168.56.13"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
2. Generate the etcd certificate and private key:
[root@linux-node1 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
The following certificate files are generated:
[root@linux-node1 ssl]# ls -l etcd*
-rw-r--r-- 1 root root 1045 Mar 5 11:27 etcd.csr
-rw-r--r-- 1 root root 257 Mar 5 11:25 etcd-csr.json
-rw------- 1 root root 1679 Mar 5 11:27 etcd-key.pem
-rw-r--r-- 1 root root 1419 Mar 5 11:27 etcd.pem
3. Move the certificates to /opt/kubernetes/ssl
[root@linux-node1 ssl]# cp etcd*.pem /opt/kubernetes/ssl
[root@linux-node1 ssl]# scp etcd*.pem 192.168.56.12:/opt/kubernetes/ssl
[root@linux-node1 ssl]# scp etcd*.pem 192.168.56.13:/opt/kubernetes/ssl
[root@linux-node1 ssl]# rm -f etcd.csr etcd-csr.json
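Optionally, verify with openssl that all three node IPs made it into the certificate's SANs; a mismatch here is a common cause of etcd TLS failures:
openssl x509 -in /opt/kubernetes/ssl/etcd.pem -noout -text | grep -A1 "Subject Alternative Name"
# expect: IP Address:127.0.0.1, IP Address:192.168.56.11, IP Address:192.168.56.12, IP Address:192.168.56.13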
4. Create the etcd configuration file
[root@linux-node1 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node1"
ETCD_DATA_DIR="/var/lib/etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.56.11:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.56.11:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.56.11:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.56.11:2380,etcd-node2=https://192.168.56.12:2380,etcd-node3=https://192.168.56.13:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.56.11:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
5. Create the etcd systemd service
[root@linux-node1 ~]# vim /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"
[Install]
WantedBy=multi-user.target
6. Sync the files to the other nodes (and adjust the node-specific values)
[root@linux-node1 ~]# scp /opt/kubernetes/cfg/etcd.conf 192.168.56.12:/opt/kubernetes/cfg/
[root@linux-node1 ~]# scp /etc/systemd/system/etcd.service 192.168.56.12:/etc/systemd/system/
[root@linux-node1 ~]# scp /opt/kubernetes/cfg/etcd.conf 192.168.56.13:/opt/kubernetes/cfg/
[root@linux-node1 ~]# scp /etc/systemd/system/etcd.service 192.168.56.13:/etc/systemd/system/
# etcd.conf on node2:
[root@linux-node2 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node2"
ETCD_DATA_DIR="/var/lib/etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.56.12:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.56.12:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.56.12:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.56.11:2380,etcd-node2=https://192.168.56.12:2380,etcd-node3=https://192.168.56.13:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.56.12:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
# etcd.conf on node3:
[root@linux-node3 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node3"
ETCD_DATA_DIR="/var/lib/etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.56.13:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.56.13:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.56.13:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.56.11:2380,etcd-node2=https://192.168.56.12:2380,etcd-node3=https://192.168.56.13:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.56.13:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
Create the etcd data directory and start etcd on all nodes
[root@linux-node1 ~]# mkdir /var/lib/etcd
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 ~]# systemctl enable etcd
[root@linux-node1 ~]# systemctl restart etcd
[root@linux-node1 ~]# systemctl status etcd
Repeat the steps above on every etcd node until the etcd service is running on all machines.
7. Verify the cluster
[root@linux-node1 ~]# etcdctl --endpoints=https://192.168.56.11:2379 \
--ca-file=/opt/kubernetes/ssl/ca.pem \
--cert-file=/opt/kubernetes/ssl/etcd.pem \
--key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health
member 435fb0a8da627a4c is healthy: got healthy result from https://192.168.56.12:2379
member 6566e06d7343e1bb is healthy: got healthy result from https://192.168.56.11:2379
member ce7b884e428b6c8c is healthy: got healthy result from https://192.168.56.13:2379
cluster is healthy
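etcd 3.2 also serves the v3 API; the same membership check can be run with ETCDCTL_API=3 (note the different certificate flag names):
ETCDCTL_API=3 etcdctl --endpoints=https://192.168.56.11:2379 \
  --cacert=/opt/kubernetes/ssl/ca.pem \
  --cert=/opt/kubernetes/ssl/etcd.pem \
  --key=/opt/kubernetes/ssl/etcd-key.pem member list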
5. Master Node Deployment
Deploy the Kubernetes API Server
0. Prepare the binaries
[root@linux-node1 ~]# cd /usr/local/src/kubernetes
[root@linux-node1 kubernetes]# cp server/bin/kube-apiserver /opt/kubernetes/bin/
[root@linux-node1 kubernetes]# cp server/bin/kube-controller-manager /opt/kubernetes/bin/
[root@linux-node1 kubernetes]# cp server/bin/kube-scheduler /opt/kubernetes/bin/
1. Create the JSON config file for the CSR
[root@linux-node1 ~]# cd /usr/local/src/ssl
[root@linux-node1 ssl]# cat > kubernetes-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.56.11",
"10.1.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
2. Generate the kubernetes certificate and private key
[root@linux-node1 src]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
# The command above generates the following two files:
kubernetes-key.pem kubernetes.pem
[root@linux-node1 src]# cp kubernetes*.pem /opt/kubernetes/ssl/
[root@linux-node1 ~]# scp kubernetes*.pem 192.168.56.12:/opt/kubernetes/ssl/
[root@linux-node1 ~]# scp kubernetes*.pem 192.168.56.13:/opt/kubernetes/ssl/
3. Create the client token file used by kube-apiserver
[root@linux-node1 ~]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
ad6d5bb607a186796d8861557df0d17f
[root@linux-node1 ~]# vim /opt/kubernetes/ssl/bootstrap-token.csv
ad6d5bb607a186796d8861557df0d17f,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
4. Create the basic username/password authentication config
[root@linux-node1 ~]# vim /opt/kubernetes/ssl/basic-auth.csv
admin,admin,1
readonly,readonly,2
5. Deploy the Kubernetes API Server
[root@linux-node1 ~]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--bind-address=192.168.56.11 \
--insecure-bind-address=127.0.0.1 \
--authorization-mode=Node,RBAC \
--runtime-config=rbac.authorization.k8s.io/v1 \
--kubelet-https=true \
--anonymous-auth=false \
--basic-auth-file=/opt/kubernetes/ssl/basic-auth.csv \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/ssl/bootstrap-token.csv \
--service-cluster-ip-range=10.1.0.0/16 \
--service-node-port-range=20000-40000 \
--tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \
--etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem \
--etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem \
--etcd-servers=https://192.168.56.11:2379,https://192.168.56.12:2379,https://192.168.56.13:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/log/api-audit.log \
--event-ttl=1h \
--v=2 \
--logtostderr=false \
--log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
6. Start the API Server service
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 ~]# systemctl enable kube-apiserver
[root@linux-node1 ~]# systemctl start kube-apiserver
Check the API Server service status
[root@linux-node1 ~]# systemctl status kube-apiserver
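If the service fails, journalctl -u kube-apiserver usually shows why. A quick port check (net-tools was installed earlier) should show both listeners configured above:
netstat -tlnp | grep kube-apiserver
# expect 192.168.56.11:6443 (secure) and 127.0.0.1:8080 (insecure) in LISTEN state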
Deploy the Controller Manager Service
[root@linux-node1 ~]# vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
--address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.1.0.0/16 \
  --cluster-cidr=10.2.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/opt/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
3. Start the Controller Manager
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 scripts]# systemctl enable kube-controller-manager
[root@linux-node1 scripts]# systemctl start kube-controller-manager
4. Check the service status
[root@linux-node1 scripts]# systemctl status kube-controller-manager
Deploy the Kubernetes Scheduler
[root@linux-node1 ~]# vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/opt/kubernetes/bin/kube-scheduler \
--address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--leader-elect=true \
--v=2 \
--logtostderr=false \
--log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
2. Start the service
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 scripts]# systemctl enable kube-scheduler
[root@linux-node1 scripts]# systemctl start kube-scheduler
[root@linux-node1 scripts]# systemctl status kube-scheduler
Deploy the kubectl Command-Line Tool
1. Prepare the binary package
[root@linux-node1 ~]# cd /usr/local/src/kubernetes/client/bin
[root@linux-node1 bin]# cp kubectl /opt/kubernetes/bin/
# kubectl also needs to be copied to the node machines
[root@linux-node1 bin]# scp /opt/kubernetes/bin/kubectl linux-node2:/opt/kubernetes/bin/
[root@linux-node1 bin]# scp /opt/kubernetes/bin/kubectl linux-node3:/opt/kubernetes/bin/
2. Create the admin certificate signing request
[root@linux-node1 ~]# cd /usr/local/src/ssl/
[root@linux-node1 ssl]#
cat > admin-csr.json <<EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
3. Generate the admin certificate and private key:
[root@linux-node1 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
[root@linux-node1 ssl]# ls -l admin*
-rw-r--r-- 1 root root 1009 Mar 5 12:29 admin.csr
-rw-r--r-- 1 root root 229 Mar 5 12:28 admin-csr.json
-rw------- 1 root root 1675 Mar 5 12:29 admin-key.pem
-rw-r--r-- 1 root root 1399 Mar 5 12:29 admin.pem
[root@linux-node1 src]# mv admin*.pem /opt/kubernetes/ssl/
4. Set the cluster parameters
[root@linux-node1 src]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.56.11:6443
Cluster "kubernetes" set.
5. Set the client authentication parameters
[root@linux-node1 src]# kubectl config set-credentials admin \
--client-certificate=/opt/kubernetes/ssl/admin.pem \
--embed-certs=true \
--client-key=/opt/kubernetes/ssl/admin-key.pem
User "admin" set.
6. Set the context parameters
[root@linux-node1 src]# kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin
Context "kubernetes" created.
7. Set the default context
[root@linux-node1 src]# kubectl config use-context kubernetes
Switched to context "kubernetes".
8. Use kubectl
[root@linux-node1 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-1 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}
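Two more optional sanity checks now that kubectl is configured:
kubectl cluster-info        # should print the master URL https://192.168.56.11:6443
kubectl version --short     # client and server should both report v1.10.8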
6. Node Deployment
Deploy the kubelet
1. Prepare the binaries: copy them from linux-node1 to the node machines.
[root@linux-node1 ~]# cd /usr/local/src/kubernetes/server/bin/
[root@linux-node1 bin]# cp kubelet kube-proxy /opt/kubernetes/bin/
[root@linux-node1 bin]# scp kubelet kube-proxy 192.168.56.12:/opt/kubernetes/bin/
[root@linux-node1 bin]# scp kubelet kube-proxy 192.168.56.13:/opt/kubernetes/bin/
2. Create the role binding
[root@linux-node1 ~]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding "kubelet-bootstrap" created
3. Create the kubelet bootstrapping kubeconfig file
Set the cluster parameters
[root@linux-node1 ~]# kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://192.168.56.11:6443 \
   --kubeconfig=bootstrap.kubeconfig
Cluster "kubernetes" set.
Set the client authentication parameters
[root@linux-node1 ~]# kubectl config set-credentials kubelet-bootstrap \
   --token=ad6d5bb607a186796d8861557df0d17f \
   --kubeconfig=bootstrap.kubeconfig
User "kubelet-bootstrap" set.
Set the context parameters
[root@linux-node1 ~]# kubectl config set-context default \
   --cluster=kubernetes \
   --user=kubelet-bootstrap \
   --kubeconfig=bootstrap.kubeconfig
Context "default" created.
Select the default context
[root@linux-node1 ~]# kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
Switched to context "default".
[root@linux-node1 kubernetes]# cp bootstrap.kubeconfig /opt/kubernetes/cfg
[root@linux-node1 kubernetes]# scp bootstrap.kubeconfig 192.168.56.12:/opt/kubernetes/cfg
[root@linux-node1 kubernetes]# scp bootstrap.kubeconfig 192.168.56.13:/opt/kubernetes/cfg
Deploy the kubelet on the node machines:
1. Set up CNI support
[root@linux-node2 ~]# mkdir -p /etc/cni/net.d
[root@linux-node2 ~]# cat > /etc/cni/net.d/10-default.conf <<EOF
{
"name": "flannel",
"type": "flannel",
"delegate": {
"bridge": "docker0",
"isDefaultGateway": true,
"mtu": 1400
}
}
EOF
2. Create the kubelet directory
[root@linux-node2 ~]# mkdir /var/lib/kubelet
3. Create the kubelet service config
[root@linux-node2 ~]# vim /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \
--address=192.168.56.12 \
  --hostname-override=192.168.56.12 \
  --pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
  --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
  --cert-dir=/opt/kubernetes/ssl \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/kubernetes/bin/cni \
  --cluster-dns=10.1.0.2 \
  --cluster-domain=cluster.local. \
  --hairpin-mode hairpin-veth \
  --allow-privileged=true \
  --fail-swap-on=false \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
4. Start the kubelet
[root@linux-node2 ~]# systemctl daemon-reload
[root@linux-node2 ~]# systemctl enable kubelet
[root@linux-node2 ~]# systemctl start kubelet
5. Check the service status
[root@linux-node2 ~]# systemctl status kubelet
6. Check the CSR requests (note: run this on linux-node1)
[root@linux-node1 ~]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-0_w5F1FM_la_SeGiu3Y5xELRpYUjjT2icIFk9gO9KOU   1m        kubelet-bootstrap   Pending
7. Approve the kubelet TLS certificate requests
[root@linux-node1 ~]# kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve
After approval, the request shows an Approved status:
[root@linux-node1 ~]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-IPMSFbKvwgq2icOeIo2v_WA-qb8QCyA7MT5h4eDmjxg   2m        kubelet-bootstrap   Approved,Issued
Afterwards, the node shows a Ready status:
[root@linux-node1 ~]# kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
192.168.56.12   Ready     <none>    2m        v1.10.8
Deploy the Kubernetes Proxy
1. Configure kube-proxy to use LVS
[root@linux-node2 ~]# yum install -y ipvsadm ipset conntrack
2. Create the kube-proxy certificate request
[root@linux-node1 ~]# cd /usr/local/src/ssl/
[root@linux-node1 ssl]# cat > kube-proxy-csr.json <<EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
3. Generate the certificate
[root@linux-node1 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
4. Distribute the certificates to all node machines
[root@linux-node1 ssl]# cp kube-proxy*.pem /opt/kubernetes/ssl/
[root@linux-node1 ssl]# scp kube-proxy*.pem linux-node2:/opt/kubernetes/ssl/
[root@linux-node1 ssl]# scp kube-proxy*.pem linux-node3:/opt/kubernetes/ssl/
5. Create the kube-proxy kubeconfig file
[root@linux-node1 ~]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.56.11:6443 \
--kubeconfig=kube-proxy.kubeconfig
Cluster "kubernetes" set.
[root@linux-node1 ~]# kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
--client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
User "kube-proxy" set.
[root@linux-node1 ~]# kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
Context "default" created.
[root@linux-node1 ~]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
Switched to context "default".
6. Distribute the kubeconfig file
[root@linux-node1 ssl]# cp kube-proxy.kubeconfig /opt/kubernetes/cfg/
[root@linux-node1 ~]# scp kube-proxy.kubeconfig 192.168.56.12:/opt/kubernetes/cfg/
[root@linux-node1 ~]# scp kube-proxy.kubeconfig 192.168.56.13:/opt/kubernetes/cfg/
7. Create the kube-proxy service config
[root@linux-node2 bin]# mkdir /var/lib/kube-proxy
[root@linux-node2 ~]# vim /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \
--bind-address=192.168.56.12 \
--hostname-override=192.168.56.12 \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
--masquerade-all \
--feature-gates=SupportIPVSProxyMode=true \
--proxy-mode=ipvs \
--ipvs-min-sync-period=5s \
--ipvs-sync-period=5s \
--ipvs-scheduler=rr \
--logtostderr=true \
--v=2 \
--logtostderr=false \
--log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
8. Start the Kubernetes Proxy
[root@linux-node2 ~]# systemctl daemon-reload
[root@linux-node2 ~]# systemctl enable kube-proxy
[root@linux-node2 ~]# systemctl start kube-proxy
9. Check the kube-proxy service status
[root@linux-node2 scripts]# systemctl status kube-proxy
Check the LVS status
[root@linux-node2 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.1.0.1:443 rr persistent 10800
-> 192.168.56.11:6443 Masq 1 0 0
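To confirm the virtual server actually forwards traffic, curl the cluster VIP from the node. Because anonymous auth is disabled on the API server, an "Unauthorized" response still proves the IPVS path works:
curl -k https://10.1.0.1:443/version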
If kubelet and kube-proxy have been installed on both node machines, check node status with:
[root@linux-node1 ssl]# kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
192.168.56.12   Ready     <none>    22m       v1.10.8
192.168.56.13   Ready     <none>    3m        v1.10.8
Deploy linux-node3 the same way.
7. Flannel Deployment
1. Generate a certificate for Flannel
[root@linux-node1 ~]# cat > flanneld-csr.json <<EOF
{
"CN": "flanneld",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
2. Generate the certificate
[root@linux-node1 ~]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
3. Distribute the certificates
[root@linux-node1 ~]# cp flanneld*.pem /opt/kubernetes/ssl/
[root@linux-node1 ~]# scp flanneld*.pem 192.168.56.12:/opt/kubernetes/ssl/
[root@linux-node1 ~]# scp flanneld*.pem 192.168.56.13:/opt/kubernetes/ssl/
4. Download the Flannel package
[root@linux-node1 ~]# cd /usr/local/src
wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
[root@linux-node1 src]# tar zxf flannel-v0.10.0-linux-amd64.tar.gz
[root@linux-node1 src]# cp flanneld mk-docker-opts.sh /opt/kubernetes/bin/
Copy to linux-node2 and linux-node3
[root@linux-node1 src]# scp flanneld mk-docker-opts.sh 192.168.56.12:/opt/kubernetes/bin/
[root@linux-node1 src]# scp flanneld mk-docker-opts.sh 192.168.56.13:/opt/kubernetes/bin/
Copy the helper scripts to /opt/kubernetes/bin.
[root@linux-node1 ~]# cd /usr/local/src/kubernetes/cluster/centos/node/bin/
[root@linux-node1 bin]# cp remove-docker0.sh /opt/kubernetes/bin/
[root@linux-node1 bin]# scp remove-docker0.sh 192.168.56.12:/opt/kubernetes/bin/
[root@linux-node1 bin]# scp remove-docker0.sh 192.168.56.13:/opt/kubernetes/bin/
5. Configure Flannel
[root@linux-node1 ~]# vim /opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=https://192.168.56.11:2379,https://192.168.56.12:2379,https://192.168.56.13:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"
Copy the configuration to the other nodes
[root@linux-node1 ~]# scp /opt/kubernetes/cfg/flannel 192.168.56.12:/opt/kubernetes/cfg/
[root@linux-node1 ~]# scp /opt/kubernetes/cfg/flannel 192.168.56.13:/opt/kubernetes/cfg/
6. Create the Flannel systemd service
[root@linux-node1 ~]# vim /usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker
Type=notify
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
Copy the unit file to the other nodes
scp /usr/lib/systemd/system/flannel.service 192.168.56.12:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/flannel.service 192.168.56.13:/usr/lib/systemd/system/
Flannel CNI Integration
Download the CNI plugins
[root@linux-node1 ~]# cd /usr/local/src
# Releases page: https://github.com/containernetworking/plugins/releases
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
[root@linux-node1 ~]# mkdir /opt/kubernetes/bin/cni -p
[root@linux-node1 src]# tar zxf cni-plugins-amd64-v0.7.1.tgz -C /opt/kubernetes/bin/cni
scp -r /opt/kubernetes/bin/cni 192.168.56.12:/opt/kubernetes/bin/
scp -r /opt/kubernetes/bin/cni 192.168.56.13:/opt/kubernetes/bin/
Create the Flannel network key in etcd
/opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem \
--cert-file /opt/kubernetes/ssl/flanneld.pem \
--key-file /opt/kubernetes/ssl/flanneld-key.pem \
--no-sync -C https://192.168.56.11:2379,https://192.168.56.12:2379,https://192.168.56.13:2379 \
mk /kubernetes/network/config '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}'
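The key can be read back with the same v2 etcdctl flags to confirm it was written correctly:
/opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem \
  --cert-file /opt/kubernetes/ssl/flanneld.pem \
  --key-file /opt/kubernetes/ssl/flanneld-key.pem \
  --no-sync -C https://192.168.56.11:2379 \
  get /kubernetes/network/config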
Start Flannel
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 ~]# systemctl enable flannel
[root@linux-node1 ~]# chmod +x /opt/kubernetes/bin/*
[root@linux-node1 ~]# systemctl start flannel
Check the service status
[root@linux-node1 ~]# systemctl status flannel
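A quick way to confirm Flannel acquired a subnet on this node (the file and interface names below assume the vxlan backend written to etcd above):
cat /run/flannel/docker     # DOCKER_OPT_BIP / DOCKER_OPT_MTU derived from the node's flannel subnet
ip addr show flannel.1      # the vxlan interface should hold an address from 10.2.0.0/16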
Configure Docker to Use Flannel
[root@linux-node1 ~]# vim /usr/lib/systemd/system/docker.service
[Unit]
# Under [Unit], modify After and add Requires:
After=network-online.target flannel.service
Wants=network-online.target
Requires=flannel.service
[Service]
# Add EnvironmentFile=-/run/flannel/docker:
Type=notify
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd $DOCKER_OPTS
Final configuration:
cat /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target flannel.service
Requires=flannel.service
[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
EnvironmentFile=-/opt/kubernetes/cfg/docker
ExecStart=/usr/bin/dockerd $DOCKER_OPT_BIP $DOCKER_OPT_MTU $DOCKER_OPTS
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
Copy the configuration to the other two nodes
scp /usr/lib/systemd/system/docker.service 192.168.56.12:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/docker.service 192.168.56.13:/usr/lib/systemd/system/
Restart Docker
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 ~]# systemctl restart docker
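After the restart, docker0 should have moved into the Flannel network; a quick check:
ip addr show docker0    # expect an address inside 10.2.0.0/16 instead of the default 172.17.0.1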
8. Create a Test Application
1. Create a test deployment
[root@linux-node1 ~]# kubectl run net-test --image=alpine --replicas=2 sleep 360000
2. Check the assigned pod IPs
[root@linux-node1 ~]# kubectl get pod -o wide
NAME                        READY     STATUS    RESTARTS   AGE       IP          NODE
net-test-74f45db489-gmgv8   1/1       Running   0          1m        10.2.83.2   192.168.56.13
net-test-74f45db489-pr5jc   1/1       Running   0          1m        10.2.59.2   192.168.56.12
3. Test connectivity (from the corresponding node)
ping 10.2.83.2
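Cross-node connectivity can also be tested from inside a pod with kubectl exec; the pod names here come from the example output above and will differ in your cluster:
[root@linux-node1 ~]# kubectl exec net-test-74f45db489-gmgv8 -- ping -c 3 10.2.59.2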
Adapted from a SegmentFault article: "Kubernetes集群的详细部署".