一、通过二进制部署Kubernetes(k8s)集群
资源列表
操作系统 | 配置 | 主机名/IP |
---|---|---|
Centos 7.9 | 2G2C | master/172.16.10.10 |
Centos 7.9 | 2G2C | node01/172.16.10.11 |
Centos 7.9 | 2G2C | node02/172.16.10.12 |
1.1 实验环境
-
k8s集群所有节点都执行
1.1.1 修改主机名
hostnamectl set-hostname master hostnamectl set-hostname node01 hostnamectl set-hostname node02
1.1.2 关闭防火墙
systemctl stop firewalld systemctl disable firewalld
1.1.3 关闭selinux安全机制
sed -i "s/^SELINUX=.*/SELINUX=disabled/" /etc/selinux/config # 重启系统 reboot
1.1.4 关闭swap交换分区
# 关闭swap分区 swapoff -a # 关闭开启自启 vim /etc/fstab ##swap分区前加上注释“#” #/dev/mapper/cl-swap swap swap defaults 0 0
1.1.5 修改内核机制,开启转发
# 修改内核机制 vim /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 # 刷新 sysctl --system
1.1.6 添加hosts映射
vim /etc/hosts 172.16.10.10 master 172.16.10.11 node01 172.16.10.12 node02
1.1.7 时间同步
yum -y install chrony systemctl start chronyd chronyc sources -v
1.1.8 安装并配置docker-ce-18.06.1.ce版本
# 安装docker-ce-18.06.1.ce版本 wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -P /etc/yum.repos.d/ yum -y install docker-ce-18.06.1.ce # 开机自启 systemctl enable docker # docker加速器 sudo mkdir -p /etc/docker sudo tee /etc/docker/daemon.json << EOF { "registry-mirrors": ["https://hdi5v8p1.mirror.aliyuncs.com"] } EOF # 启动docker systemctl start docker ##若添加加速器之前已经开启docker,则需要重新加载docker systemctl daemon-reload [root@master ~]# systemctl restart docker
1.2 创建生成认证证书
# 上传认证工具cfssl_linux-amd64 cfssl-certinfo_linux-amd64 cfssljson_linux-amd64 ls cfssl_linux-amd64 cfssl-certinfo_linux-amd64 cfssljson_linux-amd64 # 给认证工具添加执行权限 chmod +x cf* # 将认证工具移动到PATH环境变量 mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson mv cfssl_linux-amd64 /usr/local/bin/cfssl # 生成认证证书 ##上传 mkdir /root/software cd /root/software ##将ssl.tar.gz上传到/root/software ##解压 tar zxf ssl.tar.gz cd ssl/ vim server-csr.json #添加集群所有节点IP地址 ... "hosts": [ "127.0.0.1", "172.16.10.10", "172.16.10.11", "172.16.10.12", "10.10.10.1", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local" .... ##执行脚本生成证书 ./ssl.sh
1.3 部署etcd组件
-
master节点执行
# 创建相关目录(所有节点都要执行) mkdir -p /opt/kubernetes/{bin,cfg,ssl} # 将etcd安装包上传到/root tar zxf etcd-v3.4.3-linux-amd64.tar.gz mv /root/etcd-v3.4.3-linux-amd64/etcd* /opt/kubernetes/bin/ # 推送到其他节点 scp /opt/kubernetes/bin/* 172.16.10.11:/opt/kubernetes/bin/ scp /opt/kubernetes/bin/* 172.16.10.12:/opt/kubernetes/bin/ # 上传etcd到master节点 ##推送到其他节点 scp etcd 172.16.10.11:/opt/kubernetes/cfg/ scp etcd 172.16.10.12:/opt/kubernetes/cfg/ # 将master节点的etcd配置文件移到/opt/kubernetes/cfg/ mv etcd /opt/kubernetes/cfg/ # 修改etcd配置文件 vim /opt/kubernetes/cfg/etcd #[Member] ETCD_NAME="etcd01" ##master节点为etcd01,node01为etcd02,node02为etcd03 ETCD_DATA_DIR="/var/lib/etcd/default.etcd" ETCD_LISTEN_PEER_URLS="https://172.16.10.10:2380" ##改为当前节点的IP地址 ETCD_LISTEN_CLIENT_URLS="https://172.16.10.10:2379,http://127.0.0.1:2379" #[Clustering] ##改为当前节点的IP地址 ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.10.10:2380" ##改为当前节点的IP地址 ETCD_ADVERTISE_CLIENT_URLS="https://172.16.10.10:2379" ##改为当前节点的IP地址 ETCD_INITIAL_CLUSTER="etcd01=https://172.16.10.10:2380,etcd02=https://172.16.10.11:2380,etcd03=https://172.16.10.12:2380" ##添加所有节点的IP地址 ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" ETCD_INITIAL_CLUSTER_STATE="new" ETCD_ENABLE_V2="true" # node01和node02节点分别进行etcd配置文件修改 # 上传etcd.service到master节点 ##将etcd.service 移到/usr/lib/systemd/system/ mv etcd.service /usr/lib/systemd/system/ ##将etcd.service推送到其他节点 scp /usr/lib/systemd/system/etcd.service 172.16.10.11:/usr/lib/systemd/system/ scp /usr/lib/systemd/system/etcd.service 172.16.10.12:/usr/lib/systemd/system/ # 将etcd相关证书复制到/opt/kubernetes/ssl下 cp /root/software/ssl/server*.pem /root/software/ssl/ca*.pem /opt/kubernetes/ssl ##将etcd相关证书推送到其他节点 scp /opt/kubernetes/ssl/* 172.16.10.11:/opt/kubernetes/ssl/ scp /opt/kubernetes/ssl/* 172.16.10.12:/opt/kubernetes/ssl/ # 启动时必须所有节点同步进行 systemctl daemon-reload systemctl start etcd systemctl enable etcd # 查看etcd状态 /opt/kubernetes/bin/etcdctl --cacert=/opt/kubernetes/ssl/ca.pem --cert=/opt/kubernetes/ssl/server.pem 
--key=/opt/kubernetes/ssl/server-key.pem endpoint health --endpoints="https://172.16.10.10:2379,https://172.16.10.11:2379,https://172.16.10.12:2379"
1.4 部署flannel网络
-
master节点执行
# 通过etcd认证创建网络 ETCDCTL_API=2 /opt/kubernetes/bin/etcdctl --ca-file=/opt/kubernetes/ssl/ca.pem --cert-file=/opt/kubernetes/ssl/server.pem --key-file=/opt/kubernetes/ssl/server-key.pem --endpoints="https://172.16.10.10:2379,https://172.16.10.11:2379,https://172.16.10.12:2379" set /coreos.com/network/config '{"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"} }' # 上传并解压flannel包 tar zxf flannel-v0.10.0-linux-amd64.tar.gz # 将可执行文件推送到node节点,flannel网络仅需部署在node节点 scp /root/flanneld mk-docker-opts.sh 172.16.10.11:/opt/kubernetes/bin/ scp /root/flanneld mk-docker-opts.sh 172.16.10.12:/opt/kubernetes/bin/
-
node节点执行(以node01 为例)
# 上传flanneld 、flanneld.service到/root ##移动到/opt/kubernetes/cfg/ mv flanneld /opt/kubernetes/cfg/ # 修改配置文件 vim /opt/kubernetes/cfg/flanneld ## 将etcd的IP地址改为 集群节点的IP地址 FLANNEL_OPTIONS="--etcd-endpoints=https://172.16.10.10:2379,https://172.16.10.11:2379,https://172.16.10.12:2379 -etcd-cafile=/opt/kubernetes/ssl/ca.pem -etcd-certfile=/opt/kubernetes/ssl/server.pem -etcd-keyfile=/opt/kubernetes/ssl/server-key.pem" # 将flanneld.service文件移动到 /usr/lib/systemd/system/ mv flanneld.service /usr/lib/systemd/system/ # 将两个文件推送到node02相应目录下 scp /opt/kubernetes/cfg/flanneld 172.16.10.12:/opt/kubernetes/cfg/ scp /usr/lib/systemd/system/flanneld.service 172.16.10.12:/usr/lib/systemd/system/ # 修改docker创建服务的网络配置 vim /usr/lib/systemd/system/docker.service ..... [Service] Type=notify EnvironmentFile=/run/flannel/subnet.env ##添加“EnvironmentFile=/run/flannel/subnet.env” # the default is not to use systemd for cgroups because the delegate issues still # exists and systemd currently does not support the cgroup feature set required # for containers run by docker ExecStart=/usr/bin/dockerd -D $DOCKER_NETWORK_OPTIONS ##修改为“ExecStart=/usr/bin/dockerd -D $DOCKER_NETWORK_OPTIONS,若使用的docker-20版本的则不需要改动” ExecReload=/bin/kill -s HUP $MAINPID ...... # 重载启动flanneld和docker systemctl daemon-reload systemctl start flanneld systemctl restart docker # node02 节点进行相同操作
1.5 部署master节点相关组件
1.5.1 配置组件环境
# 上传并解压kubernetes-server-linux-amd64_1-16.tar.gz tar zxf kubernetes-server-linux-amd64_1-16.tar.gz # 将kubectl可执行文件移动到/opt/kubernetes/bin/,并添加执行权限 cp kubernetes/server/bin/kubectl /opt/kubernetes/bin/ chmod +x /opt/kubernetes/bin/kubectl # 设置环境变量,将/opt/kubernetes/bin/添加为PATH全局变量 vim /etc/profile ##末行添加 PATH=$PATH:/opt/kubernetes/bin export PATH # 刷新环境变量 source /etc/profile # 上传config_k8s.sh 脚本并修改 vim config_k8s.sh ... export KUBE_APISERVER="https://172.16.10.10:6443" ##修改为master节点IP地址 ... # 认证 #kubectl create clusterrolebinding kubelet-bootstrap \ ##注释掉 #--clusterrole=system:node-bootstrapper \ ##注释掉 #--user=kubelet-bootstrap ##注释掉 # 执行脚本 sh config_k8s.sh
1.5.2 部署apiserver、controller、scheduler组件
# 将所有的可执行文件复制到 /opt/kubernetes/bin/下 cp kubernetes/server/bin/kube-controller-manager kubernetes/server/bin/kube-scheduler kubernetes/server/bin/kube-apiserver /opt/kubernetes/bin/ # 添加可执行权限 chmod +x /opt/kubernetes/bin/* # 上传 master.zip 到/root ##解压master.zip unzip master.zip ##给apiserver.sh 添加执行权限 chmod +x apiserver.sh # 执行./apiserver.sh 脚本 ./apiserver.sh 172.16.10.10 https://172.16.10.10:2379,https://172.16.10.11:2379,https://172.16.10.12:2379 # 查看状态 netstat -nlpt | grep apiserver # 创建controller组件 # 执行 controller-manager.sh 127.0.0.1 脚本 sh controller-manager.sh 127.0.0.1 netstat -nlpt | grep controller # 创建scheduler组件 sh scheduler.sh 127.0.0.1 netstat -nlpt | grep kube # 推送 *kubeconfig 配置文件推送到node节点/opt/kubernetes/cfg scp /root/software/ssl/*kubeconfig 172.16.10.11:/opt/kubernetes/cfg scp /root/software/ssl/*kubeconfig 172.16.10.12:/opt/kubernetes/cfg # 推送kubelet、kube-proxy组件到node节点/opt/kubernetes/bin/ scp kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy 172.16.10.11:/opt/kubernetes/bin/ scp kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy 172.16.10.12:/opt/kubernetes/bin/
1.6 集群添加node节点
-
node节点执行(以node01 为例)
# 上传node.zip到/root/ ##解压 unzip node.zip chmod +x /opt/kubernetes/bin/*
-
master节点执行
# 开启节点认证 vim config_k8s.sh #取消节点认证注释 kubectl create clusterrolebinding kubelet-bootstrap \ --clusterrole=system:node-bootstrapper \ --user=kubelet-bootstrap # 执行命令,生成节点认证 kubectl create clusterrolebinding kubelet-bootstrap \ --clusterrole=system:node-bootstrapper \ --user=kubelet-bootstrap
-
node节点执行
# node01 节点执行 ##创建kubelet组件 sh kubelet.sh 172.16.10.11 ##当前节点 IP地址 ##创建proxy组件 sh proxy.sh 172.16.10.11 ##当前节点 IP地址 # node02 节点执行 sh kubelet.sh 172.16.10.12 ##当前节点 IP地址 sh proxy.sh 172.16.10.12 ##当前节点 IP地址
-
master节点执行
# 查看各节点的认证书情况,Pending为等待同意 kubectl get csr NAME AGE REQUESTOR CONDITION node-csr-GpSShec7StbuENCXnOxZQKJBtYLa4R8gWU8leqUWUnw 69s kubelet-bootstrap Pending node-csr-eLgrrIMgAlfD5zKQjxW5VcKcxPGUOWz8tcOZE8Icw5U 29s kubelet-bootstrap Pending # 同意批准各节点认证 kubectl certificate approve [各节点NAME] kubectl certificate approve node-csr-GpSShec7StbuENCXnOxZQKJBtYLa4R8gWU8leqUWUnw kubectl certificate approve node-csr-eLgrrIMgAlfD5zKQjxW5VcKcxPGUOWz8tcOZE8Icw5U # 查看各节点的认证书情况,Approved为已同意批准 kubectl get csr NAME AGE REQUESTOR CONDITION node-csr-GpSShec7StbuENCXnOxZQKJBtYLa4R8gWU8leqUWUnw 4m19s kubelet-bootstrap Approved,Issued node-csr-eLgrrIMgAlfD5zKQjxW5VcKcxPGUOWz8tcOZE8Icw5U 3m39s kubelet-bootstrap Approved,Issued # 查看集群节点情况 kubectl get node NAME STATUS ROLES AGE VERSION 172.16.10.11 Ready <none> 83s v1.16.15 172.16.10.12 Ready <none> 62s v1.16.15