一、前置准备
准备三台服务器,操作系统centos7.x
硬件配置:CPU2核、内存2GB、硬盘30GB
确保机器间网络互通、时钟同步;所需镜像及资源包请通过原文提供的下载链接获取(此处原为"点击下载"链接)。
规划:
节点名称 | IP | 安装组件 |
---|---|---|
k8s-master | 192.168.14.101 | docker、etcd、kubelet、kube-proxy、kube-apiserver、kube-controller-manager、kube-scheduler |
k8s-node1 | 192.168.14.102 | docker、etcd、kubelet、kube-proxy |
k8s-node2 | 192.168.14.103 | docker、etcd、kubelet、kube-proxy |
docker私有仓库 | 192.168.14.100 | docker(服务器配置可以低点) |
三台服务器初始化操作
# Disable the firewall (kube-proxy will manage iptables rules itself)
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux: permanently in the config file, immediately via setenforce
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
# Disable swap now and comment out its fstab entry (required by kubelet)
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Add host entries for the three cluster nodes
cat >> /etc/hosts << EOF
192.168.14.101 k8s-master
192.168.14.102 k8s-node1
192.168.14.103 k8s-node2
EOF
# Pass bridged IPv4 traffic to the iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Load br_netfilter so the bridge-nf keys exist, then apply ALL sysctl
# config files. (fixed: plain 'sysctl -p' reads only /etc/sysctl.conf and
# silently skips /etc/sysctl.d/k8s.conf; '--system' loads sysctl.d too)
modprobe br_netfilter
sysctl --system
- 部署docker本地私有镜像仓库 (192.168.14.100)
#将资源包里面的docker_绿色免安装版.tar.gz解压,把docker_inspkg下的所有文件放到 /usr/sbin/目录下
#使用 systemd 管理 docker
vim /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
# start only after the network (and firewalld, when present) is up
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
# dockerd notifies systemd itself once it is ready to serve
Type=notify
# path matches the binaries copied to /usr/sbin in the step above
ExecStart=/usr/sbin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
# no resource limits on the container engine itself
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# startup may be slow when many containers must be restored — never time out
TimeoutStartSec=0
# let dockerd manage the cgroup hierarchy of its containers
Delegate=yes
# on stop, signal only the daemon process, not the container processes
KillMode=process
# restart on failure, at most 3 attempts per 60s window
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
###########################分 割 线 #############################
# Reload systemd so it picks up the new unit file, then start docker
# now and enable it at boot
systemctl daemon-reload
systemctl start docker
systemctl enable docker
###########################分 割 线 #############################
#新建或修改daemon.json
vim /etc/docker/daemon.json
# NOTE: the key is "registry-mirrors" (plural). dockerd refuses to start
# on unknown configuration keys, so the original "registry-mirror" would
# break the daemon.
{
"registry-mirrors" : ["https://hub-mirror.c.163.com"],
"insecure-registries" : ["192.168.14.100:5000"]
}
###########################分 割 线 #############################
# Restart docker so the daemon.json changes take effect
systemctl restart docker
###########################分 割 线 #############################
# Import the registry image from the resource bundle's docker-images directory
docker load --input registry.docker
###########################分 割 线 #############################
# Launch the private registry container, persisting image data on the host
mkdir -p /opt/data/registry
docker run --detach \
  --name private_registry \
  --publish 5000:5000 \
  --volume /opt/data/registry:/var/lib/registry \
  --restart=always \
  registry
- 上传资源包里面的镜像
# Import the remaining images from the docker-images directory
# (registry.docker was already loaded in the previous step)
for archive in \
    flanneld-v0.11.0-{s390x,ppc64le,arm,arm64,amd64}.docker \
    dashboard-v2.0.4.docker \
    metrics-scraper-v1.0.4.docker \
    coredns-1.6.2.docker \
    busybox.docker; do
  docker load -i "$archive"
done
docker images # list the images just imported (sample output below)
REPOSITORY TAG IMAGE ID CREATED SIZE
registry latest 1fd8e1b0bb7e 13 days ago 26.2MB
kubernetesui/dashboard v2.0.4 46d0a29c3f61 7 months ago 225MB
kubernetesui/metrics-scraper v1.0.4 86262685d9ab 13 months ago 36.9MB
coredns/coredns 1.6.2 bf261d157914 20 months ago 44.1MB
quay.io/coreos/flannel v0.11.0-s390x c5963b81ce28 2 years ago 58.2MB
quay.io/coreos/flannel v0.11.0-ppc64le c96a2f3abc08 2 years ago 69.6MB
quay.io/coreos/flannel v0.11.0-arm64 32ffa9fadfd7 2 years ago 53.5MB
quay.io/coreos/flannel v0.11.0-arm ef3b5d63729b 2 years ago 48.9MB
quay.io/coreos/flannel v0.11.0-amd64 ff281650a721 2 years ago 52.6MB
busybox 1.28.4 8c811b4aec35 2 years ago 1.15MB
# Retag every imported image for the private registry, then push.
# Each entry is "<source image> <name in the private registry>".
registry=192.168.14.100:5000
pairs=(
  "busybox:1.28.4 busybox"
  "quay.io/coreos/flannel:v0.11.0-amd64 flannel-amd64"
  "quay.io/coreos/flannel:v0.11.0-arm flannel-arm"
  "quay.io/coreos/flannel:v0.11.0-arm64 flannel-arm64"
  "quay.io/coreos/flannel:v0.11.0-ppc64le flannel-ppc64le"
  "quay.io/coreos/flannel:v0.11.0-s390x flannel-s390x"
  "coredns/coredns:1.6.2 coredns"
  "kubernetesui/metrics-scraper:v1.0.4 metrics-scraper"
  "kubernetesui/dashboard:v2.0.4 dashboard"
)
# tag phase
for pair in "${pairs[@]}"; do
  docker tag "${pair% *}" "$registry/${pair#* }"
done
# push phase: upload all retagged images to the local registry
for pair in "${pairs[@]}"; do
  docker push "$registry/${pair#* }"
done
# Remove the upstream tags that are no longer needed — the
# 192.168.14.100:5000/* copies pushed above remain usable.
# (fixed: the flannel-amd64 line was garbled as
#  "docker rmi docker rmi quay.io/..." and would have failed)
docker rmi busybox:1.28.4
docker rmi quay.io/coreos/flannel:v0.11.0-amd64
docker rmi quay.io/coreos/flannel:v0.11.0-arm
docker rmi quay.io/coreos/flannel:v0.11.0-arm64
docker rmi quay.io/coreos/flannel:v0.11.0-ppc64le
docker rmi quay.io/coreos/flannel:v0.11.0-s390x
docker rmi coredns/coredns:1.6.2
docker rmi kubernetesui/metrics-scraper:v1.0.4
docker rmi kubernetesui/dashboard:v2.0.4
- 本地镜像仓库及资源准备完成
二、安装docker(每个节点都要)
- 解压资源包 (docker_绿色免安装版.tar.gz),将docker_inspkg下的所有文件放到 /usr/sbin/目录下
- 修改镜像源地址
#新建或修改daemon.json
vim /etc/docker/daemon.json
# NOTE: the key is "registry-mirrors" (plural). dockerd refuses to start
# on unknown configuration keys, so the original "registry-mirror" would
# break the daemon.
{
"registry-mirrors" : ["https://hub-mirror.c.163.com"],
"insecure-registries" : ["192.168.14.100:5000"]
}
- 使用 systemd 管理 docker
vim /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
# start only after the network (and firewalld, when present) is up
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
# dockerd notifies systemd itself once it is ready to serve
Type=notify
# path matches the binaries copied to /usr/sbin from the resource bundle
ExecStart=/usr/sbin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
# no resource limits on the container engine itself
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# startup may be slow when many containers must be restored — never time out
TimeoutStartSec=0
# let dockerd manage the cgroup hierarchy of its containers
Delegate=yes
# on stop, signal only the daemon process, not the container processes
KillMode=process
# restart on failure, at most 3 attempts per 60s window
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
- 启动并设置开机启动
# Reload systemd so it picks up the new unit file, then start docker
# now and enable it at boot
systemctl daemon-reload
systemctl start docker
systemctl enable docker
三、部署Etcd集群
注:为了节省机器,这里与 K8s 节点机器复用 , 也可以独立于 k8s 集群之外部署,只要 apiserver 能连接到就行
- 准备 cfssl 证书生成工具 , 找任意一台服务器操作,这里用 Master 节点
进入下载下来的资源包路径下,将cfssl_linux-amd64目录中的所有文件复制到 /usr/local/bin/ 下
# Install the cfssl tools into PATH with the executable bit set.
# 'install -m 0755' sets the mode on the destination copies in one step
# and, unlike the original chmod+cp, leaves the extracted source files
# untouched.
install -m 0755 cfssl_linux-amd64/* /usr/local/bin/
- 创建Etcd目录并解压二进制包
# Create the etcd directory layout and unpack the binaries.
# (fixed: the brace expansions were split across two lines in the
# original paste, which breaks both the mkdir and the mv commands)
mkdir -p /opt/etcd/{bin,cfg,ssl}
tar -zxf etcd-v3.4.9-linux-amd64.tar.gz
mv etcd-v3.4.9-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/
- 创建配置文件
# etcd configuration for THIS node — the values below are for the
# master (etcd-1). NOTE(review): on k8s-node1/k8s-node2 change
# ETCD_NAME to etcd-2/etcd-3 and the four *_URLS addresses to that
# node's own IP before starting etcd.
cat > /opt/etcd/cfg/etcd.conf << EOF
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.14.101:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.14.101:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.14.101:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.14.101:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.14.101:2380,etcd-2=https://192.168.14.102:2380,etcd-3=https://192.168.14.103:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
################################################################################
#ETCD_NAME:节点名称,集群中唯一
#ETCD_DATA_DIR:数据目录
#ETCD_LISTEN_PEER_URLS:集群通信监听地址
#ETCD_LISTEN_CLIENT_URLS:客户端访问监听地址
#ETCD_INITIAL_ADVERTISE_PEER_URLS:集群通告地址
#ETCD_ADVERTISE_CLIENT_URLS:客户端通告地址
#ETCD_INITIAL_CLUSTER:集群节点地址
#ETCD_INITIAL_CLUSTER_TOKEN:集群 Token
#ETCD_INITIAL_CLUSTER_STATE:加入集群的当前状态,new 是新集群,existing 表示加入已有集群
################################################################################
- 生成自签 CA证书
# Create the certificate working directories.
# (fixed: the brace expansion was split across two lines in the
# original paste, which breaks the mkdir command)
mkdir -p /usr/local/ssl/{etcd_ssl,k8s_ssl}
cd /usr/local/ssl/etcd_ssl
vim ca-config.json
{
"signing": {
"default": {
"expiry":