[k8s] Installing a highly available Kubernetes cluster from binaries

1. Environment preparation

1.1 Prepare the servers

Host     IP
host1    10.0.4.9
host2    10.0.4.5
host3    10.0.4.11

1.2 Upgrade the server kernel (needed if the kernel is version 3.10 or older)

#################centos7 kernel upgrade#################################
#Check the current kernel version
uname -r
#Update all packages except the kernel
yum update -y --exclude=kernel*
#Import the ELRepo public key and install the repo
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
#Install the fastest-mirror plugin
yum install -y yum-plugin-fastestmirror
#List the available kernel packages
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
#Install the long-term-support version (the package ending in lt)
yum --enablerepo=elrepo-kernel install -y kernel-lt
#Check the kernel boot-entry order
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
#Regenerate the GRUB configuration
grub2-mkconfig -o /boot/grub2/grub.cfg
#Make the new kernel the default: set GRUB_DEFAULT=0
vim /etc/default/grub
#Regenerate the GRUB configuration again
grub2-mkconfig -o /boot/grub2/grub.cfg
#Reboot the machine
reboot
#################centos7 kernel upgrade#################################

#################centos8 kernel upgrade#################################
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
yum --disablerepo="*" --enablerepo="elrepo-kernel" install kernel-ml
grub2-set-default 0
#Reboot
reboot
#################centos8 kernel upgrade#################################
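
A quick sanity check after the reboot (assuming grubby is available, as it normally is on CentOS 7/8):

#Kernel that will boot by default
grubby --default-kernel
#Kernel currently running
uname -r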

1.3 Prepare the certificates used by the cluster

1.3.1 Download the cfssl certificate tools
#GitHub download page: https://github.com/cloudflare/cfssl/releases/tag/v1.6.0
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.0/cfssl_1.6.0_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.0/cfssl-certinfo_1.6.0_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.0/cfssljson_1.6.0_linux_amd64
#Make the tools executable
chmod +x cfssl*
#Rename the tools to drop the version suffix, for convenience
for name in `ls cfssl*`; do mv ${name} ${name%_1.6.0_linux_amd64}; done
#Copy the tools to /usr/bin/
cp cfssl* /usr/bin/
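
A quick check that the tools are on the PATH and runnable:

#Print the cfssl version
cfssl version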

1.4 Configure the servers and tools

# Set a hostname on each of the three machines; note that it must not be localhost
hostnamectl set-hostname host1
#Edit /etc/hosts on every machine
vim /etc/hosts
#Append the following
10.0.4.9        host1
10.0.4.5        host2
10.0.4.11       host3

#Disable SELinux
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux

#Disable swap:
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab

#Raise the resource limits
ulimit -SHn 65535
vim /etc/security/limits.conf
#Append the following at the end (a hard limit must not be lower than its soft limit)
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited

#Set up passwordless SSH login
ssh-keygen -t rsa
#Distribute the public key to the other machines
for i in host1 host2 host3; do ssh-copy-id -i .ssh/id_rsa.pub $i;done
#Install some tools used later on
yum install wget git jq psmisc net-tools yum-utils device-mapper-persistent-data lvm2 -y
#Install the ipvs tools
yum install ipvsadm ipset sysstat conntrack libseccomp -y
#Load the ipvs modules on all nodes (on kernels below 4.18 use nf_conntrack_ipv4 instead of nf_conntrack)
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
#Write the /etc/modules-load.d/ipvs.conf configuration file
cat >> /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
#Apply the module configuration
systemctl restart systemd-modules-load.service
systemctl enable systemd-modules-load.service
#Check that the modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack
#Write the kernel tuning configuration
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
#Apply the configuration
sysctl --system
#Reboot and verify the modules are loaded at boot
reboot
lsmod | grep -e ip_vs -e nf_conntrack
#Install docker and configure a registry mirror
#See: https://blog.csdn.net/beck12341234/article/details/126595469
#Create the certificate directory
mkdir -p /etc/kubernetes/pki
cd /etc/kubernetes/pki
#CA signing configuration
cat > ca-config.json <<EOF
{
    "signing": {
        "default": {
            "expiry": "19800h"
        },
        "profiles": {
            "server": {
                "expiry": "19800h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "19800h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "19800h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth",
                    "server auth"
                ]
            },
            "kubernetes": {
                "expiry": "19800h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth",
                    "server auth"
                ]
            },
            "etcd": {
                "expiry": "19800h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth",
                    "server auth"
                ]
            }
        }
    }
}
EOF

1.5 Install the highly available etcd cluster

1.5.1 Prepare the packages
#Download the etcd package etcd-v3.4.20-linux-amd64.tar.gz
#Download page: https://github.com/etcd-io/etcd/releases
#Upload the package to all three servers
#Unpack it and move the binaries to /usr/bin/
tar -zxvf etcd-v3.4.20-linux-amd64.tar.gz
cd etcd-v3.4.20-linux-amd64/
mv etcd /usr/bin/
mv etcdctl /usr/bin/
#Test the command
etcdctl
1.5.2 Prepare the certificates
#Certificate directory
mkdir /etc/kubernetes/pki/etcd
cd /etc/kubernetes/pki/etcd
#Generate the CA
#Write the CA certificate signing request configuration
cat > ca-csr.json <<EOF
{
    "CN": "etcd",
    "hosts": [
        "cj.io",
        "www.cj.io"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Beijing",
            "L": "Beijing",
            "O": "etcd",
            "OU": "etcd"
        }
    ]
}
EOF

#Generate the CA certificate and key
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
#Create the certificate signing request file
cat > etcd-easytuop-csr.json <<EOF
{
    "CN": "etcd-easytuop",
    "hosts": [
                "127.0.0.1",
                "host1",
                "host2",
                "host3",
                "10.0.4.9",
                "10.0.4.5",
                "10.0.4.11"
        ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Beijing",
            "L": "Beijing",
           "O": "etcd",
           "OU": "System"
        }
    ]
}
EOF
#Issue the certificate
cfssl gencert \
	-ca=ca.pem \
	-ca-key=ca-key.pem \
	-config=/etc/kubernetes/pki/ca-config.json \
	-profile=etcd \
	etcd-easytuop-csr.json | cfssljson -bare etcd
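
Before distributing the certificate, it can be worth confirming it contains every host and IP from the CSR (cfssl-certinfo was installed in 1.3.1):

#Inspect the issued certificate, including its SANs and expiry
cfssl-certinfo -cert etcd.pem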
#Send the etcd certificate directory to the other machines
scp -r etcd root@host2:/etc/kubernetes/pki/
scp -r etcd root@host3:/etc/kubernetes/pki/
#Prepare the etcd configuration file
mkdir /etc/etcd
vim /etc/etcd/etcd.yaml
#Write the configuration below
#The name and the listen/advertise IP addresses must be changed on each node
name: 'etcd-3'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: https://10.0.4.11:2380
listen-client-urls: https://10.0.4.11:2379,http://127.0.0.1:2379
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: https://10.0.4.11:2380
advertise-client-urls: https://10.0.4.11:2379
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-1=https://10.0.4.9:2380,etcd-2=https://10.0.4.5:2380,etcd-3=https://10.0.4.11:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/ca.pem'
  auto-tls: true
logger: zap
log-outputs: [stderr]
force-new-cluster: false
auto-compaction-mode: periodic
auto-compaction-retention: "1"
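
Only name and the four listen/advertise URLs differ between nodes, so instead of hand-editing three files you can stamp them out with sed. A minimal sketch, assuming the host3 file above is the template and NAME/IP are set per node (e.g. NAME=etcd-1 IP=10.0.4.9); it deliberately leaves the initial-cluster line untouched:

#Render a per-node etcd.yaml from the template above
NAME=etcd-1
IP=10.0.4.9
sed -e "s/^name: .*/name: '${NAME}'/" \
    -e "/^listen-peer-urls:/s#10.0.4.11#${IP}#" \
    -e "/^listen-client-urls:/s#10.0.4.11#${IP}#" \
    -e "/^initial-advertise-peer-urls:/s#10.0.4.11#${IP}#" \
    -e "/^advertise-client-urls:/s#10.0.4.11#${IP}#" \
    /etc/etcd/etcd.yaml > etcd-${NAME}.yaml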
#Set etcd up as a systemd service started at boot
vim /usr/lib/systemd/system/etcd.service
#Write the following
[Unit]
Description=etcd services
After=network.target

[Service]
Type=notify
ExecStart=/usr/bin/etcd --config-file=/etc/etcd/etcd.yaml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service
#Reload systemd
systemctl daemon-reload
#Start etcd and enable it at boot
systemctl enable --now etcd
#Check the status
systemctl status etcd
#Check the startup logs
journalctl -u etcd
#Check the etcd cluster status
etcdctl --endpoints="10.0.4.9:2379,10.0.4.5:2379,10.0.4.11:2379" --cacert=/etc/kubernetes/pki/etcd/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table

#etcdctl reads environment variables automatically, so exporting them saves retyping the flags
export ETCDCTL_API=3
HOST_1=10.0.4.9
HOST_2=10.0.4.5
HOST_3=10.0.4.11
ENDPOINTS=$HOST_1:2379,$HOST_2:2379,$HOST_3:2379
#Export the environment variables; see: https://github.com/etcd-io/etcd/tree/main/etcdctl
export ETCDCTL_DIAL_TIMEOUT=3s
export ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.pem
export ETCDCTL_CERT=/etc/kubernetes/pki/etcd/etcd.pem
export ETCDCTL_KEY=/etc/kubernetes/pki/etcd/etcd-key.pem
export ETCDCTL_ENDPOINTS=$HOST_1:2379,$HOST_2:2379,$HOST_3:2379
#etcdctl now picks the settings up from the environment
etcdctl endpoint status --write-out=table
etcdctl member list --write-out=table
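
With the variables exported, a quick health probe should show all three members answering:

#Check the health of every endpoint
etcdctl endpoint health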

2. k8s installation

2.1 Prepare the master packages

#Download the package
wget https://dl.k8s.io/v1.21.14/kubernetes-server-linux-amd64.tar.gz
#Distribute the package to all nodes
scp kubernetes-server-linux-amd64.tar.gz root@host2:/app/
scp kubernetes-server-linux-amd64.tar.gz root@host3:/app/
#Unpack it and move the binaries to /usr/bin
tar -zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kubelet /usr/bin/
cp kubectl /usr/bin/
cp kube-apiserver /usr/bin/
cp kube-controller-manager /usr/bin/
cp kube-scheduler /usr/bin/
cp kube-proxy /usr/bin/
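
A quick sanity check that the binaries match the downloaded release and run on this machine:

#Verify the binaries
kubectl version --client
kube-apiserver --version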

2.2 Prepare the apiserver domain name and nginx load balancer

#A free certificate for my own domain was requested from Tencent Cloud (you can also use a cloud vendor's load balancer directly, in which case no domain or certificate of your own is needed)
#The nginx configuration is as follows
# Create the file in nginx's conf.d directory on the 10.0.4.9 server
vim nginx.conf
stream {
    upstream kube_apiserver {
        server 10.0.4.9:6443;
        server 10.0.4.5:6443;
        server 10.0.4.11:6443;
    }
    
    server {
        listen        9443;
        proxy_pass    kube_apiserver;
        proxy_timeout 30;
        proxy_connect_timeout 2s;
    }
}

#Test the configuration and start nginx
sbin/nginx -t && sbin/nginx
#Or reload if nginx is already running
sbin/nginx -s reload
#Once this is in place, the apiserver can be reached through this load-balancer address
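
Before pointing any kubeconfig at it, confirm the stream proxy is actually listening (connections will still fail until the apiservers are up):

#Confirm nginx is listening on 9443
ss -lntp | grep 9443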

2.3 Generate the apiserver certificate

cd /etc/kubernetes/pki
#Prepare the CA
cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Beijing",
            "L": "Beijing",
            "O": "Kubernetes",
            "OU": "Kubernetes"
        }
    ],
    "ca": {
        "expiry": "43800h"
    }
}
EOF
#Generate the CA
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
#Certificate signing request file
cat > apiserver-csr.json <<EOF
{
    "CN": "kube-apiserver",
    "hosts": [
    						"10.96.0.1",
                "127.0.0.1",
                "host1",
                "host2",
                "host3",
                "10.0.4.9",
                "10.0.4.5",
                "10.0.4.11",
                "kubernetes",
                "kubernetes.default",
                "kubernetes.default.svc",
                "kubernetes.default.svc.cluster",
                "kubernetes.default.svc.cluster.local"
        ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Beijing",
            "L": "Beijing",
           "O": "Kubernetes",
           "OU": "Kubernetes"
        }
    ]
}
EOF

#Generate the certificate
cfssl gencert \
	-ca=/etc/kubernetes/pki/ca.pem \
	-ca-key=/etc/kubernetes/pki/ca-key.pem \
	-config=/etc/kubernetes/pki/ca-config.json \
	-profile=kubernetes \
	apiserver-csr.json | cfssljson -bare apiserver
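
If you want to double-check the result, the SANs must include the service IP 10.96.0.1, otherwise in-cluster clients will fail TLS verification:

#Inspect the apiserver certificate SANs
openssl x509 -in apiserver.pem -noout -text | grep -A1 "Subject Alternative Name"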

2.4 Generate the front-proxy certificates

cd /etc/kubernetes/pki
#Prepare the CA
cat > front-proxy-ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    }
}
EOF
#Generate the CA
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca

#front-proxy-client certificate
cat > front-proxy-client-csr.json <<EOF
{
    "CN": "front-proxy-client",
    "key": {
        "algo": "rsa",
        "size": 2048
    }
}
EOF

#Generate the certificate
cfssl gencert \
	-ca=/etc/kubernetes/pki/front-proxy-ca.pem \
	-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem \
	-config=/etc/kubernetes/pki/ca-config.json \
	-profile=kubernetes \
	front-proxy-client-csr.json | cfssljson -bare front-proxy-client

2.5 Generate the controller-manager certificate

cd /etc/kubernetes/pki
#Certificate signing request file
cat > controller-manager-csr.json <<EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Beijing",
            "L": "Beijing",
           "O": "system:kube-controller-manager",
           "OU": "Kubernetes"
        }
    ]
}
EOF

#Generate the certificate
cfssl gencert \
	-ca=/etc/kubernetes/pki/ca.pem \
	-ca-key=/etc/kubernetes/pki/ca-key.pem \
	-config=/etc/kubernetes/pki/ca-config.json \
	-profile=kubernetes \
	controller-manager-csr.json | cfssljson -bare controller-manager

2.6 Generate the controller-manager.conf kubeconfig

#If the apiserver is not behind a load balancer, change 10.0.4.9:9443 below to master01's address with the apiserver port 6443
#Set the cluster entry
kubectl config set-cluster kubernetes \
	--certificate-authority=/etc/kubernetes/pki/ca.pem \
	--embed-certs=true \
	--server=https://10.0.4.9:9443 \
	--kubeconfig=/etc/kubernetes/controller-manager.conf
	
#Set the context entry
kubectl config set-context system:kube-controller-manager@kubernetes \
	--cluster=kubernetes \
	--user=system:kube-controller-manager \
	--kubeconfig=/etc/kubernetes/controller-manager.conf
	
#Set the user entry
kubectl config set-credentials system:kube-controller-manager \
	--client-certificate=/etc/kubernetes/pki/controller-manager.pem \
	--client-key=/etc/kubernetes/pki/controller-manager-key.pem \
	--embed-certs=true \
	--kubeconfig=/etc/kubernetes/controller-manager.conf

#Set the default context
kubectl config use-context system:kube-controller-manager@kubernetes \
	--kubeconfig=/etc/kubernetes/controller-manager.conf

2.7 Generate the scheduler certificate

cd /etc/kubernetes/pki
#Certificate signing request file
cat > scheduler-csr.json <<EOF
{
    "CN": "system:kube-scheduler",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Beijing",
            "L": "Beijing",
           "O": "system:kube-scheduler",
           "OU": "Kubernetes"
        }
    ]
}
EOF

#Generate the certificate
cfssl gencert \
	-ca=/etc/kubernetes/pki/ca.pem \
	-ca-key=/etc/kubernetes/pki/ca-key.pem \
	-config=/etc/kubernetes/pki/ca-config.json \
	-profile=kubernetes \
	scheduler-csr.json | cfssljson -bare scheduler

2.8 Generate the scheduler.conf kubeconfig

#If the apiserver is not behind a load balancer, change 10.0.4.9:9443 below to master01's address with the apiserver port 6443
#Set the cluster entry
kubectl config set-cluster kubernetes \
	--certificate-authority=/etc/kubernetes/pki/ca.pem \
	--embed-certs=true \
	--server=https://10.0.4.9:9443 \
	--kubeconfig=/etc/kubernetes/scheduler.conf
	
#Set the user entry
kubectl config set-credentials system:kube-scheduler \
	--client-certificate=/etc/kubernetes/pki/scheduler.pem \
	--client-key=/etc/kubernetes/pki/scheduler-key.pem \
	--embed-certs=true \
	--kubeconfig=/etc/kubernetes/scheduler.conf
	
#Set the context entry
kubectl config set-context system:kube-scheduler@kubernetes \
	--cluster=kubernetes \
	--user=system:kube-scheduler \
	--kubeconfig=/etc/kubernetes/scheduler.conf
	

#Set the default context
kubectl config use-context system:kube-scheduler@kubernetes \
	--kubeconfig=/etc/kubernetes/scheduler.conf

2.9 Generate the admin certificate

cd /etc/kubernetes/pki
#Certificate signing request file
cat > admin-csr.json <<EOF
{
    "CN": "admin",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Beijing",
            "L": "Beijing",
           "O": "system:masters",
           "OU": "Kubernetes"
        }
    ]
}
EOF

#Generate the certificate
cfssl gencert \
	-ca=/etc/kubernetes/pki/ca.pem \
	-ca-key=/etc/kubernetes/pki/ca-key.pem \
	-config=/etc/kubernetes/pki/ca-config.json \
	-profile=kubernetes \
	admin-csr.json | cfssljson -bare admin

2.10 Generate the admin.conf kubeconfig

#If the apiserver is not behind a load balancer, change 10.0.4.9:9443 below to master01's address with the apiserver port 6443
#Set the cluster entry
kubectl config set-cluster kubernetes \
	--certificate-authority=/etc/kubernetes/pki/ca.pem \
	--embed-certs=true \
	--server=https://10.0.4.9:9443 \
	--kubeconfig=/etc/kubernetes/admin.conf
	
#Set the user entry
kubectl config set-credentials kubernetes-admin \
	--client-certificate=/etc/kubernetes/pki/admin.pem \
	--client-key=/etc/kubernetes/pki/admin-key.pem \
	--embed-certs=true \
	--kubeconfig=/etc/kubernetes/admin.conf
	
#Set the context entry
kubectl config set-context kubernetes-admin@kubernetes \
	--cluster=kubernetes \
	--user=kubernetes-admin \
	--kubeconfig=/etc/kubernetes/admin.conf
	
#Set the default context
kubectl config use-context kubernetes-admin@kubernetes \
	--kubeconfig=/etc/kubernetes/admin.conf

2.11 Generate the serviceAccount key pair

openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub

2.12 Send the kubeconfig files to the other nodes

scp admin.conf root@host2:/etc/kubernetes/
scp controller-manager.conf  root@host2:/etc/kubernetes/
scp scheduler.conf   root@host2:/etc/kubernetes/

scp admin.conf root@host3:/etc/kubernetes/
scp controller-manager.conf  root@host3:/etc/kubernetes/
scp scheduler.conf   root@host3:/etc/kubernetes/
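
The certificate directory itself is also needed on the other masters, since apiserver, controller-manager, and scheduler all read from /etc/kubernetes/pki. Assuming the same layout on every node:

#Send the whole pki directory (CA, apiserver, front-proxy, sa keys) to the other masters
scp -r /etc/kubernetes/pki root@host2:/etc/kubernetes/
scp -r /etc/kubernetes/pki root@host3:/etc/kubernetes/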

2.13 Start the apiserver

#Create the directories
mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
#Create the apiserver service file
vim /usr/lib/systemd/system/kube-apiserver.service
#Write the following (change --advertise-address to each node's own IP)
[Unit]
Description=Kubernetes API Server
After=network.target

[Service]
ExecStart=/usr/bin/kube-apiserver \
      --v=2  \
      --logtostderr=true  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --insecure-port=0  \
      --advertise-address=10.0.4.9 \
      --service-cluster-ip-range=10.96.0.0/16  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://10.0.4.9:2379,https://10.0.4.5:2379,https://10.0.4.11:2379 \
      --etcd-cafile=/etc/kubernetes/pki/etcd/ca.pem  \
      --etcd-certfile=/etc/kubernetes/pki/etcd/etcd.pem   \
      --etcd-keyfile=/etc/kubernetes/pki/etcd/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator,front-proxy-client  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv
 
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
#Reload systemd
systemctl daemon-reload
systemctl enable --now kube-apiserver
#Check the status
systemctl status kube-apiserver
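
The apiserver exposes an unauthenticated health endpoint, so a direct probe should return ok (and the same through the nginx load balancer once all three masters are up):

#Probe the local apiserver and the load balancer
curl -k https://127.0.0.1:6443/healthz
curl -k https://10.0.4.9:9443/healthz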

2.14 Start the controller-manager

#Create the controller-manager service file
vim /usr/lib/systemd/system/kube-controller-manager.service
#Write the following
[Unit]
Description=Kubernetes Controller Manager
After=network.target

[Service]
ExecStart=/usr/bin/kube-controller-manager \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --root-ca-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
      --kubeconfig=/etc/kubernetes/controller-manager.conf \
      --leader-elect=true \
      --use-service-account-credentials=true \
      --node-monitor-grace-period=40s \
      --node-monitor-period=5s \
      --pod-eviction-timeout=2m0s \
      --controllers=*,bootstrapsigner,tokencleaner \
      --allocate-node-cidrs=true \
      --cluster-cidr=196.16.0.0/16 \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --node-cidr-mask-size=24
 
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
#Reload systemd
systemctl daemon-reload
systemctl enable --now kube-controller-manager
#Check the status
systemctl status kube-controller-manager

2.15 Start the scheduler

#Create the scheduler service file
vim /usr/lib/systemd/system/kube-scheduler.service
#Write the following
[Unit]
Description=Kubernetes Scheduler
After=network.target

[Service]
ExecStart=/usr/bin/kube-scheduler \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --leader-elect=true \
      --kubeconfig=/etc/kubernetes/scheduler.conf
 
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
#Reload systemd
systemctl daemon-reload
systemctl enable --now kube-scheduler
#Check the status
systemctl status kube-scheduler
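
With all three control-plane services up, the (deprecated but still handy) componentstatuses view should report everything healthy:

#Check scheduler, controller-manager, and etcd health in one shot
kubectl --kubeconfig=/etc/kubernetes/admin.conf get componentstatuses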

2.16 Generate the kubelet configuration

#Prepare a random bootstrap token; the format is <6-character id>.<16-character secret>
#16 hex characters for the token secret
head -c 8 /dev/urandom | od -An -t x | tr -d ' '
#6 hex characters for the token id
head -c 3 /dev/urandom | od -An -t x | tr -d ' '
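
A small sketch that assembles both parts into the <id>.<secret> form used below (this article uses a44840.00f14c1c57b698e8; TOKEN_ID and TOKEN_SECRET are just illustrative names):

#Generate a bootstrap token in id.secret form
TOKEN_ID=$(head -c 3 /dev/urandom | od -An -t x | tr -d ' ')
TOKEN_SECRET=$(head -c 8 /dev/urandom | od -An -t x | tr -d ' ')
echo "${TOKEN_ID}.${TOKEN_SECRET}"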

#If the apiserver is not behind a load balancer, change 10.0.4.9:9443 below to master01's address with the apiserver port 6443
#Set the cluster
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://10.0.4.9:9443 \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf

#Set the credentials (the bootstrap token from above)
kubectl config set-credentials tls-bootstrap-token-user \
--token=a44840.00f14c1c57b698e8 \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf 

#Set the context
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf

#Use the context
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf
#Create the kubectl configuration directory
mkdir -p /root/.kube
cp /etc/kubernetes/admin.conf /root/.kube/config

#Send bootstrap-kubelet.conf to all nodes
scp bootstrap-kubelet.conf root@host2:/etc/kubernetes/
scp bootstrap-kubelet.conf root@host3:/etc/kubernetes/

2.17 Create the cluster bootstrap RBAC file

#On the master node
vi  /etc/kubernetes/bootstrap.secret.yaml

apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-a44840
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: a44840
  token-secret: 00f14c1c57b698e8
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
 
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
# Apply the resources in this file
kubectl create -f /etc/kubernetes/bootstrap.secret.yaml
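
To confirm the token landed where the apiserver expects it:

#The bootstrap token secret should exist in kube-system
kubectl get secret bootstrap-token-a44840 -n kube-system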

2.18 Configure the kubelet service

# Create the required directories on all nodes
mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/

#Configure the kubelet service on all nodes
vi  /usr/lib/systemd/system/kubelet.service

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/bin/kubelet

Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
# Configure the kubelet service drop-in on all nodes
vi /etc/systemd/system/kubelet.service.d/10-kubelet.conf

[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/easytuop/pause:3.4.1"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
#Configure the kubelet-conf file on all nodes
vi /etc/kubernetes/kubelet-conf.yml
# clusterDNS is the 10th IP of the service network; change it to your own, e.g. 10.96.0.10

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s  #reduce this as appropriate
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
#Start kubelet
systemctl daemon-reload && systemctl enable --now kubelet
#Check the status
systemctl status kubelet

#Note: the start may fail here
#Check the error log
journalctl -u kubelet
#A typical error:
#failed to run Kubelet: misconfiguration: kubelet cgroup driver: "systemd" is different from docker cgroup driver: "cgroupfs"
#See: https://blog.csdn.net/beck12341234/article/details/126908572?spm=1001.2014.3001.5502
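
Once kubelet starts cleanly on every node, the bootstrap token should get the nodes registered automatically (the CSRs are auto-approved by the bindings from 2.17):

#Nodes appear, NotReady until the CNI plugin is deployed in 2.20
kubectl get nodes
#The bootstrap CSRs should show Approved,Issued
kubectl get csr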

2.19 Configure and start kube-proxy

#Generate kube-proxy.conf
#Create the kube-proxy service account
kubectl -n kube-system create serviceaccount kube-proxy

#Create the cluster role binding
kubectl create clusterrolebinding system:kube-proxy \
--clusterrole system:node-proxier \
--serviceaccount kube-system:kube-proxy

#Export variables for later use
SECRET=$(kubectl -n kube-system get sa/kube-proxy --output=jsonpath='{.secrets[0].name}')
JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET --output=jsonpath='{.data.token}' | base64 -d)
PKI_DIR=/etc/kubernetes/pki
K8S_DIR=/etc/kubernetes

# Generate the kube-proxy kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://10.0.4.9:9443 \
--kubeconfig=${K8S_DIR}/kube-proxy.conf

# Set the kube-proxy credentials
kubectl config set-credentials kubernetes \
--token=${JWT_TOKEN} \
--kubeconfig=/etc/kubernetes/kube-proxy.conf


kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=kubernetes \
--kubeconfig=/etc/kubernetes/kube-proxy.conf


kubectl config use-context kubernetes \
--kubeconfig=/etc/kubernetes/kube-proxy.conf
#Send the generated kube-proxy.conf to every node
for NODE in host2 host3; do
      scp /etc/kubernetes/kube-proxy.conf $NODE:/etc/kubernetes/
 done
# Configure the kube-proxy.service unit on all nodes; it is enabled at boot below
vi /usr/lib/systemd/system/kube-proxy.service

[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.yaml \
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
# Run on all machines
vi /etc/kubernetes/kube-proxy.yaml

apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.conf   #the kube-proxy kubeconfig generated above
  qps: 5
clusterCIDR: 196.16.0.0/16  #change to your own pod CIDR
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
#Start kube-proxy
systemctl daemon-reload && systemctl enable --now kube-proxy
systemctl status kube-proxy
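
Since mode is ipvs, the virtual servers should appear as soon as kube-proxy syncs:

#List the ipvs virtual servers; the kubernetes service VIP 10.96.0.1:443 should be present
ipvsadm -Ln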

2.20 Deploy calico

cd /etc/kubernetes/
# Download the official calico manifest
curl https://docs.projectcalico.org/manifests/calico-etcd.yaml -o calico.yaml
## Optionally replace the images with domestic mirror equivalents

# Apply our customizations: set the etcd cluster endpoints
sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://10.0.4.9:2379,https://10.0.4.5:2379,https://10.0.4.11:2379"#g' calico.yaml


# The etcd certificates must be base64-encoded into the yaml
ETCD_CA=`cat /etc/kubernetes/pki/etcd/ca.pem | base64 -w 0 `
ETCD_CERT=`cat /etc/kubernetes/pki/etcd/etcd.pem | base64 -w 0 `
ETCD_KEY=`cat /etc/kubernetes/pki/etcd/etcd-key.pem | base64 -w 0 `

# Substitute the base64-encoded certificate contents into the manifest
sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" calico.yaml


#Enable the etcd_ca/etcd_cert/etcd_key settings (the paths where calico mounts the secret created above)
sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' calico.yaml

# Set your own pod subnet, 196.16.0.0/16
POD_SUBNET="196.16.0.0/16"
sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@#   value: "192.168.0.0/16"@  value: '"${POD_SUBNET}"'@g' calico.yaml

#Make absolutely sure the edits took effect
grep "CALICO_IPV4POOL_CIDR" calico.yaml -A 1
# Apply the calico manifest
kubectl apply -f calico.yaml
#List all pods
kubectl get pod -A
#If some of the calico pods fail to start, inspect them
kubectl describe pod <pod name from the output above> -n kube-system
#If the image pull was denied, a docker login may be needed

#If some pods still fail to start with the error
#"calico/node is not ready: BIRD is not ready: BGP not established with xxxxx"
#edit the calico.yaml above and add:
            - name: IP_AUTODETECTION_METHOD
              value: "interface=eth.*"
#See: http://www.manongjc.com/detail/22-hflhsntfikgaezg.html
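
To watch the daemonset converge across the three nodes:

#Wait for every calico-node pod to become ready
kubectl -n kube-system rollout status ds/calico-node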

2.21 Deploy coreDNS

git clone https://github.com/coredns/deployment.git
cd deployment/kubernetes

#Change 10.96.0.10 to the 10th IP of your service network
#Run the command below once every pod returned by kubectl get pod -A is Running
./deploy.sh -s -i 10.96.0.10 | kubectl apply -f -
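
A quick DNS check once the coredns pods are Running (busybox:1.28 is used deliberately; nslookup is broken in newer busybox images):

#Resolve the kubernetes service through cluster DNS
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default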
#Check the taints; by default the nodes carry no taints, so all of them can run pods
kubectl describe node host1 | grep Taints
#If you don't want pods scheduled on host1, taint it, for example:
kubectl taint nodes host1 node-role.kubernetes.io/master=:NoSchedule
#Label the nodes
#All three nodes act as masters
kubectl label node host1 node-role.kubernetes.io/master=''
kubectl label node host2 node-role.kubernetes.io/master=''
kubectl label node host3 node-role.kubernetes.io/master=''
#All three nodes also act as workers
kubectl label node host1 node-role.kubernetes.io/worker=''
kubectl label node host2 node-role.kubernetes.io/worker=''
kubectl label node host3 node-role.kubernetes.io/worker=''
#host1 is the apiserver load-balancer node
kubectl label node host1 node-role.kubernetes.io/apiserver-lb=''

3. Verify the cluster

vim deploy.yaml

#Write the following
apiVersion: apps/v1
kind: Deployment
metadata:
  name:  nginx-01
  namespace: default
  labels:
    app:  nginx-01
spec:
  selector:
    matchLabels:
      app: nginx-01
  replicas: 1
  template:
    metadata:
      labels:
        app:  nginx-01
    spec:
      containers:
      - name:  nginx-01
        image:  nginx
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
  namespace: default
spec:
  selector:
    app:  nginx-01
  type: ClusterIP
  ports:
  - name: nginx-svc
    port: 80
    targetPort: 80
    protocol: TCP
---
apiVersion: v1
kind: Namespace
metadata:
  name: hello
spec: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name:  nginx-hello
  namespace: hello
  labels:
    app:  nginx-hello
spec:
  selector:
    matchLabels:
      app: nginx-hello
  replicas: 1
  template:
    metadata:
      labels:
        app:  nginx-hello
    spec:
      containers:
      - name:  nginx-hello
        image:  nginx
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc-hello
  namespace: hello
spec:
  selector:
    app:  nginx-hello
  type: ClusterIP
  ports:
  - name: nginx-svc-hello
    port: 80
    targetPort: 80
    protocol: TCP
    
#Apply it
kubectl apply -f deploy.yaml
#Test pod access
#Find the IPs of the nginx pods created above
kubectl get pod -A -owide
#curl the IPs of the two nginx pods
curl 196.16.119.3
#Find the CLUSTER-IPs of the two nginx services
kubectl get svc -A
#curl the CLUSTER-IPs
curl 10.96.76.202
curl 10.96.15.55

#Pod-to-pod access
#List the pods
kubectl get pod -A -owide
#Exec into one of the pods
kubectl exec -it nginx-01-5bd9d6df7b-v9bsq -- bash
#curl the other pod's IP
curl 196.16.119.2
#curl a service cluster IP
curl 10.96.76.202
#Verify access through the service DNS name
curl nginx-svc-hello.hello