目录
参考文档:
K8S官方容器运行时:
https://kubernetes.io/zh-cn/docs/setup/production-environment/container-runtimes/
K8S与Docker对等版本:https://gitcode.com/kubernetes/kubernetes/blob/master/build/dependencies.yaml?utm_source=csdn_github_accelerator&isLogin=1
Docker离线安装包:
https://download.docker.com/linux/static/stable/x86_64/
calico的yaml文件:
https://github.com/projectcalico/calico/blob/v3.25.0/manifests/calico.yaml
calico官网:
https://docs.tigera.io/calico/latest/about
一、环境准备
1.更改主机名
主服务器
hostnamectl set-hostname k8s-master01
从服务器
hostnamectl set-hostname k8s-node01
从服务器2
hostnamectl set-hostname k8s-node02
三个终端全部执行
vim /etc/hosts
192.168.159.82 k8s-master01 m1
192.168.159.83 k8s-node01 n1
192.168.159.84 k8s-node02 n2
2.安装依赖包
yum install -y conntrack ntpdate ntp ipvsadm ipset iptables curl sysstat libseccomp wget vim net-tools git yum-utils
3.关闭swap分区
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
free -m
4.关闭 firewalld 防火墙
systemctl stop firewalld
systemctl disable firewalld
5.关闭 NetworkManager
systemctl stop NetworkManager
systemctl disable NetworkManager
6.关闭 SElinux
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
7.关闭系统不需要的进程
systemctl stop postfix && systemctl disable postfix
二、配置k8s的流量被防火墙处理
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
#IPV4中,网桥的流量必须被防火墙处理
net.bridge.bridge-nf-call-ip6tables=1
#IPV6中,网桥的流量必须被防火墙处理
net.ipv4.ip_forward=1
#开启路由转发
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
# 禁止使用 swap 空间,只有当系统 OOM 时才允许使用它
vm.overcommit_memory=1
# 不检查物理内存是否够用
vm.panic_on_oom=0
# OOM 时不触发内核 panic(0=不panic,交由OOM killer处理)
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
# 会有目录不存在报错,不用理会
三、配置时间同步
1.主服务器
vim /etc/chrony.conf
# 配置阿里时间服务器
# 注释默认的server,添加以下两个server。
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
allow 192.168.159.0/24
systemctl restart chronyd
systemctl enable chronyd
2.从服务器
vim /etc/chrony.conf
# 指向主服务器地址
server 192.168.159.82 iburst
systemctl restart chronyd
systemctl enable chronyd
四、主服务器配置持久化日志
mkdir /var/log/journal
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# 持久化保存到磁盘
Storage=persistent
# 压缩历史日志
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# 最大占用空间 10G
SystemMaxUse=10G
# 单日志文件最大 200M
SystemMaxFileSize=200M
# 日志保存时间 2 周
MaxRetentionSec=2week
# 不将日志转发到 syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
五、物理机内核升级
# 下载内核rpm包:https://mirrors.aliyun.com/elrepo/archive/kernel/el7/x86_64/RPMS/
# 或:https://elrepo.org/linux/kernel/el7/x86_64/RPMS/
yum -y install kernel-lt-4.4.222-1.el7.elrepo.x86_64.rpm
cat /boot/grub2/grub.cfg | grep 4.4
grub2-set-default 'CentOS Linux (4.4.222-1.el7.elrepo.x86_64) 7 (Core)'
reboot
# 加载br_netfilter配置文件
modprobe br_netfilter
六、配置ipvs
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
# kube-proxy开启ipvs的前置条件
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
七、安装docker
# 配置阿里docker的yum源
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache
yum install -y docker-ce-20.10.24 docker-ce-cli-20.10.24 containerd.io-1.6.31
# 配置阿里云镜像加速
cat > /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"insecure-registries": ["harbor.daboluo.com"],
"registry-mirrors": ["https://kfp63jaj.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload && systemctl restart docker && systemctl enable docker && docker -v
八、部署K8S
1.安装kubeadm、kubelet、kubectl
# 配置阿里k8s的yum源
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# 导入密钥
rpm --import https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
rpm --import https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
yum install -y kubelet-1.26.9 kubeadm-1.26.9 kubectl-1.26.9
systemctl enable --now kubelet
# 查看 kubelet 是否有报错日志
journalctl -xefu kubelet
2.生成 kubeadm 配置初始化模板
# 打印当前kubeadm配置,输出到文件
kubeadm config print init-defaults > kubeadm-config.yaml
vim kubeadm-config.yaml
-- 修改advertiseAddress字段为本机地址
-- 修改criSocket地址:unix:///run/containerd/containerd.sock
-- 检查kubernetesVersion,应与kubeadm version查询得到的版本一致
-- 将imageRepository字段改为registry.cn-hangzhou.aliyuncs.com/google_containers
-- 将name字段改为当前master节点的名字
-- 在networking字段添加:podSubnet: 100.100.0.0/16
-- 末尾配置 kube-proxy 的 ipvs 模块
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
ipvs:
scheduler: "rr"
3.镜像预下载
# 基于初始化文件,查看需要下载的镜像
kubeadm config images list --config=kubeadm-config.yaml
# 下载镜像
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.26.9
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.26.9
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.26.9
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.26.9
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.6-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.9.3
4.Containerd作为容器运行时(所有节点执行)
# 所有节点配置Containerd所需模块,确保在系统启动时自动加载这两个内核模块
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
# 所有节点加载模块
modprobe -- overlay
modprobe -- br_netfilter
# 所有节点配置Containerd所需内核,为 Kubernetes 的 CRI 和相关网络功能提供支持
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
# 所有节点加载内核
sysctl --system
# 所有节点配置Containerd的配置文件
mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml
vim /etc/containerd/config.toml
-- 将containerd.runtimes.runc.options中的 SystemdCgroup 字段修改为true
-- 接下来将sandbox_image的Pause镜像修改为适合自己版本的地址,
# sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9"
# 所有节点设置Containerd开机启动
systemctl daemon-reload && systemctl enable --now containerd && systemctl restart containerd
# 所有节点配置crictl客户端连接的Runtime位置
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
timeout: 0
debug: false
pull-image-on-create: true
EOF
5.kubeadm 初始化
# kubeadm初始化集群,初始化信息保存到 kubeadm-init.log
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
# 初始化成功的输出:
... ...
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.106.252:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:d5aa72d59db17d47935df8b8ed326595e3c0f868be6c9dd3eb1ffade88b54efc
# token的有效期为24小时,创建新token,使从节点加入集群。
# kubeadm token create --print-join-command
6.重新初始化
# 初始化失败时,重新初始化
kubeadm reset -f
# 清空防火墙规则
iptables -F
iptables -X
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
iptables -P FORWARD ACCEPT
# 清空残留文件
rm -rf /etc/cni/net.d
rm -rf /var/lib/etcd
rm -rf /var/lib/kubelet
rm -rf /var/lib/cni
# 容器运行时,删除所有运行的容器、镜像等
docker system prune -a
# 停止和禁用 kubelet
sudo systemctl stop kubelet
sudo systemctl disable kubelet
# 重启节点
reboot
# 重启后,再执行 kubeadm init ...
7.用户授权
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
8.从节点加入集群
kubeadm join 192.168.159.82:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:89495d156e508a90824326fa88a47aba6f8ec465cb2ede2ea0c83750e87ddaf3
9.查看节点就绪状态
# 节点未就绪,继续部署calico
kubectl get node
九、配置 calico 网络
# 下载 calico 的 yaml 文件
浏览器访问:https://github.com/projectcalico/calico/blob/v3.25.0/manifests/calico.yaml
# 部署calico
kubectl apply -f calico.yaml
# 安装calicoctl,注意,版本需要与calico保持一致
wget https://github.com/projectcalico/calico/releases/download/v3.25.0/calicoctl-linux-amd64
mv calicoctl-linux-amd64 /usr/local/bin/calicoctl
chmod +x /usr/local/bin/calicoctl
# 相关命令
calicoctl version
calicoctl get ippool -o wide
calicoctl node status
calicoctl get node k8s-master01 -o yaml
十、查看k8s集群状态
kubectl get node
kubectl get pod -A
# 配置k8s的补全命令
yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)