k8s 1.23 + CentOS 7 Deployment

Kubernetes deployment architecture diagram


Install base software on the nodes

Environment preparation

Check the system time zone and time

The system time on all nodes must be consistent; otherwise certificate and other problems will occur.

Reference: https://blog.csdn.net/xiao_yi_xiao/article/details/124308109

Set the time zone

# Set the time zone
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
# Save to the hardware clock
hwclock -w

Set the time

# Set the system time
date -s "2019-11-01 17:28:00"
# Set the hardware clock
hwclock --set --date "2019-11-01 17:28:00"
# Sync the system time from the hardware clock
hwclock --hctosys
# Save to the hardware clock
hwclock -w   # or: clock -w
# Reboot the system
init 6
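
Rather than setting the clock by hand on each node, it is usually easier to keep all nodes synchronized via NTP. A minimal sketch using chrony (assuming the default CentOS 7 chrony package and its preconfigured NTP servers):

# Install and enable chrony so all nodes stay in sync automatically
yum install -y chrony
systemctl enable chronyd && systemctl start chronyd
# Check synchronization status
chronyc sources -v
timedatectl
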
Check the network

Run ip route show; there must be a default route. If there is none, add one manually.

# Install net-tools if the route command is missing (skip if already installed)
yum -y install net-tools
# Add a default route: gw is followed by the gateway, dev by the NIC name; on an internal network with no gateway, the host's own IP can be used
route add default gw 192.168.1.2 dev enp1s0
# Or edit the interface configuration file
vim /etc/sysconfig/network-scripts/ifcfg-*
# Add the following two lines; a gateway must be set, and the host's own IP can be used
#DEFROUTE=yes
#GATEWAY=x.x.x.x
systemctl restart network

The hostname may not be resolvable; add an "ip host-name" entry to /etc/hosts, e.g. 127.0.0.1 k8s-master
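
For example, a hypothetical /etc/hosts snippet applied on every node (the worker hostname and IP below are placeholders; use your real node IPs and hostnames):

cat <<EOF >> /etc/hosts
192.168.1.2 k8s-master-001
192.168.1.3 k8s-worker-001
EOF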

Install Docker or containerd

The Docker version must be 20.10.7 and containerd must be 1.4.6.
A Docker version compatible with Kubernetes is required; references:

https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.23.md
https://github.com/kubernetes/kubernetes/blob/v1.23.6/build/dependencies.yaml
containerd must also be compatible with Docker; references:

https://docs.docker.com/engine/release-notes/
https://github.com/moby/moby/blob/v20.10.7/vendor.conf

Install Docker
# Run on both master and worker nodes

# Remove old versions
sudo yum remove -y docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine \
docker-ce \
docker-ce-cli \
containerd.io

# Set up the yum repository
sudo yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Install and start docker; containerd.io is a docker dependency (it is listed explicitly here only to pin its version)
sudo yum install -y docker-ce-20.10.7 docker-ce-cli-20.10.7 containerd.io-1.4.6

# Configure a docker registry mirror to improve image download speed and stability; suggested mirror: http://f1361db2.m.daocloud.io/
# If access to https://hub.docker.com is already fast and stable, this step can be skipped
# Not needed on an internal network; configure the internal registry instead
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io

# Change the docker Cgroup Driver to systemd
# Create /etc/docker/daemon.json with the following content
mkdir -p /etc/docker
cat <<EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["http://f1361db2.m.daocloud.io"],
  "insecure-registries": ["192.168.1.2:5000"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload
sudo systemctl enable docker
sudo systemctl start docker

# Check the docker version and configuration
docker info
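
To confirm the cgroup driver change took effect (the kubelet in 1.23 defaults to systemd, so the two must match), a quick check:

# Should print "Cgroup Driver: systemd"
docker info | grep -i "cgroup driver"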

Install nfs-utils

# Run on both master and worker nodes
sudo yum install -y nfs-utils

Install kubectl / kubeadm / kubelet

Install kubectl / kubeadm / kubelet (required on all nodes)

# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

# Disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config

# Disable swap
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak |grep -v swap > /etc/fstab
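
A quick check that swap is really off (both commands should report no swap in use):

swapon -s
free -h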

# Bridged network settings (required when using containerd; not needed when using docker)
# Create /etc/modules-load.d/k8s.conf
cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# Create /etc/sysctl.d/k8s.conf
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Load the br_netfilter bridge-filter module and the overlay module
modprobe overlay
modprobe br_netfilter
# Verify the modules loaded successfully
lsmod | grep -e br_netfilter -e overlay
# Apply the sysctl configuration (br_netfilter must be loaded first, otherwise the net.bridge.* keys do not exist)
sysctl --system
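
To confirm the sysctl settings took effect (all three values should be 1):

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward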

# Configure the Kubernetes yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install kubelet, kubeadm and kubectl
yum install -y kubelet-1.23.9 kubeadm-1.23.9 kubectl-1.23.9

# Restart docker, then enable and start kubelet
systemctl restart docker
systemctl enable kubelet && systemctl start kubelet
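
To confirm the expected versions were installed:

kubelet --version
kubeadm version -o short
kubectl version --client --short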

Deploy the master node

Download the images

Check the image versions required by the cluster

kubeadm config images list

Create the image download script images.sh below and execute it. Worker nodes only need kube-proxy and pause.

tee ./images.sh <<'EOF'
#!/bin/bash
images=(
kube-apiserver:v1.23.9
kube-controller-manager:v1.23.9
kube-scheduler:v1.23.9
kube-proxy:v1.23.9
pause:3.6
etcd:3.5.1-0
coredns:v1.8.6
)
for imageName in ${images[@]} ; do
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
done
EOF
# Pull the images
chmod +x ./images.sh && ./images.sh

Initialize the master node

View the default configuration

kubeadm config print init-defaults
# Image list for 1.23.17
registry.k8s.io/kube-apiserver:v1.23.17
registry.k8s.io/kube-controller-manager:v1.23.17
registry.k8s.io/kube-scheduler:v1.23.17
registry.k8s.io/kube-proxy:v1.23.17
registry.k8s.io/pause:3.6
registry.k8s.io/etcd:3.5.6-0
registry.k8s.io/coredns/coredns:v1.8.6   # Aliyun mirror: registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6
# Commands to export the images to local files
docker save registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.17 > kube-apiserver_v1.23.17.image
docker save registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.17 > kube-controller-manager_v1.23.17.image
docker save registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.17 > kube-scheduler_v1.23.17.image
docker save registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.17 > kube-proxy_v1.23.17.image
docker save registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6 > pause_3.6.image
docker save registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.6-0 > etcd_3.5.6-0.image
docker save registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6 > coredns_v1.8.6.image
#calico
docker save calico/cni:v3.25.0 > calico_cni_v3.25.0.image
docker save calico/node:v3.25.0 > calico_node_v3.25.0.image
docker save calico/kube-controllers:v3.25.0 > calico_kube-controllers_v3.25.0.image
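
On an offline node the saved files can be imported with docker load; a sketch matching the file names produced by the docker save commands above:

# Import a single image
docker load < kube-apiserver_v1.23.17.image
# Or import every saved image in the current directory
for f in *.image; do docker load < "$f"; done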

Run the initialization

 # Run on the master node; set --apiserver-advertise-address to the master's IP
 kubeadm init \
 --apiserver-advertise-address=192.168.1.2 \
 --control-plane-endpoint=k8s-master-001 \
 --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
 --kubernetes-version=v1.23.9 \
 --service-cidr=10.96.0.0/16 \
 --pod-network-cidr=10.100.0.0/16 \
 --upload-certs \
 --v=10

Configure kubectl

rm -rf /root/.kube/
mkdir /root/.kube/
cp -i /etc/kubernetes/admin.conf /root/.kube/config

Check the status

# Check the control plane status
kubectl cluster-info
# Check node status
kubectl get nodes -o wide

Note: if the INTERNAL-IP of all nodes is identical or empty (e.g. every node shows 10.0.2.15 when installed under VirtualBox), you need to edit the configuration file /etc/sysconfig/kubelet manually:

https://kubernetes.io/zh-cn/docs/reference/command-line-tools-reference/kubelet/

KUBELET_EXTRA_ARGS=--node-ip=192.168.1.2 # change this to the node's real IP
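
A minimal sketch of applying that setting (replace 192.168.1.2 with this node's real IP; note this overwrites any existing KUBELET_EXTRA_ARGS):

cat <<EOF > /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS=--node-ip=192.168.1.2
EOF
systemctl daemon-reload
systemctl restart kubelet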

Reset the environment if something goes wrong

swapoff -a && kubeadm reset -f && systemctl daemon-reload && systemctl restart kubelet \
  && iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X \
  && systemctl restart docker

Install the network plugin

calico

Non-operator approach; reference: https://blog.csdn.net/yy8623977/article/details/124707433

# Install the calico network plugin
# Reference: https://projectcalico.docs.tigera.io/getting-started/kubernetes/self-managed-onprem/onpremises
echo "Installing calico 3.25"
wget https://docs.projectcalico.org/archive/v3.25/manifests/calico.yaml
## Uncomment the two lines below in calico.yaml and change the value to the pod network CIDR set in kubeadm init, 10.100.0.0/16
 # - name: CALICO_IPV4POOL_CIDR
 #   value: "192.168.0.0/16"
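# A hedged sed sketch for the change above (assumes the default comment formatting of the v3.25 manifest; verify the result before applying)
sed -i 's|# - name: CALICO_IPV4POOL_CIDR|- name: CALICO_IPV4POOL_CIDR|' calico.yaml
sed -i 's|#   value: "192.168.0.0/16"|  value: "10.100.0.0/16"|' calico.yaml
grep -A1 CALICO_IPV4POOL_CIDR calico.yaml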
# List the images referenced by the manifest
cat calico.yaml | grep image
# Pull the images manually
tee ./calicoImages.sh <<'EOF'
#!/bin/bash
 images=(
 docker.io/calico/cni:v3.25.0
 docker.io/calico/node:v3.25.0
 docker.io/calico/kube-controllers:v3.25.0
)
for imageName in ${images[@]} ; do
docker pull $imageName
done
EOF

# Pull the images
chmod +x ./calicoImages.sh && ./calicoImages.sh
# Deploy calico
kubectl apply -f calico.yaml

Install the Kubernetes Dashboard

https://blog.csdn.net/yy8623977/article/details/124707433

1. Download the Dashboard yaml file (saved here as kubernetes-dashboard.yaml, the name used in the following steps)
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.1/aio/deploy/recommended.yaml -O kubernetes-dashboard.yaml

2. Use the following command, or edit the kubernetes-dashboard.yaml file by hand

sed -i '/targetPort:/a\ \ \ \ \ \ nodePort: 30001\n\ \ type: NodePort' kubernetes-dashboard.yaml
# Also comment out imagePullPolicy: Always to avoid pulling the image again every time
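
Alternatively, if you prefer not to edit the YAML, the Service can be switched to NodePort after it has been created in step 3; a sketch using kubectl patch (service name and ports as in the v2.5.1 manifest):

kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard \
  -p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30001}]}}'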

3. Create the pod and service

Two images are required: kubernetesui/dashboard:v2.5.1 and kubernetesui/metrics-scraper:v1.0.7 (offline images have been provided; import them manually)

kubectl create -f kubernetes-dashboard.yaml

4. Verify after creation

kubectl get deployment kubernetes-dashboard -n kubernetes-dashboard
kubectl get pods -n kubernetes-dashboard -o wide
kubectl get services -n kubernetes-dashboard
netstat -ntlp|grep 30001

5. Create an account
Create dashboard-account.yaml

tee ./dashboard-account.yaml <<'EOF'
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF

Apply dashboard-account.yaml

kubectl apply -f dashboard-account.yaml

6. Get the token

kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"

Fix the browser certificate problem

https://blog.csdn.net/sshwolf/article/details/119578839

openssl genrsa -out dashboard.key 2048
openssl req -days 3650 -new -out dashboard.csr -key dashboard.key -subj '/CN=192.168.1.2'
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
kubectl delete secret kubernetes-dashboard-certs -n kubernetes-dashboard
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
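
The dashboard pod only reads the certificate secret at startup, so restart it after replacing the secret (a sketch; the label selector matches the v2.5.1 manifest):

kubectl -n kubernetes-dashboard delete pod -l k8s-app=kubernetes-dashboard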

Deploy metrics-server

Reference: https://blog.csdn.net/yy8623977/article/details/124872606

Download from https://github.com/kubernetes-sigs/metrics-server/releases (current version v0.6.1). components.yaml is the standard version; high-availability.yaml is the high-availability version.

Modify the configuration

args:
  - --cert-dir=/tmp
  - --secure-port=4443
  # - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
  # Comment out the line above and add the two lines below
  - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP    # priority order of node address types
  - --kubelet-insecure-tls    # skip TLS verification against the kubelet
  - --kubelet-use-node-status-port
  - --metric-resolution=15s
# image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1
# Comment out the line above and add the line below
image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
livenessProbe:
  failureThreshold: 3

Apply the deployment file

kubectl apply -f components.yaml
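
To verify that metrics-server is working (the pod runs in kube-system; kubectl top needs a minute or two after the pod becomes Ready):

kubectl -n kube-system get pods -l k8s-app=metrics-server
kubectl top nodes
kubectl top pods -A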

Join worker nodes to the cluster

(Make sure the system time matches the master.)

Pull the images, or import them manually

# Images required on worker nodes
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.9
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6

# calico images; use the same version as deployed on the cluster above (v3.25.0)
docker pull docker.io/calico/node:v3.25.0
docker pull docker.io/calico/cni:v3.25.0
docker pull docker.io/calico/kube-controllers:v3.25.0

Join the cluster

# Run on the master node to print the kubeadm join command
kubeadm token create --print-join-command
# Run on the worker node. Note: the --control-plane and --certificate-key flags below are only
# needed when joining an additional control-plane node; omit them to join a plain worker node.
kubeadm join k8s-master-001:6443 --token j3h4iv.ojdq3fbfgzcklw3m \
--discovery-token-ca-cert-hash sha256:7ad81dad772080e20f4793e6cae67cc6c00c007867d94f6be2cbad3d703a0cab \
--control-plane --certificate-key b2f4a2db18e580830daf9a3d42fd0683f91e6f4ede653d105139b9f392cf5dac
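
After the join completes, verify on the master that the new node registers and eventually becomes Ready:

# Run on the master node
kubectl get nodes -o wide
kubectl -n kube-system get pods -o wide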