Kubernetes 1.26 部署(CentOS 7,kubeadm + Containerd)(注:原标题写作 1.28,但正文实际安装并验证的版本为 v1.26.0)

前提要点

1. 参考地址:k8s笔记-CSDN博客

                备注:特别感谢该博主的资料,非常详细

2.采用kubeadm部署

3.容器采用Containerd

集群规划 

192.168.184.137  k8s-node1
192.168.184.138  k8s-node2
192.168.184.139  k8s-node3

内核升级 

# --- Kernel upgrade: CentOS 7 stock kernel -> kernel-ml 4.19 ---

yum update -y --exclude='kernel*'                     # upgrade the system but keep the current kernel

# Switch yum to the Aliyun mirror for faster, reachable downloads
sudo mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup   # back up the original repo file
sudo curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo   # add the Aliyun repo
sudo yum clean all                                    # clear the yum cache
sudo yum makecache                                    # rebuild the metadata cache

# Download the 4.19 mainline kernel packages (elrepo mirror)
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-headers-4.19.12-1.el7.elrepo.x86_64.rpm
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
yum localinstall -y kernel-ml*                        # install the downloaded kernel rpms

# Make the new kernel the default boot entry.
# FIX: the original wrote the config to /etc/grub.cfg, a file GRUB never reads;
# on CentOS 7 (BIOS boot) the file actually used is /boot/grub2/grub.cfg.
grub2-set-default 0 && grub2-mkconfig -o /boot/grub2/grub.cfg
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
grubby --default-kernel                               # verify the default kernel is now 4.19
reboot                                                # reboot into the new kernel
uname -a                                              # confirm the running kernel version


# Silence the "You have new mail in /var/spool/mail/root" message
cat /dev/null > /var/spool/mail/root                  # empty root's mailbox
echo "unset MAILCHECK" >> /etc/profile                # disable the login mail check


环境准备

1.修改主机名
hostnamectl set-hostname  xxx                                  # set this node's hostname (replace xxx, e.g. k8s-node1)
2.同步hosts文件

如果 DNS 不支持主机名称解析,还需要在每台机器的 /etc/hosts 文件中添加主机名和 IP 的对应关系

# Map cluster hostnames to IPs on every node (needed when DNS cannot resolve them).
# NOTE(review): k8s-node4 is not in the 3-node plan at the top of this document —
# remove that entry if the node does not exist.
cat >> /etc/hosts <<EOF
192.168.184.137  k8s-node1
192.168.184.138  k8s-node2
192.168.184.139  k8s-node3
192.168.184.100 k8s-node4
EOF
3.关闭防火墙
# Stop and permanently disable firewalld so node/pod traffic is not blocked
systemctl stop firewalld && systemctl disable firewalld
 4.关闭 SELINUX
# Set SELinux permissive now and disable it across reboots
 setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
5.关闭 swap 分区
# kubelet refuses to run with swap enabled: turn it off and comment it out of fstab
 swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
6.同步时间
# Synchronize clocks (TLS certificates require consistent time across nodes)
yum install ntpdate -y
ntpdate time.windows.com

k8s组件安装 

1.安装 containerd
# Install yum-config-manager and storage prerequisites
yum install -y yum-utils device-mapper-persistent-data lvm2   
# Add the Aliyun-mirrored docker-ce repo, which also carries containerd.io
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install containerd and the CRI command-line tools (crictl)
yum install  -y containerd.io cri-tools  

# Configure containerd.
# FIX: the original config mixed obsolete v1 keys (disabled_plugins=["restart"],
# [plugins.linux], [plugins.cri]) with v2 CRI keys, and did not set
# SystemdCgroup = true. kubelet >= 1.22 defaults to the systemd cgroup driver,
# so containerd must match or pods will crash-loop after kubeadm init.
# This minimal "version = 2" config is merged with containerd's defaults.
mkdir -p /etc/containerd
cat >  /etc/containerd/config.toml <<EOF
version = 2
[plugins."io.containerd.grpc.v1.cri"]
  sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
    runtime_type = "io.containerd.runc.v2"
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
      SystemdCgroup = true
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
      endpoint = ["https://bqr1dr1n.mirror.aliyuncs.com"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
      endpoint = ["https://registry.aliyuncs.com/k8sxio"]
EOF

# Enable containerd at boot, start it now, and show its status
systemctl enable containerd && systemctl start containerd && systemctl status containerd 

# Load the kernel modules containerd/Kubernetes need on every boot
cat > /etc/modules-load.d/containerd.conf <<EOF
overlay
br_netfilter
EOF
2.配置网络
# Kernel parameters required by Kubernetes networking:
# let bridged traffic traverse iptables and enable IPv4 forwarding
cat  > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

# Load the overlay and br_netfilter modules now
# (br_netfilter must be loaded before the bridge sysctls above can apply)
modprobe overlay
modprobe br_netfilter

# Apply the sysctl file and verify the values took effect
sysctl -p /etc/sysctl.d/k8s.conf
3.安装k8s
# Define the Kubernetes yum repository (Aliyun mirror; GPG checks disabled)
cat <<EOF > kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Move the repo definition into yum's config directory
mv kubernetes.repo /etc/yum.repos.d/
# Verify the repo is visible
yum repolist

# Install kubelet/kubeadm/kubectl — run ONLY ONE of the two commands below.
# Option A: whatever version is newest in the repo
yum install -y kubelet kubeadm kubectl

# Option B: pinned version — this is what the rest of the document uses (v1.26.0)
yum install -y kubelet-1.26.0 kubectl-1.26.0 kubeadm-1.26.0

# Enable kubelet at boot and start it.
# NOTE: kubelet stays in a crash/restart loop until `kubeadm init`/`join`
# provides its configuration — a "failed" status here is expected.
sudo systemctl enable kubelet && sudo systemctl start kubelet && sudo systemctl status kubelet

############# NOTE: run `kubeadm init` on the MASTER node only!
#### Replace 10.15.0.5 with your own master node's IP address.
#### This step pulls images over the network and can take a while.
# Initialize the control-plane node.
# FIX: the original line began with a literal "$ " shell prompt, which makes
# the command fail if the block is copy-pasted into a terminal or script.
kubeadm init \
--apiserver-advertise-address=10.15.0.5 \
--pod-network-cidr=10.244.0.0/16 \
--image-repository registry.aliyuncs.com/google_containers \
--cri-socket=unix:///var/run/containerd/containerd.sock

# On success, kubeadm prints a "Your Kubernetes control-plane has initialized
# successfully" message together with a ready-made `kubeadm join` command.

# Set up kubectl credentials for the current user (master node only)
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"


# On the master: create a join token that never expires (--ttl=0)
kubeadm token create --print-join-command --ttl=0

# Run the PRINTED join command on every worker node.
# The token and CA-cert hash below are examples — use your own output.
kubeadm join 192.168.184.137:6443 --token 27i6o0.y9zvxsqokyxyzx3n --discovery-token-ca-cert-hash sha256:d82a01c39b586fc400a33c9bf1993ecde32563c78a0cb5c727a5c92fc268ceff 

# "This node has joined the cluster" in the output indicates a successful join.

# On the master: verify that all worker nodes have registered
kubectl get nodes

 4.配置集群网络

备注:采用离线安装 flannel插件,共有两个tar包(flannel-flannel-cni-plugin-v1.4.1-flannel1-amd64.tar、flannel-flannel-v0.25.1-amd64.tar)  

从文章开头下载压缩包并解压,得到以上两个 tar 镜像文件

############ Upload the two downloaded flannel tarballs to EVERY node
# Upload via lrzsz (or scp); replace xxxx.tar with each actual file name
rz -bye  xxxx.tar         

# Import the image tar archives into containerd's k8s.io namespace
# so kubelet can use them without pulling from a registry
ctr -n k8s.io images import flannel-flannel-v0.25.1-amd64.tar
ctr -n k8s.io images import flannel-flannel-cni-plugin-v1.4.1-flannel1-amd64.tar

# Output like the following confirms a successful import
#unpacking docker.io/flannel/flannel:v0.25.1 (sha256:4df360aec845c4d5bf72f86e6dcdd776a20985d4f63d63d97396c0ca1f3fba82)...done


# Create the flannel manifest file (on the master)
vim kube-flannel.yml

 复制以下内容到  kube-flannel.yml    文件中,内容不用修改

---
# Dedicated namespace for flannel; the privileged Pod Security label is
# needed because the DaemonSet uses hostNetwork and NET_ADMIN capabilities.
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
# RBAC: flannel reads pods/nodes and patches node status (subnet annotations).
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
# Bind the ClusterRole above to flannel's ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
  - kind: ServiceAccount
    name: flannel
    namespace: kube-flannel
---
# ServiceAccount the DaemonSet pods run as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
# CNI config and flannel network config mounted into the pods.
# net-conf.json "Network" must match kubeadm's --pod-network-cidr.
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
# DaemonSet: one flannel pod per Linux node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        - name: install-cni-plugin
          # FIX: the image reference must match the name stored by
          # `ctr -n k8s.io images import` — docker.io/flannel/flannel-cni-plugin:v1.4.1-flannel1.
          # The original "flannel-flannel-cni-plugin-v1.4.1-flannel1" does not
          # exist; with imagePullPolicy: Never that yields ErrImageNeverPull.
          image: docker.io/flannel/flannel-cni-plugin:v1.4.1-flannel1
          imagePullPolicy: Never
          command:
            - cp
          args:
            - -f
            - /flannel
            - /opt/cni/bin/flannel
          volumeMounts:
            - name: cni-plugin
              mountPath: /opt/cni/bin
        - name: install-cni
          # FIX: imported name is docker.io/flannel/flannel:v0.25.1 (shown by the
          # "unpacking docker.io/flannel/flannel:v0.25.1 ..." import output),
          # not "flannel-flannel-v0.25.1".
          image: docker.io/flannel/flannel:v0.25.1
          imagePullPolicy: Never
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          # FIX: same image-name correction as install-cni above.
          image: docker.io/flannel/flannel:v0.25.1
          imagePullPolicy: Never
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: false
            capabilities:
              add: ["NET_ADMIN", "NET_RAW"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: EVENT_QUEUE_DEPTH
              value: "5000"
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
            - name: xtables-lock
              mountPath: /run/xtables.lock
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni-plugin
          hostPath:
            path: /opt/cni/bin
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
# Create the flannel resources from the manifest (run on the master)
kubectl apply -f kube-flannel.yml
# Check node status (run on the master) — nodes turn Ready once flannel is up
kubectl get nodes
NAME        STATUS   ROLES           AGE   VERSION
k8s-node1   Ready    control-plane   21h   v1.26.0
k8s-node2   Ready    <none>          21h   v1.26.0
k8s-node3   Ready    <none>          21h   v1.26.0
# 查看集群系统 pod 运行情况,下面所有 pod 状态为 Running 代表集群可用
kubectl get pod -A
NAMESPACE      NAME                                READY   STATUS    RESTARTS   AGE
kube-flannel   kube-flannel-ds-gtq49               1/1     Running   0          21h
kube-flannel   kube-flannel-ds-qpdl6               1/1     Running   0          21h
kube-flannel   kube-flannel-ds-ttxjb               1/1     Running   0          21h
kube-system    coredns-5bbd96d687-p7q2x            1/1     Running   0          21h
kube-system    coredns-5bbd96d687-rzcnz            1/1     Running   0          21h
kube-system    etcd-k8s-node1                      1/1     Running   0          21h
kube-system    kube-apiserver-k8s-node1            1/1     Running   0          21h
kube-system    kube-controller-manager-k8s-node1   1/1     Running   0          21h
kube-system    kube-proxy-mtsbp                    1/1     Running   0          21h
kube-system    kube-proxy-v2jfs                    1/1     Running   0          21h
kube-system    kube-proxy-x6vhn                    1/1     Running   0          21h
kube-system    kube-scheduler-k8s-node1            1/1     Running   0          21h

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值