CentOS Linux 7
Kubernetes 1.23.5
Kuboard v3
Kubernetes cluster deployment
192.168.131.55 master
192.168.131.56 node1
192.168.131.57 node2
192.168.131.58 node3
Disable the firewall, SELinux, and swap
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# systemctl disable firewalld
[root@localhost ~]# setenforce 0
[root@localhost ~]# sed -i '/^SELINUX=/c SELINUX=disabled' /etc/selinux/config
[root@localhost ~]# swapoff -a                          # disable swap temporarily
[root@localhost ~]# sed -i 's/.*swap.*/#&/' /etc/fstab  # disable swap permanently
Note: the swap partition must be disabled on every server. Run on all nodes.
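Optionally, verify on each node that all three changes took effect:

systemctl is-active firewalld     # expect: inactive
getenforce                        # expect: Permissive (Disabled after a reboot)
free -m | awk '/Swap/{print $2}'  # expect: 0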
Host name resolution
[root@master ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.131.55 master
192.168.131.56 node1
192.168.131.57 node2
192.168.131.58 node3
Note: run on all nodes.
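Instead of editing /etc/hosts four times, the finished file can be pushed from master to the other nodes (a sketch; assumes root SSH access to the workers):

for n in node1 node2 node3; do
    scp /etc/hosts root@${n}:/etc/hosts    # copy the resolver entries to each node
done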
Time synchronization
[root@master ~]# yum install chrony -y
[root@master ~]# systemctl start chronyd
[root@master ~]# systemctl enable chronyd
Note: run on all nodes.
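To confirm that chrony is actually syncing against a reachable source:

[root@master ~]# chronyc sources -v    # lists time sources; '^*' marks the one currently in sync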
Load kernel modules
[root@master ~]# modprobe br_netfilter
Note: run on all nodes.
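modprobe does not persist across reboots; to have br_netfilter loaded automatically at boot, register it with systemd-modules-load:

cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
EOF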
Adjust kernel parameters
[root@master ~]# cat >> /etc/sysctl.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
net.ipv4.ip_forward=1
EOF
[root@master ~]# sysctl -p
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness = 0
net.ipv4.ip_forward = 1
Note: run on all nodes.
Docker
[root@master ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
--2024-07-26 02:24:25--  https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Resolving mirrors.aliyun.com (mirrors.aliyun.com)... 123.125.216.228, 123.125.216.227, 123.125.216.219, ...
Connecting to mirrors.aliyun.com (mirrors.aliyun.com)|123.125.216.228|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 2081 (2.0K) [application/octet-stream]
Saving to: ‘docker-ce.repo’

docker-ce.repo      100%[================================================>]   2.03K  --.-KB/s    in 0.03s

2024-07-26 02:24:26 (64.1 KB/s) - ‘docker-ce.repo’ saved [2081/2081]

[root@master ~]# ls
anaconda-ks.cfg  docker-ce.repo
[root@master ~]# mv docker-ce.repo /etc/yum.repos.d/
[root@master ~]# yum install docker-ce -y
[root@master ~]# systemctl start docker
[root@master ~]# systemctl enable docker
Note: run on all nodes.
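The kubelet is configured below with --cgroup-driver=cgroupfs. Docker on CentOS 7 defaults to cgroupfs as well, but pinning it explicitly guards against a driver mismatch (a sketch; restart Docker after writing the file):

cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=cgroupfs"]
}
EOF
systemctl restart docker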
Install kubeadm, kubelet, kubectl, and ipvsadm
[root@master ~]# cat >> /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
[root@master ~]# yum -y install kubeadm-1.23.5 kubelet-1.23.5 kubectl-1.23.5 ipvsadm
Note: run on all nodes. The versions are pinned here (kubeadm-1.23.5 etc.); drop the version suffix to install the latest available packages instead.
Configure the kubelet cgroup driver
[root@master ~]# cat > /etc/sysconfig/kubelet <<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=cgroupfs --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"
EOF
Note: run on all nodes. Kubernetes 1.23.5 uses k8s.gcr.io/pause:3.6, so the pause override must also point at a 3.6 image (the pause-amd64:3.1 tag predates this release).
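The kubelet service should also be enabled so it comes back after a reboot; kubeadm itself starts it during init/join:

[root@master ~]# systemctl enable kubelet    # all nodes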
Upload the images
(omitted)
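The original omits this step. For completeness, a minimal sketch of how the tarballs could be produced on a machine with registry access (image names taken from the docker images list below; the script name image_save.sh is illustrative):

#!/bin/bash
# image_save.sh - pull the required images and save each one as a tarball
images="
k8s.gcr.io/kube-apiserver:v1.23.5
k8s.gcr.io/kube-controller-manager:v1.23.5
k8s.gcr.io/kube-scheduler:v1.23.5
k8s.gcr.io/kube-proxy:v1.23.5
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
k8s.gcr.io/pause:3.6
docker.io/flannel/flannel:v0.22.3
docker.io/flannel/flannel-cni-plugin:v1.2.0
"
for img in ${images}; do
    docker pull "${img}"
    # turn e.g. k8s.gcr.io/etcd:3.5.1-0 into etcd_3.5.1-0.tar
    tar_name=$(basename "${img}" | tr ':' '_')
    docker save "${img}" -o "${tar_name}.tar"
done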
Import the images
[root@master ~]# cat image_load.sh
#!/bin/bash
# load every image tarball found in the current directory
image_path=$(pwd)
for i in $(ls "${image_path}" | grep tar)
do
    docker load < "${i}"
done
[root@master ~]# bash image_load.sh
[root@master ~]# docker images
REPOSITORY                           TAG             IMAGE ID       CREATED         SIZE
flannel/flannel                      v0.22.3         e23f7ca36333   10 months ago   70.2MB
flannel/flannel-cni-plugin           v1.2.0          a55d1bad692b   12 months ago   8.04MB
k8s.gcr.io/kube-apiserver            v1.23.5         3fc1d62d6587   2 years ago     135MB
k8s.gcr.io/kube-proxy                v1.23.5         3c53fa8541f9   2 years ago     112MB
k8s.gcr.io/kube-controller-manager   v1.23.5         b0c9e5e4dbb1   2 years ago     125MB
k8s.gcr.io/kube-scheduler            v1.23.5         884d49d6d8c9   2 years ago     53.5MB
k8s.gcr.io/etcd                      3.5.1-0         25f8c7f3da61   2 years ago     293MB
k8s.gcr.io/coredns/coredns           v1.8.6          a4ca41631cc7   2 years ago     46.8MB
k8s.gcr.io/pause                     3.6             6270bb605e12   2 years ago     683kB
quay.io/coreos/flannel               v0.14.0-amd64   8522d622299c   3 years ago     67
Note: run on all nodes.
Initialize master (note the terminal output)
Syntax:

kubeadm init --kubernetes-version=<Kubernetes version> \
        --pod-network-cidr=<pod network CIDR, user-defined> \
        --apiserver-advertise-address=<master IP address>

[root@master ~]# kubeadm init --kubernetes-version=1.23.5 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.131.185
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.131.185:6443 --token k9u78s.b4ihiahjgf4iu8c9 \
        --discovery-token-ca-cert-hash sha256:026e3d83766eff367fdc0f7a0992b03fdad5a78e72531a4be26a7e13b27221db

[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master ~]# export KUBECONFIG=/etc/kubernetes/admin.conf
[root@master ~]# kubectl apply -f [podnetwork].yaml
The contents of [podnetwork].yaml are attached at the end of this document; --pod-network-cidr (10.244.0.0/16 here) must match the "Network" field in its flannel ConfigMap.
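If the init fails partway (wrong flags, missing images), the node can be wiped before retrying:

[root@master ~]# kubeadm reset -f    # undoes the changes made by kubeadm init
[root@master ~]# rm -rf $HOME/.kube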
Join the worker nodes to master
[root@node1 kubernetes-1.23.5]# kubeadm join 192.168.131.185:6443 --token k9u78s.b4ihiahjgf4iu8c9 \
> --discovery-token-ca-cert-hash sha256:026e3d83766eff367fdc0f7a0992b03fdad5a78e72531a4be26a7e13b27221db
[root@node2 kubernetes-1.23.5]# kubeadm join 192.168.131.185:6443 --token k9u78s.b4ihiahjgf4iu8c9 \
> --discovery-token-ca-cert-hash sha256:026e3d83766eff367fdc0f7a0992b03fdad5a78e72531a4be26a7e13b27221db
[root@node3 kubernetes-1.23.5]# kubeadm join 192.168.131.185:6443 --token k9u78s.b4ihiahjgf4iu8c9 \
> --discovery-token-ca-cert-hash sha256:026e3d83766eff367fdc0f7a0992b03fdad5a78e72531a4be26a7e13b27221db
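The bootstrap token in the join command expires (24 hours by default). To join a node later, generate a fresh join command on master:

[root@master ~]# kubeadm token create --print-join-command    # prints a complete "kubeadm join ..." line with a new token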
Finally, check on the master node
[root@master ~]# kubectl get node
NAME     STATUS   ROLES                  AGE     VERSION
master   Ready    control-plane,master   6m45s   v1.23.5
node1    Ready    <none>                 88s     v1.23.5
node2    Ready    <none>                 86s     v1.23.5
node3    Ready    <none>                 83s     v1.23.5
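A node only reports Ready once the flannel CNI is running on it; if anything stays NotReady, inspect the network and system pods:

[root@master ~]# kubectl get pods -n kube-flannel -o wide
[root@master ~]# kubectl get pods -n kube-system -o wide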
kuboard
[root@master ~]# kubectl label nodes master k8s.kuboard.cn/role=etcd
node/master labeled
[root@master ~]# kubectl label nodes node1 k8s.kuboard.cn/role=etcd
node/node1 labeled
[root@master ~]# kubectl label nodes node2 k8s.kuboard.cn/role=etcd
node/node2 labeled
[root@master ~]# kubectl label nodes node3 k8s.kuboard.cn/role=etcd
node/node3 labeled
[root@master ~]# kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3-swr.yaml
namespace/kuboard created
configmap/kuboard-v3-config created
serviceaccount/kuboard-boostrap created
clusterrolebinding.rbac.authorization.k8s.io/kuboard-boostrap-crb created
daemonset.apps/kuboard-etcd created
deployment.apps/kuboard-v3 created
service/kuboard-v3 created
[root@master ~]# kubectl get pods -n kuboard
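All pods in the kuboard namespace should eventually be Running. Per the Kuboard v3 documentation, the UI is then published as a NodePort service on port 30080 (default account admin, password Kuboard123); the exact port can be confirmed with:

[root@master ~]# kubectl get svc kuboard-v3 -n kuboard

Then open http://<any-node-ip>:30080 in a browser.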
[podnetwork].yaml
The name [podnetwork].yaml is a placeholder; substitute the actual file name used in practice.
apiVersion: v1
kind: Namespace
metadata:
  labels:
    k8s-app: flannel
    pod-security.kubernetes.io/enforce: privileged
  name: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - clustercidrs
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
kind: ConfigMap
metadata:
  labels:
    app: flannel
    k8s-app: flannel
    tier: node
  name: kube-flannel-cfg
  namespace: kube-flannel
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: flannel
    k8s-app: flannel
    tier: node
  name: kube-flannel-ds
  namespace: kube-flannel
spec:
  selector:
    matchLabels:
      app: flannel
      k8s-app: flannel
  template:
    metadata:
      labels:
        app: flannel
        k8s-app: flannel
        tier: node
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      containers:
      - args:
        - --ip-masq
        - --kube-subnet-mgr
        command:
        - /opt/bin/flanneld
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        image: docker.io/flannel/flannel:v0.22.3
        name: kube-flannel
        resources:
          requests:
            cpu: 100m
            memory: 50Mi
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
            - NET_RAW
          privileged: false
        volumeMounts:
        - mountPath: /run/flannel
          name: run
        - mountPath: /etc/kube-flannel/
          name: flannel-cfg
        - mountPath: /run/xtables.lock
          name: xtables-lock
      hostNetwork: true
      initContainers:
      - args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        command:
        - cp
        image: docker.io/flannel/flannel-cni-plugin:v1.2.0
        name: install-cni-plugin
        volumeMounts:
        - mountPath: /opt/cni/bin
          name: cni-plugin
      - args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        command:
        - cp
        image: docker.io/flannel/flannel:v0.22.3
        name: install-cni
        volumeMounts:
        - mountPath: /etc/cni/net.d
          name: cni
        - mountPath: /etc/kube-flannel/
          name: flannel-cfg
      priorityClassName: system-node-critical
      serviceAccountName: flannel
      tolerations:
      - effect: NoSchedule
        operator: Exists
      volumes:
      - hostPath:
          path: /run/flannel
        name: run
      - hostPath:
          path: /opt/cni/bin
        name: cni-plugin
      - hostPath:
          path: /etc/cni/net.d
        name: cni
      - configMap:
          name: kube-flannel-cfg
        name: flannel-cfg
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock