1. Set the hostname on the two nodes
[root@localhost ~]# hostnamectl set-hostname master     //on the first node
[root@localhost ~]# hostnamectl set-hostname worker1    //on the second node
2. Configure the hostname mapping
[root@master ~]# cat >> /etc/hosts <<EOF
192.168.200.11 master
192.168.200.12 worker1
EOF
[root@master ~]# scp /etc/hosts worker1:/etc/hosts    //copy the mapping to the worker1 node
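//A quick sanity check that the mapping works (assuming the 192.168.200.0/24 addressing above; run it from either node):
[root@master ~]# ping -c 2 worker1     //should resolve to 192.168.200.12 and get replies
[root@worker1 ~]# ping -c 2 master     //should resolve to 192.168.200.11 and get replies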
3. Verify on all nodes that SELinux and the firewall are disabled
[root@master ~]# getenforce
Permissive
[root@master ~]# systemctl status firewalld    //should report inactive (dead)
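//If getenforce reports Enforcing or firewalld is still active, one way to disable them for this lab (in production, open the required ports instead of disabling the firewall outright):
[root@master ~]# setenforce 0                                                              //takes effect immediately, lasts until reboot
[root@master ~]# sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config     //makes the change persistent across reboots
[root@master ~]# systemctl stop firewalld && systemctl disable firewalld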
4. Disable the swap partition on all nodes
[root@master ~]# swapoff -a
[root@master ~]# sed -i 's|/dev/mapper/centos-swap|#/dev/mapper/centos-swap|g' /etc/fstab
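//Verify that swap is really off; the Swap line should show all zeros:
[root@master ~]# free -h | grep -i swap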
5. Configure IPVS and IP forwarding on all nodes
[root@master ~]# cat >/etc/sysctl.d/kubernetes.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master ~]# modprobe br_netfilter
[root@master ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@master ~]# cat >/etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
[root@master ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules
[root@master ~]# bash /etc/sysconfig/modules/ipvs.modules && lsmod |grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4 15053 0
nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145458 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 139264 2 ip_vs,nf_conntrack_ipv4
libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
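//Note: the manual modprobe br_netfilter above does not survive a reboot. One way to make it persistent on CentOS 7 is systemd's modules-load.d mechanism:
[root@master ~]# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf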
6. Install Docker on all nodes
[root@master ~]# wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@master ~]# yum -y install docker-ce
7. Configure Docker on all nodes
[root@master ~]# mkdir -p /etc/docker
[root@master ~]# cat >/etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://uy35zvn6.mirror.aliyuncs.com"]
}
EOF
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart docker
[root@master ~]# systemctl enable docker
[root@master ~]# docker info
Client:
 Context:    default
 Debug Mode: false
 Plugins:
  app: Docker App (Docker Inc., v0.9.1-beta3)
  buildx: Docker Buildx (Docker Inc., v0.9.1-docker)
  scan: Docker Scan (Docker Inc., v0.21.0)

Server:
 Containers: 0
  Running: 0
  Paused: 0
  Stopped: 0
 Images: 0
 Server Version: 20.10.21
 Storage Driver: overlay2
  Backing Filesystem: xfs
  Supports d_type: true
  Native Overlay Diff: true
  userxattr: false
 Logging Driver: json-file
 Cgroup Driver: systemd
 Cgroup Version: 1
 Plugins:
  Volume: local
  Network: bridge host ipvlan macvlan null overlay
  Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
 Swarm: inactive
 Runtimes: io.containerd.runtime.v1.linux runc io.containerd.runc.v2
 Default Runtime: runc
 Init Binary: docker-init
 containerd version: 1c90a442489720eec95342e1789ee8a5e1b9536f
 runc version: v1.1.4-0-g5fd4c4d
 init version: de40ad0
 Security Options:
  seccomp
   Profile: default
 Kernel Version: 3.10.0-1160.el7.x86_64
 Operating System: CentOS Linux 7 (Core)
 OSType: linux
 Architecture: x86_64
 CPUs: 1
 Total Memory: 3.682GiB
 Name: master
 ID: RI64:7KFO:DWF7:FRJT:FPCQ:O67D:4FNO:CVNS:X2LF:H57Y:6FLP:DHGT
 Docker Root Dir: /var/lib/docker
 Debug Mode: false
 Registry: https://index.docker.io/v1/
 Labels:
 Experimental: false
 Insecure Registries:
  127.0.0.0/8
 Registry Mirrors:
  https://uy35zvn6.mirror.aliyuncs.com/
 Live Restore Enabled: false
8. Install the Kubernetes components on all nodes
[root@master ~]# cat >/etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
[root@master ~]# yum -y install kubeadm-1.22.2 kubelet-1.22.2 kubectl-1.22.2
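//Before initializing, it is worth confirming that all three components landed at the pinned version (each should report v1.22.2):
[root@master ~]# kubeadm version -o short
[root@master ~]# kubelet --version
[root@master ~]# kubectl version --client --short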
9. Initialize the Kubernetes cluster on the master
[root@master ~]# kubeadm init --apiserver-advertise-address=192.168.200.11 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.22.2 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=NumCPU --ignore-preflight-errors=Mem
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.200.11:6443 --token p96vk0.nyuf42vwdnkrk5en \
        --discovery-token-ca-cert-hash sha256:9f59f022a74578f9803f875add7c2b4f7995888528ef376c49f9593e2bc9369e
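//Note: the bootstrap token in the join command above expires after 24 hours by default. If it has expired before a node joins, a fresh join command can be printed on the master with:
[root@master ~]# kubeadm token create --print-join-command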
10. Configure the kubeconfig on the master node
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
11. Check the cluster health status
[root@master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
controller-manager   Healthy     ok
etcd-0               Healthy     {"health":"true","reason":""}
//The output above shows that the scheduler is unhealthy: its health-check port cannot be reached. Edit /etc/kubernetes/manifests/kube-scheduler.yaml, comment out the --port=0 line, and restart kubelet.
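//For example (a sketch of that fix; inspect the manifest before and after editing, since kubelet recreates the static pod from it):
[root@master ~]# sed -i 's/- --port=0/# - --port=0/' /etc/kubernetes/manifests/kube-scheduler.yaml
[root@master ~]# systemctl restart kubelet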
[root@master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true","reason":""}
12. Deploy the Flannel network plugin
[root@master ~]# kubectl apply -f https://gitee.com/mirrors/flannel/raw/master/Documentation/kube-flannel.yml
namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
[root@master ~]# kubectl get pods -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-7f6cbbb7b8-8d2tk         1/1     Running   0          13m
coredns-7f6cbbb7b8-sf2vw         1/1     Running   0          13m
etcd-master                      1/1     Running   0          13m
kube-apiserver-master            1/1     Running   0          13m
kube-controller-manager-master   1/1     Running   0          13m
kube-proxy-g8lvw                 1/1     Running   0          13m
kube-scheduler-master            1/1     Running   0          4m40s
13. Deploy the Dashboard
[root@master ~]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml
[root@master ~]# vi recommended.yaml    //By default the Dashboard Service is only reachable inside the cluster, so change its type to NodePort to expose it externally (see the snippet below)
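//After the edit, the Dashboard Service section should look roughly like this (nodePort 30001 is this lab's choice; any port in the default 30000-32767 NodePort range works):
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  selector:
    k8s-app: kubernetes-dashboard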
[root@master ~]# kubectl create -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
[root@master ~]# kubectl get ns
NAME                   STATUS   AGE
default                Active   22m
kube-flannel           Active   10m
kube-node-lease        Active   22m
kube-public            Active   22m
kube-system            Active   22m
kubernetes-dashboard   Active   28s
[root@master ~]# kubectl get pod -n kubernetes-dashboard
NAME                                        READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-c45b7869d-stxqw   1/1     Running   0          72s
kubernetes-dashboard-576cb95f94-5l2s8       1/1     Running   0          72s
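//Confirm the Service is exposed on the expected NodePort (the PORT(S) column should show 443:30001/TCP):
[root@master ~]# kubectl get svc -n kubernetes-dashboard kubernetes-dashboard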
14. Create an admin access account
[root@master ~]# vi kubectl-admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
[root@master ~]# kubectl create -f kubectl-admin.yaml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
[root@master ~]# kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"    //retrieve the login token
eyJhbGciOiJSUzI1NiIsImtpZCI6InpLaXVDR1BuRFZlTFNYcW9lZ0ktV2xCYl9EOTdWakRieFE0V082YjFhemsifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLW1kcnNzIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJlOGM2ODQxMC05YzFmLTQzZmItOGRmMC00Mzk5NWFmM2YyYzIiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.GLLCeKTltV_E0q4XAZl8AtfigKcCd2f0_2XVjam5vae-I5lX1vlQZ-dVicJNbVZcUeijv8PEevA8OiqjRC0f7RHsVQB3CFMKwaX7cgDpuCuEUNTT6DQlqSg20hhC8BUClab9UtEG6QIjOMIWKhEogbO3an0MbuS3QzWWq7LYlYKloi0sxuWABCTrgViiCFE_8JfU-g1j6DOhNA7WEtcWs0ojiM--t_SlZhzs4i6m29PvRIdsK3Xfgk8mYGDuLiocjNznzf69-_Whk3Bu96YppZc-spUffb6d56IBxSbjOdOAdi2fNZwrGqd5Z1hjzo9ZPhpO5F7jMqSbjUriP3tPSA
15. Access and log in
Open https://192.168.200.11:30001 in a browser. The Dashboard login page will ask for a token; enter the token retrieved in the previous step.
16. Join the worker node to the cluster
[root@worker1 ~]# kubeadm join 192.168.200.11:6443 --token p96vk0.nyuf42vwdnkrk5en \
> --discovery-token-ca-cert-hash sha256:9f59f022a74578f9803f875add7c2b4f7995888528ef376c49f9593e2bc9369e
[preflight] Running pre-flight checks
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@master ~]# kubectl get nodes
NAME      STATUS   ROLES                  AGE    VERSION
master    Ready    control-plane,master   52m    v1.22.2
worker1   Ready    <none>                 111s   v1.22.2
//After a worker joins the cluster it takes a short while before its status changes to Ready.
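//The preflight warning during the join also flagged that kubelet is not enabled to start on boot; fix that on worker1 so the node comes back after a reboot:
[root@worker1 ~]# systemctl enable kubelet
//To watch the new node transition to Ready in real time:
[root@master ~]# kubectl get nodes -w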