准备环境
节点:
IP | 角色
---|---
10.30.59.205 | master
10.30.59.222 | node
Linux系统镜像: CentOS 7(与后文yum repolist中的CentOS-7源一致)
硬盘:40GB
压缩包 K8S.tar.gz Docker.tar.gz
安装Kubernetes版本: kubelet-1.14.1 kubeadm-1.14.1
可以访问外网: 需要拉取镜像,如果服务器不能上网,需要提前下载镜像并导入节点
禁止swap分区
浏览器: 必须用火狐浏览器
修改主机名
[root@localhost ~]# hostnamectl set-hostname master
[root@localhost ~]# bash
[root@master ~]#
[root@localhost ~]# hostnamectl set-hostname node
[root@localhost ~]# bash
[root@node ~]#
用 ftp 的yum源(用的别人的纯净yum源)----两节点
[root@master ~]# vi /etc/yum.repos.d/local.repo
[docker]
name=docker
baseurl=ftp://10.30.59.193/Docker
gpgcheck=0
enabled=1
[k8s]
name=k8s
baseurl=ftp://10.30.59.193/Kubernetes
gpgcheck=0
enabled=1
[root@master ~]# yum clean all
[root@master ~]# yum repolist
或者:
自己弄yum源
上传K8S.tar.gz和Docker.tar.gz压缩包
[root@master ~]# ls
anaconda-ks.cfg K8S.tar.gz Docker.tar.gz
[root@master ~]# tar -zxvf K8S.tar.gz -C /opt/
[root@master ~]# tar -zxvf Docker.tar.gz -C /opt/
[root@master ~]# ls /opt/
Docker Kubernetes images image.sh jdk
[root@master ~]# ls /opt/Docker 其中repodata为我们想要的yum源
base repodata
[root@master ~]# vim /etc/yum.repos.d/local.repo
[docker]
name=docker
baseurl=file:///opt/Docker/
gpgcheck=0
enabled=1
[k8s]
name=k8s
baseurl=file:///opt/Kubernetes/
gpgcheck=0
enabled=1
[root@master ~]# yum clean all
[root@master ~]# yum repolist
安装vsftpd
[root@master ~]# yum install vsftpd -y
[root@master ~]# vim /etc/vsftpd/vsftpd.conf
## 添加
anon_root=/opt/
[root@master ~]# systemctl start vsftpd
[root@master ~]# systemctl enable vsftpd
[root@node ~]# vi /etc/yum.repos.d/local.repo 在node节点上配置指向master节点FTP的yum源
[docker]
name=docker
baseurl=ftp://10.30.59.205/Docker
gpgcheck=0
enabled=1
[k8s]
name=k8s
baseurl=ftp://10.30.59.205/Kubernetes
gpgcheck=0
enabled=1
[root@node ~]# yum clean all
[root@node ~]# yum repolist
还有不懂看此博客:
https://blog.csdn.net/qq_50981675/article/details/117716223
把压缩包K8S.tar.gz上传至/root目录并解压(两节点)
[root@master ~]# ls
anaconda-ks.cfg K8S.tar.gz
[root@master ~]# tar -zvxf K8S.tar.gz
升级系统内核(两节点)
[root@master ~]# yum upgrade -y
修改/etc/hosts文件(两节点)
[root@master ~]# vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.30.59.205 master
10.30.59.222 node
配置防火墙及SELinux(两节点)
[root@master ~]# systemctl stop firewalld && systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@master ~]# iptables -F
[root@master ~]# iptables -X
[root@master ~]# iptables -Z
[root@master ~]# iptables-save
# Generated by iptables-save v1.4.21 on Mon Jun 7 17:59:30 2021
*filter
:INPUT ACCEPT [30:1992]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [18:1800]
COMMIT
# Completed on Mon Jun 7 17:59:30 2021
[root@master ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
关闭Swap(两节点)
[root@master ~]# swapoff -a
[root@master ~]# sed -i "s/\/dev\/mapper\/centos-swap/\#\/dev\/mapper\/centos-swap/g" /etc/fstab
配置时间同步
安装chrony服务(master节点)
[root@master ~]# yum install -y chrony
修改/etc/chrony.conf文件(master节点)
[root@master ~]# sed -i 's/^server/#&/' /etc/chrony.conf
[root@master ~]# cat >> /etc/chrony.conf << EOF 注释默认NTP服务器,指定上游公共NTP服务器,并允许其他节点同步时间
local stratum 10
server master iburst
allow all
EOF
重启chronyd服务并设为开机启动,开启网络时间同步功能(master节点)
[root@master ~]# systemctl enable chronyd && systemctl restart chronyd
[root@master ~]# timedatectl set-ntp true
修改/etc/chrony.conf文件(node节点)
[root@node ~]# sed -i 's/^server/#&/' /etc/chrony.conf
[root@node ~]# echo server 10.30.59.205 iburst >> /etc/chrony.conf 指定内网master节点为上游NTP服务器
[root@node ~]# systemctl enable chronyd && systemctl restart chronyd 重启服务并设为开机启动
查询结果(两节点)
[root@node ~]# chronyc sources
210 Number of sources = 1
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^* master 3 6 377 14 +1408us[+2152us] +/- 25ms
如果存在以“^*”开头的行,即说明已经同步成功
配置路由转发
创建/etc/sysctl.d/K8s.conf文件(两节点)
[root@master ~]# cat << EOF |tee /etc/sysctl.d/K8s.conf
> net.ipv4.ip_forward = 1
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF 添加以上内容
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@master ~]# modprobe br_netfilter
[root@master ~]# sysctl -p /etc/sysctl.d/K8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
配置IPVS(两节点)
[root@node ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF 保证在节点重启后能自动加载所需模块
> #!/bin/bash
> modprobe -- ip_vs
> modprobe -- ip_vs_rr
> modprobe -- ip_vs_wrr
> modprobe -- ip_vs_sh
> modprobe -- nf_conntrack_ipv4
> EOF
[root@node ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4 15053 0
nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 141432 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 133053 2 ip_vs,nf_conntrack_ipv4
libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
使用lsmod | grep -e ip_vs -e nf_conntrack_ipv4命令查看是否已经正确加载所需的内核模块
安装ipset软件包(两节点)
[root@master ~]# yum install ipset ipvsadm -y (可以不安装)
安装Docker(两节点,以下两条安装命令二选一,不可同时执行,否则软件包会冲突)
[root@master ~]# yum install -y docker 方式一:安装本地docker源中的docker
[root@master ~]# yum install docker-ce-18.09.6 docker-ce-cli-18.09.6 containerd.io -y 方式二:安装指定版本的docker-ce
[root@master ~]# tee /etc/docker/daemon.json <<-'EOF'
> {
> "exec-opts": ["native.cgroupdriver=systemd"]
> }
> EOF 添加以上内容
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart docker 重启docker
[root@master ~]# systemctl enable docker 开机自启docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
(解压完K8S.tar.gz)执行
[root@master ~]# ./kubernetes_base.sh
[root@master ~]# docker info |grep Cgroup
Cgroup Driver: systemd 必须为systemd
配置Kubernetes Yum源(两节点)
[root@master ~]# vi /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
[root@master ~]# yum repolist
repo id repo name status
base/7/x86_64 CentOS-7 - Base 10,072
docker docker 341
extras/7/x86_64 CentOS-7 - Extras 476
k8s k8s 341
kubernetes Kubernetes 666
updates/7/x86_64 CentOS-7 - Updates 2,189
repolist: 14,085
安装Kubernetes工具并启动Kubelet(两节点)
[root@master ~]# yum install -y kubelet-1.14.1 kubeadm-1.14.1 kubectl-1.14.1
[root@master ~]# systemctl enable kubelet && systemctl start kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
部署Kubernetes Master
初始化Kubernetes集群(在 master 执行)
[root@master ~]# kubeadm init --apiserver-advertise-address 10.30.59.205 --kubernetes-version="v1.14.1" --pod-network-cidr=10.16.0.0/16 --image-repository=registry.aliyuncs.com/google_containers
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.30.59.205:6443 --token 7ozwg3.nowoarg2i4pwlb67 \
--discovery-token-ca-cert-hash sha256:4529ccbd7af6f14da3c54d7abee6e5a0d453c45ed116698605970a3116dfd5c6
//本行代码需记录,用于node节点加入集群。
Kubectl默认:会在执行的用户home目录下面的.kube目录下寻找config文件,配置kubectl工具
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
检查集群状态
[root@master ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
配置Kubernetes网络
[root@master ~]# cd yaml/
[root@master yaml]# ll
total 36
-rw-r--r--. 1 root root 501 Aug 29 2019 dashboard-adminuser.yaml
-rw-r--r--. 1 root root 12866 Oct 16 2019 kube-flannel.yaml
-rw-r--r--. 1 root root 4654 Oct 16 2019 kubernetes-dashboard.yaml
-rw-r--r--. 1 root root 172 Oct 29 2019 kubia.yaml
-rw-r--r--. 1 root root 2634 Oct 16 2019 kuboard.yaml
[root@master yaml]# kubectl apply -f kube-flannel.yaml
加入Kubernetes Node
向集群添加新节点,在kubeadm init输出的kubeadm join命令(在 node 执行)
[root@node ~]# kubeadm join 10.30.59.205:6443 --token 7ozwg3.nowoarg2i4pwlb67 \
--discovery-token-ca-cert-hash sha256:4529ccbd7af6f14da3c54d7abee6e5a0d453c45ed116698605970a3116dfd5c6
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
检查各节点状态(在 master 执行)
[root@master yaml]# kubectl get pods -n kube-system 全部为Running
NAME READY STATUS RESTARTS AGE
coredns-8686dcc4fd-l5dmm 1/1 Running 0 5m6s
coredns-8686dcc4fd-w4k69 1/1 Running 0 5m6s
etcd-master 1/1 Running 0 4m26s
kube-apiserver-master 1/1 Running 0 4m2s
kube-controller-manager-master 1/1 Running 0 4m23s
kube-flannel-ds-amd64-jw6j5 1/1 Running 0 67s
kube-flannel-ds-amd64-wjl5k 1/1 Running 0 17s
kube-proxy-7jv88 1/1 Running 0 5m5s
kube-proxy-nxx4s 1/1 Running 0 17s
kube-scheduler-master 1/1 Running 0 4m8s
[root@master ~]# kubectl get nodes 检查各节点状态
NAME STATUS ROLES AGE VERSION
master NotReady master 18m v1.14.1
node NotReady <none> 3m38s v1.14.1
在 master 执行
安装Dashboard
[root@master yaml]# kubectl apply -f kubernetes-dashboard.yaml
secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created
[root@master yaml]# kubectl create -f dashboard-adminuser.yaml
serviceaccount/kubernetes-dashboard-admin created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-admin created
在 master 执行
检查所有Pod状态
[root@master yaml]# kubectl get pods --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system coredns-8686dcc4fd-l5dmm 1/1 Running 0 20m 10.16.0.2 master <none> <none>
kube-system coredns-8686dcc4fd-w4k69 1/1 Running 0 20m 10.16.0.3 master <none> <none>
kube-system etcd-master 1/1 Running 0 19m 10.30.59.205 master <none> <none>
kube-system kube-apiserver-master 1/1 Running 0 19m 10.30.59.205 master <none> <none>
kube-system kube-controller-manager-master 1/1 Running 0 19m 10.30.59.205 master <none> <none>
kube-system kube-flannel-ds-amd64-jw6j5 1/1 Running 0 16m 10.30.59.205 master <none> <none>
kube-system kube-flannel-ds-amd64-wjl5k 1/1 Running 0 15m 10.30.59.222 node <none> <none>
kube-system kube-proxy-7jv88 1/1 Running 0 20m 10.30.59.205 master <none> <none>
kube-system kube-proxy-nxx4s 1/1 Running 0 15m 10.30.59.222 node <none> <none>
kube-system kube-scheduler-master 1/1 Running 0 19m 10.30.59.205 master <none> <none>
kube-system kubernetes-dashboard-5f7b999d65-kqvhz 1/1 Running 0 27s 10.16.1.2 node <none> <none>
通过Firefox浏览器输入Node节点地址https://10.30.59.222:30000
[root@master yaml]# yum install -y net-tools
[root@master yaml]# netstat -lntp
单击“高级”→“接受风险并继续”按钮,即可进入Kubernetes Dashboard认证界面
获取访问Dashboard的认证令牌
[root@master yaml]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep kubernetes-dashboard-admin-token | awk '{print $1}')
Name: kubernetes-dashboard-admin-token-2s7tq
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: kubernetes-dashboard-admin
kubernetes.io/service-account.uid: 56522362-c90e-11eb-9fe8-005056a01ff9
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1025 bytes
namespace: 11 bytes
token: 密匙 eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi10b2tlbi0yczd0cSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjU2NTIyMzYyLWM5MGUtMTFlYi05ZmU4LTAwNTA1NmEwMWZmOSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiJ9.bLphLZ8rlBONYBTp4E6l26KDcmqJwSPoDXlp9aaf5MNwgrs7qHSW33FAUGDcCfjV4vujVbBokxERuWX6KGKghMcnUaT_iFT7N67Df1Qhoed9An1JNTJBto-s6GTxAykdTSmRkdiFhaABGM5Sg_s5OyyY-u9PlJTvWjelCnBttNaR5hZJkMQmJ36SK7z5D9SuWk1ixo4rYjoe6rChDIHX27PDi0jzFiW94j5IPxvtZ0xtcRNxzQc4rxzuYSXK2iK3Yk5t-gehqgoDLzQLtNr6784GvhQG6kqwNY2o4PFA9IgYs_xxIrqO9pH_BvEU2Nhls8yzVNDaugFYJ7cqYeB83g
进入Kubernetes Dashboard界面
免密登录(如果界面内出现黄色警告)
[root@master yaml]# kubectl create clusterrolebinding test:anonymous --clusterrole=cluster-admin --user=system:anonymous
clusterrolebinding.rbac.authorization.k8s.io/test:anonymous created
配置Kuboard
[root@master yaml]# kubectl create -f kuboard.yaml
deployment.apps/kuboard created
service/kuboard created
serviceaccount/kuboard-user created
clusterrolebinding.rbac.authorization.k8s.io/kuboard-user created
serviceaccount/kuboard-viewer created
clusterrolebinding.rbac.authorization.k8s.io/kuboard-viewer created
clusterrolebinding.rbac.authorization.k8s.io/kuboard-viewer-node created
clusterrolebinding.rbac.authorization.k8s.io/kuboard-viewer-pvp created
ingress.extensions/kuboard created
浏览器再次访问,界面改变