k8s安装读取内核modules_kubeadm安装kubernetes

c039bf0e997127bd9d65b7544bbf6e73.png

1.主机IP规划

39ee1b7c8e280cfd05e499e9af430926.png

2.配置部署k8s基本环境

  • 配置hosts文件实现互相解析
  • MAC地址唯一
  • 确保product_uuid唯一
  • 关闭防火墙和禁用selinux
  • 禁用swap
  • 调整内核参数
  • 加载br_netfilter模块
# 三台配置hosts文件实现互相解析
[root@k8s-master ~]# cat  /etc/hosts
10.0.0.30  k8s-master
10.0.0.31  k8s-node-1
10.0.0.32  k8s-node-2
[root@k8s-master ~]# scp  /etc/hosts  root@10.0.0.31:/etc/                                                                                                         100%  225   155.3KB/s   00:00    
[root@k8s-master ~]# scp  /etc/hosts  root@10.0.0.32:/etc/

# 确保product_uuid唯一
[root@k8s-master ~]# cat /sys/class/dmi/id/product_uuid
C55A4D56-C260-7099-7E3E-FE3B120CEF88
[root@k8s-node-1 ~]# cat /sys/class/dmi/id/product_uuid 
3B0B4D56-B4E4-3401-4CF4-CEAC0755AB9F
[root@k8s-node-2 ~]# cat /sys/class/dmi/id/product_uuid
AA824D56-42BC-806B-025E-457F555A1D27

# 三个节点都要关闭防火墙和禁用selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
systemctl  stop  firewalld.service 
systemctl  disable   firewalld.service

# 三个节点都要禁用swap
禁用swap功能才能使kubelet正常工作(Kubernetes 1.8开始要求关闭系统的Swap,如果不关闭,默认配置下kubelet将无法启动。
如果开启了swap分区,kubelet会启动失败(可以通过将参数 --fail-swap-on 设置为false来忽略swap on),故需要在每台机器上关闭 swap分区)
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
swapoff  -a

#三个节点都要调整内核参数
echo  'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.d/99-sysctl.conf 
echo  'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.d/99-sysctl.conf 
sysctl  --system
lsmod | grep br_netfilter
# node节点安装加载br_netfilter模块
[root@k8s-node-1 ~]# yum install bridge-utils  -y
[root@k8s-node-1 ~]# modprobe bridge && modprobe br_netfilter
[root@k8s-node-1 ~]# lsmod | grep br_netfilter
br_netfilter           22256  0 
bridge                151336  1 br_netfilter
[root@k8s-node-2 ~]# yum install bridge-utils  -y
[root@k8s-node-2 ~]# modprobe bridge && modprobe br_netfilter
[root@k8s-node-2 ~]# lsmod | grep br_netfilter
br_netfilter           22256  0 
bridge                151336  1 br_netfilter

3.安装Container runtimes

  • 为了在Pods中运行容器,Kubernetes需要安装容器运行环境(Container runtimes)
  • 三个节点都要安装
[root@k8s-master ~]# cat  docker.sh
# Install prerequisite packages for the Docker repository and storage driver
yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the official Docker CE yum repository
# (the scrape had dropped the line-continuation backslash; restored here)
yum-config-manager --add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
# Update packages and install pinned containerd/docker versions
yum update -y && yum install -y \
  containerd.io-1.2.10 \
  docker-ce-19.03.4 \
  docker-ce-cli-19.03.4
# Create the docker configuration directory
mkdir /etc/docker
# Write the docker daemon configuration:
# systemd cgroup driver is what kubelet expects; the Aliyun mirror speeds up pulls
cat > /etc/docker/daemon.json <<EOF
{
   "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
      "max-size": "100m"
    },"registry-mirrors": ["https://7j94f0p5.mirror.aliyuncs.com"],
    "storage-driver": "overlay2",
    "storage-opts": [
      "overlay2.override_kernel_check=true"
    ]
}
EOF
# Create the systemd drop-in directory for the docker service
mkdir -p /etc/systemd/system/docker.service.d
# Reload systemd, enable docker at boot, and (re)start it now
systemctl daemon-reload
systemctl enable  docker
systemctl restart docker

[root@k8s-master ~]# scp /root/docker.sh  root@10.0.0.31:/root/
[root@k8s-master ~]# scp /root/docker.sh  root@10.0.0.32:/root/

4.安装kubeadm, kubelet and kubectl

  • kubeadm:引导集群的命令;
  • kubelet:运行在集群中所有机器上并执行诸如启动pod和容器之类操作的组件;
  • kubectl:操作集群的命令行单元;
# 给予可执行权限
[root@k8s-master ~]# chmod  a+x install.sh 
[root@k8s-master ~]# ./install.sh 
[root@k8s-master ~]# cat  install.sh 
#!/bin/bash
# Install kubeadm, kubelet and kubectl from the Aliyun Kubernetes mirror (CentOS 7 / x86_64).
# NOTE(review): gpgcheck=0 disables package signature verification — confirm this is
# acceptable, or import the repo GPG key and set gpgcheck=1.
cat <<EOF > /etc/yum.repos.d/mirrors.aliyun.com_kubernetes.repo
[mirrors.aliyun.com_kubernetes]
name=added from: https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
# Refresh the yum metadata cache
yum clean all -y && yum makecache -y && yum repolist -y
# Install the latest available kubelet/kubeadm/kubectl (versions are not pinned here)
yum install -y kubelet kubeadm kubectl
# Enable kubelet at boot and start it; it will restart in a loop
# until 'kubeadm init' / 'kubeadm join' provides its configuration.
systemctl enable   kubelet.service
systemctl restart  kubelet.service
[root@k8s-master ~]# scp  /root/install.sh  root@10.0.0.31:/root/     
[root@k8s-node-1 ~]# ./install.sh
[root@k8s-master ~]# scp  /root/install.sh  root@10.0.0.32:/root/
[root@k8s-node-2 ~]# ./install.sh

563d4892448b7968c9d7908135defe1d.png

5.用 kubeadm 创建 Cluster

5.1. Master节点自举集群

  • kubernetes版本可以稍有差异
[root@k8s-master ~]#  kubeadm init \
--apiserver-advertise-address=10.0.0.30 \
--kubernetes-version=v1.18.1 \
--pod-network-cidr=10.200.0.0/16 \
--service-cidr=10.2.0.0/16 \
--image-repository registry.aliyuncs.com/google_containers
W0528 17:54:10.641169   10184 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.18.1
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0528 17:54:12.126120   10184 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0528 17:54:12.126980   10184 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 6.507320 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: ivkj45.ly2wuvo4efardhzz
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.0.0.30:6443 --token e42otg.b3alah02nh598ydv \
    --discovery-token-ca-cert-hash sha256:9836557fac14463c7f9518d0f6568610ed1d8f4da5e212374beec4d58cce0566

5.2.查看下载的镜像

[root@k8s-master ~]# docker images
REPOSITORY                                                        TAG                 IMAGE ID            CREATED             SIZE
registry.aliyuncs.com/google_containers/kube-proxy                v1.18.1             4e68534e24f6        6 weeks ago         117MB
registry.aliyuncs.com/google_containers/kube-apiserver            v1.18.1             a595af0107f9        6 weeks ago         173MB
registry.aliyuncs.com/google_containers/kube-controller-manager   v1.18.1             d1ccdd18e6ed        6 weeks ago         162MB
registry.aliyuncs.com/google_containers/kube-scheduler            v1.18.1             6c9320041a7b        6 weeks ago         95.3MB
registry.aliyuncs.com/google_containers/pause                     3.2                 80d28bedfe5d        3 months ago        683kB
registry.aliyuncs.com/google_containers/coredns                   1.6.7               67da37a9a360        3 months ago        43.8MB
registry.aliyuncs.com/google_containers/etcd                      3.4.3-0             303ce5db0e90        7 months ago        288MB
# 查看是否端口运行
[root@k8s-master ~]# netstat  -antp | grep  6443
tcp        0      0 10.0.0.30:44632         10.0.0.30:6443          ESTABLISHED 2492/kube-controlle 
tcp        0      0 10.0.0.30:44626         10.0.0.30:6443          ESTABLISHED 2498/kube-scheduler 
tcp        0      0 10.0.0.30:44630         10.0.0.30:6443          ESTABLISHED 695/kubelet         
tcp        0      0 10.0.0.30:44836         10.0.0.30:6443          ESTABLISHED 3043/kube-proxy     
tcp        0      0 10.0.0.30:44852         10.0.0.30:6443          ESTABLISHED 2492/kube-controlle 
tcp        0      0 10.0.0.30:44786         10.0.0.30:6443          ESTABLISHED 2498/kube-scheduler 
tcp6       0      0 :::6443                 :::*                    LISTEN      2495/kube-apiserver 
tcp6       0      0 10.0.0.30:6443          10.0.0.30:44786         ESTABLISHED 2495/kube-apiserver 
tcp6       0      0 10.0.0.30:6443          10.0.0.30:44852         ESTABLISHED 2495/kube-apiserver 
tcp6       0      0 10.0.0.30:6443          10.0.0.30:44626         ESTABLISHED 2495/kube-apiserver 
tcp6       0      0 10.0.0.30:6443          10.0.0.30:44836         ESTABLISHED 2495/kube-apiserver 
tcp6       0      0 10.0.0.30:6443          10.0.0.30:44630         ESTABLISHED 2495/kube-apiserver 
tcp6       0      0 ::1:6443                ::1:44976               ESTABLISHED 2495/kube-apiserver 
tcp6       0      0 10.0.0.30:6443          10.0.0.30:44632         ESTABLISHED 2495/kube-apiserver 
tcp6       0      0 ::1:44976               ::1:6443                ESTABLISHED 2495/kube-apiserver

5.3.配置kubectl

  • kubectl 是管理 Kubernetes Cluster 的命令行工具
[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master ~]# kubectl  get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}

6.安装flannel网络

# 下载文件
[root@k8s-master ~]# cat /etc/hosts
10.0.0.30  k8s-master
10.0.0.31  k8s-node-1
10.0.0.32  k8s-node-2
## 添加解析地址
192.30.253.112 github.com    
199.232.28.133 raw.githubusercontent.com
[root@k8s-master ~]# wget -c https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
--2020-05-28 10:17:55--  https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
正在解析主机 raw.githubusercontent.com (raw.githubusercontent.com)... 199.232.28.133
正在连接 raw.githubusercontent.com (raw.githubusercontent.com)|199.232.28.133|:443... 已连接。
已发出 HTTP 请求,正在等待回应... 200 OK
长度:14366 (14K) [text/plain]
正在保存至: “kube-flannel.yml”

100%[========================================================================================================>] 14,366      13.3KB/s 用时 1.1s   

2020-05-28 10:18:17 (13.3 KB/s) - 已保存 “kube-flannel.yml” [14366/14366])
# 修改文件
[root@k8s-master ~]# vim kube-flannel.yml
·····
net-conf.json: |
    {
      "Network": "10.200.0.0/16",     ##Pod的网段地址,要和初始化集群是一致。
      "Backend": {
        "Type": "vxlan"
      }
    }
....省略部分内容
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-amd64      ##拉取镜像名字
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=ens32   ##如果是多网卡,要指定具体网卡。单网卡不需要指定
        resources:
          requests:
# 安装flannel网络
[root@k8s-master ~]# kubectl  apply -f kube-flannel.yml 
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
[root@k8s-master ~]# kubectl  get  pod -n kube-system
NAME                                 READY   STATUS    RESTARTS   AGE
coredns-7ff77c879f-kpfrj             1/1     Running   0          14h
coredns-7ff77c879f-tzcrv             1/1     Running   0          14h
etcd-k8s-master                      1/1     Running   2          14h
kube-apiserver-k8s-master            1/1     Running   2          14h
kube-controller-manager-k8s-master   1/1     Running   2          14h
kube-flannel-ds-amd64-qgq48          1/1     Running   0          6m25s    ###保证运行状态
kube-proxy-nhr6g                     1/1     Running   3          14h
kube-scheduler-k8s-master            1/1     Running   2          14h
# kube-flannel-ds-amd64  值都为1
[root@k8s-master ~]# kubectl get ds -l app=flannel -n kube-system
NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
kube-flannel-ds-amd64     1         1         1       1            1           <none>          6m56s
kube-flannel-ds-arm       0         0         0       0            0           <none>          6m56s
kube-flannel-ds-arm64     0         0         0       0            0           <none>          6m56s
kube-flannel-ds-ppc64le   0         0         0       0            0           <none>          6m56s
kube-flannel-ds-s390x     0         0         0       0            0           <none>          6m55s

7.node节点加入集群

[root@k8s-node-1 ~]# kubeadm join 10.0.0.30:6443 --token e42otg.b3alah02nh598ydv \
    --discovery-token-ca-cert-hash sha256:9836557fac14463c7f9518d0f6568610ed1d8f4da5e212374beec4d58cce0566
W0528 11:52:39.776037    3666 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@k8s-node-2 ~]# kubeadm join 10.0.0.30:6443 --token e42otg.b3alah02nh598ydv \
    --discovery-token-ca-cert-hash sha256:9836557fac14463c7f9518d0f6568610ed1d8f4da5e212374beec4d58cce0566
W0528 11:52:35.961497    3523 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

# 查看节点加入状态,若不是ready状态则查看是不是节点的镜像没有拉取成功
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
k8s-master   Ready    master   16h   v1.18.3
k8s-node-1   Ready    <none>   45m   v1.18.3
k8s-node-2   Ready    <none>   45m   v1.18.3

8.测试集群

[root@k8s-master ~]# cat  nginx-ds.yml 
apiVersion: v1    # API version for the Service resource
kind: Service     # Resource type: Service fronts the DaemonSet pods
metadata:         # Resource metadata
  name: nginx-ds  # Resource name; must be unique within the namespace
  labels:         # Resource labels
    app: nginx-ds
spec:
  type: NodePort  # Expose the service on a port of every node
  selector:         # Selector for backing pods
    app: nginx-ds   # Must match the pod template label below
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: apps/v1
kind: DaemonSet   # One nginx pod per (schedulable) node
metadata:
  name: nginx-ds
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      app: nginx-ds
  template:
    metadata:
      labels:
        app: nginx-ds   # Matched by the Service selector above
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
执行yml文件在两个节点上安装nginx服务
[root@k8s-master ~]# kubectl  apply  -f  nginx-ds.yml
service/nginx-ds created
daemonset.apps/nginx-ds created
[root@k8s-master ~]# kubectl get pod -o wide
NAME             READY   STATUS    RESTARTS   AGE     IP           NODE         NOMINATED NODE   READINESS GATES
nginx-ds-7jzlj   1/1     Running   0          3m25s   10.200.2.2   k8s-node-2   <none>           <none>
nginx-ds-snvcp   1/1     Running   0          3m25s   10.200.1.2   k8s-node-1   <none>           <none>
[root@k8s-master ~]# kubectl get services -o wide 
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE     SELECTOR
kubernetes   ClusterIP   10.2.0.1      <none>        443/TCP        20m     <none>
nginx-ds     NodePort    10.2.100.15   <none>        80:31350/TCP   3m28s   app=nginx-ds

8.1.访问

[root@k8s-master ~]# curl 10.200.1.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

[root@k8s-master ~]# curl 10.200.2.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

2cd5271610d693e27adf6c0dc2219672.png

19b5d7c318579d726ddbd01ab8222f8e.png

9.安装Dashboard UI

# 下载官网文件
[root@k8s-master ~]# wget -c https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended.yaml
--2020-05-28 14:34:42--  https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended.yaml
正在解析主机 raw.githubusercontent.com (raw.githubusercontent.com)... 199.232.28.133
正在连接 raw.githubusercontent.com (raw.githubusercontent.com)|199.232.28.133|:443... 已连接。
已发出 HTTP 请求,正在等待回应... 200 OK
长度:7552 (7.4K) [text/plain]
正在保存至: “recommended.yaml”

100%[===================================================================================================>] 7,552        261B/s 用时 29s     

2020-05-28 14:35:23 (261 B/s) - 已保存 “recommended.yaml” [7552/7552])


---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort      ##开放服务为NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 32443 ##访问端口
  selector:
    k8s-app: kubernetes-dashboard

---
.................
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin    ##添加该行,修改名字为集群管理员
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
## 安装kubernetes-dashboard
[root@k8s-master ~]# kubectl apply -f recommended.yaml 
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
## 查看服务是否运行
[root@k8s-master ~]# kubectl get pods --namespace=kubernetes-dashboard -o wide
NAME                                         READY   STATUS    RESTARTS   AGE    IP           NODE         NOMINATED NODE   READINESS GATES
dashboard-metrics-scraper-6b4884c9d5-wdc8f   1/1     Running   0          114s   10.200.1.4   k8s-node-1   <none>           <none>
kubernetes-dashboard-7bfbb48676-shr48        1/1     Running   0          114s   10.200.2.4   k8s-node-2   <none>           <none>
[root@k8s-master ~]# kubectl --namespace=kubernetes-dashboard get service kubernetes-dashboard
NAME                   TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.2.19.77   <none>        443:32443/TCP   2m2s
## 获取登录token
[root@k8s-master ~]# kubectl describe secret -n kubernetes-dashboard \
> $(kubectl get secret -n kubernetes-dashboard |grep \
> kubernetes-dashboard-token | awk '{print $1}') |grep token | awk '{print $2}'
kubernetes-dashboard-token-zw756
kubernetes.io/service-account-token
eyJhbGciOiJSUzI1NiIsImtpZCI6Ilh6SVFiV3c4RkFPRURnalB2TXdBcU52M3UyQ21ldDgyZW01V3BocHl4WE0ifQ.eyJ
pc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY
2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJ
rdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi16dzc1NiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZ
S1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2V
ydmljZS1hY2NvdW50LnVpZCI6IjU3ZDFkYjI1LTk0NGUtNDFjMC05OTEzLWQ1ZGIxNGRiMTU4ZSIsInN1YiI6InN5c3Rlb
TpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.8-QABzeWCR2qXF
7-adlKsdiVgWjf34FPsCDZvxxGM9Iz1K10l5bBHkvdCTj9AhN6jMWrzRTxirH85ilgzOtRdJ_-eI2LdMMQL5Hf4zDJHsik
V7fX4RWk1NJJmc11jkL8z0I_uTZSgzKGgDyLIVHg7bZUaVz22-MiPoooaT3QPn3lAdBQOKA842nM1ouZRfFknFaMkgDNYJ
GdIcNNG_gbIyjrNYLC1eJ2Bz1SoJsjJOXEhnI_7KSqVIGlFjvBYw3dEpQGJY1UCmyioYAnh8noHjn4jQlM6dYCOfhOyLBh
X8HyL4vw0RsaRND15IWABiwXqZxmb6iTxbrGqZTeLBksTA

7a8452fc29048d9ef60c593e2a204ad3.png

82782b204d2ab63ebca352231c5f846a.png

a314aaea292b60fe3e1d21eb113b607b.png

10.测试集群

## 新建用户
[root@k8s-master ~]# vim   admin-user.yaml 
[root@k8s-master ~]# cat admin-user.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
[root@k8s-master ~]# kubectl  apply -f  admin-user.yaml
serviceaccount/admin-user created
## 绑定用户关系
[root@k8s-master ~]# vim  admin-user-role-binding.yaml 
[root@k8s-master ~]# cat admin-user-role-binding.yaml 
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
[root@k8s-master ~]# kubectl  apply -f  admin-user-role-binding.yaml 
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
[root@k8s-master ~]# kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-xtg2w
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: ee07b881-ec17-457d-8e2c-b1798cc22991

Type:  kubernetes.io/service-account-token

Data
====
token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilh6SVFiV3c4RkFPRURnalB2TXdBcU52M3UyQ21ldDgyZW01V3BocHl
4WE0ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9
uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm
5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXh0ZzJ3Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY
291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51
aWQiOiJlZTA3Yjg4MS1lYzE3LTQ1N2QtOGUyYy1iMTc5OGNjMjI5OTEiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ
6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.sZiIf-tYcJbJa2fjOgBWOH7NVeDHb0hn3U-KNKRanlmnXPH5y
ppCSLY8A4HdX7b3mVOVF1-qnbuXjrkfBMSHJzawjnKxVrmkekepNEO_v7oTHhkw9Ysggn-r3RS6HH3en7-2AcDskn0FlCMm
BkAtrGHVZ59rOuRmTh_KivQ9XXB1dwNibMaSJp4oaUa48QY-QMHn6lecwLE9e8KPGDNfZNzfcn-sCx7T_IXaoEABec7XvTB
NfxNcTqqlx8nOs5cPmaRHfE0U7mCVcIlRT6NffPBDB3HAi42Wgfw_oRbhTzbrHC57OpKrt7sRQz4axEqh2cfG6dnjLP23Oe
5hGyDUMw
ca.crt:     1025 bytes
namespace:  20 bytes

c6b63b52db0c8389aef45746af66cd3e.png

0172c5f8dcbcbcbe22831016f0c73c4f.png

a4852b454dc1c2e4f979a891a4339c27.png
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值