One-click Kubernetes deployment scripts in Python 3, plus the pod creation flow, StatefulSet, RBAC, and more

Script dependencies

python3.x (on CentOS 7, just run yum -y install python3)

Planning JSON file

Fill in the hostnames and IP addresses of the nodes you actually plan to deploy on, plus the Kubernetes version to install.
This plan is read in as global variables by the deployment scripts.
k8s_plan.json

{
    "master_info":[{"ipaddr":"192.168.62.131"},{"hostname":"k8s-master.mageedu.com"}],
    "node01_info":[{"ipaddr":"192.168.62.130"},{"hostname":"k8s-node01.mageedu.com"}],
    "node02_info":[{"ipaddr":"192.168.62.129"},{"hostname":"k8s-node02.mageedu.com"}],
    "k8s_version":"1.23.1"
}

Initialization script

command.py must be run on all three nodes and takes care of initialization:
it disables SELinux, configures the base yum repository (this script uses the Tsinghua University mirror),
disables the firewall, enables IPv4/IPv6 bridge forwarding, and installs docker, kubelet, kubeadm, and so on.

#!/usr/bin/python3.6
# -*- coding: utf-8 -*-

import os
import socket
import json
from pathlib import Path
import shutil
import time

# Global variables loaded from the plan file
with open('/root/k8s_plan.json','r') as f:
    data = f.read()

    dict_plan = json.loads(data)

    master_info = dict_plan['master_info']
    node01_info = dict_plan['node01_info']
    node02_info = dict_plan['node02_info']
    k8s_version = dict_plan['k8s_version']
    master_ipaddr,master_hostname = master_info
    node01_ipaddr,node01_hostname = node01_info
    node02_ipaddr,node02_hostname = node02_info

def stop_firewalld():
    cmds = [
        'systemctl stop firewalld >> /dev/null',
        'systemctl disable firewalld >> /dev/null',
        'systemctl stop NetworkManager >> /dev/null',
        'systemctl disable NetworkManager >> /dev/null'
    ]

    for cmd in cmds:
        if os.system(cmd) == 0:
            # Report each command that completed successfully
            print(cmd, 'executed successfully')

def setting_hosts():
    data = [
        '%s %s\n' %(master_ipaddr['ipaddr'],master_hostname['hostname']),
        '%s %s\n' %(node01_ipaddr['ipaddr'],node01_hostname['hostname']),
        '%s %s\n' %(node02_ipaddr['ipaddr'],node02_hostname['hostname'])
    ]
    with open('/etc/hosts','r') as f:
        file_data = f.readlines()
    for i in data:
        if i in file_data:
            print(i, 'already present in /etc/hosts, nothing to change')
        else:
            with open('/etc/hosts','a') as f:
                f.write(i)
                print('added mapping %s to /etc/hosts' %(i))
            
def stop_selinux(old_str,new_str):
    with open('/etc/selinux/config','r') as f:
        data = f.read()
    if new_str in data:
        return 'SELinux is already disabled!'
    new_data = data.replace(old_str,new_str)

    with open('/etc/selinux/config','w') as f:
        f.write(new_data)

    return 'SELinux disabled successfully!'

def add_Ipv6Rule():
    with open('/etc/sysctl.conf','r') as f:
        data = f.read()
    if 'net.bridge.bridge-nf-call-ip6tables = 1' in data and 'net.bridge.bridge-nf-call-iptables = 1' in data:
        return 'IPv4/IPv6 bridge forwarding rules are already enabled'
    with open('/etc/sysctl.conf','a') as f:
        f.writelines([
            'net.bridge.bridge-nf-call-ip6tables = 1\n',
            'net.bridge.bridge-nf-call-iptables = 1\n'
        ])

    if os.system('modprobe br_netfilter >> /dev/null') == 0 and os.system('sysctl -p >> /dev/null') == 0:
        return 'IP forwarding rules enabled successfully!'
    return 'failed to enable IP forwarding rules'

def stop_swap(old_str,new_str):
    os.system('swapoff -a >> /dev/null')
    with open('/etc/fstab','r') as f:
        data = f.read()
    if new_str in data:
        return 'swap is already permanently disabled'
    with open('/etc/fstab','w') as f:
        f.write(data.replace(old_str,new_str))
    return 'swap permanently disabled'

def install_docker():
    cmds = [
        'yum -y install yum-utils device-mapper-persistent-data lvm2 >> /dev/null',
        'yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo >> /dev/null',
        'yum -y install docker-ce docker-ce-cli containerd.io >> /dev/null',
        'systemctl start docker >> /dev/null'
    ]
    print('~~~~~~~~~~ Installing docker and configuring registry mirrors; this takes a minute or two, please wait ~~~~~~~~~~~~~')
    for cmd in cmds:
        if os.system(cmd) == 0:
            print(cmd, 'executed successfully!')

    # Registry mirrors, systemd cgroup driver, and log/storage settings
    daemon_config = {
        'registry-mirrors': [
            'https://docker.mirrors.ustc.edu.cn',
            'https://hub-mirror.c.163.com',
            'https://reg-mirror.qiniu.com',
            'https://registry.docker-cn.com'
        ],
        'exec-opts': ['native.cgroupdriver=systemd'],
        'log-driver': 'json-file',
        'log-opts': {'max-size': '200m'},
        'storage-driver': 'overlay2'
    }
    with open('/etc/docker/daemon.json','w') as f:
        json.dump(daemon_config, f, indent=4)
    time.sleep(1)
    cmds = [
        'sudo systemctl daemon-reload',
        'sudo systemctl restart docker',
    ]
    for cmd in cmds:
        if os.system(cmd) == 0:
            print(cmd, 'executed successfully')
    CgroupDriver = os.popen('docker info | grep -w "Cgroup Driver"').read()
    print(CgroupDriver)

def setting_YumRepo():
    base_dir = '/etc/yum.repos.d/'

    # Create a bak directory for the existing repo files

    back_dir = Path(base_dir) / 'bak'

    if not back_dir.exists():
        back_dir.mkdir()

        # Move everything under base_dir into back_dir
        for file in Path(base_dir).iterdir():
            if file.is_file():
                shutil.move(file,back_dir / file.name)

        # Restore only CentOS-Base.repo, which gets rewritten below
        for file in Path(back_dir).iterdir():
            if file.is_file() and file.name == 'CentOS-Base.repo':
                shutil.move(file,Path(base_dir) / file.name)

        if os.system("sed -e 's|^mirrorlist=|#mirrorlist=|g' \
                    -e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \
                    -i.bak \
                    /etc/yum.repos.d/CentOS-*.repo") == 0:
            return 'Tsinghua mirror configured successfully!'
        return 'failed to configure the Tsinghua mirror!'
    return 'Found an existing /etc/yum.repos.d/bak directory. If the yum repos are already configured, ignore this; otherwise move the files out of bak and delete that directory before re-running.'

def editHostName():
    hostname = socket.gethostname()
    ip = socket.gethostbyname(hostname)
    with open('/etc/hosts','r') as f:
        data = f.readlines()
    for line in data:
        if ip in line:
            ipaddr,domain = line.split()
            set_hostname = 'hostname %s'%(domain)
            if os.system(set_hostname) == 0:
                print(set_hostname, 'executed successfully!')
                with open('/etc/hostname','w') as f:
                    f.write(domain + '\n')
                    print('hostname set from /etc/hosts; takes effect after reboot')

    

def setting_k8sRepo():
    data = [
        '[kubernetes]\n',
        'name=Kubernetes\n',
        'baseurl=https://mirrors.huaweicloud.com/kubernetes/yum/repos/kubernetes-el7-$basearch\n',
        'enabled=1\n',
        'gpgcheck=1\n',
        'repo_gpgcheck=0\n',
        'gpgkey=https://mirrors.huaweicloud.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.huaweicloud.com/kubernetes/yum/doc/rpm-package-key.gpg\n'
    ]
    with open('/etc/yum.repos.d/kubernetes.repo','w') as f:
        f.writelines(data)

    if os.system('yum install -y kubelet-%s kubeadm-%s kubectl-%s --disableexcludes=kubernetes >> /dev/null' %(k8s_version,k8s_version,k8s_version)) == 0 and os.system('systemctl enable --now kubelet >> /dev/null') == 0:

        return 'kubeadm, kubelet and kubectl deployed, version %s' %(k8s_version)
    return 'failed to install kubeadm/kubelet/kubectl'

def install_dockerd_cri():
    path = '/root/cri-dockerd-0.2.6-3.el7.x86_64.rpm'
    installed = os.popen('rpm -qa | grep cri-dockerd-0.2.6-3.el7.x86_64').read().strip()
    if not os.path.exists(path) and installed != 'cri-dockerd-0.2.6-3.el7.x86_64':
        return 'cri-dockerd-0.2.6-3.el7.x86_64.rpm is not under /root/ and cri-dockerd-0.2.6-3.el7.x86_64 is not installed. Recent Kubernetes releases need this package to keep using docker; download it from: https://github.com/Mirantis/cri-dockerd/releases/tag/v0.2.6'
    elif installed != 'cri-dockerd-0.2.6-3.el7.x86_64' and os.system('rpm -ivh %s >> /dev/null' %(path)) == 0:
        os.system('systemctl start cri-docker')
        os.system('systemctl enable cri-docker')
        return 'cri-dockerd installed successfully; configuring it next'
    return 'cri-dockerd is already deployed, nothing to do'
    
def setting_CriConfig(old_str,new_str):
    with open('/usr/lib/systemd/system/cri-docker.service','r') as f:
        data = f.read()
    if new_str in data:
        return 'cri-dockerd is already configured, nothing to do'
    with open('/usr/lib/systemd/system/cri-docker.service','w') as f:
        f.write(data.replace(old_str,new_str))
    os.system('systemctl daemon-reload && systemctl restart cri-docker.service >> /dev/null')
    os.system('systemctl start cri-docker && systemctl enable cri-docker')
    return 'cri-dockerd configured successfully!'
        

def setting_kubelet():
    with open('/etc/sysconfig/kubelet','w') as f:
        data = 'KUBELET_KUBEADM_ARGS="--container-runtime=remote --container-runtime-endpoint=/run/cri-dockerd.sock"\n'
        f.write(data)
        print('kubelet configured! Run /root/kubernetes_init.py to initialize the cluster')
            







print(setting_YumRepo())
print('~~~~~~~~~~ Initializing the environment ~~~~~~~~~~~~~')
stop_firewalld()
setting_hosts()
editHostName()
print(stop_selinux('SELINUX=enforcing','SELINUX=disabled'))

print(add_Ipv6Rule())

print(stop_swap('/dev/mapper/centos-swap swap                    swap    defaults        0 0','#/dev/mapper/centos-swap swap                    swap    defaults        0 0'))

print('~~~~~~~~~~~ Base configuration finished ~~~~~~~~~~~')

install_docker()

print('~~~~~~~~~~ Installing and configuring kubeadm, kubelet and kubectl ~~~~~~~~~~~~~~')
print(setting_k8sRepo())
setting_kubelet()
print(install_dockerd_cri())

print(setting_CriConfig('ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd://','ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-cache-dir=/var/lib/cni/cache --cni-conf-dir=/etc/cni/net.d'))



t1 = input('Environment initialization is done. The remaining steps run on the master node. SELinux and hostname changes need a reboot to take effect; reboot now? y/n ')
if t1 == 'y' or t1 == 'Y':
    os.system('reboot')


Run the Kubernetes initialization script (on the master node)

If initialization fails, e.g. kubelet will not start, inspect the failure with journalctl -u kubelet | grep -i failed.
Once the cause is fixed, run the initialization script again.

#!/usr/bin/python3.6
# -*- coding: utf-8 -*-

import os
import json

# Global variables loaded from the plan file
with open('/root/k8s_plan.json','r') as f:
    data = f.read()

    dict_plan = json.loads(data)

    master_info = dict_plan['master_info']
    node01_info = dict_plan['node01_info']
    node02_info = dict_plan['node02_info']
    k8s_version = dict_plan['k8s_version']
    master_ipaddr,master_hostname = master_info
    node01_ipaddr,node01_hostname = node01_info
    node02_ipaddr,node02_hostname = node02_info

def download_image():
    print('------------------------ Pulling images; how long this takes depends on your bandwidth -------------------------------')
    if os.system('kubeadm config images pull --cri-socket unix:///run/cri-dockerd.sock --image-repository=registry.aliyuncs.com/google_containers') == 0:
        print('images pulled successfully')

def cluster_init():
    print('----------------- Running cluster initialization ------------------')
    os.system('kubeadm init --image-repository=registry.aliyuncs.com/google_containers --control-plane-endpoint="%s" --kubernetes-version=v%s --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12  --cri-socket unix:///run/cri-dockerd.sock --token-ttl=0  --upload-certs' %(master_ipaddr['ipaddr'],k8s_version))

download_image()

cluster_init()
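
After a successful init, kubeadm prints follow-up steps; the usual next move, taken from kubeadm's own suggested commands, is to set up kubectl access for the current user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config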

Orchestrating applications with StatefulSet

1. Stateful services: the application itself carries identity and permission state (the server knows which user a client is logged in as and what that user may do), e.g. MySQL, GitLab, a Harbor registry.
2. Stateless services: requests are independent of one another and the application keeps no per-client state (the server does not know which user a client is logged in as), e.g. nginx.

A StatefulSet must be able to tell its instances apart; they cannot simply stand in for each other (think of a MySQL primary versus its replicas).
How is that orchestrated?
Each instance is logically tagged with an ordinal index.

Headless Service: resolves the service name directly to pod IPs
serviceName ->> pod IP
pod IP ->> pod name
That pod name is effectively the unique identity of each instance.

StatefulSet has a hard dependency on a headless Service.
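
A quick way to see this from inside the cluster (a sketch, assuming the myapp headless Service and nginx StatefulSet defined below, run from any pod that has nslookup):

# The headless Service name resolves straight to the pod IPs:
nslookup myapp.default.svc.cluster.local
# And each instance gets a stable per-pod DNS name of the form
# <pod-name>.<service-name>.<namespace>.svc.cluster.local:
nslookup nginx-0.myapp.default.svc.cluster.local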

Instances are managed in a strict order:
creation (scale out): ascending, from the lowest ordinal up
deletion (scale in): descending, from the highest ordinal down

Suppose you define a stateful application with 3 replicas, giving 3 instances. With a plain volumes/PVC definition, all three would share a single volume. For a stateless app that is fine, but three Redis instances, say, need three independent volumes; with one shared PVC their data risks being overwritten or lost. The fix is a per-instance volume claim template (volumeClaimTemplates), which is really a PVC template: each instance gets its own PVC stamped out from it. Automatic provisioning then needs a StorageClass to dynamically provision a PV for each claim. Since node specs vary and the set may be scaled at any moment, pre-creating PVs by hand is practically impossible, so nfs-csi provisions the PVs automatically.

1. Deploy an NFS server
2. Deploy nfs-csi
3. Deploy the StorageClass
The first three steps are not repeated here; https://github.com/kubernetes-csi/csi-driver-nfs documents the deployment.
4. Finally apply our StatefulSet:
kubectl apply -f nginx-statefullset.yaml

kind: Service
apiVersion: v1
metadata:
  name: myapp
spec:
  clusterIP: None
  selector:
    app: myapp
  ports:
    - name: myapp
      port: 80
      targetPort: 80

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nginx
  namespace: default
spec:
  selector:
    matchLabels:
      app: myapp # has to match .spec.template.metadata.labels
  serviceName: "myapp"
  replicas: 3 # by default is 1
  template:
    metadata:
      labels:
        app: myapp # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: nginx
          image: nginx:v0.8
          ports:
            - containerPort: 80
              name: web
          volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: "nfs-csi"
        resources:
          requests:
            storage: 1Gi

As expected, the instances are laid out by ordinal index:

NAMESPACE   NAME      READY   STATUS    RESTARTS   AGE
default     nginx-0   1/1     Running   0          15m
default     nginx-1   1/1     Running   0          13m
default     nginx-2   1/1     Running   0          11m
[root@k8s-master k8s]#

PVCs dynamically provisioned through the StorageClass

[root@k8s-master k8s]# kubectl get pvc -A
NAMESPACE   NAME          STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
default     www-nginx-0   Bound    pvc-90dbbcb8-b898-42d2-9327-6478336e6d56   1Gi        RWO            nfs-csi        18m
default     www-nginx-1   Bound    pvc-fd5ec099-ab03-4f45-89fb-ec3305b93ca1   1Gi        RWO            nfs-csi        14m
default     www-nginx-2   Bound    pvc-4cfd4f89-5b38-4b9f-8bee-43bd4e595818   1Gi        RWO            nfs-csi        12m
[root@k8s-master k8s]#

PVs dynamically provisioned by nfs-csi

[root@k8s-master k8s]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                 STORAGECLASS   REASON   AGE
pvc-4cfd4f89-5b38-4b9f-8bee-43bd4e595818   1Gi        RWO            Delete           Bound    default/www-nginx-2   nfs-csi                 14m
pvc-90dbbcb8-b898-42d2-9327-6478336e6d56   1Gi        RWO            Delete           Bound    default/www-nginx-0   nfs-csi                 19m
pvc-fd5ec099-ab03-4f45-89fb-ec3305b93ca1   1Gi        RWO            Delete           Bound    default/www-nginx-1   nfs-csi                 15m
[root@k8s-master k8s]#

Scaling out

Edit the replicas field in the StatefulSet manifest (here we raise it to 4) and re-apply; an imperative alternative follows the output below:
kubectl apply -f nginx-statefullset.yaml

[root@k8s-master k8s]# kubectl get pod -A -l app=myapp
NAMESPACE   NAME      READY   STATUS    RESTARTS   AGE
default     nginx-0   1/1     Running   0          38m
default     nginx-1   1/1     Running   0          36m
default     nginx-2   1/1     Running   0          34m
default     nginx-3   1/1     Running   0          9s
[root@k8s-master k8s]#
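
Editing the manifest is not the only way; the same scale-out can be done imperatively with kubectl scale (equivalent effect on the StatefulSet above):

kubectl scale statefulset nginx --replicas=4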

The new PVC and PV are dynamically provisioned as well:

[root@k8s-master k8s]# kubectl get pvc -A -l app=myapp
NAMESPACE   NAME          STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
default     www-nginx-0   Bound    pvc-90dbbcb8-b898-42d2-9327-6478336e6d56   1Gi        RWO            nfs-csi        41m
default     www-nginx-1   Bound    pvc-fd5ec099-ab03-4f45-89fb-ec3305b93ca1   1Gi        RWO            nfs-csi        37m
default     www-nginx-2   Bound    pvc-4cfd4f89-5b38-4b9f-8bee-43bd4e595818   1Gi        RWO            nfs-csi        35m
default     www-nginx-3   Bound    pvc-d68c8b68-d22e-4358-a433-a0673085ab61   1Gi        RWO            nfs-csi        78s
[root@k8s-master k8s]# kubectl get pv -n default
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                 STORAGECLASS   REASON   AGE
pvc-4cfd4f89-5b38-4b9f-8bee-43bd4e595818   1Gi        RWO            Delete           Bound    default/www-nginx-2   nfs-csi                 35m
pvc-90dbbcb8-b898-42d2-9327-6478336e6d56   1Gi        RWO            Delete           Bound    default/www-nginx-0   nfs-csi                 41m
pvc-d68c8b68-d22e-4358-a433-a0673085ab61   1Gi        RWO            Delete           Bound    default/www-nginx-3   nfs-csi                 82s
pvc-fd5ec099-ab03-4f45-89fb-ec3305b93ca1   1Gi        RWO            Delete           Bound    default/www-nginx-1   nfs-csi                 37m
[root@k8s-master k8s]#


Scaling in

Here we scale down to a single replica.
Edit the replicas field in the StatefulSet manifest to 1 and re-apply:
kubectl apply -f nginx-statefullset.yaml

[root@k8s-master k8s]# kubectl get pod -A -l app=myapp
NAMESPACE   NAME      READY   STATUS    RESTARTS   AGE
default     nginx-0   1/1     Running   0          42m
[root@k8s-master k8s]#

After scaling in, the PVCs (and PVs) that belonged to the removed replicas are kept and must be deleted by hand; with a Delete reclaim policy the PV disappears once its PVC is deleted.
This retention is deliberate: if a removed replica is later scaled back in, it can reattach its old data, which is why a Retain reclaim policy for the PVs is usually the better choice.
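
An existing PV can be flipped from Delete to Retain in place with a standard kubectl patch (a sketch; substitute the real PV name):

kubectl patch pv pvc-90dbbcb8-b898-42d2-9327-6478336e6d56 \
  -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'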

[root@k8s-master k8s]# kubectl get pvc -A -l app=myapp
NAMESPACE   NAME          STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
default     www-nginx-0   Bound    pvc-90dbbcb8-b898-42d2-9327-6478336e6d56   1Gi        RWO            nfs-csi        71m
default     www-nginx-1   Bound    pvc-fd5ec099-ab03-4f45-89fb-ec3305b93ca1   1Gi        RWO            nfs-csi        67m
default     www-nginx-2   Bound    pvc-4cfd4f89-5b38-4b9f-8bee-43bd4e595818   1Gi        RWO            nfs-csi        66m
default     www-nginx-3   Bound    pvc-d68c8b68-d22e-4358-a433-a0673085ab61   1Gi        RWO            nfs-csi        31m
[root@k8s-master k8s]# kubectl delete pvc -n default www-nginx-1
persistentvolumeclaim "www-nginx-1" deleted
[root@k8s-master k8s]# kubectl delete pvc -n default www-nginx-2
persistentvolumeclaim "www-nginx-2" deleted
[root@k8s-master k8s]# kubectl delete pvc -n default www-nginx-3
persistentvolumeclaim "www-nginx-3" deleted
[root@k8s-master k8s]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                 STORAGECLASS   REASON   AGE
pvc-90dbbcb8-b898-42d2-9327-6478336e6d56   1Gi        RWO            Delete           Bound    default/www-nginx-0   nfs-csi                 72m
[root@k8s-master k8s]#

Note that this headless Service has no cluster IP; its job is to give each pod a stable name. To consume the workload as a regular service, you additionally create a
ClusterIP-type Service in front of the same pods (a sketch follows the listing below).

[root@k8s-master ~]# kubectl get svc -n default
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   29h
myapp        ClusterIP   None         <none>        80/TCP    9s
[root@k8s-master ~]#
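
A minimal sketch of such a companion ClusterIP Service (same selector as the headless one; the name myapp-svc is illustrative):

kubectl apply -f - <<EOF
kind: Service
apiVersion: v1
metadata:
  name: myapp-svc
spec:
  selector:
    app: myapp
  ports:
    - name: http
      port: 80
      targetPort: 80
EOF

Without clusterIP: None, the Service gets a normal cluster IP that load-balances across the pods.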

Orchestrating stateful applications with an operator

Orchestrating primary/replica style clusters such as MySQL or Redis directly with StatefulSet is still very hard and takes piles of configuration. An operator builds on
Deployment, StatefulSet, and Service, and layers application-specific operational logic on top.

An application's operator typically brings extra resource types, e.g. a kind: Kafka resource in which you declare things like how many ZooKeeper nodes you want;
the operator then builds the Kafka cluster on top of that ZooKeeper ensemble automatically.

Operator catalog: https://operatorhub.io/
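
As an illustration, with the Strimzi Kafka operator installed, a whole cluster is declared through a single custom resource; this is a sketch and the exact fields depend on the operator version:

cat <<EOF | kubectl apply -f -
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
  name: my-cluster
spec:
  kafka:
    replicas: 3                # Kafka broker count
    listeners:
      - name: plain
        port: 9092
        type: internal
        tls: false
    storage:
      type: ephemeral
  zookeeper:
    replicas: 3                # how many ZooKeeper nodes the operator should run
    storage:
      type: ephemeral
EOF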

How kubectl requests to the Kubernetes API server are processed and executed

(Figure: the kubectl request and execution flow)

The create / update / delete path

When you run, say, kubectl apply -f $yamlfile, kubectl converts the YAML to JSON and sends it to the apiserver.
The apiserver authenticates and authorizes the request; if the caller has the needed permission, the object is persisted to etcd and the change reaches the kubelet on the target node. The kubelet performs its own authentication and authorization checks, then calls the container runtime (here the Docker API via cri-dockerd) to create, update, or delete the pod's containers, watches for changes and reports instance status back to the apiserver in real time, and the result is returned to the user.

The query path

For a read such as kubectl get pod -A, the request goes to the apiserver, which authenticates and authorizes it, reads the data straight from etcd, and returns the result to the user.
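
Both paths can be watched in action: raising kubectl's verbosity makes it print the REST calls it sends to the apiserver (output abbreviated and illustrative):

kubectl get pod -A -v=8
# ... GET https://192.168.62.131:6443/api/v1/pods?limit=500 ...
kubectl apply -f nginx-statefullset.yaml -v=8
# ... PATCH https://192.168.62.131:6443/apis/apps/v1/namespaces/default/statefulsets/nginx ...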

The Kubernetes authentication system

The kubelet runs the control-plane components as static pods whose manifests live in /etc/kubernetes/manifests,
and it keeps watching those manifests for changes. To change the authentication setup, you edit kube-apiserver.yaml in that directory;
it is therefore generally safer to edit a copy elsewhere and swap it back in one move.
Edits to kube-apiserver.yaml are reloaded automatically; changes to the token file itself only take effect after the pod is restarted by hand.
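
One way to force that restart is the usual static-pod trick: move the manifest out of the watched directory, wait for the kubelet to tear the pod down, then move it back:

mv /etc/kubernetes/manifests/kube-apiserver.yaml /tmp/
# wait until the apiserver container is gone, then:
mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/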

[root@k8s-master manifests]# pwd
/etc/kubernetes/manifests
[root@k8s-master manifests]# ls
etcd.yaml  kube-apiserver.yaml  kube-controller-manager.yaml  kube-scheduler.yaml
[root@k8s-master manifests]#

[root@k8s-master manifests]# cp kube-apiserver.yaml /root/
[root@k8s-master ~]# cd /root
[root@k8s-master ~]# vim kube-apiserver.yaml
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=192.168.62.131
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt # enable client certificate authentication
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true # bootstrap token authentication
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443 # port the apiserver listens on
    - --service-account-issuer=https://kubernetes.default.svc.cluster.local
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
    - --service-cluster-ip-range=10.96.0.0/12
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    image: registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.1

Kubelet API endpoints accessible from outside

(Figure: externally accessible kubelet API endpoints)
Cluster certificate directory

[root@k8s-master pki]# ll /etc/kubernetes/pki/ca.crt
-rw-r--r-- 1 root root 1099 Oct 27 15:10 /etc/kubernetes/pki/ca.crt
[root@k8s-master pki]#

kubelet configuration file

[root@k8s-master kubelet]# ll /var/lib/kubelet/config.yaml
-rw-r--r-- 1 root root 970 Oct 27 15:10 /var/lib/kubelet/config.yaml
[root@k8s-master kubelet]#

Kubelet authentication: a request that authenticates moves on to authorization; one that fails is treated as the anonymous user, and anonymous access can be switched off entirely (see the quick check after the config listing).

[root@k8s-master kubelet]# cat config.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false # whether anonymous access is allowed
  webhook:
    cacheTTL: 0s
    enabled: true # whether webhook authentication is enabled
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt # any certificate signed by this CA passes authentication
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
[root@k8s-master kubelet]#
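
With anonymous access disabled, an unauthenticated request to the kubelet's secure port (10250 by default) is rejected outright; a quick check:

curl -k https://127.0.0.1:10250/pods
# expected response when anonymous auth is off: Unauthorized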

Accessing namespaced resources through the Kubernetes API

Request format:
https://API_SERVER:PORT/api/v1/namespaces/<NS_NAME>/<RESOURCE_TYPE>/[<OBJECT_NAME>]

For example, to access the pod mypod in the default namespace:
curl https://API_SERVER/api/v1/namespaces/default/pods/mypod
[root@k8s-master kubelet]# curl -k https://127.0.0.1:6443/api/v1/namespaces/default/nginx-0
[root@k8s-master kubelet]# curl -k https://127.0.0.1:6443/api/v1/namespaces/kube-system/pods/
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "pods is forbidden: User \"system:anonymous\" cannot list resource \"pods\" in API group \"\" in the namespace \"kube-system\"",
  "reason": "Forbidden",
  "details": {
    "kind": "pods"
  },
  "code": 403
}[root@k8s-master kubelet]#

The request is rejected with 403 because:

  • the caller is identified as the anonymous user
  • the configuration in /etc/kubernetes/manifests/kube-apiserver.yaml does not allow anonymous users to access resources, so the request is refused
  • Defining Kubernetes users

    Create token.csv under /etc/kubernetes/manifests/auth-token/:

    [root@k8s-master auth-token]# ls /etc/kubernetes/manifests/auth-token
    token.csv
    [root@k8s-master auth-token]#
    

    Generate a random token with openssl

    [root@k8s-master auth-token]# echo "$(openssl rand -hex 3).$(openssl rand -hex 8)"
    ae20f8.25a0d2aab45c759e
    [root@k8s-master auth-token]#
    

    Define the token.csv file (format: token,user,uid,group)

    [root@k8s-master auth-token]# cat token.csv
    b2f955.64500dd951774c7d,tom,1001,kubusers
    85a767.655200145ab133f9,jerry,1002,kubeusers
    ab8dd7.321d5e33c3e7cbbb,zhangsan,1003,kubeadmin
    [root@k8s-master auth-token]#
    

    Edit kube-apiserver.yaml
    to point the apiserver at token.csv:

    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --token-auth-file=/etc/kubernetes/manifests/auth-token/token.csv
    - --etcd-servers=https://127.0.0.1:2379
    
    

    Add a volume mount for the token directory:

    - hostPath:
        path: /etc/kubernetes/manifests/auth-token/
        type: DirectoryOrCreate
      name: auth-token

    - mountPath: /etc/kubernetes/manifests/auth-token/
      name: auth-token
      readOnly: true
    
    

    After saving, the apiserver reloads the manifest automatically.

    From now on, requests to the Kubernetes API must carry one of the tokens just defined in the Authorization header.

    Access is still denied, but now because the user tom, though authenticated, has no permission to list namespaces.
    The error naming tom shows the apiserver can already deserialize the token into a user identity.

    C:\Users\Administrator>curl -H "Authorization: Bearer b2f955.64500dd951774c7d" -k https://192.168.62.131:6443/api/v1/namespaces/
    {
      "kind": "Status",
      "apiVersion": "v1",
      "metadata": {},
      "status": "Failure",
      "message": "namespaces is forbidden: User \"tom\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope",
    

    Kubernetes authorization modes

  • ABAC: grants access by policies that combine attributes (of resources, users, objects, and so on)
  • RBAC: which users belong to which groups, what permissions each group holds, and which group is tied to which namespace
  • Webhook: integrates external authorization systems
  • Node: an authorization mode dedicated to kubelets
  • The authorization modes a cluster uses are declared in the apiserver's manifest;
    the default is Node,RBAC

    [root@k8s-master ~]# less /etc/kubernetes/manifests/kube-apiserver.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      annotations:
        kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.62.131:6443
      creationTimestamp: null
      labels:
        component: kube-apiserver
        tier: control-plane
      name: kube-apiserver
      namespace: kube-system
    spec:
      containers:
      - command:
        - kube-apiserver
        - --advertise-address=192.168.62.131
        - --allow-privileged=true
        - --authorization-mode=Node,RBAC # the cluster's authorization modes
        - --client-ca-file=/etc/kubernetes/pki/ca.crt
        - --enable-admission-plugins=NodeRestriction
        - --enable-bootstrap-token-auth=true
        - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
        - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
        - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
        - --token-auth-file=/etc/kubernetes/manifests/auth-token/token.csv
        - --etcd-servers=https://127.0.0.1:2379
        - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    
    

    Permissions RBAC can grant to users and groups

  • Read-only verbs: watch, get, list
  • Read-write verbs: create, update, patch, delete, deletecollection (deletes every object of a given resource type)
  • Non-resource endpoints: get only
  • Role types

  • Cluster level: called a ClusterRole; defines a cluster-wide set of resource permissions, covering both cluster-scoped and namespaced resources
  • Namespace level: called a Role; defines a set of resource permissions within one namespace
  • Binding types

  • Cluster level: called a ClusterRoleBinding; attaches subjects (users, groups) to a ClusterRole
  • Namespace level: called a RoleBinding; attaches subjects (users, groups) to a Role or a ClusterRole
  • Generating the YAML with --dry-run and editing it before applying is usually the safer approach
  • --verb lists the verbs the role is granted, --resource the resources it may operate on; because this creates a Role, only namespaced resource types may be listed. reader is the role's name.
  • [root@k8s-master ~]# kubectl create role reader --verb=get,list,watch --resource=pods,services -o yaml --dry-run=client
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      creationTimestamp: null
      name: reader
    rules:
    - apiGroups:
      - ""
      resources:
      - pods
      - services
      verbs:
      - get
      - list
      - watch
    [root@k8s-master ~]# kubectl create role reader --verb=get,list,watch --resource=pods,services -o yaml --dry-run=client > reader_role_services_pods.yaml
    [root@k8s-master ~]# kubectl apply -f reader_role_services_pods.yaml --namespace=default
    role.rbac.authorization.k8s.io/reader created
    [root@k8s-master ~]# kubectl get role -n default
    NAME     CREATED AT
    reader   2023-10-30T14:38:49Z
    [root@k8s-master ~]#
    

    Creating it in the default namespace means it can get, list, and watch those resources within default.
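
    Once the role is bound to a user (RoleBindings are covered below), the grant can be double-checked from the master with impersonation; a sketch:

    kubectl auth can-i list pods --namespace default --as tom      # yes, once bound
    kubectl auth can-i delete pods --namespace default --as tom    # no: the role only grants get,list,watch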

    Defining a ClusterRole resource

  • A ClusterRole can cover cluster-scoped resources (namespaces, PVs, StorageClasses) as well as namespaced ones (StatefulSets, Deployments, DaemonSets, PVCs) across every namespace
  • [root@k8s-master ~]# kubectl create clusterrole reader-clusterrole --verb=get,list,watch --resource=pv,pvc,deployment -oyaml --dry-run=client
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      creationTimestamp: null
      name: reader-clusterrole
    rules:
    - apiGroups:
      - ""
      resources:
      - persistentvolumes
      - persistentvolumeclaims
      verbs:
      - get
      - list
      - watch
    - apiGroups:
      - apps
      resources:
      - deployments
      verbs:
      - get
      - list
      - watch
    [root@k8s-master ~]# kubectl create clusterrole reader-clusterrole --verb=get,list,watch --resource=pv,pvc,deployment -oyaml --dry-run=client > clusterrole.yaml
    [root@k8s-master ~]# kubectl apply -f clusterrole.yaml
    clusterrole.rbac.authorization.k8s.io/reader-clusterrole created
    [root@k8s-master ~]# kubectl get clusterrole
    

    No namespace is specified because a ClusterRole is itself a cluster-scoped resource.

    Binding users and groups to roles

    First, from a worker node, call the apiserver with jerry's token: authentication succeeds, but there is no permission yet.

    [root@k8s-node02 ~]# curl -H "Authorization: Bearer 85a767.655200145ab133f9" -k https://192.168.62.131:6443/api/v1/namespaces/default/
    {
      "kind": "Status",
      "apiVersion": "v1",
      "metadata": {},
      "status": "Failure",
      "message": "namespaces is forbidden: User \"jerry\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope",
      "reason": "Forbidden",
      "details": {
        "kind": "namespaces"
      },
      "code": 403
    }[root@k8s-node02 ~]#
    
    

    There is no access to the default namespace, so a RoleBinding is needed to attach the user to the reader role created earlier.

  • Create a role: Usage: kubectl create role NAME --verb=verb --resource=resource.group/subresource [--resource-name=resourcename] [--dry-run=server|client|none] [options]
  • Create a rolebinding: Usage: kubectl create rolebinding NAME --clusterrole=NAME|--role=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run=server|client|none] [options]
  • # Command format
    Usage:
      kubectl create role NAME --verb=verb --resource=resource.group/subresource [--resource-name=resourcename]
    [--dry-run=server|client|none] [options]
    
    Use "kubectl options" for a list of global command-line options (applies to all commands).
    [root@k8s-master k8s-rbac]#
    # First render the template with --dry-run and check it
    [root@k8s-master k8s-rbac]# kubectl create role nfs-role --verb=get,list,watch --resource=pods,services -oyaml --dry-run=client
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      creationTimestamp: null
      name: nfs-role
    rules:
    - apiGroups:
      - ""
      resources:
      - pods
      - services
      verbs:
      - get
      - list
      - watch
    [root@k8s-master k8s-rbac]#
    # Now create it in the target namespace; the role will operate on that namespace's resources with get, list, watch
    [root@k8s-master k8s-rbac]# kubectl create -f nfs-role.yaml --namespace=nfs
    role.rbac.authorization.k8s.io/nfs-role created
    [root@k8s-master k8s-rbac]#
    # RoleBinding creation format:
    Usage:
      kubectl create rolebinding NAME --clusterrole=NAME|--role=NAME [--user=username] [--group=groupname]
    [--serviceaccount=namespace:serviceaccountname] [--dry-run=server|client|none] [options]
    # Dry-run first and check the output
    [root@k8s-master k8s-rbac]# kubectl create rolebinding nfs-rolebinding --role=nfs-role --username=tom --group=kubeusers -oyaml --dry-run=client
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      creationTimestamp: null
      name: nfs-rolebinding
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: nfs-role
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: Group
      name: kubeusers
    [root@k8s-master k8s-rbac]#
    # No namespace was given during the dry-run, so edit the generated YAML and set it (always re-check dry-run output carefully)
    [root@k8s-master k8s-rbac]# kubectl apply -f nfs-tom-rolebinding --namespace=nfs
    rolebinding.rbac.authorization.k8s.io/nfs-tom-rolebinding created
    [root@k8s-master k8s-rbac]#
    # From another node, use tom's token to read a pod in the nfs namespace
    [root@k8s-node02 ~]# curl -H "Authorization: Bearer b2f955.64500dd951774c7d" -k https://192.168.62.131:6443/api/v1/namespaces/nfs/pods/nfs-server-594768d8b8-4qtq7
    {
      "kind": "Pod",
      "apiVersion": "v1",
      "metadata": {
        "name": "nfs-server-594768d8b8-4qtq7",
        "generateName": "nfs-server-594768d8b8-",
        "namespace": "nfs",
        "uid": "f7539c86-bc33-47c6-99b8-372c85ff1fbc",
        "resourceVersion": "141263",
        "creationTimestamp": "2023-10-30T12:49:35Z",
        "labels": {
          "app": "nfs-server",
          "pod-template-hash": "594768d8b8"
        },
        "ownerReferences": [
          {
            "apiVersion": "apps/v1",
            "kind": "ReplicaSet",
            "name": "nfs-server-594768d8b8",
            "uid": "3bcf96c5-8e80-47b8-915b-d5205d756ad3",
            "controller": true,
            "blockOwnerDeletion": true
          }
        ],
    # The data comes back normally
    

    Creating a ClusterRoleBinding

    Several built-in roles meant for granting to interactive users

  • cluster-admin: full access to cluster-scoped resources; when granted within a namespace scope, it also has full access to that namespace's resources as well as cluster-level ones, including the namespace object itself
  • admin: full access to namespaced resources, but not the namespace object itself
  • edit: close to admin; read/write on most namespaced objects, including Secrets, but may not view or modify Roles or RoleBindings
  • view: read-only access to most namespaced resources, excluding Roles, RoleBindings, and Secrets
    Create a RoleBinding that references the cluster-admin ClusterRole
    
  • # Format
    Usage:
      kubectl create rolebinding NAME --clusterrole=NAME|--role=NAME [--user=username] [--group=groupname]
    [--serviceaccount=namespace:serviceaccountname] [--dry-run=server|client|none] [options]
    
    Use "kubectl options" for a list of global command-line options (applies to all commands)
    [root@k8s-master auth-token]# kubectl create rolebinding admin-lisi-rolebind --clusterrole=cluster-admin --user=lisi --group=kubeadmin -n test1 -oyaml --dry-run=client
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      creationTimestamp: null
      name: admin-lisi-rolebind
      namespace: test1
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: cluster-admin
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: User
      name: lisi
    - apiGroup: rbac.authorization.k8s.io
      kind: Group
      name: kubeadmin
    [root@k8s-master auth-token]#
    [root@k8s-master auth-token]# kubectl apply -f admin-lisi-rolebind.yaml
    Warning: resource rolebindings/admin-lisi-rolebind is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
    rolebinding.rbac.authorization.k8s.io/admin-lisi-rolebind configured
    [root@k8s-master auth-token]#
    Access now works:
    [root@k8s-node02 ~]# curl -H "Authorization: Bearer ab8dd7.321d5e33c3e7cbbb" -k https://192.168.62.131:6443/api/v1/namespaces/test1/services/
    {
      "kind": "ServiceList",
      "apiVersion": "v1",
      "metadata": {
        "resourceVersion": "165973"
      },
      "items": []
    }[root@k8s-node02 ~]#
    

    serviceaccount

    Command format for creating a serviceaccount:
    Usage:
      kubectl create serviceaccount NAME [--dry-run=server|client|none] [options]
    Use "kubectl options" for a list of global command-line options (applies to all commands).

    [root@k8s-master learning-k8s]# kubectl create serviceaccount sa-test
    serviceaccount/sa-test created
    [root@k8s-master learning-k8s]#
    

    When a ServiceAccount is created, Kubernetes automatically creates a Secret for it that holds the token, ca.crt, and namespace:

    [root@k8s-master learning-k8s]# kubectl get secrets sa-test-token-kdzjj -oyaml
    apiVersion: v1
    data:
      ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1UQXlOekEzTVRBeE5Gb1hEVE16TVRBeU5EQTNNVEF4TkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTGRxCjl5Nld0eUt2ZTZvREhzQmIzQXhQcE5kN3VjdUdQUzZSOUZmNkVhaDJlUEpXZWZKRUcvY3NEeVlUeW03N0hkUHcKSDUzYkdQWGw1OHY0aWdDYldHeFBhaFUzRldUV3BBV2tReFpUbGFBU3IyNUo5c1VXc1RkMkxaTmd1VzBkaFJUcQp0aXUxbDh3U2lqZzUyVUVVdjRmWGREaEVIaVFsSmFjclVZUVRSNU5Zd2ppblJxYjNaV1hiVm1ZcWsrQk9UMW8vCmdXbGhhMXl1RDZ4S3Yyd2t0ejhFUTVmUGRSVzQxbFJaSnAvMGlXcm1wMnhrOG1ycFBnTmFEYUFvWEJPNTVSa0gKaHFWZHVKcWFDZmljeFhvNkpIQmM0VXZ0ZWdkcUM1Wm0zZVkxajV1d05VUnRSYWNQRHlRUDZ2KytHSDBRRW9xVgpPUGdqa0owRVNxZXdFZDhFSFZjQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZHVUVrN0FXY094NGNGMmwxT01TcnNZNGpJczlNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRkNINE1hNUlzWWJUdzU4NFJSYQpYek5pdHFmUUlOY3kzTC95R3FXNlFoQ1gxdmh3aHNCcGEzZnNLTU0yRm95QjZ3MzZTdnA3U0xGOUNWUzlUbkVVCnRnbXFjZWFqdnJxaXBRaUpTOGsyMDc2WDh3Y05zQUxsZ21BaHI5aSt6L21IMDRmOWpuNjI3ZUFnZXpmMldOdWQKZ3pQQUlrbmg1Q0ZHRW9OblZBZklUdDNTMG81SVFMbkFVcjhMUG1TN1U4RldkM0ZEK0w5WWpHWkJCYlZzZGF5TwpqMXExS1RDaXZtdHlLTkFUYllWSmp5OHhNWm9NZVpxOWh2aEpPSmNDVWpXK2hkTE4rRVh3ckJOOTZRdWkzTmZnCnFMTER3ajdnbkZOWms4cGQ4bmVGcGtJMEI1T3JaN2xEWFp0K2ZBTDZqTHo0Y1NJUFJPVERLelZkZk9ITG1raDIKQW1NPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
      namespace: ZGVmYXVsdA==
      token: ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklsOWZjakpCVFZwR2JEaHJkM1ZDU20wek5VODNRalZDTFhaYVIyRnFVbUp4ZFc1QlpIWkdXVlZFYjAwaWZRLmV5SnBjM01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlPaUprWldaaGRXeDBJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5elpXTnlaWFF1Ym1GdFpTSTZJbk5oTFhSbGMzUXRkRzlyWlc0dGEyUjZhbW9pTENKcmRXSmxjbTVsZEdWekxtbHZMM05sY25acFkyVmhZMk52ZFc1MEwzTmxjblpwWTJVdFlXTmpiM1Z1ZEM1dVlXMWxJam9pYzJFdGRHVnpkQ0lzSW10MVltVnlibVYwWlhNdWFXOHZjMlZ5ZG1salpXRmpZMjkxYm5RdmMyVnlkbWxqWlMxaFkyTnZkVzUwTG5WcFpDSTZJalkwTTJSbE5EVXpMVFZsTTJZdE5EWTNZaTA1WmpWaUxXTmhaVFUyTmpVME5HUTVPQ0lzSW5OMVlpSTZJbk41YzNSbGJUcHpaWEoyYVdObFlXTmpiM1Z1ZERwa1pXWmhkV3gwT25OaExYUmxjM1FpZlEuVTl3T3RWZmRhSDZwZ2JiSDRvbVNmNWtxVzRHdHFORm1oNU1aZ19saU9nbEN5cllWTmVvc0VBZWJZOWpuWGM4REFuVG9uTlpIRkl3Q3hDc2pvM3NaMHNDMk00Y2VmOEpsNDByRnpiSnhXbW9OWm1la3l0YlZyT2ZhTXdLMkttWTgxM29ncGhmMnh2WVhqNnY4MU1xZjNsaGNWbGRLcmMwYjh4aG9BM3N0Yi1ON1c1Z2lZaHVRcUZ0YjR0TnJScEp6Sm1GYXJtQXVEUjhDS204YnN3Z2lYbTBfUGVTM0pYNkV1S0UtYzVjY2Q2Z2VyUE1jUW01dl9mN0ZYT0pma0NGcXlQTUluSGdlMnRKRXFrbmR4NExmZmtWVFUwM1dmNXVPeTBncVBCdG5sQXItQVJ1N0wxZDBLMG40ZE1YcTFoS1R6b1Y0MXRrWVgxRE9sQUFPTEFLQVhR
    kind: Secret
    metadata:
      annotations:
        kubernetes.io/service-account.name: sa-test
        kubernetes.io/service-account.uid: 643de453-5e3f-467b-9f5b-cae566544d98
      creationTimestamp: "2023-11-10T15:03:11Z"
      name: sa-test-token-kdzjj
      namespace: default
      resourceVersion: "314819"
      uid: 04ae1444-f9d3-4655-b262-49a122432f0c
    type: kubernetes.io/service-account-token
    [root@k8s-master learning-k8s]#
    

    The token in the Secret is base64-encoded (encoded, not encrypted), so copy it out and decode it with the base64 command:

    [root@k8s-master learning-k8s]# token="ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklsOWZjakpCVFZwR2JEaHJkM1ZDU20wek5VODNRalZDTFhaYVIyRnFVbUp4ZFc1QlpIWkdXVlZFYjAwaWZRLmV5SnBjM01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlPaUprWldaaGRXeDBJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5elpXTnlaWFF1Ym1GdFpTSTZJbk5oTFhSbGMzUXRkRzlyWlc0dGEyUjZhbW9pTENKcmRXSmxjbTVsZEdWekxtbHZMM05sY25acFkyVmhZMk52ZFc1MEwzTmxjblpwWTJVdFlXTmpiM1Z1ZEM1dVlXMWxJam9pYzJFdGRHVnpkQ0lzSW10MVltVnlibVYwWlhNdWFXOHZjMlZ5ZG1salpXRmpZMjkxYm5RdmMyVnlkbWxqWlMxaFkyTnZkVzUwTG5WcFpDSTZJalkwTTJSbE5EVXpMVFZsTTJZdE5EWTNZaTA1WmpWaUxXTmhaVFUyTmpVME5HUTVPQ0lzSW5OMVlpSTZJbk41YzNSbGJUcHpaWEoyYVdObFlXTmpiM1Z1ZERwa1pXWmhkV3gwT25OaExYUmxjM1FpZlEuVTl3T3RWZmRhSDZwZ2JiSDRvbVNmNWtxVzRHdHFORm1oNU1aZ19saU9nbEN5cllWTmVvc0VBZWJZOWpuWGM4REFuVG9uTlpIRkl3Q3hDc2pvM3NaMHNDMk00Y2VmOEpsNDByRnpiSnhXbW9OWm1la3l0YlZyT2ZhTXdLMkttWTgxM29ncGhmMnh2WVhqNnY4MU1xZjNsaGNWbGRLcmMwYjh4aG9BM3N0Yi1ON1c1Z2lZaHVRcUZ0YjR0TnJScEp6Sm1GYXJtQXVEUjhDS204YnN3Z2lYbTBfUGVTM0pYNkV1S0UtYzVjY2Q2Z2VyUE1jUW01dl9mN0ZYT0pma0NGcXlQTUluSGdlMnRKRXFrbmR4NExmZmtWVFUwM1dmNXVPeTBncVBCdG5sQXItQVJ1N0wxZDBLMG40ZE1YcTFoS1R6b1Y0MXRrWVgxRE9sQUFPTEFLQVhR"
    [root@k8s-master learning-k8s]#
    [root@k8s-master learning-k8s]# echo $token | base64 -d && echo
    eyJhbGciOiJSUzI1NiIsImtpZCI6Il9fcjJBTVpGbDhrd3VCSm0zNU83QjVCLXZaR2FqUmJxdW5BZHZGWVVEb00ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InNhLXRlc3QtdG9rZW4ta2R6amoiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoic2EtdGVzdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjY0M2RlNDUzLTVlM2YtNDY3Yi05ZjViLWNhZTU2NjU0NGQ5OCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OnNhLXRlc3QifQ.U9wOtVfdaH6pgbbH4omSf5kqW4GtqNFmh5MZg_liOglCyrYVNeosEAebY9jnXc8DAnTonNZHFIwCxCsjo3sZ0sC2M4cef8Jl40rFzbJxWmoNZmekytbVrOfaMwK2KmY813ogphf2xvYXj6v81Mqf3lhcVldKrc0b8xhoA3stb-N7W5giYhuQqFtb4tNrRpJzJmFarmAuDR8CKm8bswgiXm0_PeS3JX6EuKE-c5ccd6gerPMcQm5v_f7FXOJfkCFqyPMInHge2tJEqkndx4LffkVTU03Wf5uOy0gqPBtnlAr-ARu7L1d0K0n4dMXq1hKTzoV41tkYX1DOlAAOLAKAXQ
    [root@k8s-master learning-k8s]#
    

    Using this token from node02 to reach the apiserver, the error shows authentication passed but authorization was denied:

    [root@k8s-node02 ~]# token="eyJhbGciOiJSUzI1NiIsImtpZCI6Il9fcjJBTVpGbDhrd3VCSm0zNU83QjVCLXZaR2FqUmJxdW5BZHZGWVVEb00ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InNhLXRlc3QtdG9rZW4ta2R6amoiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoic2EtdGVzdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjY0M2RlNDUzLTVlM2YtNDY3Yi05ZjViLWNhZTU2NjU0NGQ5OCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OnNhLXRlc3QifQ.U9wOtVfdaH6pgbbH4omSf5kqW4GtqNFmh5MZg_liOglCyrYVNeosEAebY9jnXc8DAnTonNZHFIwCxCsjo3sZ0sC2M4cef8Jl40rFzbJxWmoNZmekytbVrOfaMwK2KmY813ogphf2xvYXj6v81Mqf3lhcVldKrc0b8xhoA3stb-N7W5giYhuQqFtb4tNrRpJzJmFarmAuDR8CKm8bswgiXm0_PeS3JX6EuKE-c5ccd6gerPMcQm5v_f7FXOJfkCFqyPMInHge2tJEqkndx4LffkVTU03Wf5uOy0gqPBtnlAr-ARu7L1d0K0n4dMXq1hKTzoV41tkYX1DOlAAOLAKAXQ"
    [root@k8s-node02 ~]#
    [root@k8s-node02 ~]# kubectl --insecure-skip-tls-verify=true -s https://192.168.62.131:6443 --token=${token} get pods
    Error from server (Forbidden): pods is forbidden: User "system:serviceaccount:default:sa-test" cannot list resource "pods" in API group "" in the namespace "default"
    [root@k8s-node02 ~]#
    

    The ServiceAccount can be granted permissions with a clusterrolebinding:

    # List the service accounts
    [root@k8s-master learning-k8s]# kubectl get sa
    NAME      SECRETS   AGE
    default   1         14d
    mysa      1         65m
    sa-test   1         48m
    # Create a clusterrolebinding that binds the built-in admin clusterrole
    [root@k8s-master learning-k8s]#
    [root@k8s-master learning-k8s]# kubectl create clusterrolebinding ClusterRoleSaTest --clusterrole=admin --serviceaccount=default:sa-test --dry-run=client -oyaml > ClusterRoleSaTest.yaml
    [root@k8s-master learning-k8s]# kubectl apply -f ClusterRoleSaTest.yaml
    clusterrolebinding.rbac.authorization.k8s.io/ClusterRoleSaTest created
    [root@k8s-master learning-k8s]#
    # The SA's auto-generated token is the same Secret dumped earlier
    # (kubectl get secrets sa-test-token-kdzjj -oyaml, decoded with base64 -d).
    

    Test access to the apiserver from node02 with this token

    # Access succeeds: authenticated and authorized
    [root@k8s-node02 ~]# kubectl --insecure-skip-tls-verify=true -s https://192.168.62.131:6443 --token=${token} get pods
    NAME                           READY   STATUS    RESTARTS       AGE
    demoapp-v10-766d4bc975-r7dfn   1/1     Running   2 (2d3h ago)   6d1h
    demoapp-v11-5bdc87cb7f-7zq2g   1/1     Running   2 (2d3h ago)   6d1h
    [root@k8s-node02 ~]#
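
    Side note: the auto-created token Secret relied on above is behavior of this 1.23 cluster; from Kubernetes 1.24 onward, such Secrets are no longer generated automatically, and a short-lived token is requested instead:

    kubectl create token sa-test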
    