Kubernetes Binary Single-Master Deployment

1. Environment

  • Load balancers

    Load Balance(Master): 20.0.0.150/24

    Load Balance(Backup): 20.0.0.160/24

  • Master nodes

    Master01: 20.0.0.110/24

    Master02: 20.0.0.120/24

  • Node (worker) nodes

    Node01: 20.0.0.130/24

    Node02: 20.0.0.140/24

  • Harbor private registry

    Registry: 20.0.0.166/24

Kubernetes release downloads: https://github.com/kubernetes/kubernetes/releases?after=v1.13.1

Certificates used by each component:

  • etcd: uses ca.pem, server-key.pem, server.pem;
  • kube-apiserver: uses ca.pem, server-key.pem, server.pem;
  • kubelet: uses ca.pem;
  • kube-proxy: uses ca.pem, kube-proxy-key.pem, kube-proxy.pem;
  • kubectl: uses ca.pem, admin-key.pem, admin.pem;
  • kube-controller-manager: uses ca-key.pem, ca.pem

2. K8S Deployment Layout

  • Master (20.0.0.110/24): kube-apiserver, kube-controller-manager, kube-scheduler, etcd
  • Node01 (20.0.0.130/24): kubelet, kube-proxy, docker, flannel, etcd
  • Node02 (20.0.0.140/24): kubelet, kube-proxy, docker, flannel, etcd

3. Deploying etcd

3.1 Operations on the Master

  • Creating the etcd certificates
    [root@localhost ~]# hostnamectl set-hostname master01		(20.0.0.110)
    [root@localhost ~]# su
    [root@master01 ~]# 
    [root@localhost ~]# hostnamectl set-hostname node01		(20.0.0.130)
    [root@localhost ~]# su
    [root@node01 ~]# 
    [root@localhost ~]# hostnamectl set-hostname node02		(20.0.0.140)
    [root@localhost ~]# su
    [root@node02 ~]#
    
    #	Create a /root/k8s/ directory to hold the packages and scripts
    [root@master01 ~]# mkdir k8s
    [root@master01 ~]# cd k8s/
    [root@master01 k8s]# ls 			(place the required packages in this directory)
    etcd-cert.sh  etcd.sh
    
    #	Create an etcd-cert/ directory to hold the generated certificates
    [root@master01 k8s]# mkdir etcd-cert
    [root@master01 k8s]# mv etcd-cert.sh etcd-cert
    
    #	Script that downloads the certificate tools
    [root@master01 k8s]# vim cfssl.sh
    curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
    curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
    curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
    chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
    
    #	Run the script to download the official cfssl binaries
    [root@master01 k8s]# bash cfssl.sh
    [root@master01 k8s]# ls /usr/local/bin/
    cfssl  cfssl-certinfo  cfssljson
    
    #	Now create the certificates. The three tools are:
    cfssl           generates certificates
    cfssljson       writes certificates from cfssl's JSON output
    cfssl-certinfo  displays certificate information
    
    [root@master01 k8s]# cd etcd-cert/
    [root@master01 etcd-cert]# vim etcd-cert.sh 
    [root@master01 etcd-cert]# bash etcd-cert.sh 
    2020/09/30 09:40:57 [INFO] generating a new CA key and certificate from CSR
    2020/09/30 09:40:57 [INFO] generate received request
    2020/09/30 09:40:57 [INFO] received CSR
    2020/09/30 09:40:57 [INFO] generating key: rsa-2048
    2020/09/30 09:40:57 [INFO] encoded CSR
    2020/09/30 09:40:57 [INFO] signed certificate with serial number 410796204422550690335906262042191550817128924636
    2020/09/30 09:40:57 [INFO] generate received request
    2020/09/30 09:40:57 [INFO] received CSR
    2020/09/30 09:40:57 [INFO] generating key: rsa-2048
    2020/09/30 09:40:57 [INFO] encoded CSR
    2020/09/30 09:40:57 [INFO] signed certificate with serial number 274902014881662587964729306950776951928266845738
    2020/09/30 09:40:57 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    
    [root@master01 etcd-cert]# ls *.pem
    ca-key.pem  ca.pem  server-key.pem  server.pem
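
    #	(Optional, not part of the original transcript) cfssl-certinfo can be used to
    #	inspect a generated certificate, e.g. to confirm the hosts field of server.pem:
    [root@master01 etcd-cert]# cfssl-certinfo -cert server.pem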
    
    
  • The etcd-cert.sh script
    #	Define the CA signing config
    cat > ca-config.json <<EOF
    {
     "signing": {
      "default": {
       "expiry": "87600h"
      },
      "profiles": {
       "www": {
         "expiry": "87600h",
         "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"   
        ] 
       } 
      }     
     }
    }
    EOF
    #	CA certificate signing request
    cat > ca-csr.json <<EOF 
    {  
      "CN": "etcd CA",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "Beijing",
          "ST": "Beijing"
        }
      ]
    }
    EOF
    #	Generate the CA: produces ca-key.pem and ca.pem
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    #	Server CSR listing the three etcd node IPs, for peer/client authentication
    cat > server-csr.json <<EOF
    {
      "CN": "etcd",
      "hosts": [
      "20.0.0.110",
      "20.0.0.130",
      "20.0.0.140"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "BeiJing",
          "ST": "BeiJing"
        }
      ]
    }
    EOF
    #	Generate the etcd server certificate: produces server-key.pem and server.pem
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
    
  • Deploying the etcd binaries
    #	Download from
    https://github.com/etcd-io/etcd/releases
    
    #	Copy the packages into the k8s directory
    [root@master01 etcd-cert]# cd ..
    [root@master01 k8s]# ls
    cfssl.sh   etcd.sh                          flannel-v0.10.0-linux-amd64.tar.gz
    etcd-cert  etcd-v3.3.10-linux-amd64.tar.gz  kubernetes-server-linux-amd64.tar.gz
    
    #	Create directories under /opt/ for configs, binaries, and certificates
    [root@master01 k8s]# mkdir /opt/etcd/{cfg,bin,ssl} -p
    
    #	Unpack the release tarball
    [root@master01 k8s]# tar zxvf etcd-v3.3.10-linux-amd64.tar.gz
    [root@master01 k8s]# mv etcd-v3.3.10-linux-amd64/etcd etcd-v3.3.10-linux-amd64/etcdctl /opt/etcd/bin/
    
    #	Copy the certificates
    [root@master01 k8s]# cp etcd-cert/*.pem /opt/etcd/ssl/
    
    #	etcd starts and blocks, waiting for the other members to join (a sketch of etcd.sh is listed at the end of this section)
    [root@master01 k8s]# bash etcd.sh etcd01 20.0.0.110 etcd02=https://20.0.0.130:2380,etcd03=https://20.0.0.140:2380
    
    #	Open another session and confirm the etcd process is running
    [root@master01 ~]# ps -ef | grep etcd
    root      22372  21700  0 09:47 pts/1    00:00:00 bash etcd.sh etcd01 20.0.0.110 etcd02=https://20.0.0.130:2380,etcd03=https://20.0.0.140:2380
    root      22420  22372  0 09:47 pts/1    00:00:00 systemctl restart etcd
    root      22426      1  1 09:47 ?        00:00:00 /opt/etcd/bin/etcd --name=etcd01 --data-dir=/var/lib/etcd/default.etcd --listen-peer-urls=https://20.0.0.110:2380 --listen-client-urls=https://20.0.0.110:2379,http://127.0.0.1:2379 --advertise-client-urls=https://20.0.0.110:2379 --initial-advertise-peer-urls=https://20.0.0.110:2380 --initial-cluster=etcd01=https://20.0.0.110:2380,etcd02=https://20.0.0.130:2380,etcd03=https://20.0.0.140:2380 --initial-cluster-token=etcd-cluster --initial-cluster-state=new --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --peer-cert-file=/opt/etcd/ssl/server.pem --peer-key-file=/opt/etcd/ssl/server-key.pem --trusted-ca-file=/opt/etcd/ssl/ca.pem --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
    root      22484  22441  0 09:47 pts/2    00:00:00 grep --color=auto etcd
    
    #	Copy the certificates and config to the other nodes
    [root@master01 k8s]# scp -r /opt/etcd/ root@20.0.0.130:/opt/
    [root@master01 k8s]# scp -r /opt/etcd/ root@20.0.0.140:/opt/
    
    #	Copy the systemd unit file to the other nodes
    [root@master01 k8s]# scp /usr/lib/systemd/system/etcd.service root@20.0.0.130:/usr/lib/systemd/system/
    [root@master01 k8s]# scp /usr/lib/systemd/system/etcd.service root@20.0.0.140:/usr/lib/systemd/system/
    
    #	Edit the config on Node01
    [root@node01 ~]# vim /opt/etcd/cfg/etcd
    #[Member]
    ETCD_NAME="etcd02"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://20.0.0.130:2380"
    ETCD_LISTEN_CLIENT_URLS="https://20.0.0.130:2379"
    
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://20.0.0.130:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://20.0.0.130:2379"
    ETCD_INITIAL_CLUSTER="etcd01=https://20.0.0.110:2380,etcd02=https://20.0.0.130:2380,etcd03=https://20.0.0.140:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    [root@node01 ~]# systemctl start etcd.service
    [root@node01 ~]# systemctl status etcd.service
    
    #	Edit the config on Node02
    [root@node02 ~]# vim /opt/etcd/cfg/etcd
    #[Member]
    ETCD_NAME="etcd03"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://20.0.0.140:2380"
    ETCD_LISTEN_CLIENT_URLS="https://20.0.0.140:2379"
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://20.0.0.140:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://20.0.0.140:2379"
    ETCD_INITIAL_CLUSTER="etcd01=https://20.0.0.110:2380,etcd02=https://20.0.0.130:2380,etcd03=https://20.0.0.140:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    [root@node02 ~]# systemctl start etcd.service
    [root@node02 ~]# systemctl status etcd.service
    
    #	Check the cluster health
    [root@master01 ~]# cd /opt/etcd/ssl
    [root@master01 ssl]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://20.0.0.110:2379,https://20.0.0.130:2379,https://20.0.0.140:2379" cluster-health
    member 13e88e37f3d86d3e is healthy: got healthy result from https://20.0.0.110:2379
    member 1fd0474b2d772f8e is healthy: got healthy result from https://20.0.0.130:2379
    member cc1bbfffdd5a9e7a is healthy: got healthy result from https://20.0.0.140:2379
    cluster is healthy
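
  • The etcd.sh script (sketch)
    (etcd.sh itself is not reproduced in the original upload; this is a minimal sketch reconstructed from the generated /opt/etcd/cfg/etcd file edited on the nodes above and the flags visible in the ps output, so treat it as illustrative rather than the exact original)
    #!/bin/bash
    # Example: bash etcd.sh etcd01 20.0.0.110 etcd02=https://20.0.0.130:2380,etcd03=https://20.0.0.140:2380
    ETCD_NAME=$1
    ETCD_IP=$2
    ETCD_CLUSTER=$3
    WORK_DIR=/opt/etcd

    # Member configuration (same format as the files edited on the nodes above)
    cat <<EOF >$WORK_DIR/cfg/etcd
    #[Member]
    ETCD_NAME="${ETCD_NAME}"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
    ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"

    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
    ETCD_INITIAL_CLUSTER="${ETCD_NAME}=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    EOF

    # systemd unit; \${VAR} is left unexpanded so systemd reads it from EnvironmentFile
    cat <<EOF >/usr/lib/systemd/system/etcd.service
    [Unit]
    Description=Etcd Server
    After=network.target

    [Service]
    Type=notify
    EnvironmentFile=${WORK_DIR}/cfg/etcd
    ExecStart=${WORK_DIR}/bin/etcd \
    --name=\${ETCD_NAME} \
    --data-dir=\${ETCD_DATA_DIR} \
    --listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
    --listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
    --advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
    --initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
    --initial-cluster=\${ETCD_INITIAL_CLUSTER} \
    --initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
    --initial-cluster-state=\${ETCD_INITIAL_CLUSTER_STATE} \
    --cert-file=${WORK_DIR}/ssl/server.pem \
    --key-file=${WORK_DIR}/ssl/server-key.pem \
    --peer-cert-file=${WORK_DIR}/ssl/server.pem \
    --peer-key-file=${WORK_DIR}/ssl/server-key.pem \
    --trusted-ca-file=${WORK_DIR}/ssl/ca.pem \
    --peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
    Restart=on-failure
    LimitNOFILE=65536

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload
    systemctl enable etcd
    systemctl restart etcd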
    

3.2 Operations on Node01 and Node02

  • Deploying the Docker engine
    #	Install dependency packages
    yum -y install yum-utils device-mapper-persistent-data lvm2
    
    #	Add the Aliyun Docker CE repo
    yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    
    #	Install Docker CE
    yum -y install docker-ce
    
    #	Disable the firewall and SELinux
    systemctl stop firewalld
    systemctl disable firewalld
    setenforce 0
    sed -i "/^SELINUX=/s/enforcing/disabled/" /etc/selinux/config
    
    #	Start Docker and enable it at boot
    systemctl start docker.service
    systemctl enable docker.service
    
    #	Configure a registry mirror
    tee /etc/docker/daemon.json <<-'EOF'
    {
      "registry-mirrors": ["https://oxjoh3ip.mirror.aliyuncs.com"]
    }
    EOF
    systemctl daemon-reload
    systemctl restart docker.service
    
    #	Network tuning: enable IP forwarding
    echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
    sysctl -p
    service network restart
    systemctl restart docker.service
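
    #	(Optional, not part of the original transcript) one way to confirm the mirror took effect:
    docker info | grep -A 1 'Registry Mirrors'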
    
  • Configuring the Flannel network (node01 and node02)
    #	Write the Pod network range into etcd for Flannel to use
    [root@node01 ~]# cd /opt/etcd/ssl/
    [root@node01 ssl]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://20.0.0.110:2379,https://20.0.0.130:2379,https://20.0.0.140:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
    
    { "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}
    
    #	Read back the stored value
    [root@node02 ~]# cd /opt/etcd/ssl/
    [root@node02 ssl]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://20.0.0.110:2379,https://20.0.0.130:2379,https://20.0.0.140:2379" get /coreos.com/network/config
    
    { "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}
    
    #	Copy the Flannel package to every Node machine (Flannel only needs to run on the Nodes)
    [root@master01 ssl]# scp /root/k8s/flannel-v0.10.0-linux-amd64.tar.gz root@20.0.0.130:/root
    
    root@20.0.0.130's password: 
    flannel-v0.10.0-linux-amd64.tar.gz                                                        100% 9479KB  81.1MB/s   00:00    
    
    [root@master01 ssl]# scp /root/k8s/flannel-v0.10.0-linux-amd64.tar.gz root@20.0.0.140:/root
    
    root@20.0.0.140's password: 
    flannel-v0.10.0-linux-amd64.tar.gz                                                        100% 9479KB  73.1MB/s   00:00
    
    #	Unpack on the Nodes (node01 and node02)
    [root@node01 ssl]# cd
    [root@node01 ~]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz 
    flanneld
    mk-docker-opts.sh
    README.md
    
    #	Create the /opt/kubernetes/ working directories (node01 and node02)
    [root@node01 ~]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p
    [root@node01 ~]# mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/
    
    #	Start the Flannel network (node01 and node02; the flannel.sh script is listed at the end of this section)
    [root@node01 ~]# bash flannel.sh https://20.0.0.110:2379,https://20.0.0.130:2379,https://20.0.0.140:2379
    
    Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
    
    #	Point Docker at Flannel's subnet (node01 and node02)
    [root@node01 ~]# vim /usr/lib/systemd/system/docker.service
    [Service]
    Type=notify
    # the default is not to use systemd for cgroups because the delegate issues still
    # exists and systemd currently does not support the cgroup feature set required
    # for containers run by docker
    EnvironmentFile=/run/flannel/subnet.env
    ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock
    ExecReload=/bin/kill -s HUP $MAINPID
    TimeoutSec=0
    RestartSec=2
    Restart=always
     
    [root@node01 ~]# cat /run/flannel/subnet.env
    DOCKER_OPT_BIP="--bip=172.17.42.1/24"
    DOCKER_OPT_IPMASQ="--ip-masq=false"
    DOCKER_OPT_MTU="--mtu=1450"
    (note: --bip sets the bridge subnet Docker uses at startup)
    DOCKER_NETWORK_OPTIONS=" --bip=172.17.42.1/24 --ip-masq=false --mtu=1450" 
    
    #	Reload and restart Docker (node01 and node02)
    [root@node01 ~]# systemctl daemon-reload
    [root@node01 ~]# systemctl restart docker.service
    
    #	Inspect the flannel.1 interface (node01 and node02)
    [root@node01 ~]# ifconfig
    flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
            inet 172.17.67.0  netmask 255.255.255.255  broadcast 0.0.0.0
            inet6 fe80::7c1e:b7ff:fec9:f38  prefixlen 64  scopeid 0x20<link>
            ether 7e:1e:b7:c9:0f:38  txqueuelen 0  (Ethernet)
            RX packets 0  bytes 0 (0.0 B)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 0  bytes 0 (0.0 B)
            TX errors 0  dropped 26 overruns 0  carrier 0  collisions 0
    
    #	Start a test container on each node; if the containers can ping across the nodes'
    #	docker0 subnets, Flannel is routing between them (node01 and node02).
    #	The yum/ifconfig commands below run inside the container:
    [root@node01 ~]# docker run -it centos:7 /bin/bash
    [root@<container-id> /]# yum install net-tools -y
    [root@<container-id> /]# ifconfig
    eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
            inet 172.17.67.2  netmask 255.255.255.0  broadcast 172.17.67.255
            ether 02:42:ac:11:43:02  txqueuelen 0  (Ethernet)
            RX packets 15977  bytes 12475359 (11.8 MiB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 7402  bytes 404638 (395.1 KiB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    
    #	Finally, verify the centos:7 containers on the two nodes can ping each other (node01 and node02); see the example below
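
    #	(Illustrative, not from the original transcript) supposing node02's container
    #	received an address in node02's Flannel subnet, say 172.17.32.2 (hypothetical),
    #	then from inside node01's container:
    [root@<container-id> /]# ping 172.17.32.2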
    
  • The flannel.sh script
    #!/bin/bash
    ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}			(in production, pass the real etcd endpoints, e.g. https://IP:2379)
    cat <<EOF >/opt/kubernetes/cfg/flanneld
    FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
    -etcd-cafile=/opt/etcd/ssl/ca.pem \
    -etcd-certfile=/opt/etcd/ssl/server.pem \
    -etcd-keyfile=/opt/etcd/ssl/server-key.pem"
    EOF
    cat <<EOF >/usr/lib/systemd/system/flanneld.service
    [Unit]
    Description=Flanneld overlay address etcd agent
    After=network-online.target network.target
    Before=docker.service
    [Service]
    Type=notify
    EnvironmentFile=/opt/kubernetes/cfg/flanneld
    ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
    ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
    Restart=on-failure
    [Install]
    WantedBy=multi-user.target
    EOF
    systemctl daemon-reload
    systemctl enable flanneld
    systemctl restart flanneld
    

4. Deploying the Master

  • On the Master: generate the apiserver certificates
  #	Upload master.zip
  [root@master01 k8s]# ls
  cfssl.sh   etcd.sh                   etcd-v3.3.10-linux-amd64.tar.gz     kubernetes-server-linux-amd64.tar.gz
  etcd-cert  etcd-v3.3.10-linux-amd64  flannel-v0.10.0-linux-amd64.tar.gz  master.zip
  [root@master01 k8s]# mkdir master
  [root@master01 k8s]# unzip master.zip -d /root/k8s/master/
  Archive:  master.zip
    inflating: /root/k8s/master/apiserver.sh  
    inflating: /root/k8s/master/controller-manager.sh  
    inflating: /root/k8s/master/scheduler.sh           
  [root@master01 k8s]# cd master/
  [root@master01 master]# ls
  apiserver.sh  controller-manager.sh  scheduler.sh
  [root@master01 master]# chmod +x apiserver.sh controller-manager.sh scheduler.sh 
  [root@master01 master]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p
  [root@master01 master]# cd ..
  [root@master01 k8s]# mkdir k8s-cert
  [root@master01 k8s]# cd k8s-cert/
  [root@master01 k8s-cert]# ls
  k8s-cert.sh
  
  #	Generate the k8s certificates
  [root@master01 k8s-cert]# bash k8s-cert.sh 
  [root@master01 k8s-cert]# ls *.pem
  admin-key.pem  admin.pem  ca-key.pem  ca.pem  kube-proxy-key.pem  kube-proxy.pem  server-key.pem  server.pem
  [root@master01 k8s-cert]# cp ca*pem server*pem /opt/kubernetes/ssl/
  
  #	Unpack the kubernetes server tarball and enter the binaries directory
  [root@master01 k8s]# tar zxvf kubernetes-server-linux-amd64.tar.gz
  [root@master01 k8s]# cd kubernetes/server/bin/
  [root@master01 bin]# ls
  apiextensions-apiserver              kube-apiserver                      kubectl                kube-scheduler.docker_tag
  cloud-controller-manager             kube-apiserver.docker_tag           kubelet                kube-scheduler.tar
  cloud-controller-manager.docker_tag  kube-apiserver.tar                  kube-proxy             mounter
  cloud-controller-manager.tar         kube-controller-manager             kube-proxy.docker_tag
  hyperkube                            kube-controller-manager.docker_tag  kube-proxy.tar
  kubeadm                              kube-controller-manager.tar         kube-scheduler
  
  #	Copy the key binaries
  [root@master01 bin]# cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
  [root@master01 bin]# vim /opt/kubernetes/cfg/token.csv
  75a331cb63be986d8fa074543ecbf29c,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
  #	Format: token,user,uid,group
  #	A random token can be generated with: head -c 16 /dev/urandom | od -An -t x | tr -d ' '
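
  #	(Sketch, not in the original transcript) a fresh token.csv could be generated in one go:
  BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
  cat > /opt/kubernetes/cfg/token.csv <<EOF
  ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
  EOF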
  
  #	With the binaries, token, and certificates in place, start the apiserver (a sketch of apiserver.sh is listed after the k8s-cert.sh script below)
  [root@master01 bin]# cd ../../../master/
  [root@master01 master]# bash apiserver.sh 20.0.0.110 https://20.0.0.110:2379,https://20.0.0.130:2379,https://20.0.0.140:2379
  
  Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
  
  #	Verify the process started successfully
  [root@master01 k8s]# ps aux | grep kube
  root      23639  104  8.1 399804 313804 ?       Ssl  10:36   0:06 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://20.0.0.110:2379,https://20.0.0.130:2379,https://20.0.0.140:2379 --bind-address=20.0.0.110 --secure-port=6443 --advertise-address=20.0.0.110 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
  root      23655  0.0  0.0 112724   988 pts/1    S+   10:36   0:00 grep --color=auto kube
  
  #	Inspect the generated config file
  [root@master01 master]# cat /opt/kubernetes/cfg/kube-apiserver
  
  KUBE_APISERVER_OPTS="--logtostderr=true \
  --v=4 \
  --etcd-servers=https://20.0.0.110:2379,https://20.0.0.130:2379,https://20.0.0.140:2379 \
  --bind-address=20.0.0.110 \
  --secure-port=6443 \
  --advertise-address=20.0.0.110 \
  --allow-privileged=true \
  --service-cluster-ip-range=10.0.0.0/24 \
  --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
  --authorization-mode=RBAC,Node \
  --kubelet-https=true \
  --enable-bootstrap-token-auth \
  --token-auth-file=/opt/kubernetes/cfg/token.csv \
  --service-node-port-range=30000-50000 \
  --tls-cert-file=/opt/kubernetes/ssl/server.pem  \
  --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
  --client-ca-file=/opt/kubernetes/ssl/ca.pem \
  --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --etcd-cafile=/opt/etcd/ssl/ca.pem \
  --etcd-certfile=/opt/etcd/ssl/server.pem \
  --etcd-keyfile=/opt/etcd/ssl/server-key.pem"
  
  #	The apiserver listens on HTTPS port 6443 (and plain HTTP on 127.0.0.1:8080)
  [root@master01 master]# netstat -ntap | grep 6443
  tcp        0      0 20.0.0.110:6443         0.0.0.0:*               LISTEN      23639/kube-apiserve 
  tcp        0      0 20.0.0.110:6443         20.0.0.110:36806        ESTABLISHED 23639/kube-apiserve 
  tcp        0      0 20.0.0.110:36806        20.0.0.110:6443         ESTABLISHED 23639/kube-apiserve 
  [root@master01 master]# netstat -ntap | grep 8080
  tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      23639/kube-apiserve 
  
  #	Start the scheduler (the session is already in /root/k8s/master/; a sketch of scheduler.sh and controller-manager.sh follows this section)
  [root@master01 master]# ./scheduler.sh 127.0.0.1
  
  Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
  
  [root@master01 master]# ps aux | grep kube
  root      23639  6.1  8.1 399804 314000 ?       Ssl  10:36   0:08 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://20.0.0.110:2379,https://20.0.0.130:2379,https://20.0.0.140:2379 --bind-address=20.0.0.110 --secure-port=6443 --advertise-address=20.0.0.110 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
  root      23733  1.4  0.5  46128 19692 ?        Ssl  10:37   0:00 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect
  root      23756  0.0  0.0 112724   984 pts/1    S+   10:38   0:00 grep --color=auto kube
  
  #	Start the controller-manager
  [root@master01 master]# ./controller-manager.sh 127.0.0.1
  Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
  
  #	Check the status of the master components (the expected output appears in section 5 below)
  [root@master01 master]# /opt/kubernetes/bin/kubectl get cs
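
  • The scheduler.sh and controller-manager.sh scripts (sketch)
    (neither script is reproduced in the original upload; the sketch below matches the kube-scheduler flags visible in the ps output above, and controller-manager.sh follows the same pattern, writing /opt/kubernetes/cfg/kube-controller-manager and a matching unit file)
    #!/bin/bash
    # Usage: ./scheduler.sh 127.0.0.1
    MASTER_ADDRESS=$1

    cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
    KUBE_SCHEDULER_OPTS="--logtostderr=true \\
    --v=4 \\
    --master=${MASTER_ADDRESS}:8080 \\
    --leader-elect"
    EOF

    cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
    [Unit]
    Description=Kubernetes Scheduler

    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
    ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload
    systemctl enable kube-scheduler
    systemctl restart kube-scheduler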
  
  • The k8s-cert.sh script
    cat > ca-config.json <<EOF
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "kubernetes": {
             "expiry": "87600h",
             "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ]
          }
        }
      }
    }
    EOF
    
    cat > ca-csr.json <<EOF
    {
        "CN": "kubernetes",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Beijing",
                "ST": "Beijing",
          	    "O": "k8s",
                "OU": "System"
            }
        ]
    }
    EOF
    
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    #-----------------------
    #	The hosts list must include every address the apiserver can be reached at:
    #	master1 (20.0.0.110), master2 (20.0.0.120), the VIP (20.0.0.100), and the
    #	load balancers (20.0.0.150 master, 20.0.0.160 backup). JSON does not allow
    #	inline comments, so do not leave annotations inside the array.
    cat > server-csr.json <<EOF
    {
        "CN": "kubernetes",
        "hosts": [
          "10.0.0.1",
          "127.0.0.1",
          "20.0.0.110",
          "20.0.0.120",
          "20.0.0.100",
          "20.0.0.150",
          "20.0.0.160",
          "kubernetes",
          "kubernetes.default",
          "kubernetes.default.svc",
          "kubernetes.default.svc.cluster",
          "kubernetes.default.svc.cluster.local"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "BeiJing",
                "ST": "BeiJing",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }
    EOF
    
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
    #-----------------------
    
    cat > admin-csr.json <<EOF
    {
      "CN": "admin",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "BeiJing",
          "ST": "BeiJing",
          "O": "system:masters",
          "OU": "System"
        }
      ]
    }
    EOF
    
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
    #-----------------------
    
    cat > kube-proxy-csr.json <<EOF
    {
      "CN": "system:kube-proxy",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "BeiJing",
          "ST": "BeiJing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    EOF
    
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
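
  • The apiserver.sh script (sketch)
    (apiserver.sh is not reproduced in the original upload; this sketch is reconstructed from the generated /opt/kubernetes/cfg/kube-apiserver file shown above, so treat it as illustrative rather than the exact original)
    #!/bin/bash
    # Usage: bash apiserver.sh 20.0.0.110 https://20.0.0.110:2379,https://20.0.0.130:2379,https://20.0.0.140:2379
    MASTER_ADDRESS=$1
    ETCD_SERVERS=$2

    cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
    KUBE_APISERVER_OPTS="--logtostderr=true \\
    --v=4 \\
    --etcd-servers=${ETCD_SERVERS} \\
    --bind-address=${MASTER_ADDRESS} \\
    --secure-port=6443 \\
    --advertise-address=${MASTER_ADDRESS} \\
    --allow-privileged=true \\
    --service-cluster-ip-range=10.0.0.0/24 \\
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
    --authorization-mode=RBAC,Node \\
    --kubelet-https=true \\
    --enable-bootstrap-token-auth \\
    --token-auth-file=/opt/kubernetes/cfg/token.csv \\
    --service-node-port-range=30000-50000 \\
    --tls-cert-file=/opt/kubernetes/ssl/server.pem \\
    --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
    --client-ca-file=/opt/kubernetes/ssl/ca.pem \\
    --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
    --etcd-cafile=/opt/etcd/ssl/ca.pem \\
    --etcd-certfile=/opt/etcd/ssl/server.pem \\
    --etcd-keyfile=/opt/etcd/ssl/server-key.pem"
    EOF

    cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
    [Unit]
    Description=Kubernetes API Server

    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
    ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload
    systemctl enable kube-apiserver
    systemctl restart kube-apiserver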
    
    

5. Deploying the Nodes

  • On the Master
    #	Copy the kubelet and kube-proxy binaries to the nodes
    [root@master01 master]# cd ../kubernetes/server/bin/
    [root@master01 bin]# scp kubelet kube-proxy root@20.0.0.130:/opt/kubernetes/bin/
    [root@master01 bin]# scp kubelet kube-proxy root@20.0.0.140:/opt/kubernetes/bin/
    
    
  • On the Master
    [root@master01 bin]# cd ../../../
    [root@master01 k8s]# mkdir kubeconfig
    [root@master01 k8s]# cd kubeconfig/
    
    #	Copy the kubeconfig.sh script here and rename it
    [root@master01 kubeconfig]# mv kubeconfig.sh kubeconfig
    
    #	Get the token from token.csv
    [root@master01 kubeconfig]# cat /opt/kubernetes/cfg/token.csv 
    75a331cb63be986d8fa074543ecbf29c,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    
    #	Set that token in the kubeconfig script
    [root@master01 kubeconfig]# vim kubeconfig
    # Set client authentication parameters
    kubectl config set-credentials kubelet-bootstrap \
      --token=75a331cb63be986d8fa074543ecbf29c \		(update the token on this line)
      --kubeconfig=bootstrap.kubeconfig
    
    #	Add /opt/kubernetes/bin to PATH (persisted in /etc/profile)
    [root@master01 kubeconfig]# vim /etc/profile
    ...
    export PATH=$PATH:/opt/kubernetes/bin/
    [root@master01 kubeconfig]# source /etc/profile
    
    #	Check the health of the control-plane components and etcd
    [root@master01 kubeconfig]# kubectl get cs
    NAME                 STATUS    MESSAGE             ERROR
    controller-manager   Healthy   ok                  
    scheduler            Healthy   ok                  
    etcd-0               Healthy   {"health":"true"}   
    etcd-1               Healthy   {"health":"true"}   
    etcd-2               Healthy   {"health":"true"} 
    
    #	Generate the kubeconfig files. Pass the directory that contains ALL the pems,
    #	including kube-proxy.pem (here the k8s-cert directory); passing /opt/kubernetes/ssl/,
    #	which only holds the ca/server pems, fails with
    #	"error reading client-certificate data ... kube-proxy.pem: no such file or directory"
    [root@master01 kubeconfig]# bash kubeconfig 20.0.0.110 /root/k8s/k8s-cert/
    Cluster "kubernetes" set.
    User "kubelet-bootstrap" set.
    Context "default" created.
    Switched to context "default".
    Cluster "kubernetes" set.
    User "kube-proxy" set.
    Context "default" created.
    Switched to context "default".
    [root@master01 kubeconfig]# ls
    bootstrap.kubeconfig  kubeconfig  kube-proxy.kubeconfig
    
    #	Copy the kubeconfig files to the nodes
    [root@master01 kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@20.0.0.130:/opt/kubernetes/cfg/
    [root@master01 kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@20.0.0.140:/opt/kubernetes/cfg/
    
    #	Create the bootstrap ClusterRoleBinding so kubelets can request certificate signing from the apiserver (critical)
    [root@master01 kubeconfig]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
    
    clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
    
  • The kubeconfig.sh script
    
    APISERVER=$1
    SSL_DIR=$2
    
    # Create the kubelet bootstrapping kubeconfig
    export KUBE_APISERVER="https://$APISERVER:6443"
    
    # Set cluster parameters
    kubectl config set-cluster kubernetes \
      --certificate-authority=$SSL_DIR/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=bootstrap.kubeconfig
    
    # Set client authentication parameters
    # (BOOTSTRAP_TOKEN must hold the token from token.csv; export it first, or hardcode it as shown earlier)
    kubectl config set-credentials kubelet-bootstrap \
      --token=${BOOTSTRAP_TOKEN} \
      --kubeconfig=bootstrap.kubeconfig
    
    # Set context parameters
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kubelet-bootstrap \
      --kubeconfig=bootstrap.kubeconfig
    
    # Use the default context
    kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
    
    #----------------------
    
    # Create the kube-proxy kubeconfig file
    
    kubectl config set-cluster kubernetes \
      --certificate-authority=$SSL_DIR/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config set-credentials kube-proxy \
      --client-certificate=$SSL_DIR/kube-proxy.pem \
      --client-key=$SSL_DIR/kube-proxy-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kube-proxy \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
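
    #	(Optional, not part of the original transcript) either generated file can be inspected with:
    kubectl config view --kubeconfig=bootstrap.kubeconfig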
    
  • On Node01
    #	Copy node.zip to the /root/ directory and unpack it
    [root@node01 ~]# ls
    node.zip
    
    #	Unpacking node.zip yields kubelet.sh and proxy.sh
    [root@node01 ~]# unzip node.zip
    
    #	Start the kubelet (a sketch of kubelet.sh follows this section)
    [root@node01 ~]# bash kubelet.sh 20.0.0.130
    Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
    
    #	Verify the kubelet is running
    [root@node01 ~]# ps aux | grep kube
    root      24092  0.0  0.4 325908 18680 ?        Ssl  10:06   0:01 /opt/kubernetes/bin/flanneld --ip-masq --etcd-endpoints=https://20.0.0.110:2379,https://20.0.0.130:2379,https://20.0.0.140:2379 -etcd-cafile=/opt/etcd/ssl/ca.pem -etcd-certfile=/opt/etcd/ssl/server.pem -etcd-keyfile=/opt/etcd/ssl/server-key.pem
    root      84898  3.3  1.1 476704 44624 ?        Ssl  10:55   0:00 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --hostname-override=20.0.0.130 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig --config=/opt/kubernetes/cfg/kubelet.config --cert-dir=/opt/kubernetes/ssl --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
    root      84929  0.0  0.0 112724   984 pts/1    S+   10:55   0:00 grep --color=auto kube
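
  • The kubelet.sh script (sketch)
    (kubelet.sh is not reproduced in the original upload; this sketch is reconstructed from the kubelet and kubelet.config files shown in the Node02 section below, so treat it as illustrative rather than the exact original)
    #!/bin/bash
    # Usage: bash kubelet.sh 20.0.0.130
    NODE_ADDRESS=$1
    DNS_SERVER_IP=${2:-"10.0.0.2"}

    cat <<EOF >/opt/kubernetes/cfg/kubelet
    KUBELET_OPTS="--logtostderr=true \\
    --v=4 \\
    --hostname-override=${NODE_ADDRESS} \\
    --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
    --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
    --config=/opt/kubernetes/cfg/kubelet.config \\
    --cert-dir=/opt/kubernetes/ssl \\
    --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
    EOF

    cat <<EOF >/opt/kubernetes/cfg/kubelet.config
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: ${NODE_ADDRESS}
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS:
    - ${DNS_SERVER_IP}
    clusterDomain: cluster.local.
    failSwapOn: false
    authentication:
      anonymous:
        enabled: true
    EOF

    cat <<EOF >/usr/lib/systemd/system/kubelet.service
    [Unit]
    Description=Kubernetes Kubelet
    After=docker.service
    Requires=docker.service

    [Service]
    EnvironmentFile=/opt/kubernetes/cfg/kubelet
    ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
    Restart=on-failure
    KillMode=process

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload
    systemctl enable kubelet
    systemctl restart kubelet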
    
    
  • On the Master
    #	The certificate signing request from node01 appears
    [root@master01 kubeconfig]# kubectl get csr
    NAME                                                   AGE   REQUESTOR           CONDITION
    node-csr-0K7kO9QDqs1DGi-YBGyLii_p1WO7TN_JQw6Kd8Dd6Ws   76s   kubelet-bootstrap   Pending (waiting for the cluster to issue the node's certificate)
    
    #	Approve the certificate manually
    [root@master01 kubeconfig]# kubectl certificate approve node-csr-0K7kO9QDqs1DGi-YBGyLii_p1WO7TN_JQw6Kd8Dd6Ws
    certificatesigningrequest.certificates.k8s.io/node-csr-0K7kO9QDqs1DGi-YBGyLii_p1WO7TN_JQw6Kd8Dd6Ws approved
    
    #	Check the CSR status again
    [root@master01 kubeconfig]# kubectl get csr
    NAME                                                   AGE     REQUESTOR           CONDITION
    node-csr-0K7kO9QDqs1DGi-YBGyLii_p1WO7TN_JQw6Kd8Dd6Ws   2m31s   kubelet-bootstrap   Approved,Issued (admitted to the cluster)
    
    #	List the cluster nodes; node01 has joined successfully
    [root@master01 kubeconfig]# kubectl get nodes
    NAME         STATUS   ROLES    AGE    VERSION
    20.0.0.130   Ready    <none>   2m7s   v1.12.3
    
    #	Back on node01, start kube-proxy (a sketch of proxy.sh follows this section)
    [root@node01 ~]# bash proxy.sh 20.0.0.130
    Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
    [root@node01 ~]# systemctl start kube-proxy.service
    [root@node01 ~]# systemctl enable kube-proxy.service
    [root@node01 ~]# systemctl status kube-proxy.service
    ● kube-proxy.service - Kubernetes Proxy
       Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
       Active: active (running) since Wed 2020-09-30 11:00:43 CST; 35s ago
     Main PID: 86355 (kube-proxy)
       CGroup: /system.slice/kube-proxy.service
               ‣ 86355 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=20.0.0.130 --cluster-cidr=...
    
    Sep 30 11:01:16 node01 kube-proxy[86355]: I0930 11:01:16.700913   86355 reflector.go:169] Listing and watching *v1...go:131
    Sep 30 11:01:16 node01 kube-proxy[86355]: E0930 11:01:16.702155   86355 reflector.go:134] k8s.io/client-go/informe... scope
    Sep 30 11:01:17 node01 kube-proxy[86355]: I0930 11:01:17.701195   86355 reflector.go:169] Listing and watching *v1...go:131
    Sep 30 11:01:17 node01 kube-proxy[86355]: I0930 11:01:17.702266   86355 reflector.go:169] Listing and watching *v1...go:131
    Sep 30 11:01:17 node01 kube-proxy[86355]: E0930 11:01:17.702370   86355 reflector.go:134] k8s.io/client-go/informe... scope
    Sep 30 11:01:17 node01 kube-proxy[86355]: E0930 11:01:17.703219   86355 reflector.go:134] k8s.io/client-go/informe... scope
    Sep 30 11:01:18 node01 kube-proxy[86355]: I0930 11:01:18.703592   86355 reflector.go:169] Listing and watching *v1...go:131
    Sep 30 11:01:18 node01 kube-proxy[86355]: I0930 11:01:18.703590   86355 reflector.go:169] Listing and watching *v1...go:131
    Sep 30 11:01:18 node01 kube-proxy[86355]: E0930 11:01:18.704671   86355 reflector.go:134] k8s.io/client-go/informe... scope
    Sep 30 11:01:18 node01 kube-proxy[86355]: E0930 11:01:18.704671   86355 reflector.go:134] k8s.io/client-go/informe... scope
    Hint: Some lines were ellipsized, use -l to show in full.
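
  • The proxy.sh script (sketch)
    (proxy.sh is not reproduced in the original upload; this sketch is reconstructed from the kube-proxy config file shown in the Node02 section below, so treat it as illustrative rather than the exact original)
    #!/bin/bash
    # Usage: bash proxy.sh 20.0.0.130
    NODE_ADDRESS=$1

    cat <<EOF >/opt/kubernetes/cfg/kube-proxy
    KUBE_PROXY_OPTS="--logtostderr=true \\
    --v=4 \\
    --hostname-override=${NODE_ADDRESS} \\
    --cluster-cidr=10.0.0.0/24 \\
    --proxy-mode=ipvs \\
    --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
    EOF

    cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
    [Unit]
    Description=Kubernetes Proxy
    After=network.target

    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
    ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload
    systemctl enable kube-proxy
    systemctl restart kube-proxy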
    
  • Deploying Node02
    #	On node01, copy the kubernetes working directory to node02
    [root@node01 ~]# scp -r /opt/kubernetes/ root@20.0.0.140:/opt/
    The authenticity of host '20.0.0.140 (20.0.0.140)' can't be established.
    ECDSA key fingerprint is SHA256:+3jHeqm5xkqqaUAmCEWoAuf2aHw8Y8GV40GD8uGZ6m8.
    ECDSA key fingerprint is MD5:a3:39:bb:8a:09:ac:8f:53:01:8a:84:53:14:85:d5:57.
    Are you sure you want to continue connecting (yes/no)? yes
    Warning: Permanently added '20.0.0.140' (ECDSA) to the list of known hosts.
    root@20.0.0.140's password: 
    flanneld                                                                                  100%  227   373.3KB/s   00:00    
    bootstrap.kubeconfig                                                                      100% 2164     3.4MB/s   00:00    
    kube-proxy.kubeconfig                                                                     100% 2082     4.4MB/s   00:00    
    kubelet                                                                                   100%  374   784.6KB/s   00:00    
    kubelet.config                                                                            100%  264   604.0KB/s   00:00    
    kubelet.kubeconfig                                                                        100% 2293     4.1MB/s   00:00    
    kube-proxy                                                                                100%  186   341.3KB/s   00:00    
    mk-docker-opts.sh                                                                         100% 2139     4.6MB/s   00:00    
    scp: /opt//kubernetes/bin/flanneld: Text file busy
    kubelet                                                                                   100%  168MB 158.4MB/s   00:01    
    kube-proxy                                                                                100%   48MB 163.9MB/s   00:00    
    kubelet.crt                                                                               100% 2169     3.8MB/s   00:00    
    kubelet.key                                                                               100% 1679     3.3MB/s   00:00    
    kubelet-client-2020-09-30-10-57-23.pem                                                    100% 1269   329.7KB/s   00:00    
    kubelet-client-current.pem                                                                100% 1269   421.4KB/s   00:00
    
    #	Copy the kubelet and kube-proxy unit files to node02
    [root@node01 ~]# scp /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@20.0.0.140:/usr/lib/systemd/system/
    root@20.0.0.140's password: 
    kubelet.service                                                                           100%  264   422.8KB/s   00:00    
    kube-proxy.service                                                                        100%  231   373.7KB/s   00:00 
    
    #	On node02, adjust the copied files
    #	Delete the copied certificates; node02 will request its own
    [root@node02 ~]# cd /opt/kubernetes/ssl
    [root@node02 ssl]# rm -rf *
    [root@node02 ssl]# ls
    
    #	Edit the three config files: kubelet, kubelet.config, and kube-proxy
    [root@node02 ssl]# cd ../cfg/
    [root@node02 cfg]# ls
    bootstrap.kubeconfig  flanneld  kubelet  kubelet.config  kubelet.kubeconfig  kube-proxy  kube-proxy.kubeconfig
    [root@node02 cfg]# vim kubelet
    
    KUBELET_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=20.0.0.140 \
    --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
    --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
    --config=/opt/kubernetes/cfg/kubelet.config \
    --cert-dir=/opt/kubernetes/ssl \
    --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
    
    [root@node02 cfg]# vim kubelet.config
    
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: 20.0.0.140
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS:
    - 10.0.0.2
    clusterDomain: cluster.local.
    failSwapOn: false
    authentication:
      anonymous:
        enabled: true
    
    [root@node02 cfg]# vim kube-proxy
    
    KUBE_PROXY_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=20.0.0.140 \
    --cluster-cidr=10.0.0.0/24 \
    --proxy-mode=ipvs \
    --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
    
    #	Start the services and enable them at boot
    [root@node02 cfg]# systemctl start kubelet.service
    [root@node02 cfg]# systemctl enable kubelet.service
    Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
    [root@node02 cfg]# systemctl start kube-proxy.service 
    [root@node02 cfg]# systemctl enable kube-proxy.service 
    Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
    
    #	On master01, node02's CSR now appears
    [root@master01 kubeconfig]# kubectl get csr
    NAME                                                   AGE   REQUESTOR           CONDITION
    node-csr-0K7kO9QDqs1DGi-YBGyLii_p1WO7TN_JQw6Kd8Dd6Ws   14m   kubelet-bootstrap   Approved,Issued
    node-csr-H78re3C9fGbFuBVqWxkr1GcFlPI8zUAFhgBrSR_LYrA   89s   kubelet-bootstrap   Pending
    
    #	Approve node02 to join the cluster
    [root@master01 kubeconfig]# kubectl certificate approve node-csr-H78re3C9fGbFuBVqWxkr1GcFlPI8zUAFhgBrSR_LYrA
    certificatesigningrequest.certificates.k8s.io/node-csr-H78re3C9fGbFuBVqWxkr1GcFlPI8zUAFhgBrSR_LYrA approved
    [root@master01 kubeconfig]# kubectl get csr
    NAME                                                   AGE     REQUESTOR           CONDITION
    node-csr-0K7kO9QDqs1DGi-YBGyLii_p1WO7TN_JQw6Kd8Dd6Ws   15m     kubelet-bootstrap   Approved,Issued
    node-csr-H78re3C9fGbFuBVqWxkr1GcFlPI8zUAFhgBrSR_LYrA   2m12s   kubelet-bootstrap   Approved,Issued
    
    #	List the cluster nodes; both nodes have joined
    [root@master01 kubeconfig]# kubectl get nodes
    NAME         STATUS   ROLES    AGE   VERSION
    20.0.0.130   Ready    <none>   14m   v1.12.3
    20.0.0.140   Ready    <none>   77s   v1.12.3
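
    #	(Optional smoke test, not in the original transcript) schedule a test Pod and see
    #	where it lands; on k8s v1.12, kubectl run still creates a Deployment:
    [root@master01 ~]# kubectl run nginx --image=nginx
    [root@master01 ~]# kubectl get pods -o wide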
    
    