Kubernetes: Deploying a Highly Available K8s Cluster with Kubeadm

1. Base Environment

1.1 Resource List

IP Address      Node Name
192.168.34.100  master  # VIP
192.168.34.32   master01
192.168.34.28   master02
192.168.34.59   master03
192.168.34.49   node01
192.168.34.52   node02

1.2 Edit the hosts File (all nodes)

[root@master01 ~]# cat >> /etc/hosts<<EOF
192.168.34.100  master
192.168.34.32   master01
192.168.34.28   master02
192.168.34.59   master03
192.168.34.49   node01
192.168.34.52   node02
EOF

1.3 Configure Passwordless SSH Login (master01 node)

# Generate a key pair
[root@master01 ~]# ssh-keygen -t rsa

# Authorize the local public key (copy it into authorized_keys)
[root@master01 ~]# cp -p .ssh/id_rsa.pub .ssh/authorized_keys 

# Distribute the key to the other nodes
[root@master01 ~]# for H in master0{2..3}; do ssh-copy-id $H; done
[root@master01 ~]# for H in node0{1..2}; do ssh-copy-id $H; done
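
# Optionally, confirm that passwordless login works from master01 to every node by printing each remote hostname
[root@master01 ~]# for H in master0{2..3} node0{1..2}; do ssh -o BatchMode=yes $H hostname; done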

1.4 Set the Hostname on Each Node

[root@master01 ~]# for H in master0{2..3}; do ssh $H hostnamectl set-hostname $H; done
[root@master01 ~]# for H in node0{1..2}; do ssh $H hostnamectl set-hostname $H; done

1.5 Disable the Firewall, SELinux, and Swap (all nodes)

# Disable the firewall
[root@master01 ~]# systemctl stop firewalld
[root@master01 ~]# systemctl disable firewalld

# Install iptables
[root@master01 ~]# yum install iptables-services -y

# Disable SELinux
[root@master01 ~]# setenforce 0
[root@master01 ~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config 

# Disable swap
[root@master01 ~]# swapoff -a
[root@master01 ~]# cp /etc/fstab /etc/fstab_bak
[root@master01 ~]# cat /etc/fstab_bak | grep -v swap > /etc/fstab
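
# Optionally verify on each node that swap is off and SELinux is no longer enforcing
[root@master01 ~]# free -m | grep -i swap      # the Swap line should show 0 total
[root@master01 ~]# getenforce                  # should print Permissive (Disabled after a reboot)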

1.6 Update the System (all nodes)

# Install wget
[root@master01 ~]# yum install wget -y

# Remove the existing local yum repos
[root@master01 ~]# rm -rf /etc/yum.repos.d/*

# Download the Aliyun repo file
[root@master01 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

# Upgrade all packages (the OS release is updated; the running kernel stays unchanged until a reboot)
[root@master01 ~]# yum update -y

1.7 Allow iptables to See Bridged Traffic (all nodes)

# Load the br_netfilter and overlay modules
[root@master01 ~]# modprobe br_netfilter
[root@master01 ~]# modprobe overlay

# Verify that the br_netfilter module is loaded
[root@master01 ~]# lsmod | grep br_netfilter
br_netfilter           22256  0 
bridge                151336  1 br_netfilter

# Load the modules automatically at boot
[root@master01 ~]# cat << EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

# Set the required sysctl parameters; they persist across reboots
[root@master01 ~]# cat << EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
echo "net.ipv4.ip_nonlocal_bind = 1" >> /etc/sysctl.conf
EOF

# Apply the sysctl parameters without rebooting
[root@master01 ~]# sysctl --system
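
# Optionally confirm the values took effect (each of the following should report 1)
[root@master01 ~]# sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward net.ipv4.ip_nonlocal_bind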

1.8 Load the IPVS Modules (all nodes)

# Check the kernel version; on kernels 4.19 and above, replace nf_conntrack_ipv4 with nf_conntrack
[root@master01 ~]# uname -r
3.10.0-957.el7.x86_64

[root@master01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
> #!/bin/bash
> modprobe -- ip_vs
> modprobe -- ip_vs_rr
> modprobe -- ip_vs_wrr
> modprobe -- ip_vs_sh
> modprobe -- nf_conntrack_ipv4
> EOF

# Run the script
[root@master01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
[root@master01 ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4      15053  0 
nf_defrag_ipv4         12729  1 nf_conntrack_ipv4
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs                 145497  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          133095  2 ip_vs,nf_conntrack_ipv4
libcrc32c              12644  3 xfs,ip_vs,nf_conntrack

# Install the related management tools
[root@master01 ~]# yum install ipset ipvsadm -y
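
# The module and sysctl configuration from sections 1.7 and 1.8 must exist on every node; as a
# convenience it can also be pushed from master01 and applied remotely (a sketch that assumes the
# passwordless SSH from section 1.3)
[root@master01 ~]# for H in master0{2..3} node0{1..2}; do \
    ssh $H "mkdir -p /etc/sysconfig/modules"; \
    scp /etc/modules-load.d/k8s.conf $H:/etc/modules-load.d/; \
    scp /etc/sysctl.d/k8s.conf $H:/etc/sysctl.d/; \
    scp /etc/sysconfig/modules/ipvs.modules $H:/etc/sysconfig/modules/; \
    ssh $H "modprobe overlay; modprobe br_netfilter; bash /etc/sysconfig/modules/ipvs.modules; sysctl --system > /dev/null"; \
  done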

1.9 Time Synchronization (all nodes)

[root@master01 ~]# yum install chrony -y

# Edit the chronyd configuration file; when finished it should contain the following
[root@master01 ~]# egrep -v "^$|#" /etc/chrony.conf 
server ntp1.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.0.0/16
local stratum 10
logdir /var/log/chrony
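
# After editing the file, enable and restart chronyd, then check that the time source is reachable
[root@master01 ~]# systemctl enable chronyd && systemctl restart chronyd
[root@master01 ~]# chronyc sources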

2. High-Availability Environment Setup

2.1 Deploy Pacemaker (all master nodes)

# Install pacemaker
[root@master01 ~]# yum install pacemaker pcs corosync fence-agents resource-agents -y

# Enable and start the pcsd service
[root@master01 ~]# systemctl enable pcsd
[root@master01 ~]# systemctl start pcsd
[root@master01 ~]# systemctl status pcsd

# Set the password of the cluster administrator hacluster (created by default)
[root@master01 ~]# echo pacemaker_pass | passwd --stdin hacluster
Changing password for user hacluster.
passwd: all authentication tokens updated successfully.

2.2 Configure the Pacemaker Cluster (run on one master node)

# master01 is used here
# Authenticate the nodes to form the cluster, using the password set in the previous step
[root@master01 ~]# pcs cluster auth master01 master02 master03 -u hacluster -p pacemaker_pass --force
master02: Authorized
master03: Authorized
master01: Authorized

# Create and name the cluster
# This generates the configuration file /etc/corosync/corosync.conf
[root@master01 ~]# pcs cluster setup --force --name k8s_cluster_ha master01 master02 master03
Destroying cluster on nodes: master01, master02, master03...
master03: Stopping Cluster (pacemaker)...
master02: Stopping Cluster (pacemaker)...
master01: Stopping Cluster (pacemaker)...
master01: Successfully destroyed cluster
master02: Successfully destroyed cluster
master03: Successfully destroyed cluster

Sending 'pacemaker_remote authkey' to 'master01', 'master02', 'master03'
master01: successful distribution of the file 'pacemaker_remote authkey'
master03: successful distribution of the file 'pacemaker_remote authkey'
master02: successful distribution of the file 'pacemaker_remote authkey'
Sending cluster config files to the nodes...
master01: Succeeded
master02: Succeeded
master03: Succeeded

Synchronizing pcsd certificates on nodes master01, master02, master03...
master02: Success
master03: Success
master01: Success
Restarting pcsd on the nodes in order to reload the certificates...
master02: Success
master03: Success
master01: Success

2.3 Start the Cluster (run on one master node)

# Start the cluster
[root@master01 ~]# pcs cluster start --all
[root@master01 ~]# pcs status
Cluster name: k8s_cluster_ha

WARNINGS:
No stonith devices and stonith-enabled is not false

Stack: corosync
Current DC: master03 (version 1.1.23-1.el7_9.1-9acf116022) - partition with quorum
Last updated: Tue Apr 25 16:19:56 2023
Last change: Tue Apr 25 16:19:55 2023 by hacluster via crmd on master03

3 nodes configured
0 resource instances configured

Online: [ master01 master02 master03 ]

No resources

Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/enabled


# Enable the cluster at boot
[root@master01 ~]# pcs cluster enable --all
master01: Cluster Enabled
master02: Cluster Enabled
master03: Cluster Enabled

# Set cluster properties
[root@master01 ~]# pcs property set pe-warn-series-max=1000 pe-input-series-max=1000 pe-error-series-max=1000 cluster-recheck-interval=5

# corosync enables STONITH by default, but no STONITH devices are configured here, so pacemaker would refuse to start any resources; disable STONITH
[root@master01 ~]# pcs property set stonith-enabled=false
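
# Optionally list the properties that are now in effect
[root@master01 ~]# pcs property list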

2.4 Install the HAProxy Load Balancer (all master nodes)

[root@master01 ~]# yum install haproxy -y

# Edit the haproxy configuration
[root@master01 ~]# vi /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
listen stats
  bind 0.0.0.0:1080
  mode http
  stats enable
  stats uri /
  stats realm kubernetes\ Haproxy
  stats auth admin:admin
  stats  refresh 30s
  stats  show-node
  stats  show-legends
  stats  hide-version
listen  k8s-api
   bind 192.168.34.100:64430
   mode tcp
   option tcplog
   log global
   server master01  192.168.34.32:6443  check inter 3000 fall 2 rise 5
   server master02  192.168.34.28:6443  check inter 3000 fall 2 rise 5
   server master03  192.168.34.59:6443 check inter 3000 fall 2 rise 5
# Under k8s-api, bind 192.168.34.100:64430: the port is changed to 64430 to stay clear of 6443; otherwise haproxy would clash with the kube-apiserver that already listens on port 6443 on the masters

# Enable at boot
[root@master01 ~]# systemctl enable haproxy
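
# master02 and master03 need the same configuration; one way (assuming the passwordless SSH set up
# earlier) is to copy it from master01 and enable haproxy there as well
[root@master01 ~]# for H in master0{2..3}; do scp /etc/haproxy/haproxy.cfg $H:/etc/haproxy/; ssh $H systemctl enable haproxy; done
# haproxy is not started by hand here; in section 2.5 pacemaker starts it on the node that holds the VIP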

2.5 Create the Cluster Resources (run on one master node)

# Configure the virtual IP
[root@master01 ~]# pcs resource create kube-api-vip ocf:heartbeat:IPaddr2 ip=192.168.34.100 cidr_netmask=24 op monitor interval=30s

# Add the haproxy resource
[root@master01 ~]# pcs resource create k8s-haproxy systemd:haproxy

# Configure the resource start order
[root@master01 ~]# pcs constraint order kube-api-vip then k8s-haproxy
Adding kube-api-vip k8s-haproxy (kind: Mandatory) (Options: first-action=start then-action=start)

# Make sure the resources are enabled
[root@master01 ~]# pcs resource enable kube-api-vip k8s-haproxy

# Colocate the two resources so that they always run on the same node
[root@master01 ~]# pcs constraint colocation add k8s-haproxy with kube-api-vip

# Check the cluster status
[root@master01 ~]# pcs status
Stack: corosync
Current DC: master01 (version 1.1.23-1.el7_9.1-9acf116022) - partition with quorum
Last updated: Tue Apr 25 16:48:48 2023
Last change: Tue Apr 25 16:33:14 2023 by root via cibadmin on master01

3 nodes configured
2 resource instances configured

Online: [ master01 master02 master03 ]

Full list of resources:

 kube-api-vip	(ocf::heartbeat:IPaddr2):	Started master01
 k8s-haproxy	(systemd:haproxy):	Started master01

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
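
# Optionally verify that the VIP and haproxy are up on the node holding the resources
[root@master01 ~]# ip addr show | grep 192.168.34.100
[root@master01 ~]# ss -tlnp | grep 64430
# A simple failover test (a sketch): put master01 into standby, confirm with pcs status that the
# resources move to another master, then bring it back
[root@master01 ~]# pcs node standby master01
[root@master01 ~]# pcs status
[root@master01 ~]# pcs node unstandby master01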

3. Kubernetes Cluster Installation

3.1 Install Docker (all nodes)

# Install dependencies
[root@master01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 nfs-utils vim

# Configure the docker yum repo
[root@master01 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# List the available docker versions
[root@master01 ~]# yum list docker-ce --showduplicates | sort -r

# Install a specific version (the version used here)
[root@master01 ~]# yum install docker-ce-20.10.9-3.el7 docker-ce-cli-20.10.9-3.el7 containerd.io-1.4.11-3 -y
# Or install the latest version
[root@master01 ~]# yum install docker-ce docker-ce-cli containerd.io -y

# Configure a registry mirror
# Substitute your own mirror address here
[root@master01 ~]# curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io

# Start docker
[root@master01 ~]# systemctl daemon-reload
[root@master01 ~]# systemctl start docker
[root@master01 ~]# systemctl enable docker

3.2 Install Kubernetes

# Configure the kubernetes yum repo

[root@master01 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# List the available kubernetes versions
yum list kubeadm --showduplicates | sort -r

# Install a specific version
# Version 1.22.2 is used here.
yum install kubeadm-1.22.2-0 kubelet-1.22.2-0 kubectl-1.22.2-0 lrzsz -y
# Or install the latest version
yum install kubeadm kubelet kubectl lrzsz -y

# Switch the docker cgroup driver to systemd
# In /usr/lib/systemd/system/docker.service, change the line ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
# to ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd
# Without this change, you may hit the following error when adding worker nodes
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd".
Please follow the guide at https://kubernetes.io/docs/setup/cri/

[root@master01 ~]# sed -i "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service

# Restart docker
[root@master01 ~]# systemctl restart docker
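
# Confirm that docker now reports systemd as its cgroup driver
[root@master01 ~]# docker info | grep -i "cgroup driver"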

# Enable and start kubelet
[root@master01 ~]# systemctl enable kubelet && systemctl start kubelet

3.3 Initialize the Cluster (master01 node)

# Generate the default init configuration file
[root@master01 ~]# kubeadm config print init-defaults > kubeadm-init.yaml

# Edit the init configuration
[root@master01 ~]# vim kubeadm-init.yaml
# The main changes: the IP under localAPIEndpoint: (the master01 address), controlPlaneEndpoint:, imageRepository:, and the added apiVersion: kubeproxy.config.k8s.io/v1alpha1 section.
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.34.32
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  imagePullPolicy: IfNotPresent
  name: master01
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
controlPlaneEndpoint: "master:64430"
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.22.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.100.0.0/16"
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"

# Initialize the cluster from the configuration file
[root@master01 ~]# kubeadm init --config kubeadm-init.yaml
......
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join master:64430 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:503e9566a6b17fe9560d42b0fdc5be01c18671808363891ed130adff2298d01f \
	--control-plane 

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join master:64430 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:503e9566a6b17fe9560d42b0fdc5be01c18671808363891ed130adff2298d01f 

3.4 Deploy the Calico Network (run on master01)

# Fetch the calico manifest
# The download may fail; a link to the file is at the end of this section
[root@master01 ~]# curl https://docs.projectcalico.org/manifests/calico.yaml -O

# Edit the calico configuration
[root@master01 ~]# vim calico.yaml
# Uncomment CALICO_IPV4POOL_CIDR and set it to the pod subnet
# Search for CALICO_IPV4POOL_CIDR to jump to the right place
......
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            - name: CALICO_IPV4POOL_CIDR
              value: "10.100.0.1/16"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
......

# Configure the kubectl environment (kubeconfig)

[root@master01 ~]# mkdir -p $HOME/.kube
[root@master01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Deploy calico
[root@master01 ~]# kubectl apply -f calico.yaml
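
# After applying the manifest, wait for the calico and coredns pods to reach Running and for the
# node to turn Ready
[root@master01 ~]# kubectl get pods -n kube-system -w
[root@master01 ~]# kubectl get nodes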

Link to the calico configuration file

3.5 Join the Remaining Nodes to the Cluster

# Distribute the kubernetes certificates and keys to the other master nodes
[root@master01 ~]# USER=root
[root@master01 ~]# CONTROL_PLANE_IPS="master02 master03"
[root@master01 ~]# for host in ${CONTROL_PLANE_IPS}; do \
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"; \
    scp /etc/kubernetes/pki/ca.* "${USER}"@$host:/etc/kubernetes/pki/; \
    scp /etc/kubernetes/pki/sa.* "${USER}"@$host:/etc/kubernetes/pki/; \
    scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}"@$host:/etc/kubernetes/pki/; \
    scp /etc/kubernetes/pki/etcd/ca.* "${USER}"@$host:/etc/kubernetes/pki/etcd/; \
    scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/; \
  done

# Join the other master nodes to the cluster (run on master02 and master03)
[root@master01 ~]# kubeadm join master:64430 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:503e9566a6b17fe9560d42b0fdc5be01c18671808363891ed130adff2298d01f  --control-plane
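
# Once the join succeeds, kubectl can be set up on master02/master03 the same way as on master01
# (the admin.conf was already copied over in the step above)
[root@master02 ~]# mkdir -p $HOME/.kube && cp -i /etc/kubernetes/admin.conf $HOME/.kube/config && chown $(id -u):$(id -g) $HOME/.kube/config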

# Join the worker nodes to the cluster (run on node01 and node02)
[root@master01 ~]# kubeadm join master:64430 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:503e9566a6b17fe9560d42b0fdc5be01c18671808363891ed130adff2298d01f 

# If the token has expired, generate a new join command
[root@master01 ~]# kubeadm token create --print-join-command

# Check node status
[root@master01 ~]# kubectl get nodes
NAME       STATUS   ROLES                  AGE     VERSION
master01   Ready    control-plane,master   32m     v1.22.2
master02   Ready    control-plane,master   2m44s   v1.22.2
master03   Ready    control-plane,master   2m42s   v1.22.2
node01     Ready    <none>                 3m      v1.22.2
node02     Ready    <none>                 2m57s   v1.22.2

3.6 Kuboard Management UI (run on master01)

# Online deployment
[root@master01 ~]# kubectl apply -f https://kuboard.cn/install-script/kuboard.yaml
[root@master01 ~]# kubectl apply -f https://addons.kuboard.cn/metrics-server/0.3.7/metrics-server.yaml

# Check the Kuboard running status:
[root@master01 ~]# kubectl get pods -l k8s.kuboard.cn/name=kuboard -n kube-system
NAME                       READY   STATUS    RESTARTS   AGE
kuboard-74c645f5df-jq8qv   1/1     Running   0          3m31s

# Obtain the admin and read-only user tokens
# This token has ClusterAdmin privileges and can perform any operation

[root@master01 ~]# echo $(kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep ^kuboard-user | awk '{print $1}') -o go-template='{{.data.token}}' | base64 -d)
eyJhbGciOiJSUzI1NiIsImtpZCI6Ii1qWUN4QkFoVzFvSWg4bG1IX09QMGMtUDRsWkRiZmNfSFBDelc5ZGlSajgifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJvYXJkLXVzZXItdG9rZW4taGR2dGQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoia3Vib2FyZC11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNWI3NTE4MGYtOWM1MC00NmJiLWFjN2EtOTI5OGUyNGYyYjA2Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmt1Ym9hcmQtdXNlciJ9.GrRccXm0vtU-RyaBGKLduG7y6Uj-w4g8B15ZWBOsiGti-p7zerp5k-yi3h32u6-e0nbVZhlyqyjpgge2Eenry-qdXCP6Tl4lBd82zomE3ppJB1-xOB9a4Y_E1h9lNXpqzGUuLsDdIhy3C0EnSmA2uVT7jTVS3QKajXGiEgoMHINYp36Ss6Bk61XDVwur5X4fhpOetuku6UrSX7mjTDe0Z7O-Du0yrqp7C-dKTuGmVI0EjilHzeuGDA858Xl7wuiN5UkJGAKrABwGGCjQRkKjp-V99I9y9T2bNV1daLql7d_VkVAJYdDSFsG6YSK1-xjvWhuqokTlCjCq8vZK0VD4qQ

# Access via NodePort
# The Kuboard Service is exposed as a NodePort, which is 32567;
# http://<any worker node IP>:32567/
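
# The NodePort can also be read from the Service itself (assuming the manifest creates a Service
# named kuboard in kube-system, as the install script above does)
[root@master01 ~]# kubectl get svc kuboard -n kube-system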
