k8s Single-Master Cluster Deployment


Environment

master		 192.168.30.7	    docker, kubeadm, kubelet, kubectl, flannel
node1		 192.168.30.8	    docker, kubeadm, kubelet, kubectl, flannel
node2		 192.168.30.9	    docker, kubeadm, kubelet, kubectl, flannel
harbor		 192.168.30.6	    docker, docker-compose, harbor-offline-v1.2.2

I. Deploying a single-master cluster with kubeadm

1. Disable the firewall and SELinux (all k8s nodes)

[root@master ~]# systemctl stop firewalld   // stop the firewall
[root@master ~]# systemctl disable firewalld   // don't start it at boot
[root@master ~]# setenforce 0                 // disable SELinux
setenforce: SELinux is disabled
[root@master ~]# iptables -F                 // flush firewall rules
[root@master ~]# swapoff -a                  // disable swap
[root@master ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab    // disable swap permanently
[root@master ~]# for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done  // load the kernel modules kube-proxy needs for ipvs mode
ip_vs_dh
ip_vs_ftp
ip_vs
ip_vs_lblc
ip_vs_lblcr
ip_vs_lc
ip_vs_nq
ip_vs_pe_sip
ip_vs_rr
ip_vs_sed
ip_vs_sh
ip_vs_wlc
ip_vs_wrr
[root@master ~]# 
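
The loop above loads the modules only for the current boot. To make them persist across reboots, a minimal sketch (file path assumed; list only the modules you need) is:

# load ipvs modules at every boot (sketch)
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
EOF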

2. Add host mappings and set kernel parameters

[root@master ~]# vim /etc/hosts    // add host mappings

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.30.7 master
192.168.30.8 node1
192.168.30.9 node2
192.168.30.6 harbor
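
Every machine needs the same mappings; assuming passwordless SSH is set up, the file can be pushed out in one loop (hostnames as above):

# copy the hosts file to the other machines (sketch)
for h in node1 node2 harbor; do scp /etc/hosts $h:/etc/hosts; done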

[root@master ~]# cat > /etc/sysctl.d/k8s.conf << EOF   // let iptables see bridged traffic
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF
[root@master ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /usr/lib/sysctl.d/60-libvirtd.conf ...
fs.aio-max-nr = 1048576
* Applying /etc/sysctl.d/99-sysctl.conf ...
net.ipv4.ip_forward = 1
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
net.ipv4.ip_forward = 1
[root@master ~]# 
[root@master ~]# echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf 
[root@master ~]# sysctl -p
net.ipv4.ip_forward = 1
net.ipv4.ip_forward = 1
[root@master ~]# systemctl restart network
[root@master ~]# 

3. Install Docker, kubeadm, and kubelet

3.1 Install Docker

[root@master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2    // install Docker prerequisites
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
 * base: mirrors.aliyun.com
 * extras: mirrors.aliyun.com
 * updates: mirrors.aliyun.com
Package yum-utils-1.1.31-54.el7_8.noarch already installed and latest version
Package device-mapper-persistent-data-0.8.5-3.el7_9.2.x86_64 already installed and latest version
Package 7:lvm2-2.02.187-6.el7_9.5.x86_64 already installed and latest version
Nothing to do
[root@master ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo  // add the Docker CE repo
Loaded plugins: fastestmirror, langpacks
adding repo from: https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@master ~]# yum install -y docker-ce  // install Docker CE
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
 * base: mirrors.aliyun.com
 * extras: mirrors.aliyun.com
 * updates: mirrors.aliyun.com
Package 3:docker-ce-20.10.8-3.el7.x86_64 already installed and latest version
Nothing to do
[root@master ~]# systemctl start docker
[root@master ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@master ~]# 
[root@master ~]# systemctl daemon-reload   // reload unit files
[root@master ~]# systemctl restart docker   // restart docker
[root@master ~]# docker --version    // check the Docker version
Docker version 20.10.8, build 3967b7d
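
The join step in 3.7 warns that Docker uses the cgroupfs cgroup driver while kubeadm recommends systemd. A minimal /etc/docker/daemon.json that switches the driver (a sketch; apply on every node before running kubeadm, and restart Docker afterwards):

# switch Docker to the systemd cgroup driver (sketch)
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload && systemctl restart docker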

3.2 Install kubeadm and kubelet
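
The transcript below assumes a Kubernetes yum repository is already configured. A commonly used Aliyun mirror definition looks like this (a sketch; the URL is an assumption, verify it against the mirror):

# Kubernetes yum repo from the Aliyun mirror (sketch)
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF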

[root@master ~]# yum install -y kubelet-1.15.0 kubeadm-1.15.0 kubectl-1.15.0
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
 * base: mirrors.aliyun.com
 * extras: mirrors.aliyun.com
 * updates: mirrors.aliyun.com
Package kubelet-1.15.0-0.x86_64 already installed and latest version
Package kubeadm-1.15.0-0.x86_64 already installed and latest version
Package kubectl-1.15.0-0.x86_64 already installed and latest version
Nothing to do
[root@master ~]# rpm -qa | grep kube   // confirm the installed packages
kubectl-1.15.0-0.x86_64
kubernetes-cni-0.8.7-0.x86_64
kubelet-1.15.0-0.x86_64
kubeadm-1.15.0-0.x86_64
[root@master ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

3.3 Initialize Kubernetes

[root@master k8s]# kubeadm init --apiserver-advertise-address=192.168.30.7 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.15.0 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16
...
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.30.7:6443 --token 14awt0.4tsyimtfoszxla45 \
    --discovery-token-ca-cert-hash sha256:83da59d256f6914646b604af179506983e099a59555142697db90c2c18ff5051 
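
Note that the bootstrap token above expires after 24 hours by default; if a node joins later, generate a fresh join command on the master:

# print a new, complete kubeadm join command
kubeadm token create --print-join-command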

3.4 Install the pod network add-on

[root@master k8s]#  mkdir -p $HOME/.kube
[root@master k8s]#   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master k8s]#   sudo chown $(id -u):$(id -g) $HOME/.kube/config
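
kube-flannel.yml is applied from the local directory here; if you do not already have it, it can typically be fetched from the flannel repository (URL assumed for flannel v0.11-era manifests):

# download the flannel manifest (sketch)
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
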
[root@master k8s]# kubectl apply -f kube-flannel.yml 
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.extensions/kube-flannel-ds-amd64 created
daemonset.extensions/kube-flannel-ds-arm64 created
daemonset.extensions/kube-flannel-ds-arm created
daemonset.extensions/kube-flannel-ds-ppc64le created
daemonset.extensions/kube-flannel-ds-s390x created

3.5 Check cluster status

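At this point the control plane can be verified from the master; the master moves to Ready once flannel is running:

kubectl get nodes                         # master should show Ready once flannel is up
kubectl get pods -n kube-system -o wide   # core components plus the flannel DaemonSet pods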

3.6 Pull the flannel image on the worker nodes

[root@node2 k8s]# docker pull lizhenliang/flannel:v0.11.0-amd64
v0.11.0-amd64: Pulling from lizhenliang/flannel
cd784148e348: Pull complete 
04ac94e9255c: Pull complete 
e10b013543eb: Pull complete 
005e31e443b1: Pull complete 
74f794f05817: Pull complete 
Digest: sha256:bd76b84c74ad70368a2341c2402841b75950df881388e43fc2aca000c546653a
Status: Downloaded newer image for lizhenliang/flannel:v0.11.0-amd64
docker.io/lizhenliang/flannel:v0.11.0-amd64
[root@node2 k8s]# 
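
The manifest applied in 3.4 usually references the image as quay.io/coreos/flannel:v0.11.0-amd64; if yours does, retag the mirrored image on each node so the DaemonSet finds the expected name (image names assumed):

# retag the mirror image under the name the DaemonSet expects (sketch)
docker tag lizhenliang/flannel:v0.11.0-amd64 quay.io/coreos/flannel:v0.11.0-amd64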

3.7 Join the nodes to the cluster

[root@node1 k8s]# kubeadm join 192.168.30.7:6443 --token 14awt0.4tsyimtfoszxla45     --discovery-token-ca-cert-hash sha256:83da59d256f6914646b604af179506983e099a59555142697db90c2c18ff5051
[preflight] Running pre-flight checks
	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.8. Latest validated version: 18.09
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@master k8s]# kubectl get cs    // check component status
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
[root@master k8s]# kubectl get nodes    // check node status
NAME     STATUS   ROLES    AGE   VERSION
master   Ready    master   79m   v1.15.0
node1    Ready    <none>   18m   v1.15.0
node2    Ready    <none>   18m   v1.15.0


3.8 Label the nodes

[root@master k8s]# kubectl label node node1 node-role.kubernetes.io/node=node
node/node1 labeled
[root@master k8s]# kubectl label node node2 node-role.kubernetes.io/node=node
node/node2 labeled
[root@master k8s]# kubectl get nodes
NAME     STATUS   ROLES    AGE   VERSION
master   Ready    master   81m   v1.15.0
node1    Ready    node     21m   v1.15.0
node2    Ready    node     21m   v1.15.0
[root@master k8s]# 

3.9 Check pod status

[root@master k8s]# kubectl get pods -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-bccdc95cf-c56p5          1/1     Running   0          82m
coredns-bccdc95cf-pqd8n          1/1     Running   0          82m
etcd-master                      1/1     Running   0          82m
kube-apiserver-master            1/1     Running   0          81m
kube-controller-manager-master   1/1     Running   0          82m
kube-flannel-ds-amd64-6d7wp      1/1     Running   0          31m
kube-flannel-ds-amd64-h6zs6      1/1     Running   0          22m
kube-flannel-ds-amd64-m2szz      1/1     Running   0          22m
kube-proxy-92cb7                 1/1     Running   0          22m
kube-proxy-ltql5                 1/1     Running   0          82m
kube-proxy-p7xth                 1/1     Running   0          22m
kube-scheduler-master            1/1     Running   0          81m
[root@master k8s]# 

II. Deploying the Harbor registry

1. Set the hostname and add host mappings

[root@localhost ~]# hostnamectl set-hostname harbor
[root@localhost ~]# su
[root@harbor ~]# cat <<EOF >>/etc/hosts
> 192.168.30.7 master
> 192.168.30.8 node1
> 192.168.30.9 node2
> 192.168.30.6 harbor
> EOF

2. Disable the firewall and SELinux, enable IP forwarding

[root@harbor ~]# systemctl stop firewalld 
[root@harbor ~]# systemctl disable firewalld 
[root@harbor ~]# setenforce 0
setenforce: SELinux is disabled
[root@harbor ~]# echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
[root@harbor ~]# sysctl -p
net.ipv4.ip_forward = 1
[root@harbor ~]# 

3. Deploy Harbor

3.1 Install Docker

[root@harbor ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
 * base: mirrors.aliyun.com
 * extras: mirrors.aliyun.com
 * updates: mirrors.aliyun.com
Package yum-utils-1.1.31-54.el7_8.noarch already installed and latest version
Package device-mapper-persistent-data-0.8.5-3.el7_9.2.x86_64 already installed and latest version
Package 7:lvm2-2.02.187-6.el7_9.5.x86_64 already installed and latest version
Nothing to do
[root@harbor ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror, langpacks
adding repo from: https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@harbor ~]# yum install -y docker-ce
[root@harbor bin]# systemctl start docker
[root@harbor bin]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.


3.2 Extract the installer and make docker-compose executable

[root@harbor ~]# chmod +x /usr/local/bin/docker-compose
[root@harbor ~]# tar zxf harbor-offline-installer-v1.2.2.tgz -C /usr/local
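
The chmod above assumes the docker-compose binary is already in /usr/local/bin; one way to fetch it (the version number is an assumption):

# download a docker-compose binary from GitHub releases (sketch)
curl -L "https://github.com/docker/compose/releases/download/1.21.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose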

3.3 Set the Harbor IP and run the install script

[root@harbor ~]# cd /usr/local/harbor/
[root@harbor harbor]# ls
common                    docker-compose.notary.yml  harbor_1_1_0_template  harbor.v1.2.2.tar.gz  LICENSE  prepare
docker-compose.clair.yml  docker-compose.yml         harbor.cfg             install.sh            NOTICE   upgrade
[root@harbor harbor]# sed -i "5s/reg.mydomain.com/192.168.30.6/" /usr/local/harbor/harbor.cfg
[root@harbor harbor]# sh /usr/local/harbor/install.sh
...
✔ ----Harbor has been installed and started successfully.----

Now you should be able to visit the admin portal at http://192.168.30.6. 
For more details, please visit https://github.com/vmware/harbor .
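
Harbor runs as a set of docker-compose services; their state can be checked from the install directory:

# all Harbor services should show "Up"
cd /usr/local/harbor && docker-compose ps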


3.4 Log in via the web UI

Browse to http://192.168.30.6 and log in with the default admin account (admin / Harbor12345). (Screenshots: Harbor login page and project dashboard.)

III. Pull the code, build the image, and push it to Harbor

1. Pull the code

[root@master ~]# git clone https://github.com/otale/tale.git  // clone the source
Cloning into 'tale'...
remote: Enumerating objects: 6759, done.
remote: Counting objects: 100% (3/3), done.
remote: Compressing objects: 100% (3/3), done.
remote: Total 6759 (delta 0), reused 0 (delta 0), pack-reused 6756
Receiving objects: 100% (6759/6759), 27.13 MiB | 6.40 MiB/s, done.
Resolving deltas: 100% (3532/3532), done.

2. Write the Dockerfile

[root@master ~]# mkdir dockerfile && cd dockerfile  // create a working directory for the Dockerfile
[root@master dockerfile]# cp -r /root/tale /root/dockerfile
[root@master dockerfile]# cat <<EOF >> /root/dockerfile/Dockerfile
> FROM docker.io/centos:7
> RUN yum install wget curl curl-devel -y
> RUN wget -c --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.rpm
> RUN yum localinstall -y jdk-8u131-linux-x64.rpm
> ADD tale tale
> RUN cd tale/ && sh install.sh
> RUN cd tale/tale/bin && chmod +x tool 
> EXPOSE 9000
> EOF

3. Build the image

[root@master dockerfile]# docker build -t="centos:tale" .
Sending build context to Docker daemon  40.05MB
Step 1/8 : FROM docker.io/centos:7
 ---> 8652b9f0cb4c
Step 2/8 : RUN yum install wget curl curl-devel -y
 ---> Using cache
 ---> a4db1c710f2a
Step 3/8 : RUN wget -c --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.rpm
 ---> Using cache
 ---> deb50b8b8d18
Step 4/8 : RUN yum localinstall -y jdk-8u131-linux-x64.rpm
 ---> Using cache
 ---> c599c071623d
Step 5/8 : ADD tale tale
 ---> Using cache
 ---> f75ee8207f44
Step 6/8 : RUN cd tale/ && sh install.sh
 ---> Using cache
 ---> 4be069f04e72
Step 7/8 : RUN cd tale/bin && chmod +x tool
 ---> Running in b3046cdad740
Removing intermediate container b3046cdad740
 ---> 06a20ecc1fab
Step 8/8 : EXPOSE 9000
 ---> Running in 6b13c19a6312
Removing intermediate container 6b13c19a6312
 ---> 7a5112f75701
Successfully built 7a5112f75701
Successfully tagged centos:tale
[root@master dockerfile]# docker run -dit -p 9000:9000 centos:tale /bin/bash    // map host port 9000 to container port 9000
5d65a8dad7b176029b51ba31a1d2a4fa7fe4c1b1573e9007076d23a6a9305ea4

4. Enter the container and start the service

[root@master tale]# docker ps
CONTAINER ID   IMAGE                                               COMMAND                  CREATED         STATUS         PORTS                                       NAMES
f2b0d0b604ee   centos:tale                                         "/bin/bash"              8 seconds ago   Up 7 seconds   0.0.0.0:9000->9000/tcp, :::9000->9000/tcp   tender_herschel
[root@master tale]# docker exec -it f2b0d0b604ee /bin/bash
[root@f2b0d0b604ee tale]# ls
lib  resources  tale-latest.jar  tale.tar.gz  tool
[root@f2b0d0b604ee tale]# chmod +x tool 
[root@f2b0d0b604ee tale]# ls
lib  resources  tale-latest.jar  tale.tar.gz  tool
[root@f2b0d0b604ee tale]# ./tool start
Starting tale ...
(pid=85) [OK]


5. Point the k8s nodes' Docker at the Harbor registry

[root@master tale]# vim /usr/lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd --insecure-registry 192.168.30.6 -H fd:// --containerd=/run/containerd/containerd.sock
[root@master tale]# systemctl daemon-reload
[root@master tale]# systemctl restart docker 
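
Repeat this on every node that will pull from Harbor. An alternative to editing the unit file is the insecure-registries key in daemon.json (a sketch; merge with any existing keys, and do not also pass --insecure-registry in ExecStart):

# allow plain-HTTP access to the Harbor registry (sketch)
cat > /etc/docker/daemon.json << EOF
{ "insecure-registries": ["192.168.30.6"] }
EOF
systemctl daemon-reload && systemctl restart docker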

6. Tag the image and push it to Harbor

[root@master tale]# docker login -u admin -p Harbor12345 http://192.168.30.6  // log in to the registry (on the master)
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
[root@master tale]# docker tag centos:tale 192.168.30.6/public/centos:tale  // tag the image for Harbor's public project
[root@master tale]# ls
Dockerfile  jdk1.8.0_91  jdk-8u91-linux-x64.tar.gz  tale

[root@master tale]# docker push 192.168.30.6/public/centos:tale  // push the image, then confirm it in the Harbor UI
The push refers to repository [192.168.30.6/public/centos]
60ad6954394b: Pushed 
03ce414d4120: Pushed 
9574e0c5027d: Pushed 
5c27272ecf72: Pushed 
6b3ac3b6047c: Pushed 
8eed47bd0b50: Pushed 
174f56854903: Pushed 
tale: digest: sha256:39eaa0ef2fb95740e19419877f34cec97ddc5a71c430f70c291ef7db588df085 size: 1798


7. Deploy the Java application to Kubernetes

[root@master ~]# cat tale.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tale-deployment
  labels:
    app: tale
spec:
  replicas: 1
  selector:
    matchLabels:
      app: tale
  template:
    metadata:
      labels:
        app: tale
    spec:
      containers:
      - name: tale
        image: 192.168.30.6/public/centos:tale
        command: [ "bin/bash","-ce","tail -f /dev/null"]
        ports:
        - containerPort: 9000
[root@master ~]# cat tale-service.yaml 
apiVersion: v1  
kind: Service  
metadata:
  name: tale-service
  labels:
    app: tale
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 9000
  selector:
    app: tale
[root@master ~]# kubectl apply -f tale.yaml
deployment.apps/tale-deployment created
[root@master ~]# kubectl apply -f tale-service.yaml
service/tale-service created
[root@master ~]# 

8. Verify

[root@master ~]#  kubectl get po,svc -o wide
NAME                                   READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
pod/tale-deployment-59675cd9f7-wzlr8   1/1     Running   0          29m   10.244.1.5   node1   <none>           <none>

NAME                   TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE    SELECTOR
service/kubernetes     ClusterIP   10.1.0.1     <none>        443/TCP        17h    <none>
service/tale-service   NodePort    10.1.18.5    <none>        80:30912/TCP   124m   app=tale

9. Start Tale

[root@master ~]# kubectl exec -it tale-deployment-59675cd9f7-wzlr8 /bin/bash
[root@tale-deployment-59675cd9f7-wzlr8 tale]# cd tale
[root@tale-deployment-59675cd9f7-wzlr8 tale]# ls
lib  resources  tool
[root@tale-deployment-59675cd9f7-wzlr8 tale]# sh tool start
Starting tale ...
(pid=43) [OK]
[root@tale-deployment-59675cd9f7-wzlr8 tale]# 

Test

[root@master ~]#  kubectl get po,svc -o wide  // find the NodePort
NAME                                   READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
pod/tale-deployment-59675cd9f7-wzlr8   1/1     Running   0          34m   10.244.1.5   node1   <none>           <none>

NAME                   TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE    SELECTOR
service/kubernetes     ClusterIP   10.1.0.1     <none>        443/TCP        17h    <none>
service/tale-service   NodePort    10.1.18.5    <none>        80:30912/TCP   129m   app=tale
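
With the NodePort known, the blog is reachable through any node IP, e.g.:

# 30912 is the NodePort from the output above
curl -I http://192.168.30.8:30912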

