Configuring Ceph Storage


CentOS 7.6 host preparation (disable SELinux, stop the firewall)

Hostname    IP address (internal)    IP address (external)
ceph01      192.168.88.11            172.18.127.11
ceph02      192.168.88.12            172.18.127.12
ceph03      192.168.88.13            172.18.127.13
web2        192.168.88.20            172.18.127.20
[root@ceph01 ~]# ifconfig |grep inet | sed -n '1p;3p'|awk '{print $2}'
172.18.127.11
192.168.88.11
[root@ceph02 ~]#  ifconfig |grep inet | sed -n '1p;3p'|awk '{print $2}'
172.18.127.12
192.168.88.12
[root@ceph03 ~]#  ifconfig |grep inet | sed -n '1p;3p'|awk '{print $2}'
172.18.127.13
192.168.88.13
[root@web2 ~]# ifconfig |grep inet | sed -n '1p;3p'|awk '{print $2}'
172.18.127.20
192.168.88.20
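
The SELinux and firewall changes called out above are not shown in the transcript; a minimal sketch of those preparation steps, to be run on every node, looks like this:

# Run on ceph01, ceph02, ceph03, and web2
setenforce 0                                                          # disable SELinux immediately
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config   # persist across reboots
systemctl disable firewalld --now                                     # stop and disable the firewall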

Each node has one 40 GB SCSI system disk, one 40 GB SCSI disk used as the Ceph data disk, and one 10 GB NVMe disk used as the Ceph journal disk.

[root@ceph03 ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0   40G  0 disk 
├─sda1            8:1    0    1G  0 part /boot
└─sda2            8:2    0   39G  0 part 
  ├─centos-root 253:0    0 35.1G  0 lvm  /
  └─centos-swap 253:1    0  3.9G  0 lvm  [SWAP]
sdb               8:16   0   40G  0 disk 
sr0              11:0    1  4.3G  0 rom  /mnt
nvme0n1         259:0    0   10G  0 disk 

Passwordless SSH login

[root@web2 ~]# ssh-keygen
[root@web2 ~]# ssh-copy-id root@192.168.88.11
[root@web2 ~]# ssh-copy-id root@192.168.88.12
[root@web2 ~]# ssh-copy-id root@192.168.88.13
[root@web2 ~]# ssh-copy-id root@192.168.88.20
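
Before continuing, it is worth confirming that key-based login actually works; a quick check (a sketch, looping over the node IPs) is:

[root@web2 ~]# for i in 192.168.88.{11..13} 192.168.88.20; do ssh root@$i hostname; done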

Set hostnames

[root@ceph01 ~]# hostnamectl set-hostname ceph01.localdomain
[root@ceph01 ~]# bash
[root@ceph02 ~]#  hostnamectl set-hostname ceph02.localdomain
[root@ceph02 ~]# bash
[root@ceph03 ~]# hostnamectl set-hostname ceph03.localdomain
[root@ceph03 ~]# bash
[root@web2 ~]# hostnamectl set-hostname web2.localdomain
[root@web2 ~]# bash
[root@web2 ~]# vim /etc/hosts
[root@web2 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.88.11 ceph01 ceph01.localdomain
192.168.88.20 web2 web2.localdomain 
192.168.88.12 ceph02 ceph02.localdomain
192.168.88.13 ceph03 ceph03.localdomain
[root@web2 ~]# scp /etc/hosts 192.168.88.11:/etc/hosts
[root@web2 ~]# scp /etc/hosts 192.168.88.12:/etc/hosts
[root@web2 ~]# scp /etc/hosts 192.168.88.13:/etc/hosts

Configure NTP time synchronization

[root@web2 ~]# mount /dev/cdrom /mnt/centos/
mount: /dev/sr0 is write-protected, mounting read-only
[root@web2 ~]# cat /etc/yum.repos.d/local.repo 
[Centos]
name=centos
gpgcheck=0
enabled=1
baseurl=file:///mnt/centos
[root@web2 ~]# yum install -y ntp
[root@ceph01 ~]#  yum install -y ntp
[root@ceph02 ~]#  yum install -y ntp
[root@ceph03 ~]#  yum install -y ntp
# The web2 node serves as the time server for the other three nodes
[root@web2 ~]# systemctl start ntpd
[root@web2 ~]# ntpq -pn
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
 162.159.200.123 10.12.3.190      3 u    -   64    1  302.189  -52.158  41.206
 202.112.29.82   .BDS.            1 u    1   64    1   51.094  -16.313  12.004
 78.46.102.180   131.188.3.221    2 u    2   64    1  288.068  -72.716   0.000
 185.209.85.222  89.109.251.24    2 u    1   64    1  150.161   11.939   0.000
# On ceph01, ceph02, and ceph03, point ntpd at web2:
[root@ceph01 ~]# vim /etc/ntp.conf
server web2 iburst
[root@ceph02 ~]# vim /etc/ntp.conf
server web2 iburst
[root@ceph03 ~]# vim /etc/ntp.conf
server web2 iburst
[root@ceph01 ~]# systemctl enable ntpd --now
[root@ceph02 ~]# systemctl enable ntpd --now
[root@ceph03 ~]# systemctl enable ntpd --now
[root@ceph01 ~]# ntpq -pn
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*192.168.88.20   202.112.29.82    2 u   30   64    1    0.572    4.934   0.
[root@ceph02 ~]# ntpq -pn
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*192.168.88.20   202.112.29.82    2 u   30   64    1    0.572    4.934   0.
[root@ceph03 ~]# ntpq -pn
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*192.168.88.20   202.112.29.82    2 u   30   64    1    0.572    4.934   0.
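
Note that web2's own /etc/ntp.conf is not shown above; the stock CentOS configuration already serves time to clients. If its restrict rules had been tightened, a line like the following sketch would re-allow the internal subnet to synchronize:

restrict 192.168.88.0 mask 255.255.255.0 nomodify notrap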

Configure yum repositories

# Required on all four nodes
[root@ceph03 ~]# yum -y install wget
[root@ceph03 ~]# wget -O /etc/yum.repos.d/Centos-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@ceph03 ~]# wget -O /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
[root@ceph03 ~]# vim /etc/yum.repos.d/ceph.repo
[ceph_noarch]
name=noarch
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
enabled=1
gpgcheck=0
[ceph_x86_64]
name=x86_64
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
enabled=1
gpgcheck=0
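These repositories are needed on all four nodes; rather than repeating the wget and vim steps everywhere, the files can be pushed from web2 (a sketch, assuming web2 already has the same files):

[root@web2 ~]# for i in 192.168.88.{11..13}; do scp /etc/yum.repos.d/{Centos-Base,epel,ceph}.repo root@$i:/etc/yum.repos.d/; done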
[root@web2 ~]# yum clean all && yum makecache

Install Ceph-related packages

Install the Ceph deployment tool on the deployment node (web2)

[root@web2 ~]# yum install python-setuptools -y
[root@web2 ~]# yum install ceph-deploy -y
[root@web2 ~]# ceph-deploy --version
2.0.1

Install the required packages on the Ceph nodes

[root@web2 ~]# for i in 192.168.88.{11..13}; do ssh root@$i 'yum install -y ceph-mon ceph-osd ceph-mds ceph-radosgw ceph-mgr'; done

Deploy the monitor

ceph01 serves as the monitor node. On the deployment node web2, create a working directory and run all subsequent ceph-deploy commands from it; the generated configuration files are saved there.

[root@web2 ~]# mkdir my-cluster
[root@web2 ~]# cd my-cluster/
[root@web2 my-cluster]# ceph-deploy new --public-network 172.18.127.0/24 --cluster-network 192.168.88.0/24 ceph01
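
ceph-deploy new writes a ceph.conf into the working directory; with the flags above it should contain roughly the following (a sketch, with the generated fsid elided):

[global]
fsid = <generated cluster id>
mon_initial_members = ceph01
mon_host = 172.18.127.11
public_network = 172.18.127.0/24
cluster_network = 192.168.88.0/24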

Initialize the monitor

[root@web2 my-cluster]# ceph-deploy mon create-initial

Copy the configuration file to the corresponding nodes

[root@web2 my-cluster]# ceph-deploy admin ceph01 ceph02 ceph03

To deploy highly available monitors, add ceph02 and ceph03 to the mon cluster as well

[root@web2 my-cluster]# ceph-deploy mon add ceph02
[root@web2 my-cluster]# ceph-deploy mon add ceph03

Check the cluster status

[root@ceph02 ~]# ceph -s
  cluster:
    id:     84f1a8d1-21b8-487b-8b20-e1c979082a75
    health: HEALTH_WARN
            mons are allowing insecure global_id reclaim
 
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 5s)
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
# The warning above is about insecure global_id reclaim; disable it:
[root@ceph02 ~]# ceph config set mon auth_allow_insecure_global_id_reclaim false
[root@ceph02 ~]# ceph -s
  cluster:
    id:     84f1a8d1-21b8-487b-8b20-e1c979082a75
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 3m)
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     

Deploy the mgr

ceph01 serves as the mgr node; run this on the deployment node web2

[root@web2 my-cluster]# ceph-deploy mgr create ceph01

To deploy highly available mgrs, add ceph02 and ceph03 as well

[root@web2 my-cluster]# ceph-deploy mgr create ceph02 ceph03

Check the Ceph status

[root@ceph01 ~]# ceph -s
  cluster:
    id:     84f1a8d1-21b8-487b-8b20-e1c979082a75
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 6m)
    mgr: ceph01(active, since 62s), standbys: ceph03, ceph02
    osd: 0 osds: 0 up, 0 in
 
  task status:
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     

Deploy the OSDs

OSD plan:

Filestore is used as the storage engine. On each node, /dev/sdb serves as the data disk and /dev/nvme0n1 as the journal disk. First confirm the disk layout on each node, then run the following on the deployment node web2:

Confirm the disks on each node

[root@web2 my-cluster]# ceph-deploy disk list ceph01 ceph02 ceph03

Wipe existing data and file systems from the disks on ceph01, ceph02, and ceph03

[root@web2 my-cluster]# ceph-deploy disk zap ceph01 /dev/nvme0n1
[root@web2 my-cluster]# ceph-deploy disk zap ceph02 /dev/nvme0n1
[root@web2 my-cluster]# ceph-deploy disk zap ceph03 /dev/nvme0n1
[root@web2 my-cluster]# ceph-deploy disk zap ceph01 /dev/sdb
[root@web2 my-cluster]# ceph-deploy disk zap ceph02 /dev/sdb
[root@web2 my-cluster]# ceph-deploy disk zap ceph03 /dev/sdb

Add the OSDs

[root@web2 my-cluster]# ceph-deploy osd create --data /dev/sdb --journal /dev/nvme0n1 --filestore ceph01
[root@web2 my-cluster]# ceph-deploy osd create --data /dev/sdb --journal /dev/nvme0n1 --filestore ceph02
[root@web2 my-cluster]# ceph-deploy osd create --data /dev/sdb --journal /dev/nvme0n1 --filestore ceph03
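
To confirm that each OSD really placed its journal on the NVMe device, ceph-volume can list the devices behind every OSD on a node (a verification sketch):

[root@ceph01 ~]# ceph-volume lvm list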

Check OSD status

[root@ceph01 ~]# ceph osd status
+----+--------------------+-------+-------+--------+---------+--------+---------+-----------+
| id |        host        |  used | avail | wr ops | wr data | rd ops | rd data |   state   |
+----+--------------------+-------+-------+--------+---------+--------+---------+-----------+
| 0  | ceph01.localdomain |  107M | 39.8G |    0   |     0   |    0   |     0   | exists,up |
| 1  | ceph02.localdomain |  107M | 39.8G |    0   |     0   |    0   |     0   | exists,up |
| 2  | ceph03.localdomain |  107M | 39.8G |    0   |     0   |    0   |     0   | exists,up |
+----+--------------------+-------+-------+--------+---------+--------+---------+-----------+

Manage Ceph services with systemd

#List all Ceph services
[root@ceph01 ~]# systemctl status ceph\*.service ceph\*.target
#Start the daemons of all services
[root@ceph01 ~]# systemctl start ceph.target
#Stop the daemons of all services
[root@ceph01 ~]# systemctl stop ceph.target
#Start all daemons of a given service type
[root@ceph01 ~]# systemctl start ceph-osd.target
[root@ceph01 ~]# systemctl start ceph-mon.target
[root@ceph01 ~]# systemctl start ceph-mds.target
#Stop all daemons of a given service type
[root@ceph01 ~]# systemctl stop ceph-osd.target
[root@ceph01 ~]# systemctl stop ceph-mon.target
[root@ceph01 ~]# systemctl stop ceph-mds.target
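
Individual daemons can also be addressed by instance rather than by type; the unit name embeds the daemon id, for example (a sketch):

[root@ceph01 ~]# systemctl restart ceph-mon@ceph01.service
[root@ceph01 ~]# systemctl restart ceph-osd@0.service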

Pool management

List existing pools

[root@ceph01 ~]# ceph osd lspools
[root@ceph01 ~]# ceph osd pool ls

Create a pool

[root@ceph01 ~]# ceph osd pool create test 32 32
pool 'test' created

Rename a pool

[root@ceph01 ~]# ceph osd pool rename test ceph
pool 'test' renamed to 'ceph'
[root@ceph01 ~]# ceph osd pool ls
ceph

View pool attributes

#View the object replica count
[root@ceph01 ~]# ceph osd pool get ceph size
size: 3
#View the number of placement groups (PGs)
[root@ceph01 ~]# ceph osd pool get ceph pg_num
pg_num: 32
#View the number of PGPs, normally less than or equal to pg_num
[root@ceph01 ~]# ceph osd pool get ceph pgp_num
pgp_num: 32
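
Each of these attributes has a matching set command; for example (a sketch with illustrative values):

[root@ceph01 ~]# ceph osd pool set ceph size 2
[root@ceph01 ~]# ceph osd pool set ceph pg_num 64
[root@ceph01 ~]# ceph osd pool set ceph pgp_num 64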

Delete a pool

#Delete the pool
[root@ceph01 ~]# ceph osd pool rm ceph
Error EPERM: WARNING: this will *PERMANENTLY DESTROY* all data stored in pool ceph.  If you are *ABSOLUTELY CERTAIN* that is what you want, pass the pool name *twice*, followed by --yes-i-really-really-mean-it.
#The first attempt fails: you must pass the pool name twice, followed by --yes-i-really-really-mean-it
[root@ceph01 ~]# ceph osd pool rm ceph ceph  --yes-i-really-really-mean-it
Error EPERM: pool deletion is disabled; you must first set the mon_allow_pool_delete config option to true before you can destroy a pool
#It still fails; a config option must be added to the configuration file first
[root@web2 my-cluster]# vim ceph.conf 
[mon]
mon allow pool delete = true
#Push the config file to the other nodes; ceph01-03 already have one, so --overwrite-conf is required to overwrite it
[root@web2 my-cluster]# ceph-deploy --overwrite-conf config push ceph01 ceph02 ceph03
[root@ceph01 ~]# systemctl restart ceph-mon.target
[root@ceph02 ~]# systemctl restart ceph-mon.target
[root@ceph03 ~]# systemctl restart ceph-mon.target
[root@ceph01 ~]# ceph osd pool rm ceph ceph --yes-i-really-really-mean-it
pool 'ceph' removed
The pool was deleted successfully.

Status checks

Check the cluster status

[root@ceph01 ~]# ceph -s
  cluster:
    id:     84f1a8d1-21b8-487b-8b20-e1c979082a75
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 90s)
    mgr: ceph01(active, since 17m), standbys: ceph03, ceph02
    osd: 3 osds: 3 up (since 16m), 3 in (since 24m)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   322 MiB used, 120 GiB / 120 GiB avail
    pgs:     
[root@ceph01 ~]# ceph health
HEALTH_OK
#More detailed view
[root@ceph01 ~]# ceph health detail
HEALTH_OK

Check OSD status

[root@ceph01 ~]# ceph osd status
+----+--------------------+-------+-------+--------+---------+--------+---------+-----------+
| id |        host        |  used | avail | wr ops | wr data | rd ops | rd data |   state   |
+----+--------------------+-------+-------+--------+---------+--------+---------+-----------+
| 0  | ceph01.localdomain |  107M | 39.8G |    0   |     0   |    0   |     0   | exists,up |
| 1  | ceph02.localdomain |  107M | 39.8G |    0   |     0   |    0   |     0   | exists,up |
| 2  | ceph03.localdomain |  107M | 39.8G |    0   |     0   |    0   |     0   | exists,up |
+----+--------------------+-------+-------+--------+---------+--------+---------+-----------+
[root@ceph01 ~]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF 
-1       0.11696 root default                            
-3       0.03899     host ceph01                         
 0   hdd 0.03899         osd.0       up  1.00000 1.00000 
-5       0.03899     host ceph02                         
 1   hdd 0.03899         osd.1       up  1.00000 1.00000 
-7       0.03899     host ceph03                         
 2   hdd 0.03899         osd.2       up  1.00000 1.00000

Check mon status

[root@ceph01 ~]# ceph mon stat
e3: 3 mons at {ceph01=[v2:172.18.127.11:3300/0,v1:172.18.127.11:6789/0],ceph02=[v2:172.18.127.12:3300/0,v1:172.18.127.12:6789/0],ceph03=[v2:172.18.127.13:3300/0,v1:172.18.127.13:6789/0]}, election epoch 40, leader 0 ceph01, quorum 0,1,2 ceph01,ceph02,ceph03
[root@ceph01 ~]# ceph quorum_status
{"election_epoch":40,"quorum":[0,1,2],"quorum_names":["ceph01","ceph02","ceph03"],"quorum_leader_name":"ceph01","quorum_age":185,"monmap":{"epoch":3,"fsid":"84f1a8d1-21b8-487b-8b20-e1c979082a75","modified":"2024-03-13 23:26:55.853908","created":"2024-03-13 23:22:18.695928","min_mon_release":14,"min_mon_release_name":"nautilus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus"],"optional":[]},"mons":[{"rank":0,"name":"ceph01","public_addrs":{"addrvec":[{"type":"v2","addr":"172.18.127.11:3300","nonce":0},{"type":"v1","addr":"172.18.127.11:6789","nonce":0}]},"addr":"172.18.127.11:6789/0","public_addr":"172.18.127.11:6789/0"},{"rank":1,"name":"ceph02","public_addrs":{"addrvec":[{"type":"v2","addr":"172.18.127.12:3300","nonce":0},{"type":"v1","addr":"172.18.127.12:6789","nonce":0}]},"addr":"172.18.127.12:6789/0","public_addr":"172.18.127.12:6789/0"},{"rank":2,"name":"ceph03","public_addrs":{"addrvec":[{"type":"v2","addr":"172.18.127.13:3300","nonce":0},{"type":"v1","addr":"172.18.127.13:6789","nonce":0}]},"addr":"172.18.127.13:6789/0","public_addr":"172.18.127.13:6789/0"}]}}

Assign a Ceph application type to a pool

ceph osd pool application enable <pool> <app>
# Note: valid values for <app> are cephfs, rbd, and rgw. If no application type is
# explicitly set, the cluster reports HEALTH_WARN; inspect it with ceph health detail.
[root@ceph01 ~]# ceph osd pool create ceph 16 16
pool 'ceph' created
[root@ceph01 ~]# ceph osd pool application enable ceph cephfs
enabled application 'cephfs' on pool 'ceph'

Pool quota management

#Quota by object count
[root@ceph01 ~]# ceph osd pool set-quota ceph max_objects 10000
set-quota max_objects = 10000 for pool ceph
#Quota by capacity (in bytes)
[root@ceph01 ~]# ceph osd pool set-quota ceph max_bytes 1048576000
set-quota max_bytes = 1048576000 for pool ceph
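
Current quotas can be inspected with get-quota, and setting a quota back to 0 removes it (a sketch):

[root@ceph01 ~]# ceph osd pool get-quota ceph
[root@ceph01 ~]# ceph osd pool set-quota ceph max_objects 0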

Pool object access

Upload an object to a pool

[root@ceph01 ~]# echo "test111">test.txt
[root@ceph01 ~]# rados -p ceph put test ./test.txt

List the objects in a pool

[root@ceph01 ~]# rados -p ceph ls
test

Download an object from a pool

[root@ceph02 ~]# rados -p ceph get test test.txt.tmp
[root@ceph02 ~]# ls
anaconda-ks.cfg  test.txt.tmp

Delete an object from a pool

[root@ceph02 ~]# rados -p ceph rm test
[root@ceph02 ~]# rados -p ceph ls
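
Object metadata such as size and modification time can also be checked without downloading, as long as the object still exists (a sketch):

[root@ceph01 ~]# rados -p ceph stat test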

Configure CephFS

Install and enable the MDS

[root@web2 my-cluster]# ceph-deploy mds create ceph01 ceph02 ceph03
[root@ceph01 ~]#  systemctl status ceph-mds*
● ceph-mds@ceph01.service - Ceph metadata server daemon
   Loaded: loaded (/usr/lib/systemd/system/ceph-mds@.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2024-03-14 00:26:37 CST; 20s ago
 Main PID: 32072 (ceph-mds)
   CGroup: /system.slice/system-ceph\x2dmds.slice/ceph-mds@ceph01.service
           └─32072 /usr/bin/ceph-mds -f --cluster ceph --id ceph01 --setuser ceph --setgroup ceph

Mar 14 00:26:37 ceph01.localdomain systemd[1]: Started Ceph metadata server daemon.
Mar 14 00:26:37 ceph01.localdomain systemd[1]: [/usr/lib/systemd/system/ceph-mds@.service:15] Unkno...ce'
Mar 14 00:26:37 ceph01.localdomain systemd[1]: [/usr/lib/systemd/system/ceph-mds@.service:16] Unkno...ce'
Mar 14 00:26:37 ceph01.localdomain systemd[1]: [/usr/lib/systemd/system/ceph-mds@.service:19] Unkno...ce'
Mar 14 00:26:37 ceph01.localdomain systemd[1]: [/usr/lib/systemd/system/ceph-mds@.service:21] Unkno...ce'
Mar 14 00:26:37 ceph01.localdomain systemd[1]: [/usr/lib/systemd/system/ceph-mds@.service:22] Unkno...ce'
Mar 14 00:26:37 ceph01.localdomain ceph-mds[32072]: starting mds.ceph01 at
Hint: Some lines were ellipsized, use -l to show in full.

Create the pools

#Create a pool named data1 to hold file data
[root@ceph02 ~]# ceph osd pool create data1 16
pool 'data1' created
#Create a pool named metadata1 to hold metadata
[root@ceph02 ~]# ceph osd pool create metadata1 16
pool 'metadata1' created
#Create a CephFS named myfs1, storing data in data1 and metadata in metadata1
[root@ceph02 ~]# ceph fs new myfs1 metadata1 data1
new fs with metadata pool 4 and data pool 3
[root@ceph02 ~]# ceph df
RAW STORAGE:
    CLASS     SIZE        AVAIL       USED        RAW USED     %RAW USED 
    hdd       120 GiB     120 GiB     323 MiB      323 MiB          0.26 
    TOTAL     120 GiB     120 GiB     323 MiB      323 MiB          0.26 
 
POOLS:
    POOL          ID     PGS     STORED      OBJECTS     USED        %USED     MAX AVAIL 
    data1          3      16         0 B           0         0 B         0        38 GiB 
    metadata1      4      16     2.2 KiB          22     2.2 KiB   40        38 GiB
# View the created file system
[root@ceph02 ~]# ceph fs ls
name: myfs1, metadata pool: metadata1, data pools: [data1 ]
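
The MDS state can be confirmed as well; once the file system exists, one MDS should report as active (a sketch):

[root@ceph02 ~]# ceph mds stat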

Mount CephFS

#View the user name and key used to connect to Ceph
[root@ceph02 ~]# cat /etc/ceph/ceph.client.admin.keyring 
[client.admin]
	key = AQCqxPFl8v/aORAArTo47oVAzrLSMkGEUXqJDQ==
	caps mds = "allow *"
	caps mgr = "allow *"
	caps mon = "allow *"
	caps osd = "allow *"
[root@web2 my-cluster]# mkdir /ceph
[root@web2 my-cluster]# vim /etc/fstab
172.18.127.11:6789,172.18.127.12:6789,172.18.127.13:6789:/ /ceph  ceph   _netdev,name=admin,secret=AQCqxPFl8v/aORAArTo47oVAzrLSMkGEUXqJDQ==  0  0
[root@web2 my-cluster]# mount -a
[root@web2 my-cluster]# df -Th
Filesystem                                                 Type      Size  Used Avail Use% Mounted on
/dev/mapper/centos-root                                    xfs        36G  2.2G   33G    7% /
devtmpfs                                                   devtmpfs  475M     0  475M    0% /dev
tmpfs                                                      tmpfs     487M     0  487M    0% /dev/shm
tmpfs                                                      tmpfs     487M  7.7M  479M    2% /run
tmpfs                                                      tmpfs     487M     0  487M    0% /sys/fs/cgroup
/dev/sda1                                                  xfs      1014M  146M  869M   15% /boot
tmpfs                                                      tmpfs      98M     0   98M    0% /run/user/0
/dev/sr0                                                   iso9660   4.3G  4.3G     0  100% /mnt
172.18.127.11:6789,172.18.127.12:6789,172.18.127.13:6789:/ ceph       38G     0   38G    0% /ceph
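
Embedding the key directly in /etc/fstab leaves it readable to every local user; mount.ceph also accepts a secretfile option, so a variant (a sketch, with an assumed path) is:

[root@web2 ~]# echo "AQCqxPFl8v/aORAArTo47oVAzrLSMkGEUXqJDQ==" > /etc/ceph/admin.secret
[root@web2 ~]# chmod 600 /etc/ceph/admin.secret
# /etc/fstab entry:
172.18.127.11:6789,172.18.127.12:6789,172.18.127.13:6789:/ /ceph  ceph   _netdev,name=admin,secretfile=/etc/ceph/admin.secret  0  0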
