1 Initialization
sed -i 's/enforcing/disabled/g' /etc/selinux/config
setenforce 0
sed -i 's/#UseDNS yes/UseDNS no/g' /etc/ssh/sshd_config
systemctl restart sshd
grep DNS /etc/ssh/sshd_config
grep SELINUX=disabled /etc/selinux/config
systemctl disable firewalld NetworkManager
systemctl stop firewalld NetworkManager
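Optional check that the changes took effect (standard SELinux/systemd tooling, not part of the original notes):
getenforce                                      # should print Permissive (Disabled after a reboot)
systemctl is-active firewalld NetworkManager    # both should report inactive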
2 Configure repos
cat > /etc/yum.repos.d/epel.repo << EOF
[epel]
name=epel
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/x86_64/
enabled=1
gpgcheck=0
EOF
cat > /etc/yum.repos.d/ceph.repo << EOF
[ceph]
name=ceph
baseurl=http://mirrors.163.com/ceph/rpm-luminous/el7/x86_64/
gpgcheck=0
[ceph-noarch]
name=ceph-noarch
baseurl=http://mirrors.163.com/ceph/rpm-luminous/el7/noarch/
gpgcheck=0
EOF
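After writing the repo files, refreshing the yum metadata makes sure the new repos are picked up (optional, assuming yum as used throughout these notes):
yum clean all
yum makecache
yum repolist | grep -E 'epel|ceph'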
3 Pre-install
hostnamectl set-hostname ceph-mon01
echo 192.168.3.16 ceph-mon01 >> /etc/hosts
yum -y install ceph-deploy-1.5.39
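ceph-deploy drives the node over SSH, so key-based login to ceph-mon01 (even from the node itself) avoids repeated password prompts; a minimal sketch, assuming root is used as elsewhere in these notes:
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa   # skip if a key already exists
ssh-copy-id root@ceph-mon01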
4 Create a block device
dd if=/dev/zero of=/ceph-mon01.img bs=1G count=5
losetup /dev/loop0 /ceph-mon01.img
losetup -l
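Note that losetup mappings do not survive a reboot; one way to recreate the mapping at boot (an assumption, not shown in the original notes) is via rc.local, and lsblk confirms the device:
lsblk /dev/loop0
echo 'losetup /dev/loop0 /ceph-mon01.img' >> /etc/rc.d/rc.local
chmod +x /etc/rc.d/rc.local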
5 Install
mkdir /etc/ceph
cd /etc/ceph
yum -y install ceph-12.2.13-0.el7 ceph-radosgw-12.2.13-0.el7
ceph-deploy install --no-adjust-repos ceph-mon01
ceph-deploy new --public-network 192.168.3.0/24 ceph-mon01
echo "osd crush chooseleaf type = 0" >> ceph.conf
echo "osd pool default size = 1" >> ceph.conf
echo "osd journal size = 100" >> ceph.conf
ceph-deploy mon create ceph-mon01
ceph-deploy gatherkeys ceph-mon01
ceph-deploy disk zap ceph-mon01:/dev/loop0
ceph-deploy osd create ceph-mon01:/dev/loop0
ceph-deploy mgr create ceph-mon01
ceph-deploy rgw create ceph-mon01
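Once the daemons are created, the cluster state can be checked (standard ceph CLI; compare with the ceph -s output further below):
ceph -s            # overall health, mon/mgr/osd/rgw summary
ceph osd tree      # the single loop-backed OSD should show as up/in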
6 Test object storage (RGW)
Create a user
radosgw-admin user create --uid="user1" --display-name="First User"
{
    "user_id": "user1",
    "display_name": "First User",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "auid": 0,
    "subusers": [],
    "keys": [
        {
            "user": "user1",
            "access_key": "XBFW5DI9682ZQBTZBPT5",
            "secret_key": "94QloxgYrWCvqJ52NGCCuXjqEBkavgUAIA01MoZF"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw"
}
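The keys can be re-fetched at any time with the standard radosgw-admin subcommand instead of copying them from the creation output:
radosgw-admin user info --uid=user1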
yum -y install s3cmd
s3cmd --configure
[root@ceph-mon01 ~]# s3cmd --configure
Enter new values or accept defaults in brackets with Enter.
Refer to user manual for detailed description of all options.
Access key and Secret key are your identifiers for Amazon S3. Leave them empty for using the env variables.
Access Key: XBFW5DI9682ZQBTZBPT5
Secret Key: 94QloxgYrWCvqJ52NGCCuXjqEBkavgUAIA01MoZF
Default Region [US]:
Use "s3.amazonaws.com" for S3 Endpoint and not modify it to the target Amazon S3.
S3 Endpoint [s3.amazonaws.com]: 192.168.3.16:7480
Use "%(bucket)s.s3.amazonaws.com" to the target Amazon S3. "%(bucket)s" and "%(location)s" vars can be used
if the target S3 system supports dns based buckets.
DNS-style bucket+hostname:port template for accessing a bucket [%(bucket)s.s3.amazonaws.com]: n
Encryption password is used to protect your files from reading
by unauthorized persons while in transfer to S3
Encryption password:
Path to GPG program [/usr/bin/gpg]:
When using secure HTTPS protocol all communication with Amazon S3
servers is protected from 3rd party eavesdropping. This method is
slower than plain HTTP, and can only be proxied with Python 2.7 or newer
Use HTTPS protocol [Yes]: no
On some networks all internet access must go through a HTTP proxy.
Try setting it here if you can't connect to S3 directly
HTTP Proxy server name:
New settings:
Access Key: XBFW5DI9682ZQBTZBPT5
Secret Key: 94QloxgYrWCvqJ52NGCCuXjqEBkavgUAIA01MoZF
Default Region: US
S3 Endpoint: 192.168.3.16:7480
DNS-style bucket+hostname:port template for accessing a bucket: n
Encryption password:
Path to GPG program: /usr/bin/gpg
Use HTTPS protocol: False
HTTP Proxy server name:
HTTP Proxy server port: 0
Test access with supplied credentials? [Y/n] y
Please wait, attempting to list all buckets...
Success. Your access key and secret key worked fine :-)
Now verifying that encryption works...
Not configured. Never mind.
Save settings? [y/N] y
Configuration saved to '/root/.s3cfg'
[root@ceph-mon01 ~]#
[root@ceph-mon01 ~]# s3cmd mb s3://bucket-1
Bucket 's3://bucket-1/' created
[root@ceph-mon01 ~]# s3cmd ls
2021-04-25 15:36 s3://bucket-1
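Beyond creating a bucket, a quick put/get round-trip exercises the object path; a small sketch using an arbitrary local file:
s3cmd put /etc/hosts s3://bucket-1/hosts        # upload
s3cmd ls s3://bucket-1                          # list objects
s3cmd get s3://bucket-1/hosts /tmp/hosts.copy   # download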
7 Test CephFS
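CephFS needs an MDS daemon; these notes do not show it being deployed, but the ceph mds stat output below implies one exists. With ceph-deploy (as used above) it would be something like:
ceph-deploy mds create ceph-mon01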
ceph osd pool create cephfs_data 64
ceph osd pool create cephfs_metadata 64
ceph fs new my-fs01 cephfs_metadata cephfs_data
ceph fs ls
name: my-fs01, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
mount -t ceph 192.168.3.16:6789:/ /mnt/ -o name=admin,secretfile=/root/admin.secret
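The mount references /root/admin.secret, which is not generated anywhere in these notes; one way to produce it from the admin keyring (assuming the default client.admin credentials) is:
ceph auth get-key client.admin > /root/admin.secret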
[root@ceph-mon01 ~]# ceph mds stat
my-fs01-0/0/1 up
[root@ceph-mon01 ~]# ceph -s
cluster:
id: ec68ac2d-0bd1-4454-b584-1a352f4f8e80
health: HEALTH_OK
services:
mon: 1 daemons, quorum ceph-mon01
mgr: ceph-mon01(active)
mds: my-fs01-0/0/1 up
osd: 1 osds: 1 up, 1 in
rgw: 1 daemon active
data:
pools: 7 pools, 168 pgs
objects: 193 objects, 1.87KiB
usage: 1.00GiB used, 3.90GiB / 4.90GiB avail
pgs: 168 active+clean
Redo (purge everything and start over)
ceph-deploy purge ceph-mon01
ceph-deploy purgedata ceph-mon01
ceph-deploy forgetkeys
rm -rf /etc/ceph/*
rm -rf /var/lib/ceph/*/*
rm -rf /var/log/ceph/*
rm -rf /var/run/ceph/*
umount /dev/loop1p1
losetup -d /dev/loop1
losetup -d /dev/loop0
Wipe the partition table on the device
dd if=/dev/zero of=/dev/loop1 bs=512K count=1
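An alternative way to clear filesystem and partition signatures, if wipefs is available (not used in the original notes):
wipefs -a /dev/loop1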
To be organized
Use one RBD image from multiple systems
50.255.0.24
rbd create --size 10240MB vms/test-img --id cinder
rbd map vms/test-img
rbd map vms/test-img --id cinder
rbd: sysfs write failed
RBD image feature set mismatch. You can disable features unsupported by the kernel with "rbd feature disable vms/test-img object-map fast-diff deep-flatten".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
rbd feature disable vms/test-img object-map fast-diff deep-flatten --id cinder
rbd map vms/test-img --id cinder
/dev/rbd0
mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0 isize=512 agcount=16, agsize=163840 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=2621440, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
mount /dev/rbd0 /opt/
[root@compute01-yan ~]# df -lh
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 100G 3.5G 97G 4% /
devtmpfs 3.9G 0 3.9G 0% /dev
tmpfs 3.9G 0 3.9G 0% /dev/shm
tmpfs 3.9G 8.8M 3.9G 1% /run
tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup
tmpfs 783M 0 783M 0% /run/user/0
/dev/rbd0 10G 33M 10G 1% /opt
[root@compute01-yan ~]#
50.255.0.25
[root@compute02-yan ~]# rbd map vms/test-img --id cinder
/dev/rbd0
[root@compute02-yan ~]# mount /dev/rbd0 /opt/
[root@compute02-yan ~]# df -lh
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 100G 2.0G 99G 2% /
devtmpfs 3.9G 0 3.9G 0% /dev
tmpfs 3.9G 0 3.9G 0% /dev/shm
tmpfs 3.9G 8.7M 3.9G 1% /run
tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup
tmpfs 783M 0 783M 0% /run/user/0
/dev/rbd0 10G 33M 10G 1% /opt
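To release the image on either host, unmount and unmap it (standard rbd CLI; the --id cinder credential mirrors the map commands above):
umount /opt
rbd unmap /dev/rbd0 --id cinder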