Getting Started with Ceph

http://docs.ceph.org.cn/start

192.168.86.11   dbahost1 (admin-node)
192.168.86.16   dbahost6 (mon.node1、osd.2、mds.node1、rgw)
192.168.86.17   dbahost7 (mon.node2、osd.0)
192.168.86.18   dbahost8 (mon.node3、osd.1)

# Install the Ceph deployment tool, ceph-deploy
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
echo deb http://download.eu.ceph.com/debian-luminous/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
sudo apt-get update && sudo apt-get install ceph-deploy


# Initialize the environment (create a cephadm deploy user on every node)
sudo useradd -d /home/cephadm -m cephadm
sudo passwd cephadm
--- password: cephadm

echo "cephadm ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephadm
sudo chmod 0440 /etc/sudoers.d/cephadm

On 192.168.86.11, set up passwordless SSH from the cephadm user to the other nodes:
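First generate a key pair (a minimal sketch; accept the default path and an empty passphrase, as the upstream quickstart does):

ssh-keygen

Then copy the public key to each node: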
ssh-copy-id cephadm@192.168.86.16 
ssh-copy-id cephadm@192.168.86.17 
ssh-copy-id cephadm@192.168.86.18


On 192.168.86.11, add all nodes to /etc/hosts:

sudo vi /etc/hosts

192.168.86.11   dbahost1
192.168.86.16   dbahost6
192.168.86.17   dbahost7
192.168.86.18   dbahost8

Also configure ~/.ssh/config so ceph-deploy logs in as cephadm:

Host dbahost6
   Hostname dbahost6
   User cephadm
Host dbahost7
   Hostname dbahost7
   User cephadm
Host dbahost8
   Hostname dbahost8
   User cephadm
    

ssh dbahost1 (run ceph-deploy from the admin node):
Create the working directory and bootstrap the initial monitor node:
mkdir my-cluster
cd my-cluster
ceph-deploy new dbahost6
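Since the cluster starts with only two OSDs, the upstream quickstart suggests lowering the default replica count in the ceph.conf that ceph-deploy new just generated (a sketch; tune to your own durability needs):

[global]
osd pool default size = 2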

Install Ceph on all nodes (admin, monitor, and the OSD nodes):
ceph-deploy install dbahost1 dbahost6 dbahost7 dbahost8

Create the initial monitor and gather the keys:
ceph-deploy mon create-initial

Prepare directories on the OSD data nodes:
ssh dbahost7
sudo mkdir /var/local/osd0
sudo chmod a+w /var/local/osd0
exit

ssh dbahost8
sudo mkdir /var/local/osd1
sudo chmod a+w /var/local/osd1
exit

ssh dbahost1:

Add these settings to ceph.conf (needed because these OSDs sit on an ext4 file system, which limits object and xattr name lengths):
sudo vim ceph.conf
osd_max_object_name_len = 256
osd_max_object_namespace_len = 64

ceph-deploy osd prepare dbahost7:/var/local/osd0 dbahost8:/var/local/osd1
ceph-deploy osd activate dbahost7:/var/local/osd0 dbahost8:/var/local/osd1


ssh dbahost1:
ceph-deploy --overwrite-conf admin dbahost1 dbahost6 dbahost7 dbahost8
sudo chmod +r /etc/ceph/ceph.client.admin.keyring
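
At this point the cluster should reach a healthy state; a quick check (HEALTH_OK is expected once both OSDs are up and in):

ceph health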

Expand the cluster with a third OSD data node:
ssh dbahost6
sudo mkdir /var/local/osd2
sudo chmod a+w /var/local/osd2


ssh dbahost1
ceph-deploy --overwrite-conf osd prepare dbahost6:/var/local/osd2
ceph-deploy osd activate dbahost6:/var/local/osd2


Deploy a CephFS metadata server (MDS):
ceph-deploy mds create dbahost6

Deploy the Ceph object gateway (radosgw):
ceph-deploy rgw create dbahost6

Add two more MONITORS:
ceph-deploy mon add dbahost7
ceph-deploy mon add dbahost8

sudo chmod -R a+r /etc/ceph
ceph quorum_status --format json-pretty


Handy status commands:
ceph -s
ceph -w
ceph osd tree
ceph osd lspools 
ceph fs ls

rbd ls
rbd showmapped
rbd map foo
rbd unmap foo


# CephFS (essentially a NAS, i.e. a network file system):
ceph osd pool create cephfs_data 50
ceph osd pool create cephfs_metadata 50
ceph fs new cephfs cephfs_metadata cephfs_data
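
Before mounting, it may be worth confirming that the MDS has gone active for the new file system (a sketch):

ceph mds stat
ceph fs ls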

On 192.168.86.16:
sudo mkdir -p /mnt/mycephfs1
sudo mount -t ceph 192.168.86.17:6789:/ /mnt/mycephfs1 -o name=admin,secret=AQDJGhRZAvkfMRAA2YzxketKJtF90jPh6InDDg==

On 192.168.86.18:
sudo mkdir -p /mnt/mycephfs2
sudo mount -t ceph 192.168.86.16:6789:/ /mnt/mycephfs2 -o name=admin,secret=AQDJGhRZAvkfMRAA2YzxketKJtF90jPh6InDDg==
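
Passing the key on the command line leaves it in shell history; the kernel client also accepts a secretfile option (a sketch, assuming the admin key has been saved to /etc/ceph/admin.secret):

sudo mount -t ceph 192.168.86.16:6789:/ /mnt/mycephfs2 -o name=admin,secretfile=/etc/ceph/admin.secret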

As a result, files written under /mnt/mycephfs1 on .16 are visible, readable, and writable under /mnt/mycephfs2 on .18, and vice versa: the two mounts share the same network volume.

# Ceph block devices (RBD):
Create a block device image:
rbd create foo --size 4096 -m 192.168.86.16 -k /etc/ceph/ceph.client.admin.keyring
rbd info foo
Disable the features the kernel rbd client does not support, or the map step will fail:
rbd feature disable foo exclusive-lock object-map fast-diff deep-flatten

Map the image to a block device:
cephadm@dbahost1:/etc/ceph$ sudo rbd map foo --name client.admin -m 192.168.86.16 -k /etc/ceph/ceph.client.admin.keyring
/dev/rbd0
cephadm@dbahost1:/etc/ceph$ 

cephadm@dbahost1:/etc/ceph$ ls -l /dev/rbd/rbd/foo
lrwxrwxrwx 1 root root 10 May 12 10:10 /dev/rbd/rbd/foo -> ../../rbd0
cephadm@dbahost1:/etc/ceph$ ls -l /dev/rbd0
brw-rw---- 1 root disk 251, 0 May 12 10:10 /dev/rbd0
cephadm@dbahost1:/etc/ceph$ 

Create a file system (ext4) on the block device:
cephadm@dbahost1:/etc/ceph$ sudo mkfs.ext4 -m0 /dev/rbd/rbd/foo
mke2fs 1.42.13 (17-May-2015)
Discarding device blocks: done                            
Creating filesystem with 1048576 4k blocks and 262144 inodes
Filesystem UUID: 5f2b8485-4477-4e6e-a049-88dd5655f04c
Superblock backups stored on blocks: 
        32768, 98304, 163840, 229376, 294912, 819200, 884736

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done 

cephadm@dbahost1:/etc/ceph$ 

Mount the file system:
sudo mkdir /mnt/ceph-block-device
sudo mount /dev/rbd/rbd/foo /mnt/ceph-block-device
cd /mnt/ceph-block-device
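
The mapping does not survive a reboot on its own; the rbdmap service shipped with ceph-common can restore it at boot (a sketch, assuming the stock /etc/ceph/rbdmap file and the admin keyring):

# /etc/ceph/rbdmap
rbd/foo id=admin,keyring=/etc/ceph/ceph.client.admin.keyring

sudo systemctl enable rbdmap.service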

If another node maps and mounts this same device, that node gets an "overlay" view of the volume: it can see the files the volume already had, but files newly created on either side are invisible to the other. This is because ext4 is not a cluster file system; each node caches independently, so concurrent writes from multiple nodes are unsafe.


# Ceph object storage
From the working directory on the admin node, install the Ceph object gateway package on the client node (here dbahost1 itself):
ssh dbahost1
cd my-cluster
ceph-deploy install --rgw dbahost1
ceph-deploy rgw create dbahost1

The gateway can then be reached at this URL:
http://192.168.86.11:7480
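
A quick smoke test (unauthenticated access should return an XML ListAllMyBucketsResult document for the anonymous user):

curl http://192.168.86.11:7480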

If the gateway ever needs a restart:
systemctl restart ceph-radosgw.service


Create a RADOSGW user for S3 access:
cephadm@dbahost1:~/my-cluster$  sudo radosgw-admin user create --uid="testuser" --display-name="First User"
{
    "user_id": "testuser",
    "display_name": "First User",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "auid": 0,
    "subusers": [],
    "keys": [
        {
            "user": "testuser",
            "access_key": "RBT0BVXHZ4OOFYSFKHG1",
            "secret_key": "
            "
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1
    },
    "temp_url_keys": []
}
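
The generated keys can be re-read at any time with a standard radosgw-admin call:

sudo radosgw-admin user info --uid=testuser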


Create a SWIFT subuser:
cephadm@dbahost1:~/my-cluster$ sudo radosgw-admin subuser create --uid=testuser --subuser=testuser:swift --access=full
{
    "user_id": "testuser",
    "display_name": "First User",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "auid": 0,
    "subusers": [
        {
            "id": "testuser:swift",
            "permissions": "full-control"
        }
    ],
    "keys": [
        {
            "user": "testuser",
            "access_key": "RBT0BVXHZ4OOFYSFKHG1",
            "secret_key": "XXe5Ex1AzbkErBcRWC9IeaiZXzlDuLOlvpefCNu0"
        }
    ],
    "swift_keys": [
        {
            "user": "testuser:swift",
            "secret_key": "dQ2mMgQkqmwAyAZjjcgquoGN9I27OXN5hkk9bHcd"
        }
    ],
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1
    },
    "temp_url_keys": []
}

cephadm@dbahost1:~/my-cluster$ 

Generate a new secret key for the subuser:
cephadm@dbahost1:~/my-cluster$ sudo radosgw-admin key create --subuser=testuser:swift --key-type=swift --gen-secret
{
    "user_id": "testuser",
    "display_name": "First User",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "auid": 0,
    "subusers": [
        {
            "id": "testuser:swift",
            "permissions": "full-control"
        }
    ],
    "keys": [
        {
            "user": "testuser",
            "access_key": "RBT0BVXHZ4OOFYSFKHG1",
            "secret_key": "XXe5Ex1AzbkErBcRWC9IeaiZXzlDuLOlvpefCNu0"
        }
    ],
    "swift_keys": [
        {
            "user": "testuser:swift",
            "secret_key": "w2TqR2vWNVCFUBapIOhAstxdoCW1lKkVt9Hyi7GE"
        }
    ],
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "max_size_kb": -1,
        "max_objects": -1
    },
    "temp_url_keys": []
}

cephadm@dbahost1:~/my-cluster$ 


# Test S3-style access:
s3test.py:
import boto
import boto.s3.connection

access_key = 'RBT0BVXHZ4OOFYSFKHG1'
secret_key = 'XXe5Ex1AzbkErBcRWC9IeaiZXzlDuLOlvpefCNu0'
conn = boto.connect_s3(
        aws_access_key_id = access_key,
        aws_secret_access_key = secret_key,
        host = '192.168.86.11', port = 7480,
        is_secure=False, calling_format = boto.s3.connection.OrdinaryCallingFormat(),
        )

bucket = conn.create_bucket('my-new-bucket')
for bucket in conn.get_all_buckets():
        print "{name} {created}".format(
                name = bucket.name,
                created = bucket.creation_date,
        )
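
To run it (a sketch; assumes the python-boto package is available):

sudo apt-get install python-boto
python s3test.py

The output should list my-new-bucket with its creation date.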

# Test SWIFT access
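
The swift CLI comes from python-swiftclient; one way to get it (a sketch via pip, as the upstream quickstart does):

sudo apt-get install python-pip
sudo pip install python-swiftclient

Then list the account's containers: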

cephadm@dbahost1:~$ swift -A http://192.168.86.11:7480/auth/1.0 -U testuser:swift -K 'w2TqR2vWNVCFUBapIOhAstxdoCW1lKkVt9Hyi7GE' list
my-new-bucket
cephadm@dbahost1:~$ 


# Tests combined with Docker:

1. With the CephFS file system (NFS-like, a network file system), volumes mounted into containers on different hosts share the same contents and can be read and written concurrently.

192.168.86.18:6789:/  86302720  48271360  38031360  56% /mnt/mycephfs11
root@dbahost1:/mnt/mycephfs11# docker run -it --rm -v /mnt/mycephfs11:/mnt/mycephfs11 ubuntu /bin/bash 

root@c0c8aa7425a2:/mnt/mycephfs11# touch container_11
root@c0c8aa7425a2:/mnt/mycephfs11# ls -l 
total 1
-rw-r--r-- 1 root root  0 May 12 07:17 container_11
-rw-r--r-- 1 root root  0 May 12 07:16 dbahost1
-rw-r--r-- 1 root root 15 May 11 10:33 dbahost6
-rw-r--r-- 1 root root  0 May 12 07:17 dbahost7
root@c0c8aa7425a2:/mnt/mycephfs11#
root@c0c8aa7425a2:/mnt/mycephfs11# ls -l 
total 1
-rw-r--r-- 1 root root  0 May 12 07:17 container_11
-rw-r--r-- 1 root root  0 May 12 07:18 container_17
-rw-r--r-- 1 root root  0 May 12 07:16 dbahost1
-rw-r--r-- 1 root root 15 May 11 10:33 dbahost6
-rw-r--r-- 1 root root  0 May 12 07:17 dbahost7
root@c0c8aa7425a2:/mnt/mycephfs11# 

192.168.86.18:6789:/  86302720 48275456  38027264  56% /mnt/mycephfs3
root@dbahost7:/mnt/mycephfs3# docker run -it --rm -v /mnt/mycephfs3:/mnt/mycephfs17 ubuntu /bin/bash   

root@9d60c1af183c:/# df -k 
Filesystem           1K-blocks     Used Available Use% Mounted on
none                  28768380 14895136  12388856  55% /
tmpfs                   499152        0    499152   0% /dev
tmpfs                   499152        0    499152   0% /sys/fs/cgroup
192.168.86.18:6789:/  86302720 48275456  38027264  56% /mnt/mycephfs17
/dev/sda1             28768380 14895136  12388856  55% /etc/hosts
shm                      65536        0     65536   0% /dev/shm
tmpfs                   499152        0    499152   0% /sys/firmware
root@9d60c1af183c:/# cd  /mnt/mycephfs17 
root@9d60c1af183c:/mnt/mycephfs17# ls
container_11  dbahost1  dbahost6  dbahost7
root@9d60c1af183c:/mnt/mycephfs17# touch container_17
root@9d60c1af183c:/mnt/mycephfs17# ls -l 
total 1
-rw-r--r-- 1 root root  0 May 12 07:17 container_11
-rw-r--r-- 1 root root  0 May 12 07:18 container_17
-rw-r--r-- 1 root root  0 May 12 07:16 dbahost1
-rw-r--r-- 1 root root 15 May 11 10:33 dbahost6
-rw-r--r-- 1 root root  0 May 12 07:17 dbahost7
root@9d60c1af183c:/mnt/mycephfs17# 

2. With a Ceph block device, volumes mounted into containers on different hosts are not shared live; each mount is an "overlay" view of the device. It can still be used to hand data between containers, because the most recently mounted volume contains the latest contents of the rbd device (see the handoff sketch after the example below).

http://ceph-users.ceph.narkive.com/LNCcfd0m/ceph-same-rbd-on-multiple-client

Example
create rbd image named foo
map foo to /dev/rbd0 on server A, mount /dev/rbd0 to /mnt
map foo to /dev/rbd0 on server B, mount /dev/rbd0 to /mnt
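
Concretely, a safe handoff unmounts and unmaps on server A before server B mounts, so all ext4 caches are flushed back to the cluster (a sketch using the names from this test):

# on server A
sudo umount /mnt/ceph-block-device-17
sudo rbd unmap /dev/rbd0
# on server B
sudo rbd map foo --name client.admin
sudo mount /dev/rbd/rbd/foo /mnt/ceph-block-device-11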


root@dbahost7:~# ls -l /dev/rbd/rbd/foo
lrwxrwxrwx 1 root root 10 May 12 10:20 /dev/rbd/rbd/foo -> ../../rbd0
root@dbahost7:~# 
root@dbahost7:~# 
root@dbahost7:~# mount /dev/rbd/rbd/foo /mnt/ceph-block-device-17
root@dbahost7:~# 
root@dbahost7:~# docker run -it --rm -v /mnt/ceph-block-device-17:/mnt/block-device17 ubuntu /bin/bash
root@e8c518538ff5:/# 
root@e8c518538ff5:/# df -k 
Filesystem     1K-blocks     Used Available Use% Mounted on
none            28768380 14896260  12387732  55% /
tmpfs             499152        0    499152   0% /dev
tmpfs             499152        0    499152   0% /sys/fs/cgroup
/dev/rbd0        3997376     8188   3972804   1% /mnt/block-device17
/dev/sda1       28768380 14896260  12387732  55% /etc/hosts
shm                65536        0     65536   0% /dev/shm
tmpfs             499152        0    499152   0% /sys/firmware
root@e8c518538ff5:/# cd /mnt/block-device17 
root@e8c518538ff5:/mnt/block-device17# echo "this is 17" > file17
root@e8c518538ff5:/mnt/block-device17# more file17
this is 17
root@e8c518538ff5:/mnt/block-device17# 


root@dbahost1:/mnt# ls -l /dev/rbd/rbd/foo
ls: cannot access '/dev/rbd/rbd/foo': No such file or directory
root@dbahost1:/mnt# 
root@dbahost1:/mnt# su - cephadm
cephadm@dbahost1:~$ sudo rbd map foo --name client.admin -m 192.168.86.16 -k /etc/ceph/ceph.client.admin.keyring
/dev/rbd0
cephadm@dbahost1:~$ ls -l /dev/rbd/rbd/foo 
lrwxrwxrwx 1 root root 10 May 12 15:58 /dev/rbd/rbd/foo -> ../../rbd0
cephadm@dbahost1:~$ sudo mount /dev/rbd/rbd/foo /mnt/ceph-block-device-11
cephadm@dbahost1:~$ df -k |grep device
/dev/rbd0              3997376      8188   3972804   1% /mnt/ceph-block-device-11
cephadm@dbahost1:~$ exit
logout
root@dbahost1:/mnt# docker run -it --rm -v /mnt/ceph-block-device-11:/mnt/block-device11 ubuntu /bin/bash
root@7f1d457f5ebd:/# df -k 
Filesystem     1K-blocks     Used Available Use% Mounted on
none            28768380 15316948  11967044  57% /
tmpfs            1015248        0   1015248   0% /dev
tmpfs            1015248        0   1015248   0% /sys/fs/cgroup
/dev/rbd0        3997376     8188   3972804   1% /mnt/block-device11
/dev/sda1       28768380 15316948  11967044  57% /etc/hosts
shm                65536        0     65536   0% /dev/shm
tmpfs            1015248        0   1015248   0% /sys/firmware
root@7f1d457f5ebd:/# cd /mnt/block-device11/
root@7f1d457f5ebd:/mnt/block-device11# ls
file17  lost+found
root@7f1d457f5ebd:/mnt/block-device11# more file17 
this is 17
root@7f1d457f5ebd:/mnt/block-device11# echo "this is 11" > file11
root@7f1d457f5ebd:/mnt/block-device11# ls -l
total 24
-rw-r--r-- 1 root root    11 May 12 07:59 file11
-rw-r--r-- 1 root root    11 May 12 07:57 file17
drwx------ 2 root root 16384 May 12 02:12 lost+found
root@7f1d457f5ebd:/mnt/block-device11# more file11
this is 11
root@7f1d457f5ebd:/mnt/block-device11# 


root@e8c518538ff5:/mnt/block-device17# ls -tlr 
total 20
drwx------ 2 root root 16384 May 12 02:12 lost+found
-rw-r--r-- 1 root root    11 May 12 07:57 file17
root@e8c518538ff5:/mnt/block-device17# 

Reposted from: https://my.oschina.net/rocky0202/blog/898319
