一、集群
1、启动一个ceph进程
启动mon进程
[root@ceph-adm ~]# service ceph start mon.ceph-mon1
启动mds进程
[root@ceph-adm ~]# service ceph start mds.ceph-mds1
启动osd进程
[root@ceph-adm ~]# service ceph start osd.0
2、查看机器的监控状态
[root@ceph-adm ~]# ceph health
HEALTH_OK
3、查看ceph的实时运行状态
[root@ceph-adm ~]# ceph -w
cluster fdbb34ad-765a-420b-89e8-443aba4254dd
health HEALTH_WARN
clock skew detected on mon.ceph-mon3
84 pgs stale
84 pgs stuck stale
5 requests are blocked > 32 sec
mds cluster is degraded
Monitor clock skew detected
monmap e1: 3 mons at {ceph-mon1=192.168.203.72:6789/0,ceph-mon2=192.168.203.73:6789/0,ceph-mon3=192.168.203.74:6789/0}
election epoch 22, quorum 0,1,2 ceph-mon1,ceph-mon2,ceph-mon3
mdsmap e16: 1/1/1 up {0=ceph-mon2=up:replay}, 2 up:standby
osdmap e56: 2 osds: 2 up, 2 in
pgmap v137: 596 pgs, 7 pools, 10442 bytes data, 63 objects
77032 kB used, 30622 MB / 30697 MB avail
512 active+clean
84 stale+active+clean
2016-08-28 20:42:51.043538 osd.0 [WRN] slow request 120.842103 seconds old, received at 2016-08-28 20:40:50.201254: osd_op(mds.0.3:4 400.00000000 [read 0~0] 2.64e96f8f ack+read+known_if_redirected e49) currently no flag points reached
4、检查集群状态信息
[root@ceph-adm ~]# ceph -s
cluster fdbb34ad-765a-420b-89e8-443aba4254dd
health HEALTH_WARN
clock skew detected on mon.ceph-mon3
84 pgs stale
84 pgs stuck stale
5 requests are blocked > 32 sec
mds cluster is degraded
Monitor clock skew detected
monmap e1: 3 mons at {ceph-mon1=192.168.203.72:6789/0,ceph-mon2=192.168.203.73:6789/0,ceph-mon3=192.168.203.74:6789/0}
election epoch 22, quorum 0,1,2 ceph-mon1,ceph-mon2,ceph-mon3
mdsmap e16: 1/1/1 up {0=ceph-mon2=up:replay}, 2 up:standby
osdmap e56: 2 osds: 2 up, 2 in
pgmap v139: 596 pgs, 7 pools, 10442 bytes data, 63 objects
77636 kB used, 30622 MB / 30697 MB avail
512 active+clean
84 stale+active+clean
5、查看ceph存储空间
[root@ceph-adm ~]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
30697M 30622M 77636k 0.25
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
rbd 0 0 0 15310M 0
cephfs_data 1 0 0 15310M 0
cephfs_metadata 2 9594 0 15310M 20
6、删除一个节点的所有的ceph数据包
[root@ceph-adm ~]# ceph-deploy purge ceph-mon1
[root@ceph-adm ~]# ceph-deploy purgedata ceph-mon1
7、为ceph创建一个admin用户并为admin用户创建一个密钥,把密钥保存到/etc/ceph目录下:
[root@ceph-adm ~]# ceph auth get-or-create client.admin mds 'allow' osd 'allow *' mon 'allow *' > /etc/ceph/ceph.client.admin.keyring
或
[root@ceph-adm ~]# ceph auth get-or-create client.admin mds 'allow' osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.admin.keyring
8、为osd.0创建一个用户并创建一个key
[root@ceph-adm ~]# ceph auth get-or-create osd.0 mon 'allow rwx' osd 'allow *' -o /var/lib/ceph/osd/ceph-0/keyring
9、为mds.node1创建一个用户并创建一个key
[root@ceph-adm ~]# ceph auth get-or-create mds.node1 mon 'allow rwx' osd 'allow *' mds 'allow *' -o /var/lib/ceph/mds/ceph-node1/keyring
10、查看ceph集群中的认证用户及相关的key
[root@ceph-adm ~]# ceph auth list
11、删除集群中的一个认证用户
[root@ceph-adm ~]# ceph auth del osd.0
12、查看集群健康状态细节
[root@ceph-adm ~]# ceph health detail
HEALTH_WARN clock skew detected on mon.ceph-mon3; 84 pgs stale; 84 pgs stuck stale; 5 requests are blocked > 32 sec; 2 osds have slow requests; mds cluster is degraded; Monitor clock skew detected
pg 0.22 is stuck stale for 13269.231220, current state stale+active+clean, last acting [1,0]
pg 0.21 is stuck stale for 13269.231219, current state stale+active+clean, last acting [1,0]
pg 0.20 is stuck stale for 13269.231222, current state stale+active+clean, last acting [1,0]
pg 0.1f is stuck stale for 13269.231224, current state stale+active+clean, last acting [0,1]
13、查看ceph log日志所在的目录
[root@ceph-adm ~]# ceph-conf --name mon.ceph-adm --show-config-value log_file
/var/log/ceph/ceph-mon.ceph-adm.log
二、mon
1、查看mon的状态信息
[root@ceph-mon1 ~]# ceph mon stat
e1: 3 mons at {ceph-mon1=192.168.203.72:6789/0,ceph-mon2=192.168.203.73:6789/0,ceph-mon3=192.168.203.74:6789/0}, election epoch 22, quorum 0,1,2 ceph-mon1,ceph-mon2,ceph-mon3
2、查看mon的选举状态
[root@ceph-mon1 ~]# ceph quorum_status
{"election_epoch":22,"quorum":[0,1,2],"quorum_names":["ceph-mon1","ceph-mon2","ceph-mon3"],"quorum_leader_name":"ceph-mon1","monmap":{"epoch":1,"fsid":"fdbb34ad-765a-420b-89e8-443aba4254dd","modified":"0.000000","created":"0.000000","mons":[{"rank":0,"name":"ceph-mon1","addr":"192.168.203.72:6789\/0"},{"rank":1,"name":"ceph-mon2","addr":"192.168.203.73:6789\/0"},{"rank":2,"name":"ceph-mon3","addr":"192.168.203.74:6789\/0"}]}}
3、查看mon的映射信息
[root@ceph-mon1 ~]# ceph mon dump
dumped monmap epoch 1
epoch 1
fsid fdbb34ad-765a-420b-89e8-443aba4254dd
last_changed 0.000000
created 0.000000
0: 192.168.203.72:6789/0 mon.ceph-mon1
1: 192.168.203.73:6789/0 mon.ceph-mon2
2: 192.168.203.74:6789/0 mon.ceph-mon3
4、删除一个mon节点
[root@node1 ~]# ceph mon remove node1
removed mon.node1 at 10.39.101.1:6789/0, there are now 3 monitors
2014-07-07 18:11:04.974188 7f4d16bfd700 0 monclient: hunting for new mon
5、获得一个正在运行的mon map,并保存在1.txt文件中
[root@ceph-mon1 ~]# ceph mon getmap -o 1.txt
got monmap epoch 1
6、查看上面获得的map
[root@ceph-mon1 ~]# monmaptool --print 1.txt
monmaptool: monmap file 1.txt
epoch 1
fsid fdbb34ad-765a-420b-89e8-443aba4254dd
last_changed 0.000000
created 0.000000
0: 192.168.203.72:6789/0 mon.ceph-mon1
1: 192.168.203.73:6789/0 mon.ceph-mon2
2: 192.168.203.74:6789/0 mon.ceph-mon3
7、把上面的mon map注入新加入的节点
[root@ceph-mon1 ~]# ceph-mon -i ceph-mon3 --inject-monmap 1.txt
8、查看mon的admin socket
[root@ceph-mon1 ~]# ceph-conf --name mon.ceph-mon1 --show-config-value admin_socket
/var/run/ceph/ceph-mon.ceph-mon1.asok
9、查看mon的详细状态
[root@ceph-mon1 ~]# ceph daemon mon.ceph-mon1 mon_status
{
"name": "ceph-mon1",
1、启动一个ceph进程
启动mon进程
[root@ceph-adm ~]# service ceph start mon.ceph-mon1
启动mds进程
[root@ceph-adm ~]# service ceph start mds.ceph-mds1
启动osd进程
[root@ceph-adm ~]# service ceph start osd.0
2、查看机器的监控状态
[root@ceph-adm ~]# ceph health
HEALTH_OK
3、查看ceph的实时运行状态
[root@ceph-adm ~]# ceph -w
cluster fdbb34ad-765a-420b-89e8-443aba4254dd
health HEALTH_WARN
clock skew detected on mon.ceph-mon3
84 pgs stale
84 pgs stuck stale
5 requests are blocked > 32 sec
mds cluster is degraded
Monitor clock skew detected
monmap e1: 3 mons at {ceph-mon1=192.168.203.72:6789/0,ceph-mon2=192.168.203.73:6789/0,ceph-mon3=192.168.203.74:6789/0}
election epoch 22, quorum 0,1,2 ceph-mon1,ceph-mon2,ceph-mon3
mdsmap e16: 1/1/1 up {0=ceph-mon2=up:replay}, 2 up:standby
osdmap e56: 2 osds: 2 up, 2 in
pgmap v137: 596 pgs, 7 pools, 10442 bytes data, 63 objects
77032 kB used, 30622 MB / 30697 MB avail
512 active+clean
84 stale+active+clean
2016-08-28 20:42:51.043538 osd.0 [WRN] slow request 120.842103 seconds old, received at 2016-08-28 20:40:50.201254: osd_op(mds.0.3:4 400.00000000 [read 0~0] 2.64e96f8f ack+read+known_if_redirected e49) currently no flag points reached
4、检查集群状态信息
[root@ceph-adm ~]# ceph -s
cluster fdbb34ad-765a-420b-89e8-443aba4254dd
health HEALTH_WARN
clock skew detected on mon.ceph-mon3
84 pgs stale
84 pgs stuck stale
5 requests are blocked > 32 sec
mds cluster is degraded
Monitor clock skew detected
monmap e1: 3 mons at {ceph-mon1=192.168.203.72:6789/0,ceph-mon2=192.168.203.73:6789/0,ceph-mon3=192.168.203.74:6789/0}
election epoch 22, quorum 0,1,2 ceph-mon1,ceph-mon2,ceph-mon3
mdsmap e16: 1/1/1 up {0=ceph-mon2=up:replay}, 2 up:standby
osdmap e56: 2 osds: 2 up, 2 in
pgmap v139: 596 pgs, 7 pools, 10442 bytes data, 63 objects
77636 kB used, 30622 MB / 30697 MB avail
512 active+clean
84 stale+active+clean
5、查看ceph存储空间
[root@ceph-adm ~]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
30697M 30622M 77636k 0.25
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
rbd 0 0 0 15310M 0
cephfs_data 1 0 0 15310M 0
cephfs_metadata 2 9594 0 15310M 20
6、删除一个节点的所有的ceph数据包
[root@ceph-adm ~]# ceph-deploy purge ceph-mon1
[root@ceph-adm ~]# ceph-deploy purgedata ceph-mon1
7、为ceph创建一个admin用户并为admin用户创建一个密钥,把密钥保存到/etc/ceph目录下:
[root@ceph-adm ~]# ceph auth get-or-create client.admin mds 'allow' osd 'allow *' mon 'allow *' > /etc/ceph/ceph.client.admin.keyring
或
[root@ceph-adm ~]# ceph auth get-or-create client.admin mds 'allow' osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.admin.keyring
8、为osd.0创建一个用户并创建一个key
[root@ceph-adm ~]# ceph auth get-or-create osd.0 mon 'allow rwx' osd 'allow *' -o /var/lib/ceph/osd/ceph-0/keyring
9、为mds.node1创建一个用户并创建一个key
[root@ceph-adm ~]# ceph auth get-or-create mds.node1 mon 'allow rwx' osd 'allow *' mds 'allow *' -o /var/lib/ceph/mds/ceph-node1/keyring
10、查看ceph集群中的认证用户及相关的key
[root@ceph-adm ~]# ceph auth list
11、删除集群中的一个认证用户
[root@ceph-adm ~]# ceph auth del osd.0
12、查看集群健康状态细节
[root@ceph-adm ~]# ceph health detail
HEALTH_WARN clock skew detected on mon.ceph-mon3; 84 pgs stale; 84 pgs stuck stale; 5 requests are blocked > 32 sec; 2 osds have slow requests; mds cluster is degraded; Monitor clock skew detected
pg 0.22 is stuck stale for 13269.231220, current state stale+active+clean, last acting [1,0]
pg 0.21 is stuck stale for 13269.231219, current state stale+active+clean, last acting [1,0]
pg 0.20 is stuck stale for 13269.231222, current state stale+active+clean, last acting [1,0]
pg 0.1f is stuck stale for 13269.231224, current state stale+active+clean, last acting [0,1]
13、查看ceph log日志所在的目录
[root@ceph-adm ~]# ceph-conf --name mon.ceph-adm --show-config-value log_file
/var/log/ceph/ceph-mon.ceph-adm.log
二、mon
1、查看mon的状态信息
[root@ceph-mon1 ~]# ceph mon stat
e1: 3 mons at {ceph-mon1=192.168.203.72:6789/0,ceph-mon2=192.168.203.73:6789/0,ceph-mon3=192.168.203.74:6789/0}, election epoch 22, quorum 0,1,2 ceph-mon1,ceph-mon2,ceph-mon3
2、查看mon的选举状态
[root@ceph-mon1 ~]# ceph quorum_status
{"election_epoch":22,"quorum":[0,1,2],"quorum_names":["ceph-mon1","ceph-mon2","ceph-mon3"],"quorum_leader_name":"ceph-mon1","monmap":{"epoch":1,"fsid":"fdbb34ad-765a-420b-89e8-443aba4254dd","modified":"0.000000","created":"0.000000","mons":[{"rank":0,"name":"ceph-mon1","addr":"192.168.203.72:6789\/0"},{"rank":1,"name":"ceph-mon2","addr":"192.168.203.73:6789\/0"},{"rank":2,"name":"ceph-mon3","addr":"192.168.203.74:6789\/0"}]}}
3、查看mon的映射信息
[root@ceph-mon1 ~]# ceph mon dump
dumped monmap epoch 1
epoch 1
fsid fdbb34ad-765a-420b-89e8-443aba4254dd
last_changed 0.000000
created 0.000000
0: 192.168.203.72:6789/0 mon.ceph-mon1
1: 192.168.203.73:6789/0 mon.ceph-mon2
2: 192.168.203.74:6789/0 mon.ceph-mon3
4、删除一个mon节点
[root@node1 ~]# ceph mon remove node1
removed mon.node1 at 10.39.101.1:6789/0, there are now 3 monitors
2014-07-07 18:11:04.974188 7f4d16bfd700 0 monclient: hunting for new mon
5、获得一个正在运行的mon map,并保存在1.txt文件中
[root@ceph-mon1 ~]# ceph mon getmap -o 1.txt
got monmap epoch 1
6、查看上面获得的map
[root@ceph-mon1 ~]# monmaptool --print 1.txt
monmaptool: monmap file 1.txt
epoch 1
fsid fdbb34ad-765a-420b-89e8-443aba4254dd
last_changed 0.000000
created 0.000000
0: 192.168.203.72:6789/0 mon.ceph-mon1
1: 192.168.203.73:6789/0 mon.ceph-mon2
2: 192.168.203.74:6789/0 mon.ceph-mon3
7、把上面的mon map注入新加入的节点
[root@ceph-mon1 ~]# ceph-mon -i ceph-mon3 --inject-monmap 1.txt
8、查看mon的admin socket
[root@ceph-mon1 ~]# ceph-conf --name mon.ceph-mon1 --show-config-value admin_socket
/var/run/ceph/ceph-mon.ceph-mon1.asok
9、查看mon的详细状态
[root@ceph-mon1 ~]# ceph daemon mon.ceph-mon1 mon_status
{
"name": "ceph-mon1",