在进行 OSD 的创建时报路径错误？
解决方法：先找到 ceph.conf 所在的目录（即 ceph-deploy 的工作目录）
切换到该目录后再执行 OSD 的创建就可以了
[root@node-1 opt]# find / -name "ceph.conf"
/etc/ceph/ceph.conf
find: ‘/proc/57761’: No such file or directory
find: ‘/proc/57790’: No such file or directory
/usr/local/cluster-deploy/ceph/ceph.conf
[root@node-1 opt]# cd /usr/local/cluster-deploy/ceph/
[root@node-1 ceph]# ll
total 84K
-rw------- 1 root root 113 Apr 10 21:57 ceph.bootstrap-mds.keyring
-rw------- 1 root root 113 Apr 10 21:57 ceph.bootstrap-mgr.keyring
-rw------- 1 root root 113 Apr 10 21:57 ceph.bootstrap-osd.keyring
-rw------- 1 root root 113 Apr 10 21:57 ceph.bootstrap-rgw.keyring
-rw------- 1 root root 151 Apr 10 21:57 ceph.client.admin.keyring
-rw-r--r-- 1 root root 273 Apr 10 21:55 ceph.conf
-rw-r--r-- 1 root root 49K Apr 10 21:58 ceph-deploy-ceph.log
-rw------- 1 root root 73 Apr 10 21:55 ceph.mon.keyring
[root@node-1 ceph]# ceph-deploy osd create node-1 --data /dev/sda
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create node-1 --data /dev/sda
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] subcommand : create
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf object at 0x400039864c70>
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] func : <function osd at 0x4000397a89d0>
[ceph_deploy.cli][INFO ] data : /dev/sda
[ceph_deploy.cli][INFO ] journal : None
[ceph_deploy.cli][INFO ] zap_disk : False
[ceph_deploy.cli][INFO ] fs_type : xfs
[ceph_deploy.cli][INFO ] dmcrypt : False
[ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO ] filestore : None
[ceph_deploy.cli][INFO ] bluestore : None
[ceph_deploy.cli][INFO ] block_db : None
[ceph_deploy.cli][INFO ] block_wal : None
[ceph_deploy.cli][INFO ] host : node-1
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sda
[node-1][DEBUG ] connected to host: node-1
[ceph_deploy.osd][INFO ] Distro info: openEuler 22.03 22.03 LTS
[ceph_deploy.osd][DEBUG ] Deploying osd to node-1
[node-1][WARNIN] osd keyring does not exist yet, creating one
[node-1][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sda
[node-1][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[node-1][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new a9dd4a1d-d36f-4304-8798-9ed303db9a6d
[node-1][WARNIN] Running command: /usr/sbin/vgcreate --force --yes ceph-35ef53c1-5a53-4928-bc23-fac2b5f62cbc /dev/sda
[node-1][WARNIN] stdout: Physical volume "/dev/sda" successfully created.
[node-1][WARNIN] stdout: Volume group "ceph-35ef53c1-5a53-4928-bc23-fac2b5f62cbc" successfully created
[node-1][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 1907591 -n osd-block-a9dd4a1d-d36f-4304-8798-9ed303db9a6d ceph-35ef53c1-5a53-4928-bc23-fac2b5f62cbc
[node-1][WARNIN] stdout: Logical volume "osd-block-a9dd4a1d-d36f-4304-8798-9ed303db9a6d" created.
[node-1][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[node-1][WARNIN] Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
[node-1][WARNIN] Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-35ef53c1-5a53-4928-bc23-fac2b5f62cbc/osd-block-a9dd4a1d-d36f-4304-8798-9ed303db9a6d
[node-1][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3
[node-1][WARNIN] Running command: /usr/bin/ln -s /dev/ceph-35ef53c1-5a53-4928-bc23-fac2b5f62cbc/osd-block-a9dd4a1d-d36f-4304-8798-9ed303db9a6d /var/lib/ceph/osd/ceph-0/block
[node-1][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
[node-1][WARNIN] stderr: 2024-04-10T23:21:29.419+0800 400037c41120 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[node-1][WARNIN] 2024-04-10T23:21:29.419+0800 400037c41120 -1 AuthRegistry(0x40003805ae50) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[node-1][WARNIN] stderr: got monmap epoch 2
[node-1][WARNIN] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQB3rhZmSxKTCxAAP3yZeti6BnKt/qP8lJvH/w==
[node-1][WARNIN] stdout: creating /var/lib/ceph/osd/ceph-0/keyring
[node-1][WARNIN] added entity osd.0 auth(key=AQB3rhZmSxKTCxAAP3yZeti6BnKt/qP8lJvH/w==)
[node-1][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
[node-1][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
[node-1][WARNIN] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid a9dd4a1d-d36f-4304-8798-9ed303db9a6d --setuser ceph --setgroup ceph
[node-1][WARNIN] stderr: 2024-04-10T23:21:30.507+0800 4000123b0e00 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
[node-1][WARNIN] stderr: 2024-04-10T23:21:31.103+0800 4000123b0e00 -1 load failed dlopen(): "/usr/lib64/ceph/compressor/libceph_snappy.so: undefined symbol: _ZTIN6snappy6SourceE" or "/usr/lib64/ceph/libceph_snappy.so: cannot open shared object file: No such file or directory"
[node-1][WARNIN] stderr: 2024-04-10T23:21:31.103+0800 4000123b0e00 -1 create cannot load compressor of type snappy
[node-1][WARNIN] stderr: 2024-04-10T23:21:31.103+0800 4000123b0e00 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _set_compression unable to initialize snappy compressor
[node-1][WARNIN] stderr: 2024-04-10T23:21:44.423+0800 4000123b0e00 -1 load failed dlopen(): "/usr/lib64/ceph/compressor/libceph_snappy.so: undefined symbol: _ZTIN6snappy6SourceE" or "/usr/lib64/ceph/libceph_snappy.so: cannot open shared object file: No such file or directory"
[node-1][WARNIN] stderr: 2024-04-10T23:21:44.423+0800 4000123b0e00 -1 create cannot load compressor of type snappy
[node-1][WARNIN] stderr: 2024-04-10T23:21:44.423+0800 4000123b0e00 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _set_compression unable to initialize snappy compressor
[node-1][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sda
[node-1][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
[node-1][WARNIN] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-35ef53c1-5a53-4928-bc23-fac2b5f62cbc/osd-block-a9dd4a1d-d36f-4304-8798-9ed303db9a6d --path /var/lib/ceph/osd/ceph-0 --no-mon-config
[node-1][WARNIN] Running command: /usr/bin/ln -snf /dev/ceph-35ef53c1-5a53-4928-bc23-fac2b5f62cbc/osd-block-a9dd4a1d-d36f-4304-8798-9ed303db9a6d /var/lib/ceph/osd/ceph-0/block
[node-1][WARNIN] Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
[node-1][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3
[node-1][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
[node-1][WARNIN] Running command: /usr/bin/systemctl enable ceph-volume@lvm-0-a9dd4a1d-d36f-4304-8798-9ed303db9a6d
[node-1][WARNIN] stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-a9dd4a1d-d36f-4304-8798-9ed303db9a6d.service → /usr/lib/systemd/system/ceph-volume@.service.
[node-1][WARNIN] Running command: /usr/bin/systemctl enable --runtime ceph-osd@0
[node-1][WARNIN] stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service → /usr/lib/systemd/system/ceph-osd@.service.
[node-1][WARNIN] Running command: /usr/bin/systemctl start ceph-osd@0
[node-1][WARNIN] --> ceph-volume lvm activate successful for osd ID: 0
[node-1][WARNIN] --> ceph-volume lvm create successful for: /dev/sda
[node-1][INFO ] checking OSD status...
[node-1][INFO ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host node-1 is now ready for osd use.
[root@node-1 ceph]#