ceph线上工作心得
1.搭建ceph
1. 创建Ceph专用网络
sudo docker network create --driver bridge --subnet 172.20.0.0/16 ceph-network
2. 拉取搭建用镜像
sudo docker pull ceph/daemon:latest-luminous
3. 搭建mon节点
sudo docker run -d --name ceph-mon --network ceph-network --ip 172.20.0.10 -e CLUSTER=ceph -e WEIGHT=1.0 -e MON_IP=172.20.0.10 -e MON_NAME=ceph-mon -e CEPH_PUBLIC_NETWORK=172.20.0.0/16 -v /etc/ceph:/etc/ceph -v /var/lib/ceph/:/var/lib/ceph/ -v /var/log/ceph/:/var/log/ceph/ ceph/daemon:latest-luminous mon
4. 搭建osd节点
sudo docker exec ceph-mon ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/ceph.keyring
修改配置文件以兼容 ext4 硬盘
sudo vi /etc/ceph/ceph.conf
在文件最后添加:
osd max object name len = 256
osd max object namespace len = 64
分别启动三个容器来模拟集群
sudo docker run -d --privileged=true --name ceph-osd-1 --network ceph-network --ip 172.20.0.11 -e CLUSTER=ceph -e WEIGHT=1.0 -e MON_NAME=ceph-mon -e MON_IP=172.20.0.10 -e OSD_TYPE=directory -v /etc/ceph:/etc/ceph -v /var/lib/ceph/:/var/lib/ceph/ -v /var/lib/ceph/osd/1:/var/lib/ceph/osd -v /etc/localtime:/etc/localtime:ro ceph/daemon:latest-luminous osd
sudo docker run -d --privileged=true --name ceph-osd-2 --network ceph-network --ip 172.20.0.12 -e CLUSTER=ceph -e WEIGHT=1.0 -e MON_NAME=ceph-mon -e MON_IP=172.20.0.10 -e OSD_TYPE=directory -v /etc/ceph:/etc/ceph -v /var/lib/ceph/:/var/lib/ceph/ -v /var/lib/ceph/osd/2:/var/lib/ceph/osd -v /etc/localtime:/etc/localtime:ro ceph/daemon:latest-luminous osd
sudo docker run -d --privileged=true --name ceph-osd-3 --network ceph-network --ip 172.20.0.13 -e CLUSTER=ceph -e WEIGHT=1.0 -e MON_NAME=ceph-mon -e MON_IP=172.20.0.10 -e OSD_TYPE=directory -v /etc/ceph:/etc/ceph -v /var/lib/ceph/:/var/lib/ceph/ -v /var/lib/ceph/osd/3:/var/lib/ceph/osd -v /etc/localtime:/etc/localtime:ro ceph/daemon:latest-luminous osd
5. 搭建mgr节点
sudo docker run -d --privileged=true --name ceph-mgr --network ceph-network --ip 172.20.0.14 -e CLUSTER=ceph -p 5003:7000 --pid=container:ceph-mon -v /etc/ceph:/etc/ceph -v /var/lib/ceph/:/var/lib/ceph/ ceph/daemon:latest-luminous mgr
开启管理界面
sudo docker exec ceph-mgr ceph mgr module enable dashboard
6. 搭建rgw节点
sudo docker exec ceph-mon ceph auth get client.bootstrap-rgw -o /var/lib/ceph/bootstrap-rgw/ceph.keyring
sudo docker run -d --privileged=true --name ceph-rgw --network ceph-network --ip 172.20.0.15 -e CLUSTER=ceph -e RGW_NAME=ceph-rgw -p 9031:7480 -v /var/lib/ceph/:/var/lib/ceph/ -v /etc/ceph:/etc/ceph -v /etc/localtime:/etc/localtime:ro ceph/daemon:latest-luminous rgw
7.检查Ceph状态
sudo docker exec ceph-mon ceph -s
8. 测试添加rgw用户 生成 access_key secret_key用于访问
sudo docker exec ceph-rgw radosgw-admin user create --uid="test" --display-name="test user"
推荐博客
https://www.cnblogs.com/hackyo/p/13373340.html
ceph 搭建成功图示
2.环境验证
生成 key
sudo docker exec ceph-rgw radosgw-admin user create --uid="test" --display-name="test user"
查看ceph健康状态
docker exec ceph-mon ceph -s
进入docker
docker exec -it ceph-mon bash
查看桶中数据
rados ls -p default.rgw.buckets.data
在 docker 容器内下载对象(rados 命令需在容器内执行)
rados get -p default.rgw.buckets.data c2ebdb32-037c-429e-a5c2-6e1e96d24fbb.54097.28_20200901/hello.txt hello.txt
下载到本地
docker cp ceph-mgr:hello.txt ./hello.txt
二、ceph体系架构
1. ceph 体系架构
2. ceph 基础组件
OSD 用于集群中所有数据与对象的存储:存储/复制/平衡/恢复数据等
Monitor 监控集群的状态,维护Cluster Map,保证集群的数据一致性
MDS 保存文件系统服务的元数据(对象存储/块存储不需要该服务)
GW 提供与Amazon S3和Swift兼容的Restful API的gateway服务
rgw 7480
mgr 7000
3.ceph aws 术语
三、 ceph常用api
1.初始化
package main
import (
"fmt"
"github.com/astaxie/beego/logs"
"gopkg.in/amz.v1/aws"
"gopkg.in/amz.v1/s3"
"io/ioutil"
"time"
)
var (
	// CephConn is the shared S3-protocol client for the Ceph RGW,
	// initialized once at package load and used by all helpers below.
	CephConn *s3.S3
)

// init builds the package-level S3-compatible connection to Ceph.
// Credentials come from `radosgw-admin user create` (access/secret key).
func init() {
	CephConn = s3.New(
		aws.Auth{
			AccessKey: "",
			SecretKey: "",
		},
		// A hand-rolled "region" describing the local Ceph RGW endpoint.
		aws.Region{
			Name:                 "default",
			EC2Endpoint:          "",
			S3Endpoint:           "",
			S3BucketEndpoint:     "",
			S3LocationConstraint: false, // no location constraint
			S3LowercaseBucket:    false, // bucket names not forced to lowercase
			Sign:                 aws.SignV2,
		},
	)
}
2. 常用操作
// main demonstrates the full object lifecycle against Ceph's S3
// gateway: create/upload, download, pre-signed URL, list, delete
// object, delete bucket.
func main() {
	bucketName := "bucket_test"
	filename := "C:\\Users\\zhangSir\\1.jpg"
	cephPath := "/static/default/bucket_test/V1/" + "pic4.jpg"
	// Obtain a handle for the target bucket (no network call yet).
	bucket := GetCephBucket(bucketName)
	// Upload a local file.
	bucket, err := put2Bucket(bucket, filename, cephPath)
	if err != nil {
		logs.Error(err)
	}
	// Download it back to a local path.
	// Fix: the download error was previously discarded.
	localPath := "C:\\Users\\zhangSir\\Desktop\\local.jpg"
	if err := downLoadFromCeph(bucket, localPath, cephPath); err != nil {
		logs.Error(err)
	}
	// Generate a pre-signed URL valid for one hour.
	url := bucket.SignedURL(cephPath, time.Now().Add(time.Hour))
	fmt.Println(url)
	// List object keys under a prefix.
	// Fix: the returned key list was previously discarded.
	prefixCephPath := "static/default/bucket_test/V1"
	keys := getBatchFromCeph(bucket, prefixCephPath)
	fmt.Println(keys)
	// Delete the uploaded object.
	if err := delCephData(bucket, cephPath); err != nil {
		logs.Error(err)
	}
	// Delete the bucket; it must be empty first.
	if err := delBucket(bucket); err != nil {
		logs.Error(err)
	}
}
函数实现
// GetCephBucket returns a handle for the named bucket on the shared
// Ceph connection. No network request is performed here.
func GetCephBucket(name string) *s3.Bucket {
	b := CephConn.Bucket(name)
	return b
}
// put2Bucket ensures the bucket exists, then uploads the file at
// localPath to cephPath with a public-read ACL.
// It returns the bucket handle (so callers can chain further
// operations) and the first fatal error encountered.
func put2Bucket(bucket *s3.Bucket, localPath, cephPath string) (*s3.Bucket, error) {
	// Best-effort bucket creation: Ceph errors if the bucket already
	// exists, so the error is logged but not fatal (matches original
	// intent of re-runnable uploads).
	if err := bucket.PutBucket(s3.PublicRead); err != nil {
		logs.Error(err.Error())
	}
	data, err := ioutil.ReadFile(localPath)
	if err != nil {
		// Fix: the original continued here and uploaded nil bytes,
		// silently creating an empty object. Bail out instead.
		logs.Error(err.Error())
		return bucket, err
	}
	// Fix: "octet-stream" is not a valid MIME type; use the full form.
	err = bucket.Put(cephPath, data, "application/octet-stream", s3.PublicRead)
	return bucket, err
}
// downLoadFromCeph fetches the object stored at cephPath and writes
// its contents to localPath with file mode 0666.
func downLoadFromCeph(bucket *s3.Bucket, localPath, cephPath string) error {
	body, err := bucket.Get(cephPath)
	if err != nil {
		logs.Error(err.Error())
		return err
	}
	// Persist the object bytes to the local filesystem.
	err = ioutil.WriteFile(localPath, body, 0666)
	return err
}
// getBatchFromCeph lists object keys under prefixCephPath, returning
// at most 10000 keys. On a listing failure it logs the error and
// returns an empty slice.
func getBatchFromCeph(bucket *s3.Bucket, prefixCephPath string) []string {
	const maxBatch = 10000 // upper bound on keys returned in one call
	resp, err := bucket.List(prefixCephPath, "", "", maxBatch)
	if err != nil {
		// Fix: the original called fmt.Errorf and discarded the result
		// (a no-op), then dereferenced the nil response below — a
		// guaranteed panic on any listing error. Log and bail instead.
		logs.Error(err.Error())
		return nil
	}
	keys := make([]string, 0, len(resp.Contents))
	for _, obj := range resp.Contents {
		// e.g. static/default/4/V1/12075e9f-31bb-45ca-8127-585ac16fee62.png
		keys = append(keys, obj.Key)
	}
	return keys
}
// delCephData removes the object at cephPath from the bucket.
// A deletion error is logged and returned; nil means success.
func delCephData(bucket *s3.Bucket, cephPath string) error {
	if err := bucket.Del(cephPath); err != nil {
		logs.Error(err)
		return err
	}
	return nil
}
// delBucket deletes the bucket itself. The bucket must already be
// empty — delete its objects first. The error, if any, is logged and
// returned.
func delBucket(bucket *s3.Bucket) error {
	if err := bucket.DelBucket(); err != nil {
		logs.Error(err)
		return err
	}
	return nil
}