Migrating Ceph data by enumerating RADOS keys

Background:

Our Ceph cluster hit a version-specific bug that left the OSDs unable to recover. Since the cluster ran an old release and the data was not especially sensitive, we chose to rebuild the cluster from scratch; but first the data had to be migrated to another Ceph cluster so reads and writes could resume.

Approach:

1. Enumerate every object key

---I searched around quite a bit at the time, but found no way to enumerate keys per bucket while the cluster was broken and unable to serve reads or writes. So I simply listed the entire data pool: about 560 million lines of keys, which took roughly 30 hours to export (redirected into the file the filter script below reads):

rados -p tupu-zone1.rgw.buckets.data ls > 2023-3-21-data.txt

2. Filter the exported keys down to the data the business needs

---Why filter at all? 560 million keys is far more than we needed, and there was no point migrating data nobody uses. Each RGW object in the data pool is named <bucket_id>_<key>, so lines can be routed by their bucket-id prefix (when the RGW metadata pools are still readable, radosgw-admin bucket stats --bucket=<name> reports each bucket's id; here we had the ids on record).

---While filtering with Python I hit a fairly common mistake: the exported key file is over 60 GB, and pulling it all into memory at once raises a MemoryError. The file has to be read as a stream, line by line.

import time

start = time.time()

# Naive approach -- readlines() tries to hold the entire 60+ GB dump in
# memory and dies with a MemoryError, which is why we stream it below:
# file1 = open('2023-3-21-data.txt', 'r')
# Lines = file1.readlines()

bucket_keys = [
    "LTS-Bucket-BIAvatar.log",
    "LTS-Bucket-BIBodyFeature.log",
    "LTS-Bucket-PISKUImage-new.log",
    "LTS-Bucket-storeInspectImage.log",
    "LTS-Bucket-BI-GOOD-Result-Img.log",
    "LTS-Bucket-Aigc.log",
    "LTS-Bucket-UserDefineImages.log",
    "LTS-Bucket-OBJECT_SEARCH-politicians.log",
    "OBJECT_SEARCH-politicians.log",
    "LTS-Bucket-OBJECT_SEARCH.log",
    "LTS-Bucket-OBJECT_SEARCH-stars.log",
    "LTS-Bucket-OBJECT_SEARCH-politicianv4.log",
    "LTS-Bucket-OBJECT_SEARCH-inkenew.log",
    "LTS-Bucket-OBJECT_SEARCH-tt.log",
    "LTS-Bucket-ClerkImage.log"
]
path_wf = "/root/tmp_data/res/"

# One output file handle per bucket, opened in append mode.
out_files = [open(path_wf + key, 'a') for key in bucket_keys]

bucket_ids = [
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.3374283.71960",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.37539210.94331",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.10764110.28340",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.43245259.51271",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.2793933771.431",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.3243722154.5763",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.43245292.36826",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.14838714.38464",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.14962229.37964",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.175373217.48388" ,
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.14828896.34830",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.30224932.39965",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.14828947.38406",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.26387467.19668",
    "56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.2242965399.4663"
]

# Map each bucket id to its output file (zip pairs them positionally,
# so bucket_ids and bucket_keys must stay in the same order).
myfile = dict(zip(bucket_ids, out_files))

count = 0

# Stream the dump line by line; each object name starts with its
# bucket id, so route the line to the matching output file.
with open("2023-3-21-data.txt", 'r') as f:
    for line in f:
        count += 1
        for bucket_id, out in myfile.items():
            if bucket_id in line:
                out.write(line)
                break

for out in out_files:
    out.close()

end = time.time()
print(end-start)
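After the run, a quick sanity check that the routing worked is cheap; a minimal sketch, reusing bucket_keys, path_wf, and count from the script above:

# Count the lines routed to each per-bucket file and compare against
# the total scanned (most lines won't belong to any wanted bucket).
total = 0
for key in bucket_keys:
    with open(path_wf + key) as f:
        n = sum(1 for _ in f)
    print(key, n)
    total += n
print('matched', total, 'of', count, 'lines scanned')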

I'm most comfortable with Python, but with ChatGPT so popular now it's worth putting to use, especially when you need the same functionality in another language. I asked it for the JavaScript equivalent and the result was genuinely impressive.

const readline = require('readline')
const fs = require('fs')

const filePath = '2023-3-21-data.txt'

const bucketIdToWS = {
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.3374283.71960': fs.createWriteStream(`LTS-Bucket-BIAvatar.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.37539210.94331': fs.createWriteStream(`LTS-Bucket-BIBodyFeature.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.10764110.28340': fs.createWriteStream(`LTS-Bucket-PISKUImage-new.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.43245259.51271': fs.createWriteStream(`LTS-Bucket-storeInspectImage.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.2793933771.431': fs.createWriteStream(`LTS-Bucket-BI-GOOD-Result-Img.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.3243722154.5763': fs.createWriteStream(`LTS-Bucket-Aigc.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.43245292.36826': fs.createWriteStream(`LTS-Bucket-UserDefineImages.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.14838714.38464': fs.createWriteStream(`LTS-Bucket-OBJECT_SEARCH-politicians.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.14962229.37964': fs.createWriteStream(`LTS-Bucket-OBJECT_SEARCH-politicians.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.175373217.48388': fs.createWriteStream(`LTS-Bucket-OBJECT_SEARCH.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.14828896.34830': fs.createWriteStream(`LTS-Bucket-OBJECT_SEARCH-stars.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.30224932.39965': fs.createWriteStream(`LTS-Bucket-OBJECT_SEARCH-politicianv4.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.14828947.38406': fs.createWriteStream(`LTS-Bucket-OBJECT_SEARCH-inkenew.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.26387467.19668': fs.createWriteStream(`LTS-Bucket-OBJECT_SEARCH-tt.log`),
    '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.2242965399.4663': fs.createWriteStream(`LTS-Bucket-ClerkImage.log`),
}

const bucketIds = Object.keys(bucketIdToWS)

// Create a line-by-line reader over the key dump
const lineReader = readline.createInterface({
    input: fs.createReadStream(filePath, { encoding: 'utf-8' }),
    crlfDelay: Infinity
});

// Read the large file line by line, routing each line by its bucket-id prefix
lineReader.on('line', (line) => {
    for (const id of bucketIds) {
        if (line.startsWith(id)) {
            bucketIdToWS[id].write(line)
            bucketIdToWS[id].write('\n')
            break
        }
    }
});

// Close the output file streams when done
lineReader.on('close', () => {
    for (const ws of Object.values(bucketIdToWS)) {
        ws.end()
    }
});
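The script takes no arguments: save it next to the dump and run it with node (for example node filter.js, assuming that filename), and it writes one log file per bucket, like the Python version.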

3. Finally, for each remaining key, read the object through Ceph's rados ioctx API and write it into the new cluster.

import math
import threading

import rados
from boto3.session import Session

FROM_BUCKET_ID = '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.10764110.28340'
TO_BUCKET = 'LTS-Bucket-PISKUImage'

access_key = "xxx"
secret_key = "xxx"

session = Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key)
url = "http://xxx"
s3_client = session.client('s3', endpoint_url=url)

cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
print("[from] Will attempt to connect to: {}".format(str(cluster.conf_get('mon host'))))
cluster.connect()


ioctx = cluster.open_ioctx('tupu-zone1.rgw.buckets.data')

# The filtered per-bucket key file is small enough to hold in memory.
with open('pi-sku.txt', 'r') as f:
    Lines = f.readlines()

def loopOver(start, end):
    print('start end', start, end)
    count = 0
    for i in range(start, end):
        line = Lines[i]
        count += 1

        _line = line.strip()
        # Strip the fixed-length line prefix to recover the S3 object key;
        # adjust the offset to match the format of your exported key file.
        key = _line[29:]
        print(count, _line, key)

        # Read up to ~10 MB of the RADOS object named <bucket_id>_<key>.
        try:
            res = ioctx.read(FROM_BUCKET_ID + "_" + key, 10240000)
        except Exception:
            print(count, _line, key, 'except')
            continue
        if not res:
            print(count, _line, key, 'empty')
            continue

        # Upload the bytes straight to the new cluster; no temp file needed.
        s3_client.put_object(Bucket=TO_BUCKET, Key=key, Body=res)


num_threads = 5
# Integer chunking -- len(Lines) / num_threads is a float in Python 3
# and would crash range(); the last chunk simply ends at len(Lines).
chunk_size = math.ceil(len(Lines) / num_threads)
threads = []

for i in range(num_threads):
    start = i * chunk_size
    end = min((i + 1) * chunk_size, len(Lines))
    t = threading.Thread(target=loopOver, args=(start, end))
    threads.append(t)
    t.start()

for t in threads:
    t.join()

With this much data, the writes have to be parallelized, whether with threads or something like Go's goroutines, or the copy would take forever. Here again I recommend using ChatGPT for code whose behavior you already understand: my prompt was roughly "write Python that reads through a large array with multiple threads", and it handled the boundary conditions nicely. The same chunked fan-out can also be written with concurrent.futures, as sketched below.
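A minimal sketch of that fan-out using concurrent.futures instead of bare threads; it reuses loopOver, Lines, and math from the script above and re-raises any exception a worker hits:

from concurrent.futures import ThreadPoolExecutor

num_threads = 5
chunk_size = math.ceil(len(Lines) / num_threads)

with ThreadPoolExecutor(max_workers=num_threads) as pool:
    futures = [
        pool.submit(loopOver, i * chunk_size, min((i + 1) * chunk_size, len(Lines)))
        for i in range(num_threads)
    ]
    for fut in futures:
        fut.result()  # propagate worker exceptions instead of losing them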

Takeaways:

This incident exposed several gaps. We couldn't export keys per bucket while the cluster was down, and if keys could have been listed per business group, there would have been no need to dump everything and filter afterwards; we could have pulled exactly the data we wanted to migrate. This is a blind spot of mine, so if you know a proper way to solve it, please share in the comments. One partial improvement is sketched below.
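For what it's worth, a partial workaround I sketched afterwards (not used during the incident): the librados Python bindings can iterate object names directly, so the per-bucket filter can run while streaming and the 60+ GB intermediate dump never touches disk. It still scans the whole pool; it only saves the dump-then-filter pass. The pool, conffile, bucket id, and output filename below are the ones from the scripts above:

import rados

cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
cluster.connect()
ioctx = cluster.open_ioctx('tupu-zone1.rgw.buckets.data')

# RGW head objects are named <bucket_id>_<key>, so a prefix test
# selects one bucket's objects as they stream past.
wanted = '56dfcf77-f7ee-4b4f-89b8-cbddbbbd1ef1.10764110.28340_'
with open('pi-sku.txt', 'w') as out:
    for obj in ioctx.list_objects():
        if obj.key.startswith(wanted):
            out.write(obj.key + '\n')

ioctx.close()
cluster.shutdown()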

This was my first time pairing with ChatGPT to script my way out of a real production problem, and it worked great. Give it a try.
