Code analysis of the Ceph-backed Cinder backup feature


1. cinder/backup/api.py, line 239: the create function

def create(self, context, name, description, volume_id,
           container, incremental=False, availability_zone=None,
           force=False, snapshot_id=None):

It checks the volume status, the volume's snapshot and the snapshot's status, whether force was passed when the volume is in-use, and the backup quota. Since we are using the Ceph driver, I will skip how the driver is obtained and focus on where the work is actually implemented.
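To make those checks concrete, here is a minimal sketch written as a standalone helper. It is not the actual cinder.backup.api.API.create() body; the helper name _check_backup_allowed is made up for illustration, and the quota reservation is only indicated in a comment.

# Hypothetical helper; sketches the validation described above, not cinder code.
def _check_backup_allowed(volume, snapshot=None, force=False):
    """Raise ValueError if the volume/snapshot state forbids a backup."""
    if volume['status'] == 'in-use' and not force:
        raise ValueError("backing up an in-use volume requires force=True")
    if volume['status'] not in ('available', 'in-use'):
        raise ValueError("volume must be available or in-use, got '%s'"
                         % volume['status'])
    if snapshot is not None:
        if snapshot['volume_id'] != volume['id']:
            raise ValueError("snapshot does not belong to this volume")
        if snapshot['status'] != 'available':
            raise ValueError("snapshot must be available")
    # In cinder the backup quota is reserved next, roughly:
    #   QUOTAS.reserve(context, backups=1, backup_gigabytes=volume['size'])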
2. cinder/backup/drivers/ceph.py, line 840: the backup function

    def backup(self, backup, volume_file, backup_metadata=True):
        """Backup volume and metadata (if available) to Ceph object store.

        If the source volume is an RBD we will attempt to do an
        incremental/differential backup, otherwise a full copy is performed.
        If this fails we will attempt to fall back to full copy.
        """
        backup_id = backup['id']
        volume = self.db.volume_get(self.context, backup['volume_id'])
        volume_id = volume['id']
        volume_name = volume['name']

        LOG.debug("Starting backup of volume='%s'.", volume_id)

        # Ensure we are at the beginning of the volume
        volume_file.seek(0)
        length = self._get_volume_size_gb(volume)

        do_full_backup = False
        if self._file_is_rbd(volume_file):
            # If volume an RBD, attempt incremental backup.
            try:
                self._backup_rbd(backup_id, volume_id, volume_file,
                                 volume_name, length)
            except exception.BackupRBDOperationFailed:
                LOG.debug("Forcing full backup of volume %s.", volume_id)
                do_full_backup = True
        else:
            do_full_backup = True

        if do_full_backup:
            self._full_backup(backup_id, volume_id, volume_file,
                              volume_name, length)

        backup.container = self._ceph_backup_pool
        backup.save()

        if backup_metadata:
            try:
                self._backup_metadata(backup)
            except exception.BackupOperationError:
                with excutils.save_and_reraise_exception():
                    # Cleanup.
                    self.delete(backup)

        LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished.",
                  {'backup_id': backup_id, 'volume_id': volume_id})
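Whether the incremental path is even attempted depends on _file_is_rbd(). In the Ceph driver this is essentially a duck-typing check on the volume_file handle; the following sketch is based on the attributes the driver reads from that handle in _backup_rbd() below (shown standalone here; in the driver it is a static method):

def _file_is_rbd(volume_file):
    """Return True if volume_file wraps an RBD image rather than a plain file.

    RBD-backed volumes are handed over as an I/O wrapper exposing rbd_image,
    rbd_pool, rbd_user and rbd_conf attributes, so a simple attribute check
    is enough to pick the incremental path; anything else falls back to
    _full_backup().
    """
    return hasattr(volume_file, 'rbd_image')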

3. cinder/backup/drivers/ceph.py, line 589: the _backup_rbd function

    def _backup_rbd(self, backup_id, volume_id, volume_file, volume_name,
                    length):
        """Create an incremental backup from an RBD image."""
        rbd_user = volume_file.rbd_user
        rbd_pool = volume_file.rbd_pool
        rbd_conf = volume_file.rbd_conf
        source_rbd_image = volume_file.rbd_image

        # Identify our --from-snap point (if one exists)
        from_snap = self._get_most_recent_snap(source_rbd_image)
        LOG.debug("Using --from-snap '%(snap)s' for incremental backup of "
                  "volume %(volume)s.",
                  {'snap': from_snap, 'volume': volume_id})

        base_name = self._get_backup_base_name(volume_id, diff_format=True)
        image_created = False
        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
            # If from_snap does not exist at the destination (and the
            # destination exists), this implies a previous backup has failed.
            # In this case we will force a full backup.
            #
            # TODO(dosaboy): find a way to repair the broken backup
            #
            if base_name not in self.rbd.RBD().list(ioctx=client.ioctx):
                # If a from_snap is defined but the base does not exist, we
                # ignore it since it is stale and waiting to be cleaned up.
                if from_snap:
                    LOG.debug("Source snapshot '%(snapshot)s' of volume "
                              "%(volume)s is stale so deleting.",
                              {'snapshot': from_snap, 'volume': volume_id})
                    source_rbd_image.remove_snap(from_snap)
                    from_snap = None

                # Create a new base image that the backup data will be
                # copied into later.
                self._create_base_image(base_name, length, client)
                image_created = True
            else:
                # If a from_snap is defined but does not exist in the backup
                # base then we cannot proceed (see above)
                if not self._snap_exists(base_name, from_snap, client):
                    errmsg = (_("Snapshot='%(snap)s' does not exist in base "
                                "image='%(base)s' - aborting incremental "
                                "backup") %
                              {'snap': from_snap, 'base': base_name})
                    LOG.info(errmsg)
                    # Raise this exception so that caller can try another
                    # approach
                    raise exception.BackupRBDOperationFailed(errmsg)

        # Snapshot source volume so that we have a new point-in-time
        new_snap = self._get_new_snap_name(backup_id)
        LOG.debug("Creating backup snapshot='%s'", new_snap)
        source_rbd_image.create_snap(new_snap)

        # Attempt differential backup. If this fails, perhaps because librbd
        # or Ceph cluster version does not support it, do a full backup
        # instead.
        #
        # TODO(dosaboy): find a way to determine if the operation is supported
        #                rather than brute force approach.
        try:
            before = time.time()
            self._rbd_diff_transfer(volume_name, rbd_pool, base_name,
                                    self._ceph_backup_pool,
                                    src_user=rbd_user,
                                    src_conf=rbd_conf,
                                    dest_user=self._ceph_backup_user,
                                    dest_conf=self._ceph_backup_conf,
                                    src_snap=new_snap,
                                    from_snap=from_snap)

            LOG.debug("Differential backup transfer completed in %.4fs",
                      (time.time() - before))

            # We don't need the previous snapshot (if there was one) anymore so
            # delete it.
            if from_snap:
                source_rbd_image.remove_snap(from_snap)

        except exception.BackupRBDOperationFailed:
            with excutils.save_and_reraise_exception():
                LOG.debug("Differential backup transfer failed")

                # Clean up if image was created as part of this operation
                if image_created:
                    self._try_delete_base_image(backup_id, volume_id,
                                                base_name=base_name)

                # Delete snapshot
                LOG.debug("Deleting diff backup snapshot='%(snapshot)s' of "
                          "source volume='%(volume)s'.",
                          {'snapshot': new_snap, 'volume': volume_id})
                source_rbd_image.remove_snap(new_snap)

4. cinder/backup/drivers/ceph.py, line 515: the _rbd_diff_transfer function
This is where the real work happens.

    def _rbd_diff_transfer(self, src_name, src_pool, dest_name, dest_pool,
                           src_user, src_conf, dest_user, dest_conf,
                           src_snap=None, from_snap=None):
        """Copy only extents changed between two points.

        If no snapshot is provided, the diff extents will be all those changed
        since the rbd volume/base was created, otherwise it will be those
        changed since the snapshot was created.
        """
        LOG.debug("Performing differential transfer from '%(src)s' to "
                  "'%(dest)s'",
                  {'src': src_name, 'dest': dest_name})

        # NOTE(dosaboy): Need to be tolerant of clusters/clients that do
        # not support these operations since at the time of writing they
        # were very new.

        src_ceph_args = self._ceph_args(src_user, src_conf, pool=src_pool)
        dest_ceph_args = self._ceph_args(dest_user, dest_conf, pool=dest_pool)

        cmd1 = ['rbd', 'export-diff'] + src_ceph_args
        if from_snap is not None:
            cmd1.extend(['--from-snap', from_snap])
        if src_snap:
            path = utils.convert_str("%s/%s@%s"
                                     % (src_pool, src_name, src_snap))
        else:
            path = utils.convert_str("%s/%s" % (src_pool, src_name))
        cmd1.extend([path, '-'])

        cmd2 = ['rbd', 'import-diff'] + dest_ceph_args
        rbd_path = utils.convert_str("%s/%s" % (dest_pool, dest_name))
        cmd2.extend(['-', rbd_path])

        ret, stderr = self._piped_execute(cmd1, cmd2)
        if ret:
            msg = (_("RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)") %
                   {'ret': ret, 'stderr': stderr})
            LOG.info(msg)
            raise exception.BackupRBDOperationFailed(msg)
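_piped_execute() chains the two commands so that the diff stream goes straight from export-diff into import-diff over a pipe, i.e. the shell equivalent of rbd export-diff ... - | rbd import-diff - ..., without writing a temporary file. A sketch of such a helper using the standard subprocess piping pattern (the real helper lives in the same driver and additionally logs the commands):

import subprocess

def _piped_execute(cmd1, cmd2):
    """Run `cmd1 | cmd2` and return (returncode of cmd2, stderr of cmd2)."""
    p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    p2 = subprocess.Popen(cmd2, stdin=p1.stdout,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Close our copy of p1's stdout so p1 gets SIGPIPE if p2 exits early.
    p1.stdout.close()
    _stdout, stderr = p2.communicate()
    return p2.returncode, stderr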

In the end, a Ceph-backed Cinder backup boils down to three key operations:
1. First make a full copy of the volume (the backup base image)
2. Take a snapshot of the volume so there is a point-in-time reference

    rbd create --size <size> volumes/test
    rbd snap create volumes/test@snap

3. Perform incremental (differential) backups on top of that snapshot

Export the incremental diff:
    rbd export-diff volumes/test@snap snap1
Create a new base image:
    rbd create --size <size> backup/volume-$volume-id.backup.base
Import the incremental diff:
    rbd import-diff snap1 backup/volume-$volume-id.backup.base

Once these steps are done, the incremental import is complete.
