How Kubernetes mounts a Ceph RBD volume: a source code walkthrough

Kubernetes does not manage third-party storage itself; it only consumes it through volume plugins. Let's take a look at the code.

The first step is storage plugin initialization. In InitPlugins, each registered plugin is initialized through its Init method:

func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, host VolumeHost) error {
    pm.mutex.Lock()
    defer pm.mutex.Unlock()

    if pm.plugins == nil {
        pm.plugins = map[string]VolumePlugin{}
    }

    allErrs := []error{}
    for _, plugin := range plugins {
        name := plugin.GetPluginName()
        if errs := validation.IsQualifiedName(name); len(errs) != 0 {
            allErrs = append(allErrs, fmt.Errorf("volume plugin has invalid name: %q: %s", name, strings.Join(errs, ";")))
            continue
        }

        if _, found := pm.plugins[name]; found {
            allErrs = append(allErrs, fmt.Errorf("volume plugin %q was registered more than once", name))
            continue
        }
        err := plugin.Init(host)
        if err != nil {
            glog.Errorf("Failed to load volume plugin %s, error: %s", plugin, err.Error())
            allErrs = append(allErrs, err)
            continue
        }
        pm.plugins[name] = plugin
        glog.V(1).Infof("Loaded volume plugin %q", name)
    }
    return utilerrors.NewAggregate(allErrs)
}
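
For context, the plugins slice handed to InitPlugins is assembled by the kubelet, which appends the result of each in-tree package's ProbeVolumePlugins(); for rbd that is roughly the following (a sketch, details vary slightly between releases):

// pkg/volume/rbd/rbd.go (sketch): the rbd package contributes a single plugin
// instance; the kubelet appends this to the slice that InitPlugins receives.
func ProbeVolumePlugins() []volume.VolumePlugin {
    return []volume.VolumePlugin{&rbdPlugin{nil}}
}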

Now step into the rbd plugin's Init method:

func (plugin *rbdPlugin) Init(host volume.VolumeHost) error {
    plugin.host = host
    return nil
}
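
Init only stashes the VolumeHost, and that host is what the plugin later uses to resolve paths. For example, the per-pod mount point we will see in the mount output at the end comes from something close to this (a sketch, assuming the usual VolumeHost.GetPodVolumeDir helper):

// Sketch: build /var/lib/kubelet/pods/<podUID>/volumes/kubernetes.io~rbd/<volName>
// from the host saved in Init. utilstrings refers to the k8s util/strings package;
// EscapeQualifiedNameForDisk turns "kubernetes.io/rbd" into "kubernetes.io~rbd".
func (rbd *rbd) GetPath() string {
    name := rbdPluginName // "kubernetes.io/rbd"
    return rbd.plugin.host.GetPodVolumeDir(rbd.podUID, utilstrings.EscapeQualifiedNameForDisk(name), rbd.volName)
}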

Next comes the actual volume mount, driven by diskSetUp:

func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount.Interface, fsGroup *types.UnixGroupID) error {
    globalPDPath := manager.MakeGlobalPDName(*b.rbd)
    // TODO: handle failed mounts here.
    notMnt, err := mounter.IsLikelyNotMountPoint(volPath)

    if err != nil && !os.IsNotExist(err) {
        glog.Errorf("cannot validate mountpoint: %s", volPath)
        return err
    }
    if !notMnt {
        return nil
    }
    if err := manager.AttachDisk(b); err != nil {
        glog.Errorf("failed to attach disk")
        return err
    }

    if err := os.MkdirAll(volPath, 0750); err != nil {
        glog.Errorf("failed to mkdir:%s", volPath)
        return err
    }
    // Perform a bind mount to the full path to allow duplicate mounts of the same disk.
    options := []string{"bind"}
    if (&b).GetAttributes().ReadOnly {
        options = append(options, "ro")
    }
    mountOptions := volume.JoinMountOptions(b.mountOptions, options)
    err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
    if err != nil {
        glog.Errorf("failed to bind mount:%s", globalPDPath)
        return err
    }

    if !b.ReadOnly {
        volume.SetVolumeOwnership(&b, fsGroup)
    }

    return nil
}
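
The globalPDPath that diskSetUp bind-mounts from is a per-image directory under the kubelet plugin dir, produced by manager.MakeGlobalPDName; roughly (a sketch matching the path layout seen in the mount output below):

// Sketch: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/<pool>-image-<image>
func makePDNameInternal(host volume.VolumeHost, pool string, image string) string {
    return path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image)
}

func (util *RBDUtil) MakeGlobalPDName(rbd rbd) string {
    return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}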

diskSetUp calls AttachDisk first and then mounter.Mount. Let's look at AttachDisk:

func (util *RBDUtil) AttachDisk(b rbdMounter) error {
    var err error
    var output []byte

    // create mount point
    globalPDPath := b.manager.MakeGlobalPDName(*b.rbd)
    notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
    // in the first time, the path shouldn't exist and IsLikelyNotMountPoint is expected to get NotExist
    if err != nil && !os.IsNotExist(err) {
        return fmt.Errorf("rbd: %s failed to check mountpoint", globalPDPath)
    }
    if !notMnt {
        return nil
    }
    if err = os.MkdirAll(globalPDPath, 0750); err != nil {
        return fmt.Errorf("rbd: failed to mkdir %s, error", globalPDPath)
    }

    devicePath, found := waitForPath(b.Pool, b.Image, 1)
    if !found {
        // modprobe
        _, err = b.plugin.execCommand("modprobe", []string{"rbd"})
        if err != nil {
            return fmt.Errorf("rbd: failed to modprobe rbd error:%v", err)
        }

        // fence off other mappers
        if err = util.fencing(b); err != nil {
            return fmt.Errorf("rbd: image %s is locked by other nodes", b.Image)
        }
        // rbd lock remove needs ceph and image config
        // but kubelet doesn't get them from apiserver during teardown
        // so persist rbd config so that upon disk detach, the rbd lock can be removed
        // since rbd json is persisted in the same local directory that is used as rbd mountpoint later,
        // the json file remains invisible during rbd mount and thus won't be removed accidentally.
        util.persistRBD(b, globalPDPath)

        // rbd map
        l := len(b.Mon)
        // avoid mount storm, pick a host randomly
        start := rand.Int() % l
        // iterate all hosts until mount succeeds.
        for i := start; i < start+l; i++ {
            mon := b.Mon[i%l]
            glog.V(1).Infof("rbd: map mon %s", mon)
            if b.Secret != "" {
                output, err = b.plugin.execCommand("rbd",
                    []string{"map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "--key=" + b.Secret})
            } else {
                output, err = b.plugin.execCommand("rbd",
                    []string{"map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "-k", b.Keyring})
            }
            if err == nil {
                break
            }
            glog.V(1).Infof("rbd: map error %v %s", err, string(output))
        }
        if err != nil {
            return fmt.Errorf("rbd: map failed %v %s", err, string(output))
        }
        devicePath, found = waitForPath(b.Pool, b.Image, 10)
        if !found {
            return errors.New("Could not map image: Timeout after 10s")
        }
    }

    // mount it
    if err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil); err != nil {
        err = fmt.Errorf("rbd: failed to mount rbd volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err)
    }
    return err
}
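
waitForPath, used twice above, is how the plugin finds out which /dev/rbdX the kernel assigned after rbd map: it polls sysfs for a device whose pool and name match. A simplified, self-contained sketch (the in-tree version differs in details):

import (
    "io/ioutil"
    "os"
    "path"
    "strings"
    "time"
)

// getDevFromImageAndPool scans /sys/bus/rbd/devices/<id>/{pool,name} for an entry
// matching the requested pool and image, and returns the corresponding /dev/rbd<id>.
func getDevFromImageAndPool(pool, image string) (string, bool) {
    sysPath := "/sys/bus/rbd/devices"
    dirs, err := ioutil.ReadDir(sysPath)
    if err != nil {
        return "", false
    }
    for _, f := range dirs {
        id := f.Name()
        p, errPool := ioutil.ReadFile(path.Join(sysPath, id, "pool"))
        n, errName := ioutil.ReadFile(path.Join(sysPath, id, "name"))
        if errPool != nil || errName != nil {
            continue
        }
        if strings.TrimSpace(string(p)) != pool || strings.TrimSpace(string(n)) != image {
            continue
        }
        devicePath := "/dev/rbd" + id
        if _, err := os.Lstat(devicePath); err == nil {
            return devicePath, true
        }
    }
    return "", false
}

// waitForPath retries once per second, because the device node only shows up
// asynchronously after "rbd map" returns.
func waitForPath(pool, image string, maxRetries int) (string, bool) {
    for i := 0; i < maxRetries; i++ {
        if devicePath, found := getDevFromImageAndPool(pool, image); found {
            return devicePath, true
        }
        time.Sleep(time.Second)
    }
    return "", false
}

This explains the AttachDisk flow: the first waitForPath(b.Pool, b.Image, 1) checks whether the image is already mapped on this host; only if it is not does the plugin run rbd map and then wait up to 10 seconds for the device to appear.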

AttachDisk above only maps the RBD image and mounts it on the host (at the global PD path). mounter.Mount then bind-mounts that directory into the pod's volume directory, which is what the container sees:

func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
    // Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty.
    // All Linux distros are expected to be shipped with a mount utility that can support bind mounts.
    mounterPath := ""
    bind, bindRemountOpts := isBind(options)
    if bind {
        err := doMount(mounterPath, defaultMountCommand, source, target, fstype, []string{"bind"})
        if err != nil {
            return err
        }
        return doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts)
    }
    // The list of filesystems that require containerized mounter on GCI image cluster
    fsTypesNeedMounter := sets.NewString("nfs", "glusterfs", "ceph", "cifs")
    if fsTypesNeedMounter.Has(fstype) {
        mounterPath = mounter.mounterPath
    }
    return doMount(mounterPath, defaultMountCommand, source, target, fstype, options)
}
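
In the bind branch above, isBind splits the requested options: the first doMount performs a plain bind mount, and the remaining options (ro and any user-supplied mountOptions) are applied by a second remount, since the kernel ignores most flags on the initial bind. Roughly (a sketch of the helper):

// Sketch of isBind: report whether "bind" was requested, and build the option list
// for the follow-up remount ("remount" plus every option other than "bind").
func isBind(options []string) (bool, []string) {
    bindRemountOpts := []string{"remount"}
    bind := false
    for _, option := range options {
        switch option {
        case "bind":
            bind = true
        case "remount":
            // "remount" is already the first element, skip duplicates
        default:
            bindRemountOpts = append(bindRemountOpts, option)
        }
    }
    return bind, bindRemountOpts
}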

The effect on the node looks like this:

mount |grep rbd0

/dev/rbd0 on /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/tenx-pool-image-qinzhao.CID-ca4135da3326.datadir-zookeeper-2 type ext4 (rw,relatime,stripe=1024,data=ordered)

/dev/rbd0 on /var/lib/kubelet/pods/d03c7a87-4b3c-11e7-9b5e-5254eec04736/volumes/kubernetes.io~rbd/qinzhao.datadir-zookeeper-2 type ext4 (rw,relatime,stripe=1024,data=ordered)

The first entry is the mount on the host (the global PD path built by MakeGlobalPDName); the second is the per-pod bind mount (the path returned by GetPath) that ends up inside the container.
