Reading the kubelet source code, version 1.1.2 (part 2)

This post mainly analyzes how syncLoopIteration responds to pod operations.

1. How syncLoopIteration actually runs the relevant pods

func (kl *Kubelet) syncLoopIteration(updates <-chan PodUpdate, handler SyncHandler) bool {
    /*
        syncLoopIteration reads pod information from the updates channel and,
        based on u.Op (ADD, UPDATE, REMOVE), calls the corresponding handler.
        It is executed over and over in a loop.
        In essence it routes each incoming PodUpdate to a handler -- think of it as a dispatch function.
    */
    // Consumes PodUpdate; handler is in fact the kubelet itself.
    // Pod information is pulled off the channel and routed to a different handler based on the operation type.
    kl.syncLoopMonitor.Store(time.Now())
    select {

    case u, open := <-updates:
        if !open {
            glog.Errorf("Update channel is closed. Exiting the sync loop.")
            return false
        }
        switch u.Op {
        /*
            Note that ADD, UPDATE, and sync all end up calling
                func (kl *Kubelet) dispatchWork(pod *api.Pod, syncType SyncPodType, mirrorPod *api.Pod, start time.Time);
            dispatchWork is tied to the podWorkers; how the podWorkers relate to the podManager needs further study.
            kubectl create -f xx.yaml takes the ADD path: reflect.ValueOf(u.Op) = 1.
            kubectl delete rc xxx first takes the UPDATE path (reflect.ValueOf(u.Op) = 3), then the REMOVE path (reflect.ValueOf(u.Op) = 2).
        */
        case ADD:
            glog.V(2).Infof("SyncLoop (ADD): %q", kubeletUtil.FormatPodNames(u.Pods))
            handler.HandlePodAdditions(u.Pods)
        case UPDATE:
            glog.V(2).Infof("SyncLoop (UPDATE): %q", kubeletUtil.FormatPodNames(u.Pods))
            // Defined below: func (kl *Kubelet) HandlePodUpdates(pods []*api.Pod)
            handler.HandlePodUpdates(u.Pods)
        case REMOVE:
            glog.V(2).Infof("SyncLoop (REMOVE): %q", kubeletUtil.FormatPodNames(u.Pods))
            handler.HandlePodDeletions(u.Pods)
        case SET:
            // TODO: Do we want to support this?
            glog.Errorf("Kubelet does not support snapshot update")
        }
    case <-kl.resyncTicker.C:
        // Periodically syncs all the pods and performs cleanup tasks.
        /*
            The periodic sync calls func (kl *Kubelet) HandlePodSyncs(pods []*api.Pod).
            This fires over and over on a fixed interval, even when no state has changed.
        */
        glog.V(4).Infof("SyncLoop (periodic sync)")
        handler.HandlePodSyncs(kl.podManager.GetPods())
    }
    kl.syncLoopMonitor.Store(time.Now())
    return true
}

Once the k8s system is up and running, syncLoopIteration() keeps looping: kl.syncLoopMonitor.Store(time.Now()) and the periodic sync execute continuously, while the add, update, and delete operations run only when the corresponding events are triggered.
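
For reference, the PodUpdate message consumed above looks roughly like the following (a sketch of pkg/kubelet/types.go from the 1.1.x tree, reproduced from memory; note how the iota ordering matches the reflect.ValueOf(u.Op) values observed in the comments above):

type PodOperation int

const (
    // SET is the current pod configuration (a full snapshot).
    SET PodOperation = iota // 0
    // ADD: the given pods are new to this source.
    ADD // 1
    // REMOVE: the given pods have been removed from this source.
    REMOVE // 2
    // UPDATE: the given pods have been updated in this source.
    UPDATE // 3
)

// PodUpdate is what flows through the updates channel.
type PodUpdate struct {
    Pods   []*api.Pod
    Op     PodOperation
    Source string
}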

2. The add, update, delete, and sync operations on pods

func (kl *Kubelet) HandlePodAdditions(pods []*api.Pod) {
    /*
        The function first sorts the pods by creation time (see the podsByCreationTime
        sketch after this function) and processes them in that order.
        kl.podManager.AddPod(pod) registers the pod with the podManager, an important
        structure that all the managers discussed earlier depend on.

        There is also the notion of a mirror pod, which mainly matters when the kubelet
        runs in standalone mode: a pod obtained via file or http is called a static pod,
        and k8s creates a corresponding mirror pod for it in the cluster.

        The function then calls kl.dispatchWork to process the pod, and finally adds the
        pod to the probeManager. k8s has two kinds of probes: readiness probes and
        liveness probes.
    */
    start := time.Now()
    sort.Sort(podsByCreationTime(pods))
    for _, pod := range pods {
        kl.podManager.AddPod(pod)
        // For kubectl create -f xx.yaml, isMirrorPod(pod) = false
        if isMirrorPod(pod) {
            kl.handleMirrorPod(pod, start)
            continue
        }
        // Note that allPods includes the new pod since we added at the
        // beginning of the loop.
        allPods := kl.podManager.GetPods()
        // We failed pods that we rejected, so activePods include all admitted
        // pods that are alive and the new pod.
        activePods := kl.filterOutTerminatedPods(allPods)
        // Check if we can admit the pod; if not, reject it.
        if ok, reason, message := kl.canAdmitPod(activePods, pod); !ok {
            kl.rejectPod(pod, reason, message)
            continue
        }
        mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
        kl.dispatchWork(pod, SyncPodCreate, mirrorPod, start)
    }
}
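
The podsByCreationTime type handed to sort.Sort above is just a sort.Interface over the pods' creation timestamps. A minimal sketch, assuming the standard implementation:

type podsByCreationTime []*api.Pod

func (s podsByCreationTime) Len() int      { return len(s) }
func (s podsByCreationTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s podsByCreationTime) Less(i, j int) bool {
    // Pods with an earlier CreationTimestamp sort first, so older pods
    // are admitted and dispatched before newer ones.
    return s[i].CreationTimestamp.Before(s[j].CreationTimestamp)
}
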
/*
    HandlePodDeletions is fairly simple. It first calls kl.podManager.DeletePod(pod);
    the podManager object stores the information of every pod assigned to this node.
    Mirror pods get their own handling.
    It then calls kl.deletePod(pod) to delete the actual pod, and finally removes the
    pod from the probeManager.
*/
func (kl *Kubelet) HandlePodDeletions(pods []*api.Pod) {
    start := time.Now()
    for _, pod := range pods {

        kl.podManager.DeletePod(pod)
        if isMirrorPod(pod) {
            kl.handleMirrorPod(pod, start)
            continue
        }
        // Deletion is allowed to fail because the periodic cleanup routine
        // will trigger deletion again.

        // Call deletePod
        if err := kl.deletePod(pod.UID); err != nil {
            glog.V(2).Infof("Failed to delete pod %q, err: %v", kubeletUtil.FormatPodName(pod), err)
        }
    }
}
/*
    HandlePodUpdates follows much the same logic as HandlePodAdditions:
    it hands the pods to be updated to the pod workers via dispatchWork.
    The second argument to dispatchWork is SyncPodUpdate here, whereas for
    creation it is SyncPodCreate.
*/
func (kl *Kubelet) HandlePodUpdates(pods []*api.Pod) {
    start := time.Now()
    for _, pod := range pods {
        // Update the pod via podManager.UpdatePod.
        // TODO: what exactly do kl.podManager.UpdatePod(pod) and dispatchWork() each accomplish?
        kl.podManager.UpdatePod(pod)
        if isMirrorPod(pod) {
            kl.handleMirrorPod(pod, start)
            continue
        }
        // TODO: Evaluate if we need to validate and reject updates.

        mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
        kl.dispatchWork(pod, SyncPodUpdate, mirrorPod, start)
    }
}

The sync operation

func (kl *Kubelet) HandlePodSyncs(pods []*api.Pod) {
    start := time.Now()
    for _, pod := range pods {
        mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
        kl.dispatchWork(pod, SyncPodSync, mirrorPod, start)
    }
}
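
The SyncPodType passed as the second argument to dispatchWork distinguishes these cases. Roughly (the ordering of the constants is from memory and should be treated as a sketch):

type SyncPodType int

const (
    // SyncPodSync is used for the periodic resync.
    SyncPodSync SyncPodType = iota
    // SyncPodUpdate is used when a pod (or its mirror pod) changes.
    SyncPodUpdate
    // SyncPodCreate is used the first time a pod is synced.
    SyncPodCreate
)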

Two things stand out:
1) All of them handle static pods by calling handleMirrorPod().

// TODO: Consider handling all mirror pods updates in a separate component.
func (kl *Kubelet) handleMirrorPod(mirrorPod *api.Pod, start time.Time) {
    // Mirror pod ADD/UPDATE/DELETE operations are considered an UPDATE to the
    // corresponding static pod. Send update to the pod worker if the static
    // pod exists.

    // In other words: if the static pod exists, the update is sent to the pod
    // worker -- by calling dispatchWork.
    if pod, ok := kl.podManager.GetPodByMirrorPod(mirrorPod); ok {
        /*
            SyncPodUpdate acts as a flag for the kind of sync;
            dispatchWork will invoke the podWorkers.
        */
        kl.dispatchWork(pod, SyncPodUpdate, mirrorPod, start)
    }
}

2) Except for delete, the other three operations all end up calling dispatchWork.
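
For completeness, the isMirrorPod() check that gates all of these handlers is just an annotation lookup; a sketch assuming the annotation-based implementation (ConfigMirrorAnnotationKey is stamped on the mirror pod when the kubelet creates it in the apiserver):

func isMirrorPod(pod *api.Pod) bool {
    // A mirror pod is recognized purely by the annotation the kubelet
    // put on it when creating it in the apiserver.
    _, ok := pod.Annotations[ConfigMirrorAnnotationKey]
    return ok
}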

3. The add and update operations

Starting from dispatchWork(...):

func (kl *Kubelet) dispatchWork(pod *api.Pod, syncType SyncPodType, mirrorPod *api.Pod, start time.Time) {
    /*
        The main flow is to perform an UpdatePod operation via the kubelet's podWorkers object.
        kubectl create/delete -f xx.yaml also ends up inside kl.podWorkers.UpdatePod(pod, mirrorPod, func() {...}),
        defined in pkg/kubelet/pod_workers.go:
            func (p *podWorkers) UpdatePod(pod *api.Pod, mirrorPod *api.Pod, updateComplete func())
    */
    if kl.podIsTerminated(pod) {
        return
    }
    // Run the sync in an async worker.
    // ********************* the core of dispatchWork *********************
    kl.podWorkers.UpdatePod(pod, mirrorPod, func() {
        metrics.PodWorkerLatency.WithLabelValues(syncType.String()).Observe(metrics.SinceInMicroseconds(start))
    })
    // Note the number of containers for new pods.
    if syncType == SyncPodCreate {
        // Record how many containers the just-created pod contains: len(pod.Spec.Containers)
        metrics.ContainersPerPodCount.Observe(float64(len(pod.Spec.Containers)))
    }
}
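
dispatchWork bails out early for terminated pods. podIsTerminated() is not shown in this post; a sketch, assuming it consults the statusManager and falls back to the pod's own status (the same check plausibly backs the filterOutTerminatedPods() seen in HandlePodAdditions):

func (kl *Kubelet) podIsTerminated(pod *api.Pod) bool {
    // Prefer the locally cached status; fall back to the status carried
    // on the pod object itself.
    status, ok := kl.statusManager.GetPodStatus(pod.UID)
    if !ok {
        status = pod.Status
    }
    return status.Phase == api.PodFailed || status.Phase == api.PodSucceeded
}

func (kl *Kubelet) filterOutTerminatedPods(allPods []*api.Pod) []*api.Pod {
    var pods []*api.Pod
    for _, pod := range allPods {
        if kl.podIsTerminated(pod) {
            continue // drop pods in a Failed or Succeeded phase
        }
        pods = append(pods, pod)
    }
    return pods
}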

Entering podWorkers.UpdatePod().
First, the fields of the podWorkers struct:

/*
    podWorkers is an important struct, shown below.
    podUpdates is a map keyed by each pod's UID,
    with a channel of workUpdate items as the value.
*/
type podWorkers struct {
    // Protects all per worker fields.
    podLock sync.Mutex

    // Tracks all running per-pod goroutines - per-pod goroutine will be
    // processing updates received through its corresponding channel.
    // The most important field: podUpdates opens one communication channel per pod UID.
    podUpdates map[types.UID]chan workUpdate
    // Track the current state of per-pod goroutines.
    // Currently all update request for a given pod coming when another
    // update of this pod is being processed are ignored.
    isWorking map[types.UID]bool
    // Tracks the last undelivered work item for this pod - a work item is
    // undelivered if it comes in while the worker is working.
    lastUndeliveredWorkUpdate map[types.UID]workUpdate
    // runtimeCache is used for listing running containers.
    runtimeCache kubecontainer.RuntimeCache

    // This function is run to sync the desired state of the pod.
    // NOTE: This function has to be thread-safe - it can be called for
    // different pods at the same time.
    syncPodFn syncPodFnType

    // The EventRecorder to use
    recorder record.EventRecorder
}
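
Two small definitions from the same file help read what follows: the workUpdate item carried on those channels, and the syncPodFn signature. Both are inferred from the usages below (the struct literal in UpdatePod names exactly these fields), so treat the details as a sketch:

// syncPodFnType matches the signature of Kubelet.syncPod.
type syncPodFnType func(*api.Pod, *api.Pod, kubecontainer.Pod, SyncPodType) error

// workUpdate is one unit of work sent to a per-pod worker goroutine.
type workUpdate struct {
    pod              *api.Pod
    mirrorPod        *api.Pod
    updateCompleteFn func()      // invoked once the sync finishes
    updateType       SyncPodType // SyncPodCreate, SyncPodUpdate, ...
}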

Entering the UpdatePod function:

// Apply the new setting to the specified pod. updateComplete is called when the update is completed.
// Every operation -- kubectl create/delete/scale and so on -- ultimately flows through this function.
// That is, whether it is an add, a delete, or anything else, it all goes through podWorkers.UpdatePod(...).
func (p *podWorkers) UpdatePod(pod *api.Pod, mirrorPod *api.Pod, updateComplete func()) {
    // Open a channel for a pod with a new UID; the real work happens in managePodLoop.

    /*
        ******** Taking a newly created pod as the example ********
        The function first checks the podUpdates map. Since the pod was just created there
        is no entry, so it spawns a goroutine running managePodLoop.
        Note: every pod gets its own goroutine running managePodLoop, whose podUpdates
        channel parameter is used to pass pod update information to it.
    */
    uid := pod.UID
    var podUpdates chan workUpdate
    var exists bool

    // TODO: Pipe this through from the kubelet. Currently kubelets operating with
    // snapshot updates (PodConfigNotificationSnapshot) will send updates, creates
    // and deletes as SET operations, which makes updates indistinguishable from
    // creates. The intent here is to communicate to the pod worker that it can take
    // certain liberties, like skipping status generation, when it receives a create
    // event for a pod.
    updateType := SyncPodUpdate

    p.podLock.Lock()
    defer p.podLock.Unlock()
    if podUpdates, exists = p.podUpdates[uid]; !exists {
        // We need to have a buffer here, because checkForUpdates() method that
        // puts an update into channel is called from the same goroutine where
        // the channel is consumed. However, it is guaranteed that in such case
        // the channel is empty, so buffer of size 1 is enough.
        podUpdates = make(chan workUpdate, 1)
        p.podUpdates[uid] = podUpdates

        // Creating a new pod worker either means this is a new pod, or that the
        // kubelet just restarted. In either case the kubelet is willing to believe
        // the status of the pod for the first pod worker sync. See corresponding
        // comment in syncPod.
        updateType = SyncPodCreate
        go func() {
            defer util.HandleCrash()
            // ***************** important!!!!
            // Every pod gets its own goroutine running managePodLoop; its podUpdates
            // channel parameter is used to pass pod update information to it.
            p.managePodLoop(podUpdates)
        }()
    }
    // isWorking guards against re-entrant work for the same pod. New work is sent
    // into the channel; the real work happens in managePodLoop.
    if !p.isWorking[pod.UID] {
        p.isWorking[pod.UID] = true
        podUpdates <- workUpdate{
            pod:              pod,
            mirrorPod:        mirrorPod,
            updateCompleteFn: updateComplete,
            updateType:       updateType,
        }
    } else {
        p.lastUndeliveredWorkUpdate[pod.UID] = workUpdate{
            pod:              pod,
            mirrorPod:        mirrorPod,
            updateCompleteFn: updateComplete,
            updateType:       updateType,
        }
    }
}
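
The lastUndeliveredWorkUpdate entry written above is drained by checkForUpdates(), which managePodLoop (shown next) defers after every item it processes. A minimal sketch, assuming the standard implementation:

func (p *podWorkers) checkForUpdates(uid types.UID, updateComplete func()) {
    p.podLock.Lock()
    defer p.podLock.Unlock()
    if workUpdate, exists := p.lastUndeliveredWorkUpdate[uid]; exists {
        // Deliver the work item that arrived while the worker was busy.
        p.podUpdates[uid] <- workUpdate
        delete(p.lastUndeliveredWorkUpdate, uid)
    } else {
        // Nothing pending: mark the worker as idle again.
        p.isWorking[uid] = false
    }
}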

Entering p.managePodLoop(podUpdates):

func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
    /*
        This is an endless loop -- the consumer.
        UpdatePod above is the producer, sending pods to this consumer through the channel.
        It first fetches the pods from the runtimeCache, then calls syncPodFn (passed in
        when the podWorkers struct was built; it is in fact the kubelet's syncPod,
        analyzed further below), defined in pkg/kubelet/kubelet.go:
        func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecontainer.Pod, updateType SyncPodType)
    */
    var minRuntimeCacheTime time.Time
    for newWork := range podUpdates {
        func() {
            defer p.checkForUpdates(newWork.pod.UID, newWork.updateCompleteFn)
            // We would like to have the state of the containers from at least
            // the moment when we finished the previous processing of that pod.
            if err := p.runtimeCache.ForceUpdateIfOlder(minRuntimeCacheTime); err != nil {
                glog.Errorf("Error updating the container runtime cache: %v", err)
                return
            }
            pods, err := p.runtimeCache.GetPods()
            if err != nil {
                glog.Errorf("Error getting pods while syncing pod: %v", err)
                return
            }
            // managePodLoop calls syncPodFn to sync the pod; syncPodFn is in fact the syncPod function.
            err = p.syncPodFn(newWork.pod, newWork.mirrorPod,
                kubecontainer.Pods(pods).FindPodByID(newWork.pod.UID), newWork.updateType)
            if err != nil {
                glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err)
                p.recorder.Eventf(newWork.pod, "FailedSync", "Error syncing pod, skipping: %v", err)
                return
            }
            minRuntimeCacheTime = time.Now()

            newWork.updateCompleteFn()
        }()
    }
}
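
How syncPodFn is bound to the kubelet's own syncPod: podWorkers is built by a constructor along these lines, with NewMainKubelet passing klet.syncPod in (names as I recall them from the 1.1.x source; a sketch, not verbatim):

func newPodWorkers(runtimeCache kubecontainer.RuntimeCache, syncPodFn syncPodFnType,
    recorder record.EventRecorder) *podWorkers {
    return &podWorkers{
        podUpdates:                map[types.UID]chan workUpdate{},
        isWorking:                 map[types.UID]bool{},
        lastUndeliveredWorkUpdate: map[types.UID]workUpdate{},
        runtimeCache:              runtimeCache,
        syncPodFn:                 syncPodFn, // here: the kubelet's syncPod method
        recorder:                  recorder,
    }
}

// In NewMainKubelet, roughly:
//     klet.podWorkers = newPodWorkers(runtimeCache, klet.syncPod, recorder)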

Entering func (kl *Kubelet) syncPod(...) in pkg/kubelet/kubelet.go:

func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecontainer.Pod, updateType SyncPodType) error {
    /*
        syncPod is fairly long; its main steps are:
        makePodDataDirs creates the pod's data directory under /var/lib/kubelet, named after the pod UID.
        kl.mountExternalVolumes mounts the volumes the pod depends on (and kl.volumeManager records them).
        kl.getPullSecretsForPod fetches the image-pull secrets.
        Finally kl.containerRuntime.SyncPod is called to sync the pod.
        ****
        Taking docker as the example, that SyncPod method is defined in pkg/kubelet/dockertools/manager.go:
            func (dm *DockerManager) SyncPod(...)
    */

    podFullName := kubecontainer.GetPodFullName(pod)
    uid := pod.UID
    start := time.Now()
    var firstSeenTime time.Time
    if firstSeenTimeStr, ok := pod.Annotations[ConfigFirstSeenAnnotationKey]; !ok {
        glog.V(3).Infof("First seen time not recorded for pod %q", pod.UID)
    } else {
        firstSeenTime = kubeletTypes.ConvertToTimestamp(firstSeenTimeStr).Get()
    }

    // Before returning, regenerate status and store it in the cache.
    defer func() {
        if isStaticPod(pod) && mirrorPod == nil {
            // No need to cache the status because the mirror pod does not
            // exist yet.
            return
        }
        status, err := kl.generatePodStatus(pod)
        if err != nil {
            glog.Errorf("Unable to generate status for pod with name %q and uid %q info with error(%v)", podFullName, uid, err)
        } else {
            podToUpdate := pod
            if mirrorPod != nil {
                podToUpdate = mirrorPod
            }
            existingStatus, ok := kl.statusManager.GetPodStatus(podToUpdate.UID)
            if !ok || existingStatus.Phase == api.PodPending && status.Phase == api.PodRunning &&
                !firstSeenTime.IsZero() {
                metrics.PodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
            }
            kl.statusManager.SetPodStatus(podToUpdate, status)
        }
    }()
    // The deferred block above runs when the function returns.
    //*******************************************************
    // Kill pods we can't run.
    if err := canRunPod(pod); err != nil || pod.DeletionTimestamp != nil {
        if err := kl.killPod(pod, runningPod); err != nil {
            util.HandleError(err)
        }
        return err
    }

    if err := kl.makePodDataDirs(pod); err != nil {
        glog.Errorf("Unable to make pod data directories for pod %q (uid %q): %v", podFullName, uid, err)
        return err
    }

    // Starting phase:
    ref, err := api.GetReference(pod)
    if err != nil {
        glog.Errorf("Couldn't make a ref to pod %q: '%v'", podFullName, err)
    }

    // Mount volumes.
    podVolumes, err := kl.mountExternalVolumes(pod)
    if err != nil {
        if ref != nil {
            kl.recorder.Eventf(ref, "FailedMount", "Unable to mount volumes for pod %q: %v", podFullName, err)
        }
        glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", podFullName, err)
        return err
    }
    kl.volumeManager.SetVolumes(pod.UID, podVolumes)
    // In the code above, the real actions begin: killPod is called when the pod cannot run,
    //*******************************************************
    // then the volumes are resolved and mounted.
    var podStatus api.PodStatus
    if updateType == SyncPodCreate {
        // This is the first time we are syncing the pod. Record the latency
        // since kubelet first saw the pod if firstSeenTime is set.
        if !firstSeenTime.IsZero() {
            metrics.PodWorkerStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
        }

        podStatus = pod.Status
        podStatus.StartTime = &unversioned.Time{Time: start}
        kl.statusManager.SetPodStatus(pod, podStatus)
        glog.V(3).Infof("Not generating pod status for new pod %q", podFullName)
    } else {
        var err error
        podStatus, err = kl.generatePodStatus(pod)
        if err != nil {
            glog.Errorf("Unable to get status for pod %q (uid %q): %v", podFullName, uid, err)
            return err
        }
    }

    pullSecrets, err := kl.getPullSecretsForPod(pod)
    if err != nil {
        glog.Errorf("Unable to get pull secrets for pod %q (uid %q): %v", podFullName, uid, err)
        return err
    }

    err = kl.containerRuntime.SyncPod(pod, runningPod, podStatus, pullSecrets, kl.backOff)
    if err != nil {
        return err
    }
    /*
        After this series of steps, containerRuntime.SyncPod has been called --
        the containerRuntime wrapping Docker that we analyzed earlier.
        *******************************************************
    */
    ingress, egress, err := extractBandwidthResources(pod)
    if err != nil {
        return err
    }
    if egress != nil || ingress != nil {
        if pod.Spec.HostNetwork {
            kl.recorder.Event(pod, "HostNetworkNotSupported", "Bandwidth shaping is not currently supported on the host network")
        } else if kl.shaper != nil {
            status, found := kl.statusManager.GetPodStatus(pod.UID)
            if !found {
                statusPtr, err := kl.containerRuntime.GetPodStatus(pod)
                if err != nil {
                    glog.Errorf("Error getting pod for bandwidth shaping")
                    return err
                }
                status = *statusPtr
            }
            if len(status.PodIP) > 0 {
                err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", status.PodIP), egress, ingress)
            }
        } else {
            kl.recorder.Event(pod, "NilShaper", "Pod requests bandwidth shaping, but the shaper is undefined")
        }
    }

    if isStaticPod(pod) {
        if mirrorPod != nil && !kl.podManager.IsMirrorPodOf(mirrorPod, pod) {
            // The mirror pod is semantically different from the static pod. Remove
            // it. The mirror pod will get recreated later.
            glog.Errorf("Deleting mirror pod %q because it is outdated", podFullName)
            if err := kl.podManager.DeleteMirrorPod(podFullName); err != nil {
                glog.Errorf("Failed deleting mirror pod %q: %v", podFullName, err)
            }
        }
        if mirrorPod == nil {
            glog.V(3).Infof("Creating a mirror pod %q", podFullName)
            if err := kl.podManager.CreateMirrorPod(pod); err != nil {
                glog.Errorf("Failed creating a mirror pod %q: %v", podFullName, err)
            }
        }
    }
    return nil
}

Entering DockerManager's SyncPod function:

// Sync the running pod to match the specified desired pod.
/*
    The idea behind func (dm *DockerManager) SyncPod():
    Overall, it calls dm.computePodContainerChanges, which returns a PodContainerChangesSpec:
        -> containerChanges, err := dm.computePodContainerChanges(pod, runningPod, podStatus)
    SyncPod is called not only on creation but also every time the pod is updated.

    PodContainerChangesSpec therefore captures the changes the pod needs:
    e.g. whether the infra container must be started, indicated by the bool StartInfraContainer.
    The infra container is a special container: when a pod is created, the infra container
    is started first, and the user-defined containers afterwards; they share the infra
    container's net, uts, and ipc namespaces.
    ContainersToStart is a map of the containers that need to be started,
    while ContainersToKeep holds the containers the pod keeps (mainly for the update case).
*/
func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) error {
    ...
    ...
    ...
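
A rough shape of the PodContainerChangesSpec returned by computePodContainerChanges (field names as I recall them from dockertools/manager.go in this era; treat this as a sketch):

type empty struct{}

type PodContainerChangesSpec struct {
    // StartInfraContainer is true if the pod infra container must be
    // (re)started; all user containers join its net/uts/ipc namespaces.
    StartInfraContainer bool
    InfraContainerId    kubeletTypes.DockerID
    // ContainersToStart: indexes into pod.Spec.Containers that must be started.
    ContainersToStart map[int]empty
    // ContainersToKeep: running docker IDs mapped to their spec index;
    // on update, these containers are left running.
    ContainersToKeep map[kubeletTypes.DockerID]int
}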

Call chain:

kubelet syncLoopIteration() --> dispatchWork() --> podWorkers.UpdatePod() --> Kubelet.syncPod() --> DockerManager.SyncPod()

How k8s calls docker:
kubelet.containerRuntime = DockerManager,
and DockerManager contains a dockerClient, which wraps the address of the docker endpoint and the operations against it
(the exact relationship between DockerManager and dockerClient still needs to be sorted out).
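
containerRuntime here is the kubecontainer.Runtime interface. An excerpt limited to the methods exercised in this post, with signatures reconstructed from the call sites above (a sketch rather than the authoritative definition):

type Runtime interface {
    // SyncPod drives the running pod toward the desired pod spec.
    SyncPod(pod *api.Pod, runningPod Pod, podStatus api.PodStatus,
        pullSecrets []api.Secret, backOff *util.Backoff) error
    // KillPod kills all of a pod's running containers, including the infra container.
    KillPod(pod *api.Pod, runningPod Pod) error
    // GetPodStatus returns the current status of the pod.
    GetPodStatus(pod *api.Pod) (*api.PodStatus, error)
    // ... other methods omitted
}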

4. The delete operation

func (kl *Kubelet) HandlePodDeletions(pods []*api.Pod) was already shown in section 2 above.
Entering the kubelet's deletePod function:

func (kl *Kubelet) deletePod(uid types.UID) error {

    if !kl.sourcesReady() {
        // If the sources aren't ready, skip deletion, as we may accidentally delete pods
        // for sources that haven't reported yet.
        return fmt.Errorf("skipping delete because sources aren't ready yet")
    }
    kl.podWorkers.ForgetWorker(uid)

    // Runtime cache may not have been updated to with the pod, but it's okay
    // because the periodic cleanup routine will attempt to delete again later.
    runningPods, err := kl.runtimeCache.GetPods()
    if err != nil {
        return fmt.Errorf("error listing containers: %v", err)
    }
    pod := kubecontainer.Pods(runningPods).FindPod("", uid)
    if pod.IsEmpty() {
        return fmt.Errorf("pod not found")
    }

    /*
        ***** The pod is handed to podKillingCh through the channel. *****
        func (kl *Kubelet) podKiller(), defined below, consumes kl.podKillingCh.

        podKiller was started earlier, in func (kl *Kubelet) Run(updates <-chan PodUpdate),
        via go util.Until(kl.podKiller, 1*time.Second, util.NeverStop).
    */
    kl.podKillingCh <- &pod
    // TODO: delete the mirror pod here?

    // We leave the volume/directory cleanup to the periodic cleanup routine.
    return nil
}

Entering the podKiller() function:

// podKiller launches a goroutine to kill a pod received from the channel if
// another goroutine isn't already in action.
func (kl *Kubelet) podKiller() {
    /*
        The podKiller goroutine ultimately calls kl.containerRuntime.KillPod to delete the pod,
        via kl.killPod(nil, *pod).
    */
    killing := sets.NewString()
    resultCh := make(chan types.UID)
    defer close(resultCh)
    for {
        select {
        // Receive a pod to be deleted from the channel.
        case pod, ok := <-kl.podKillingCh:
            if !ok {
                return
            }
            if killing.Has(string(pod.ID)) {
                // The pod is already being killed.
                break
            }
            killing.Insert(string(pod.ID))
            go func(pod *kubecontainer.Pod, ch chan types.UID) {
                defer func() {
                    ch <- pod.ID
                }()
                glog.V(2).Infof("Killing unwanted pod %q", pod.Name)
                // Having received a pod that needs to be killed, finally call killPod.
                err := kl.killPod(nil, *pod)
                if err != nil {
                    glog.Errorf("Failed killing the pod %q: %v", pod.Name, err)
                }
            }(pod, resultCh)

        case podID := <-resultCh:
            killing.Delete(string(podID))
        }
    }
}

Entering err := kl.killPod(nil, *pod):

// Kill all running containers in a pod (includes the pod infra container).
func (kl *Kubelet) killPod(pod *api.Pod, runningPod kubecontainer.Pod) error {
    // Ultimately calls the containerRuntime's KillPod.
    return kl.containerRuntime.KillPod(pod, runningPod)
}

Call chain:

kubelet HandlePodDeletions() --> kl.deletePod() --> kl.podKillingCh --> podKiller() --> kl.killPod() --> containerRuntime.KillPod()

References:

http://licyhust.com/容器技术/2016/10/20/kubelet-1/
http://blog.csdn.net/screscent?viewmode=contents
