Kata Containers Startup Analysis

Program entry point


Example startup command:

/usr/bin/containerd-shim-kata-v2 -namespace k8s.io -address /run/containerd/containerd.sock -publish-binary /usr/bin/containerd -id 19639196b889a15b8dcd5d718371de7af5afccfbdf5d7fdc314685e40ba55b09

Checking the kata input arguments

func main() {

	if len(os.Args) == 2 && os.Args[1] == "--version" {
		fmt.Printf("%s containerd shim: id: %q, version: %s, commit: %v\n", project, types.DefaultKataRuntimeName, version, commit)
		os.Exit(0)
	}

	shim.Run(types.DefaultKataRuntimeName, containerdshim.New, shimConfig)
}

containerdshim.New: obtains the service instance

shimConfig: initializes the shim configuration

Initializing the environment

shim.Run is where the main logic begins. It first parses the input arguments and initializes the runtime environment, then sets child_subreaper (subreaper, Linux only). A subreaper receives every orphaned process in the process tree rooted at it: for example, when a parent process spawned under containerd forks a child and then exits, the child is re-parented to the nearest ancestor that set child_subreaper. Setting it here effectively makes containerd-shim the manager of the container's child processes. Whether the subreaper is enabled is decided by a variable initialized in the shimConfig function; a minimal sketch of the underlying syscall follows the code below.

func run(id string, initFunc Init, config Config) error {
	parseFlags()
	setRuntime()

	signals, err := setupSignals(config)
	if err != nil {
		return err
	}
	if !config.NoSubreaper {
		if err := subreaper(); err != nil {
			return err
		}
	}
	......
	......
	......
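
On Linux, enabling the subreaper boils down to a single prctl call; a minimal sketch using golang.org/x/sys/unix directly (the shim reaches the same syscall through containerd's helper functions):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// PR_SET_CHILD_SUBREAPER(1) makes this process adopt any orphaned
	// descendant instead of re-parenting it to pid 1, so the shim can
	// wait on container child processes whose parents already exited.
	if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0); err != nil {
		fmt.Println("failed to set child subreaper:", err)
	}
}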

Initializing the event publisher

Initialize the event publisher instance. addressFlag and containerdBinaryFlag are strings obtained from the input arguments parsed by parseFlags; config.NoReaper comes from shimConfig:

	publisher := &remoteEventsPublisher{
		address:              addressFlag,
		containerdBinaryPath: containerdBinaryFlag,
		noReaper:             config.NoReaper,
	}

Initializing the namespace

Initialize the namespace. namespaceFlag is taken from the input arguments parsed by parseFlags; the BundlePath and Debug parameters are carried through the context, and a "runtime" field is added to the context's logger:

	ctx := namespaces.WithNamespace(context.Background(), namespaceFlag)
	ctx = context.WithValue(ctx, OptsKey{}, Opts{BundlePath: bundlePath, Debug: debugFlag})
	ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", id))

Initializing the service instance

initFunc is a function value pointing at containerdshim.New, so this call is equivalent to invoking containerdshim.New; its main job is to instantiate the service object:

	service, err := initFunc(ctx, idFlag, publisher)
	if err != nil {
		return err
	}

containerdshim.New first initializes the vci and katautils loggers (level and fields), then initializes the service instance:

	s := &service{
		id:         id,
		pid:        uint32(os.Getpid()),
		ctx:        ctx,
		containers: make(map[string]*container),
		events:     make(chan interface{}, chSize),
		ec:         make(chan exit, bufferSize),
		cancel:     cancel,
	}

Starting the service event-handling goroutines

A goroutine runs the service's processExits method, which ranges over the service.ec channel; each exit is handled by checkProcesses, which emits a TaskExit event through the service's sendL method. sendL pushes the event onto the events channel; both channels belong to the service instance.

func (s *service) processExits() {
	for e := range s.ec {
		s.checkProcesses(e)
	}
}
func (s *service) checkProcesses(e exit) {
	s.mu.Lock()
	defer s.mu.Unlock()

	id := e.execid
	if id == "" {
		id = e.id
	}

	s.sendL(&eventstypes.TaskExit{
		ContainerID: e.id,
		ID:          id,
		Pid:         e.pid,
		ExitStatus:  uint32(e.status),
		ExitedAt:    e.timestamp,
	})
}
func (s *service) sendL(evt interface{}) {
	s.eventSendMu.Lock()
	if s.events != nil {
		s.events <- evt
	}
	s.eventSendMu.Unlock()
}

Another goroutine runs the service's forward method, where publisher is the remoteEventsPublisher instance. forward ranges over the service's events channel, i.e. it consumes the events that sendL produced:

func (s *service) forward(publisher events.Publisher) {
	for e := range s.events {
		ctx, cancel := context.WithTimeout(s.ctx, timeOut)
		err := publisher.Publish(ctx, getTopic(e), e)
		cancel()
		if err != nil {
			shimLog.WithError(err).Error("post event")
		}
	}
}
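
Taken together, processExits and forward form a two-stage channel pipeline. A self-contained sketch of the pattern (types simplified; the exit fields are inferred from checkProcesses above):

package main

import (
	"fmt"
	"time"
)

// exit mirrors the fields read by checkProcesses (inferred from the code above).
type exit struct {
	id        string
	execid    string
	pid       uint32
	status    int
	timestamp time.Time
}

func main() {
	ec := make(chan exit, 32)            // stage 1: exit notifications
	events := make(chan interface{}, 32) // stage 2: events to publish
	done := make(chan struct{})

	// processExits analogue: turn exits into publishable events.
	go func() {
		for e := range ec {
			events <- fmt.Sprintf("TaskExit container=%s pid=%d status=%d", e.id, e.pid, e.status)
		}
	}()

	// forward analogue: "publish" each event (the real shim execs containerd here).
	go func() {
		fmt.Println("publish:", <-events)
		close(done)
	}()

	ec <- exit{id: "c1", pid: 42, status: 0, timestamp: time.Now()}
	<-done
}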

Publish preprocesses the message, assembles a command, then executes it and waits for the result; this is where containerd gets invoked.

l.containerdBinaryPath is the containerdBinaryFlag argument passed at startup (publish-binary); it defaults to containerd

l.address is the addressFlag argument passed at startup (address); it defaults to empty

ns is the namespaceFlag argument passed at startup (namespace); it defaults to empty

topic is the event type

file:src\runtime\vendor\github.com\containerd\containerd\runtime\v2\shim\shim_unix.go
func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error {
	ns, _ := namespaces.Namespace(ctx)
	encoded, err := typeurl.MarshalAny(event)
	if err != nil {
		return err
	}
	data, err := encoded.Marshal()
	if err != nil {
		return err
	}
	cmd := exec.CommandContext(ctx, l.containerdBinaryPath, "--address", l.address, "publish", "--topic", topic, "--namespace", ns)
	cmd.Stdin = bytes.NewReader(data)
	if l.noReaper {
		if err := cmd.Start(); err != nil {
			return err
		}
		if err := cmd.Wait(); err != nil {
			return errors.Wrap(err, "failed to publish event")
		}
		return nil
	}
	c, err := Default.Start(cmd)
	if err != nil {
		return err
	}
	status, err := Default.Wait(cmd, c)
	if err != nil {
		return err
	}
	if status != 0 {
		return errors.New("failed to publish event")
	}
	return nil
}
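
Concretely, with the flags from the example invocation at the top of this article, the assembled command is roughly the following (the topic value here is illustrative), with the marshaled event written to its stdin:

/usr/bin/containerd --address /run/containerd/containerd.sock publish --topic /tasks/exit --namespace k8s.io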

Topic event types:

func getTopic(e interface{}) string {
	switch e.(type) {
	case *eventstypes.TaskCreate:
		return cdruntime.TaskCreateEventTopic
	case *eventstypes.TaskStart:
		return cdruntime.TaskStartEventTopic
	case *eventstypes.TaskOOM:
		return cdruntime.TaskOOMEventTopic
	case *eventstypes.TaskExit:
		return cdruntime.TaskExitEventTopic
	case *eventstypes.TaskDelete:
		return cdruntime.TaskDeleteEventTopic
	case *eventstypes.TaskExecAdded:
		return cdruntime.TaskExecAddedEventTopic
	case *eventstypes.TaskExecStarted:
		return cdruntime.TaskExecStartedEventTopic
	case *eventstypes.TaskPaused:
		return cdruntime.TaskPausedEventTopic
	case *eventstypes.TaskResumed:
		return cdruntime.TaskResumedEventTopic
	case *eventstypes.TaskCheckpointed:
		return cdruntime.TaskCheckpointedEventTopic
	default:
		shimLog.WithField("event-type", e).Warn("no topic for event type")
	}
	return cdruntime.TaskUnknownTopic
}

Handling the action argument

action = flag.Arg(0); flag.Arg returns the positional arguments left over after flag parsing, so if nothing was left unparsed, action is empty.
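
A minimal standalone sketch of this pattern (a hypothetical program, not the shim's actual parseFlags):

package main

import (
	"flag"
	"fmt"
)

func main() {
	namespace := flag.String("namespace", "", "namespace of the shim")
	flag.Parse()

	// flag.Arg(0) is the first argument left over after flag parsing;
	// it returns "" when no positional argument remains.
	action := flag.Arg(0)
	fmt.Printf("namespace=%q action=%q\n", *namespace, action)
}

Invoked as "prog -namespace k8s.io start", this prints action="start". The shim then dispatches on action: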

	switch action {
	case "delete":
		logger := logrus.WithFields(logrus.Fields{
			"pid":       os.Getpid(),
			"namespace": namespaceFlag,
		})
		go handleSignals(logger, signals)
		response, err := service.Cleanup(ctx)
		if err != nil {
			return err
		}
		data, err := proto.Marshal(response)
		if err != nil {
			return err
		}
		if _, err := os.Stdout.Write(data); err != nil {
			return err
		}
		return nil
	case "start":
		address, err := service.StartShim(ctx, idFlag, containerdBinaryFlag, addressFlag)
		if err != nil {
			return err
		}
		if _, err := os.Stdout.WriteString(address); err != nil {
			return err
		}
		return nil
	default:
		if err := setLogger(ctx, idFlag); err != nil {
			return err
		}
		client := NewShimClient(ctx, service, signals)
		return client.Serve()
	}

default

NewShimClient obtains a Client instance; svc is the service instance:

// NewShimClient creates a new shim server client
func NewShimClient(ctx context.Context, svc shimapi.TaskService, signals chan os.Signal) *Client {
	s := &Client{
		service: svc,
		context: ctx,
		signals: signals,
	}
	return s
}

client.Serve() sets up the signal-capture function and logging, then starts serving:

// Serve the shim server
func (s *Client) Serve() error {
	dump := make(chan os.Signal, 32)
	setupDumpStacks(dump)

	path, err := os.Getwd()
	if err != nil {
		return err
	}
	server, err := newServer()
	if err != nil {
		return errors.Wrap(err, "failed creating server")
	}

	logrus.Debug("registering ttrpc server")
	shimapi.RegisterTaskService(server, s.service)

	if err := serve(s.context, server, socketFlag); err != nil {
		return err
	}
	logger := logrus.WithFields(logrus.Fields{
		"pid":       os.Getpid(),
		"path":      path,
		"namespace": namespaceFlag,
	})
	go func() {
		for range dump {
			dumpStacks(logger)
		}
	}()
	return handleSignals(logger, s.signals)
}
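
setupDumpStacks presumably registers a signal on the dump channel so the goroutine above can print stack traces on demand; a minimal sketch of the pattern (using SIGUSR1, which is an assumption about the exact signal):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"runtime"
	"syscall"
	"time"
)

func main() {
	dump := make(chan os.Signal, 32)
	// Assumption: the real setupDumpStacks registers SIGUSR1 like this.
	signal.Notify(dump, syscall.SIGUSR1)

	go func() {
		for range dump {
			buf := make([]byte, 1<<16)
			n := runtime.Stack(buf, true) // true: include all goroutines
			fmt.Printf("=== goroutine dump ===\n%s\n", buf[:n])
		}
	}()

	// Trigger one dump for demonstration, then give it time to print.
	syscall.Kill(os.Getpid(), syscall.SIGUSR1)
	time.Sleep(time.Second)
}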

newServer initializes the ttrpc server:

file:src\runtime\vendor\github.com\containerd\containerd\runtime\v2\shim\shim_linux.go
func newServer() (*ttrpc.Server, error) {
	return ttrpc.NewServer(ttrpc.WithServerHandshaker(ttrpc.UnixSocketRequireSameUser()))
}

shimapi.RegisterTaskService(server, s.service) registers the service instance with the ttrpc server:

func RegisterTaskService(srv *ttrpc.Server, svc TaskService) {
	srv.Register("containerd.task.v2.Task", map[string]ttrpc.Method{
		"State": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
			var req StateRequest
			if err := unmarshal(&req); err != nil {
				return nil, err
			}
			return svc.State(ctx, &req)
		},
		"Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
			var req CreateTaskRequest
			if err := unmarshal(&req); err != nil {
				return nil, err
			}
			return svc.Create(ctx, &req)
		},
		"Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
			var req StartRequest
			if err := unmarshal(&req); err != nil {
				return nil, err
			}
			return svc.Start(ctx, &req)
		},
		"Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
			var req DeleteRequest
			if err := unmarshal(&req); err != nil {
				return nil, err
			}
			return svc.Delete(ctx, &req)
		},
		......
		......
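
For reference, the caller reaches these registered methods through the generated ttrpc client stubs; a hedged sketch of the containerd side (socket path and container ID are illustrative, and the stub names assume the vendored runtime/v2 task API):

package main

import (
	"context"
	"fmt"
	"net"

	taskAPI "github.com/containerd/containerd/runtime/v2/task"
	"github.com/containerd/ttrpc"
)

func main() {
	// Dial the shim's unix socket (path is illustrative).
	conn, err := net.Dial("unix", "/run/containerd/s/example.sock")
	if err != nil {
		panic(err)
	}
	client := ttrpc.NewClient(conn)
	defer client.Close()

	// The generated stub marshals requests to the
	// "containerd.task.v2.Task" methods registered above.
	svc := taskAPI.NewTaskClient(client)
	resp, err := svc.State(context.Background(), &taskAPI.StateRequest{ID: "mycontainer"})
	if err != nil {
		panic(err)
	}
	fmt.Println("status:", resp.Status)
}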

When a registered ttrpc method such as "Create" is invoked, it runs the service's Create method, which calls create; the result is then pushed through the service's send method onto the events channel, where the forward goroutine picks it up and publishes it to containerd.

file:src\runtime\containerd-shim-v2\service.go
// Create a new sandbox or container with the underlying OCI runtime
func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *taskAPI.CreateTaskResponse, err error) {
	start := time.Now()
	defer func() {
		err = toGRPC(err)
		rpcDurationsHistogram.WithLabelValues("create").Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
	}()

	s.mu.Lock()
	defer s.mu.Unlock()

	var c *container

	c, err = create(ctx, s, r)
	if err != nil {
		return nil, err
	}

	c.status = task.StatusCreated

	s.containers[r.ID] = c

	s.send(&eventstypes.TaskCreate{
		ContainerID: r.ID,
		Bundle:      r.Bundle,
		Rootfs:      r.Rootfs,
		IO: &eventstypes.TaskIO{
			Stdin:    r.Stdin,
			Stdout:   r.Stdout,
			Stderr:   r.Stderr,
			Terminal: r.Terminal,
		},
		Checkpoint: r.Checkpoint,
		Pid:        s.pid,
	})

	return &taskAPI.CreateTaskResponse{
		Pid: s.pid,
	}, nil
}
func (s *service) send(evt interface{}) {
	// for unit test, it will not initialize s.events
	if s.events != nil {
		s.events <- evt
	}
}
[Diagram: containerd calls the ttrpc service methods (Create, Start, State, ...), each of which calls its backend implementation and sends an event over a channel; the service goroutines loop over these events and publish them back to containerd.]

The create method distinguishes two creation paths: PodSandbox and PodContainer.

file:src\runtime\containerd-shim-v2\create.go
func create(ctx context.Context, s *service, r *taskAPI.CreateTaskRequest) (*container, error) {
	rootFs := vc.RootFs{}
	if len(r.Rootfs) == 1 {
		m := r.Rootfs[0]
		rootFs.Source = m.Source
		rootFs.Type = m.Type
		rootFs.Options = m.Options
	}

	detach := !r.Terminal
	ociSpec, bundlePath, err := loadSpec(r)
	if err != nil {
		return nil, err
	}

	containerType, err := oci.ContainerType(*ociSpec)
	if err != nil {
		return nil, err
	}

	disableOutput := noNeedForOutput(detach, ociSpec.Process.Terminal)
	rootfs := filepath.Join(r.Bundle, "rootfs")

	switch containerType {
	case vc.PodSandbox:
		if s.sandbox != nil {
			return nil, fmt.Errorf("cannot create another sandbox in sandbox: %s", s.sandbox.ID())
		}

		_, err := loadRuntimeConfig(s, r, ociSpec.Annotations)
		if err != nil {
			return nil, err
		}

		if rootFs.Mounted, err = checkAndMount(s, r); err != nil {
			return nil, err
		}

		defer func() {
			if err != nil && rootFs.Mounted {
				if err2 := mount.UnmountAll(rootfs, 0); err2 != nil {
					shimLog.WithField("container-type", containerType).WithError(err2).Warn("failed to cleanup rootfs mount")
				}
			}
		}()

		katautils.HandleFactory(ctx, vci, s.config)

		// Pass service's context instead of local ctx to CreateSandbox(), since local
		// ctx will be canceled after this rpc service call, but the sandbox will live
		// across multiple rpc service calls.
		//
		sandbox, _, err := katautils.CreateSandbox(s.ctx, vci, *ociSpec, *s.config, rootFs, r.ID, bundlePath, "", disableOutput, false)
		if err != nil {
			return nil, err
		}
		s.sandbox = sandbox
		go s.startManagementServer(ctx, ociSpec)

	case vc.PodContainer:
		if s.sandbox == nil {
			return nil, fmt.Errorf("BUG: Cannot start the container, since the sandbox hasn't been created")
		}

		if rootFs.Mounted, err = checkAndMount(s, r); err != nil {
			return nil, err
		}

		defer func() {
			if err != nil && rootFs.Mounted {
				if err2 := mount.UnmountAll(rootfs, 0); err2 != nil {
					shimLog.WithField("container-type", containerType).WithError(err2).Warn("failed to cleanup rootfs mount")
				}
			}
		}()

		_, err = katautils.CreateContainer(ctx, s.sandbox, *ociSpec, rootFs, r.ID, bundlePath, "", disableOutput)
		if err != nil {
			return nil, err
		}
	}

	container, err := newContainer(s, r, containerType, ociSpec, rootFs.Mounted)
	if err != nil {
		return nil, err
	}

	return container, nil
}

Initializing the sandbox

The kata creation flow enters the vc.PodSandbox branch. It first loads the configuration from the config file together with the configuration passed in via the rpc call:

		s.config, err = loadRuntimeConfig(s, r, ociSpec.Annotations)
		if err != nil {
			return nil, err
		}

Creating the tracer

		// create tracer
		// This is the earliest location we can create the tracer because we must wait
		// until the runtime config is loaded
		_, err = katautils.CreateTracer("kata", s.config)
		if err != nil {
			return nil, err
		}

		// create root span
		var rootSpan otelTrace.Span
		rootSpan, s.rootCtx = trace(s.ctx, "root span")
		defer rootSpan.End()

		// create span
		var span otelTrace.Span
		span, s.ctx = trace(s.rootCtx, "create")
		defer span.End()

Checking the mount

		if rootFs.Mounted, err = checkAndMount(s, r); err != nil {
			return nil, err
		}

Initialize the sandbox configuration. kata can initialize sandboxes from a template: katautils.HandleFactory creates the sandbox factory/template from the initial configuration, with the configuration returned through vci. This step syncs configuration from the config file into vci; it is later synced from vci into SandboxConfig:

		katautils.HandleFactory(ctx, vci, s.config)

Entry point for creating the sandbox

		sandbox, _, err := katautils.CreateSandbox(s.ctx, vci, *ociSpec, *s.config, rootFs, r.ID, bundlePath, "", disableOutput, false)
		if err != nil {
			return nil, err
		}

1. Initialize the virtcontainers sandbox configuration struct, SandboxConfig

2. Detect whether the system has FIPS enabled (in FIPS mode, only FIPS 140-2 approved cryptographic algorithms may be used); see the sketch after this list

3. Specify the container filesystem path

4. Create the network namespace

5. Enter the network namespace and run the pre-start hook functions

6. Create the sandbox; implemented in src\runtime\virtcontainers\implementation.go
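
Step 2 amounts to reading the kernel's FIPS flag; a minimal sketch (my assumption of how checkForFIPS detects it; the real function, as I understand it, also propagates fips=1 to the guest kernel parameters):

package main

import (
	"fmt"
	"os"
	"strings"
)

// fipsEnabled reports whether the host kernel runs in FIPS mode by
// reading the flag the kernel exposes in procfs.
func fipsEnabled() bool {
	data, err := os.ReadFile("/proc/sys/crypto/fips_enabled")
	if err != nil {
		return false // flag absent: not in FIPS mode
	}
	return strings.TrimSpace(string(data)) == "1"
}

func main() {
	fmt.Println("FIPS mode:", fipsEnabled())
}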

// CreateSandbox create a sandbox container
func CreateSandbox(ctx context.Context, vci vc.VC, ociSpec specs.Spec, runtimeConfig oci.RuntimeConfig, rootFs vc.RootFs,
	containerID, bundlePath, console string, disableOutput, systemdCgroup bool) (_ vc.VCSandbox, _ vc.Process, err error) {
	span, ctx := Trace(ctx, "createSandbox")
	defer span.End()

	sandboxConfig, err := oci.SandboxConfig(ociSpec, runtimeConfig, bundlePath, containerID, console, disableOutput, systemdCgroup)
	if err != nil {
		return nil, vc.Process{}, err
	}

	if err := checkForFIPS(&sandboxConfig); err != nil {
		return nil, vc.Process{}, err
	}

	if !rootFs.Mounted && len(sandboxConfig.Containers) == 1 {
		if rootFs.Source != "" {
			realPath, err := ResolvePath(rootFs.Source)
			if err != nil {
				return nil, vc.Process{}, err
			}
			rootFs.Source = realPath
		}
		sandboxConfig.Containers[0].RootFs = rootFs
	}

	// Important to create the network namespace before the sandbox is
	// created, because it is not responsible for the creation of the
	// netns if it does not exist.
	if err := SetupNetworkNamespace(&sandboxConfig.NetworkConfig); err != nil {
		return nil, vc.Process{}, err
	}

	defer func() {
		// cleanup netns if kata creates it
		ns := sandboxConfig.NetworkConfig
		if err != nil && ns.NetNsCreated {
			if ex := cleanupNetNS(ns.NetNSPath); ex != nil {
				kataUtilsLogger.WithField("path", ns.NetNSPath).WithError(ex).Warn("failed to cleanup netns")
			}
		}
	}()

	// Run pre-start OCI hooks.
	err = EnterNetNS(sandboxConfig.NetworkConfig.NetNSPath, func() error {
		return PreStartHooks(ctx, ociSpec, containerID, bundlePath)
	})
	if err != nil {
		return nil, vc.Process{}, err
	}

	sandbox, err := vci.CreateSandbox(ctx, sandboxConfig)
	if err != nil {
		return nil, vc.Process{}, err
	}

	sid := sandbox.ID()
	kataUtilsLogger = kataUtilsLogger.WithField("sandbox", sid)
	span.SetAttributes(label.Key("sandbox").String(sid))

	containers := sandbox.GetAllContainers()
	if len(containers) != 1 {
		return nil, vc.Process{}, fmt.Errorf("BUG: Container list from sandbox is wrong, expecting only one container, found %d containers", len(containers))
	}

	return sandbox, containers[0].Process(), nil
}

The vci.CreateSandbox(ctx, sandboxConfig) flow:

file:src\runtime\pkg\katautils\create.go vci.CreateSandbox

file:src\runtime\virtcontainers\implementation.go CreateSandbox

file:src\runtime\virtcontainers\api.go CreateSandbox createSandboxFromConfig

file:src\runtime\virtcontainers\sandbox.go createSandbox

Call chain: vci.CreateSandbox (create.go) → CreateSandbox (implementation.go) → CreateSandbox / createSandboxFromConfig (api.go) → createSandbox (sandbox.go)

createSandbox brings us back to the sandbox package: src\runtime\virtcontainers\sandbox.go

getNewAgentFunc: creates the instance of the agent that runs inside the sandbox, a kataAgent object

newHypervisor: creates the hypervisor instance, the sandbox's virtualization platform; a different implementation is returned depending on the configuration, qemu by default: src\runtime\virtcontainers\qemu.go

s.hypervisor.createSandbox: initializes the VM configuration from the sandbox configuration, such as CPUs, memory, and virtual devices (qemu: src\runtime\virtcontainers\qemu.go:createSandbox)

s.agent.init: initializes the kata-agent, its tracing support and base configuration (src\runtime\virtcontainers\kata_agent.go)

file:src\runtime\virtcontainers\sandbox.go


func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (sb *Sandbox, retErr error) {
	span, ctx := trace(ctx, "newSandbox")
	defer span.End()

	if !sandboxConfig.valid() {
		return nil, fmt.Errorf("Invalid sandbox configuration")
	}

	// create agent instance
	agent := getNewAgentFunc(ctx)()

	hypervisor, err := newHypervisor(sandboxConfig.HypervisorType)
	if err != nil {
		return nil, err
	}

	s := &Sandbox{
		id:              sandboxConfig.ID,
		factory:         factory,
		hypervisor:      hypervisor,
		agent:           agent,
		config:          &sandboxConfig,
		volumes:         sandboxConfig.Volumes,
		containers:      map[string]*Container{},
		state:           types.SandboxState{BlockIndexMap: make(map[int]struct{})},
		annotationsLock: &sync.RWMutex{},
		wg:              &sync.WaitGroup{},
		shmSize:         sandboxConfig.ShmSize,
		sharePidNs:      sandboxConfig.SharePidNs,
		networkNS:       NetworkNamespace{NetNsPath: sandboxConfig.NetworkConfig.NetNSPath},
		ctx:             ctx,
	}

	hypervisor.setSandbox(s)

	if s.store, err = persist.GetDriver(); err != nil || s.store == nil {
		return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
	}

	defer func() {
		if retErr != nil {
			s.Logger().WithError(retErr).Error("Create new sandbox failed")
			s.store.Destroy(s.id)
		}
	}()

	spec := s.GetPatchedOCISpec()
	if spec != nil && spec.Process.SelinuxLabel != "" {
		sandboxConfig.HypervisorConfig.SELinuxProcessLabel = spec.Process.SelinuxLabel
	}

	s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver,
		sandboxConfig.HypervisorConfig.EnableVhostUserStore,
		sandboxConfig.HypervisorConfig.VhostUserStorePath, nil)

	// Ignore the error. Restore can fail for a new sandbox
	if err := s.Restore(); err != nil {
		s.Logger().WithError(err).Debug("restore sandbox failed")
	}

	// store doesn't require hypervisor to be stored immediately
	if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig); err != nil {
		return nil, err
	}

	if s.disableVMShutdown, err = s.agent.init(ctx, s, sandboxConfig.AgentConfig); err != nil {
		return nil, err
	}

	return s, nil
}

Starting the sandbox

file:src\runtime\virtcontainers\api.go

func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (_ *Sandbox, err error) {
	span, ctx := trace(ctx, "createSandboxFromConfig")
	defer span.End()

	// Create the sandbox.
	s, err := createSandbox(ctx, sandboxConfig, factory)
	if err != nil {
		return nil, err
	}

	// cleanup sandbox resources in case of any failure
	defer func() {
		if err != nil {
			s.Delete(ctx)
		}
	}()

	// Create the sandbox network
	if err = s.createNetwork(ctx); err != nil {
		return nil, err
	}

	// network rollback
	defer func() {
		if err != nil {
			s.removeNetwork(ctx)
		}
	}()

	// Move runtime to sandbox cgroup so all process are created there.
	if s.config.SandboxCgroupOnly {
		if err := s.createCgroupManager(); err != nil {
			return nil, err
		}

		if err := s.setupSandboxCgroup(); err != nil {
			return nil, err
		}
	}

	// Start the VM
	if err = s.startVM(ctx); err != nil {
		return nil, err
	}

	// rollback to stop VM if error occurs
	defer func() {
		if err != nil {
			s.stopVM(ctx)
		}
	}()

	s.postCreatedNetwork(ctx)

	if err = s.getAndStoreGuestDetails(ctx); err != nil {
		return nil, err
	}

	// Create Containers
	if err = s.createContainers(ctx); err != nil {
		return nil, err
	}

	// The sandbox is completely created now, we can store it.
	if err = s.storeSandbox(ctx); err != nil {
		return nil, err
	}

	return s, nil
}

s.createNetwork: creates the network

s.startVM: starts the virtual machine; it eventually brings the VM up according to the hypervisor configuration (many more steps involved...)

s.postCreatedNetwork(ctx): completes network setup after the sandbox is created (the network namespace itself was created earlier by SetupNetworkNamespace)

s.getAndStoreGuestDetails(ctx): fetches and stores the guest resource details

s.createContainers(ctx): creates the containers inside the guest; the container configuration lives in the SandboxConfig struct

s.storeSandbox(ctx): persists the kata environment state, including version, status, hypervisor, kata-agent, and other information

Starting the service and waiting for the kata-agent connection

TODO: service analysis; many more steps involved...

		s.sandbox = sandbox
		pid, err := s.sandbox.GetHypervisorPid()
		if err != nil {
			return nil, err
		}
		s.hpid = uint32(pid)

		go s.startManagementServer(ctx, ociSpec)

start

The start branch is mainly used to launch the containerd-shim-kata-v2 command.

Presumably the long-running containerd-shim-kata-v2 is spawned by the start branch, which then exits; confirming the details requires reading the containerd source.

func (s *service) StartShim(ctx context.Context, id, containerdBinary, containerdAddress string) (string, error) {
	bundlePath, err := os.Getwd()
	if err != nil {
		return "", err
	}

	address, err := getAddress(ctx, bundlePath, id)
	if err != nil {
		return "", err
	}
	if address != "" {
		if err := cdshim.WriteAddress("address", address); err != nil {
			return "", err
		}
		return address, nil
	}

	cmd, err := newCommand(ctx, containerdBinary, id, containerdAddress)
	if err != nil {
		return "", err
	}

	address, err = cdshim.SocketAddress(ctx, id)
	if err != nil {
		return "", err
	}

	socket, err := cdshim.NewSocket(address)
	if err != nil {
		return "", err
	}
	defer socket.Close()
	f, err := socket.File()
	if err != nil {
		return "", err
	}
	defer f.Close()

	cmd.ExtraFiles = append(cmd.ExtraFiles, f)

	if err := cmd.Start(); err != nil {
		return "", err
	}
	defer func() {
		if err != nil {
			cmd.Process.Kill()
		}
	}()

	// make sure to wait after start
	go cmd.Wait()
	if err = cdshim.WritePidFile("shim.pid", cmd.Process.Pid); err != nil {
		return "", err
	}
	if err = cdshim.WriteAddress("address", address); err != nil {
		return "", err
	}
	return address, nil
}
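
Consistent with the speculation above, the caller's side of this handshake can be sketched as running the shim with the start action and reading the ttrpc address from stdout (a hedged sketch; containerd's real invocation sets up more environment and I/O):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// The trailing "start" positional argument selects the start branch
	// handled by StartShim; the process prints the socket address and
	// exits, leaving the long-running shim it spawned behind.
	out, err := exec.Command("/usr/bin/containerd-shim-kata-v2",
		"-namespace", "k8s.io",
		"-address", "/run/containerd/containerd.sock",
		"-publish-binary", "/usr/bin/containerd",
		"-id", "19639196b889a15b8dcd5d718371de7af5afccfbdf5d7fdc314685e40ba55b09",
		"start").Output()
	if err != nil {
		panic(err)
	}
	fmt.Println("shim address:", string(out))
}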