Web-Based K8s Simulation Scheduling Platform Design, Part 5: Pod Deployment Fails, Rethinking the Approach

Label the node first, to make it easier to deploy pods onto it later.

package main

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// getResourceList builds a ResourceList from CPU and memory strings such as
// "50m" or "1Gi"; empty strings are skipped.
func getResourceList(cpu, memory string) v1.ResourceList {
	res := v1.ResourceList{}
	if cpu != "" {
		res[v1.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[v1.ResourceMemory] = resource.MustParse(memory)
	}
	return res
}

// getResourceRequirements pairs requests and limits into a ResourceRequirements.
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
	res := v1.ResourceRequirements{}
	res.Requests = requests
	res.Limits = limits
	return res
}


func main() {
	// uses the current context in kubeconfig
	// path-to-kubeconfig -- for example, /root/.kube/config
	config, err := clientcmd.BuildConfigFromFlags("", "C:\\Users\\HJW\\.kube\\config")
	if err != nil {
		panic(err)
	}

	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Labels are plain key-value pairs; the pod will later select on them.
	labels := map[string]string{"node1": "node1"}
	
	// The pod itself is created by the second program below, once this
	// node exists and carries the label.

	// Node template: a fake node with capacity/allocatable resources and a
	// Ready condition, so that the scheduler will consider it.
	lastHeartbeatTime, _ := time.Parse("2006-01-02 15:04:05", "2019-06-05 18:38:35")
	lastTransitionTime, _ := time.Parse("2006-01-02 15:04:05", "2019-06-05 11:41:27")
	newNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "node1",
			Labels: labels,
		},
		Spec: v1.NodeSpec{},
		Status: v1.NodeStatus{
			Capacity:    getResourceList("50m", "1Gi"),
			Allocatable: getResourceList("50m", "1Gi"),
			Phase:       v1.NodeRunning, // "Running"; the lowercase "running" is not a valid NodePhase
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					LastHeartbeatTime:  metav1.Time{Time: lastHeartbeatTime},
					LastTransitionTime: metav1.Time{Time: lastTransitionTime},
					Reason:             "KubeletReady",
					Message:            "kubelet is posting ready status",
				},
			},
		},
	}
	// Create the node; check the returned error instead of discarding it.
	if _, err := clientset.CoreV1().Nodes().Create(context.Background(), newNode, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
	// Cleanup, when needed:
	// clientset.CoreV1().Nodes().Delete(context.Background(), "node1", metav1.DeleteOptions{})
}

A label has type map[string]string, Go's built-in key-value map, and has to be defined up front. After the program runs, the node can be seen carrying the label. It can also be checked from code, as in the sketch below.
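A minimal verification sketch, assuming the same kubeconfig path as the programs in this post; it fetches node1 and prints its labels:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Same kubeconfig path as above; adjust to your environment.
	config, err := clientcmd.BuildConfigFromFlags("", "C:\\Users\\HJW\\.kube\\config")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Fetch the node and print the labels that were just applied.
	node, err := clientset.CoreV1().Nodes().Get(context.Background(), "node1", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("labels on %s: %v\n", node.Name, node.Labels)
}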
Then create a pod that selects the same label:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// getResourceList builds a ResourceList from CPU and memory strings such as
// "50m" or "1Gi"; empty strings are skipped.
func getResourceList(cpu, memory string) v1.ResourceList {
	res := v1.ResourceList{}
	if cpu != "" {
		res[v1.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[v1.ResourceMemory] = resource.MustParse(memory)
	}
	return res
}

// getResourceRequirements pairs requests and limits into a ResourceRequirements.
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
	res := v1.ResourceRequirements{}
	res.Requests = requests
	res.Limits = limits
	return res
}


func main() {
	// uses the current context in kubeconfig
	// path-to-kubeconfig -- for example, /root/.kube/config
	config, err := clientcmd.BuildConfigFromFlags("", "C:\\Users\\HJW\\.kube\\config")
	if err != nil {
		panic(err)
	}

	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// The same label that was applied to node1.
	labels := map[string]string{"node1": "node1"}

	// Pod template: NodeSelector carries the same labels as node1, so the
	// scheduler should place this pod there.
	newPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod2",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:      "pod2",
					Image:     "busybox:latest",
					Command:   []string{"sleep", "1000"},
					Resources: getResourceRequirements(getResourceList("10m", "1Gi"), getResourceList("50m", "1Gi")),
				},
			},
			NodeSelector: labels,
		},
	}

	// The node was already created by the first program above.
	// Create the pod.
	pod, err := clientset.CoreV1().Pods("kube-system").Create(context.Background(), newPod, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("Created pod %q.\n", pod.GetObjectMeta().GetName())
	// Cleanup, when needed:
	// clientset.CoreV1().Pods("kube-system").Delete(context.Background(), "pod2", metav1.DeleteOptions{})
}

At first the pod really was created, but after about a minute it ran into trouble:
0/2 nodes are available: 1 Too many pods, 1 node(s) didn't match Pod's node affinity/selector, 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }. preemption: 0/2 nodes are available: 2 Preemption is not helpful for scheduling.
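At that point node1's taints and conditions can be dumped to confirm what the scheduler is seeing; a minimal sketch, again assuming the kubeconfig path used above:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "C:\\Users\\HJW\\.kube\\config")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	node, err := clientset.CoreV1().Nodes().Get(context.Background(), "node1", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	// Taints such as node.kubernetes.io/unreachable are added by the node
	// lifecycle controller once a node stops posting heartbeats.
	for _, t := range node.Spec.Taints {
		fmt.Printf("taint: %s=%s:%s\n", t.Key, t.Value, t.Effect)
	}
	for _, c := range node.Status.Conditions {
		fmt.Printf("condition: %s=%s (%s)\n", c.Type, c.Status, c.Reason)
	}
}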
The scheduler is saying that node1 has too many pods and an untolerated taint. After a long search, the explanation is that client-go's Nodes().Create() merely writes a Node object, a bare struct, into the API server. That fools the scheduler briefly, but with no kubelet behind the node it is soon marked unreachable and tainted, and pods bound to it fail. So the approach has to change: a scheduling simulation platform cannot actually deploy anything. Instead, after creating the node and pod records, match them in the database to simulate scheduling, as sketched below.
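A minimal sketch of that direction, with the records held in memory where the real platform would use database rows; the SimNode and SimPod types and the fits predicate are hypothetical names, not part of the platform yet:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// SimNode and SimPod stand in for database rows in this sketch.
type SimNode struct {
	Name        string
	Labels      map[string]string
	Allocatable v1.ResourceList
	Allocated   v1.ResourceList // sum of requests of pods already placed
}

type SimPod struct {
	Name         string
	NodeSelector map[string]string
	Requests     v1.ResourceList
}

// fits reports whether the pod's selector matches the node's labels and the
// node still has enough unallocated CPU and memory for the pod's requests.
func fits(p SimPod, n SimNode) bool {
	for k, v := range p.NodeSelector {
		if n.Labels[k] != v {
			return false
		}
	}
	for _, r := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} {
		used := n.Allocated[r]
		used.Add(p.Requests[r])
		avail := n.Allocatable[r]
		if used.Cmp(avail) > 0 {
			return false
		}
	}
	return true
}

func main() {
	node := SimNode{
		Name:        "node1",
		Labels:      map[string]string{"node1": "node1"},
		Allocatable: v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m"), v1.ResourceMemory: resource.MustParse("1Gi")},
		Allocated:   v1.ResourceList{},
	}
	pod := SimPod{
		Name:         "pod2",
		NodeSelector: map[string]string{"node1": "node1"},
		Requests:     v1.ResourceList{v1.ResourceCPU: resource.MustParse("10m"), v1.ResourceMemory: resource.MustParse("1Gi")},
	}
	if fits(pod, node) {
		fmt.Printf("simulated: %s -> %s\n", pod.Name, node.Name)
	} else {
		fmt.Printf("simulated: %s is unschedulable\n", pod.Name)
	}
}

The predicate mirrors the two checks the real scheduler reported on here, label matching and resource fit, but runs entirely against our own records instead of a live cluster.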
