Added test for scheduler informers
commit 719a56fa67
parent 9b7439d77d
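For context: the functional change in the hunks below is that factory.NewPodInformer no longer takes a scheduler-name argument, so the pod informer delivers events for pods bound by any scheduler, and the new TestSchedulerInformers integration test checks that the scheduler's cache reflects pods placed by other schedulers. The sketch below is a minimal, hypothetical illustration of that mechanism using client-go shared informers; the package name, the watchAssignedPods helper, and the event-handler bodies are assumptions for illustration, not code from this commit.

// A minimal, self-contained sketch (assumptions only; not the scheduler's
// actual wiring) of the mechanism exercised here: a shared pod informer
// delivers events for assigned pods regardless of which scheduler bound them,
// letting a scheduler-like cache account for their resource usage.
package sketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchAssignedPods is a hypothetical helper: it starts a shared informer
// factory and logs every assigned pod it observes, whatever its
// spec.schedulerName happens to be.
func watchAssignedPods(cs kubernetes.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(cs, 0)
	podInformer := factory.Core().V1().Pods().Informer()
	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			if pod, ok := obj.(*v1.Pod); ok && pod.Spec.NodeName != "" {
				// An assigned pod consumes node resources no matter which
				// scheduler placed it, so a real cache would record it here.
				fmt.Printf("add: %s/%s on %s (scheduler %q)\n",
					pod.Namespace, pod.Name, pod.Spec.NodeName, pod.Spec.SchedulerName)
			}
		},
		UpdateFunc: func(_, newObj interface{}) {
			if pod, ok := newObj.(*v1.Pod); ok && pod.Spec.NodeName != "" {
				fmt.Printf("update: %s/%s on %s\n", pod.Namespace, pod.Name, pod.Spec.NodeName)
			}
		},
	})
	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, podInformer.HasSynced)
}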
@@ -195,7 +195,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 		HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
 		Client:                         clientSet,
 		InformerFactory:                informerFactory,
-		PodInformer:                    factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
+		PodInformer:                    factory.NewPodInformer(clientSet, 0),
 		EventClient:                    clientSet.CoreV1(),
 		Recorder:                       eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
 		Broadcaster:                    eventBroadcaster,
@@ -254,7 +254,7 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
 		HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
 		Client:                         clientSet,
 		InformerFactory:                informerFactory,
-		PodInformer:                    factory.NewPodInformer(clientSet, 0, v1.DefaultSchedulerName),
+		PodInformer:                    factory.NewPodInformer(clientSet, 0),
 		EventClient:                    clientSet.CoreV1(),
 		Recorder:                       eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
 		Broadcaster:                    eventBroadcaster,
@@ -516,7 +516,7 @@ func TestMultiScheduler(t *testing.T) {
 	// 5. create and start a scheduler with name "foo-scheduler"
 	clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
 	informerFactory2 := informers.NewSharedInformerFactory(context.clientSet, 0)
-	podInformer2 := factory.NewPodInformer(context.clientSet, 0, fooScheduler)
+	podInformer2 := factory.NewPodInformer(context.clientSet, 0)
 
 	schedulerConfigFactory2 := createConfiguratorWithPodInformer(fooScheduler, clientSet2, podInformer2, informerFactory2)
 	schedulerConfig2, err := schedulerConfigFactory2.Create()
@@ -749,3 +749,96 @@ func TestPDBCache(t *testing.T) {
 		t.Errorf("No PDB was deleted from the cache: %v", err)
 	}
 }
+
+// TestSchedulerInformers tests that scheduler receives informer events and updates its cache when
+// pods are scheduled by other schedulers.
+func TestSchedulerInformers(t *testing.T) {
+	// Initialize scheduler.
+	context := initTest(t, "scheduler-informer")
+	defer cleanupTest(t, context)
+	cs := context.clientSet
+
+	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
+		v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
+		v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
+	}
+	defaultNodeRes := &v1.ResourceList{
+		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
+		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
+		v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
+	}
+
+	type nodeConfig struct {
+		name string
+		res  *v1.ResourceList
+	}
+
+	tests := []struct {
+		description         string
+		nodes               []*nodeConfig
+		existingPods        []*v1.Pod
+		pod                 *v1.Pod
+		preemptedPodIndexes map[int]struct{}
+	}{
+		{
+			description: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
+			nodes:       []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
+			existingPods: []*v1.Pod{
+				initPausePod(context.clientSet, &pausePodConfig{
+					Name:          "pod1",
+					Namespace:     context.ns.Name,
+					Resources:     defaultPodRes,
+					Labels:        map[string]string{"foo": "bar"},
+					NodeName:      "node-1",
+					SchedulerName: "foo-scheduler",
+				}),
+				initPausePod(context.clientSet, &pausePodConfig{
+					Name:          "pod2",
+					Namespace:     context.ns.Name,
+					Resources:     defaultPodRes,
+					Labels:        map[string]string{"foo": "bar"},
+					NodeName:      "node-1",
+					SchedulerName: "bar-scheduler",
+				}),
+			},
+			pod: initPausePod(cs, &pausePodConfig{
+				Name:      "unschedulable-pod",
+				Namespace: context.ns.Name,
+				Resources: defaultPodRes,
+			}),
+			preemptedPodIndexes: map[int]struct{}{2: {}},
+		},
+	}
+
+	for _, test := range tests {
+		for _, nodeConf := range test.nodes {
+			_, err := createNode(cs, nodeConf.name, nodeConf.res)
+			if err != nil {
+				t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
+			}
+		}
+
+		pods := make([]*v1.Pod, len(test.existingPods))
+		var err error
+		// Create and run existingPods.
+		for i, p := range test.existingPods {
+			if pods[i], err = runPausePod(cs, p); err != nil {
+				t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
+			}
+		}
+		// Create the new "pod".
+		unschedulable, err := createPausePod(cs, test.pod)
+		if err != nil {
+			t.Errorf("Error while creating new pod: %v", err)
+		}
+		if err := waitForPodUnschedulable(cs, unschedulable); err != nil {
+			t.Errorf("Pod %v got scheduled: %v", unschedulable.Name, err)
+		}
+
+		// Cleanup
+		pods = append(pods, unschedulable)
+		cleanupPods(cs, t, pods)
+		cs.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
+		cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
+	}
+}
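The new test relies on integration-test helpers such as waitForPodUnschedulable that are not shown in this diff. As a rough, hypothetical sketch (not the repository's helper), such a check could poll the pod until its PodScheduled condition is False with reason "Unschedulable"; the function name, timeouts, and package below are assumptions for illustration only.

package sketch

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForUnschedulable polls until the pod's PodScheduled condition is False
// with reason "Unschedulable", i.e. the scheduler saw the pod but could not
// place it because the node's resources are already accounted for.
// (The client-go Get signature here is the context-free form used in this era
// of the codebase; newer client-go versions also take a context.Context.)
func waitForUnschedulable(cs kubernetes.Interface, namespace, name string) error {
	return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodScheduled &&
				cond.Status == v1.ConditionFalse &&
				cond.Reason == v1.PodReasonUnschedulable {
				return true, nil
			}
		}
		return false, nil
	})
}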