Add integration test to verify allocatable works

This commit is contained in:
parent e64fe82245
commit 0202a206b8
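For context: the scheduler predicate checks pod counts and resource requests against the node's Status.Allocatable rather than Status.Capacity, and for kubelets that do not report Allocatable, the api-server uses Capacity as Allocatable (the behavior the test below relies on). Below is a minimal, self-contained sketch of that fallback logic, not the scheduler's actual code; the types and helper names are illustrative only. A pod requesting 20m CPU fits a node whose Capacity is 30m but not one whose Allocatable is 10m, which is exactly the scenario the new test exercises.

package main

import "fmt"

// resources is an illustrative stand-in for a node's ResourceList.
type resources struct {
	milliCPU int64
	memory   int64
}

// fits reports whether a pod's requests fit within the node's schedulable
// resources: Allocatable when reported, falling back to Capacity otherwise.
func fits(request resources, capacity, allocatable *resources) bool {
	limit := capacity
	if allocatable != nil {
		limit = allocatable
	}
	return request.milliCPU <= limit.milliCPU && request.memory <= limit.memory
}

func main() {
	request := resources{milliCPU: 20, memory: 20}
	capacity := resources{milliCPU: 30, memory: 30}
	allocatable := resources{milliCPU: 10, memory: 10}

	fmt.Println(fits(request, &capacity, nil))          // true: falls back to Capacity
	fmt.Println(fits(request, &capacity, &allocatable)) // false: exceeds Allocatable
}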
@@ -305,7 +305,6 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 	}
 
 	allocatable := info.Status.Allocatable
 	if int64(len(existingPods))+1 > allocatable.Pods().Value() {
 		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %+v is full, running %v out of %v Pods.", podName(pod), node, len(existingPods), allocatable.Pods().Value())
 		return false, ErrExceededMaxPodNumber
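To make the max-pods check in the hunk above concrete: a node whose allocatable pod count is 32 still accepts a 32nd pod, but rejects a 33rd. A tiny self-contained illustration (the helper name is mine, not scheduler code):

package main

import "fmt"

// exceedsMaxPods mirrors the predicate's count check: scheduling one more
// pod must not push the total past the node's allocatable pod quantity.
func exceedsMaxPods(existingPods int, allocatablePods int64) bool {
	return int64(existingPods)+1 > allocatablePods
}

func main() {
	fmt.Println(exceedsMaxPods(31, 32)) // false: the 32nd pod still fits
	fmt.Println(exceedsMaxPods(32, 32)) // true: a 33rd pod would exceed the limit
}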
@@ -449,3 +449,119 @@ func createPod(name string, annotation map[string]string) *api.Pod {
 		},
 	}
 }
+
+// This test verifies that the scheduler works correctly regardless of whether the kubelet is allocatable-aware or not.
+func TestAllocatable(t *testing.T) {
+	framework.DeleteAllEtcdKeys()
+
+	var m *master.Master
+	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		m.Handler.ServeHTTP(w, req)
+	}))
+	defer s.Close()
+
+	masterConfig := framework.NewIntegrationTestMasterConfig()
+	m = master.New(masterConfig)
+
+	// 1. Create and start the default scheduler.
+	restClient := client.NewOrDie(&client.Config{Host: s.URL, GroupVersion: testapi.Default.GroupVersion()})
+
+	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName)
+	schedulerConfig, err := schedulerConfigFactory.Create()
+	if err != nil {
+		t.Fatalf("Couldn't create scheduler config: %v", err)
+	}
+	eventBroadcaster := record.NewBroadcaster()
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
+	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
+	scheduler.New(schedulerConfig).Run()
+	// The default scheduler is stopped when the test exits.
+	defer close(schedulerConfig.StopEverything)
+
+	// 2. Create a node without allocatable awareness (only Capacity is reported).
+	node := &api.Node{
+		ObjectMeta: api.ObjectMeta{Name: "node-allocatable-scheduler-test-node"},
+		Spec:       api.NodeSpec{Unschedulable: false},
+		Status: api.NodeStatus{
+			Capacity: api.ResourceList{
+				api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
+				api.ResourceCPU:    *resource.NewMilliQuantity(30, resource.DecimalSI),
+				api.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
+			},
+		},
+	}
+
+	allocNode, err := restClient.Nodes().Create(node)
+	if err != nil {
+		t.Fatalf("Failed to create node: %v", err)
+	}
+
+	// 3. Create a pod that requests less than Capacity.
+	podResource := &api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "pod-test-allocatable"},
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Name:  "container",
+					Image: "kubernetes/pause:go",
+					Resources: api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceCPU:    *resource.NewMilliQuantity(20, resource.DecimalSI),
+							api.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
+						},
+					},
+				},
+			},
+		},
+	}
+
+	testAllocPod, err := restClient.Pods(api.NamespaceDefault).Create(podResource)
+	if err != nil {
+		t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
+	}
+
+	// 4. Test: this pod should be scheduled, since the api-server uses Capacity as Allocatable.
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testAllocPod.Namespace, testAllocPod.Name))
+	if err != nil {
+		t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err)
+	} else {
+		t.Logf("Test allocatable unawareness: %s Pod scheduled", testAllocPod.Name)
+	}
+
+	// 5. Make the node allocatable-aware; note that Allocatable is now less than the pod's requests.
+	allocNode.Status = api.NodeStatus{
+		Capacity: api.ResourceList{
+			api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
+			api.ResourceCPU:    *resource.NewMilliQuantity(30, resource.DecimalSI),
+			api.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
+		},
+		Allocatable: api.ResourceList{
+			api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
+			api.ResourceCPU:    *resource.NewMilliQuantity(10, resource.DecimalSI),
+			api.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
+		},
+	}
+
+	if _, err := restClient.Nodes().UpdateStatus(allocNode); err != nil {
+		t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
+	}
+
+	if err := restClient.Pods(api.NamespaceDefault).Delete(podResource.Name, &api.DeleteOptions{}); err != nil {
+		t.Fatalf("Failed to remove first resource pod: %v", err)
+	}
+
+	// 6. Make another pod with a different name but the same resource requests.
+	podResource.ObjectMeta.Name = "pod-test-allocatable2"
+	testAllocPod2, err := restClient.Pods(api.NamespaceDefault).Create(podResource)
+	if err != nil {
+		t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
+	}
+
+	// 7. Test: this pod should not be scheduled, since it requests more than Allocatable.
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testAllocPod2.Namespace, testAllocPod2.Name))
+	if err == nil {
+		t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly: %v", testAllocPod2.Name, err)
+	} else {
+		t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)
+	}
+}
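The test polls podScheduled, a helper defined elsewhere in the test file and not shown in this diff. A minimal sketch of what such a wait.ConditionFunc might look like, assuming the same client and wait packages the test file already imports (the real definition may differ):

// podScheduled returns a wait.ConditionFunc that reports whether the named
// pod has been bound to a node. Hedged sketch of the referenced helper.
func podScheduled(c *client.Client, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.Pods(podNamespace).Get(podName)
		if err != nil {
			// Treat lookup errors (including not-found) as "not yet scheduled"
			// so the poll retries instead of aborting.
			return false, nil
		}
		// A non-empty NodeName means the scheduler has bound the pod.
		return pod.Spec.NodeName != "", nil
	}
}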