From 47b012ec8b12c4e5d29c3afce91d9289cd2818b0 Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Thu, 22 Feb 2018 15:13:56 -0800 Subject: [PATCH 1/2] Refactor tests --- .../scheduler/local-pv-neg-affinity_test.go | 306 ------------------ .../scheduler/volume_binding_test.go | 269 +++++++++++++-- 2 files changed, 249 insertions(+), 326 deletions(-) delete mode 100644 test/integration/scheduler/local-pv-neg-affinity_test.go diff --git a/test/integration/scheduler/local-pv-neg-affinity_test.go b/test/integration/scheduler/local-pv-neg-affinity_test.go deleted file mode 100644 index bfbe2797882..00000000000 --- a/test/integration/scheduler/local-pv-neg-affinity_test.go +++ /dev/null @@ -1,306 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheduler - -// This file tests the VolumeScheduling feature. - -import ( - "fmt" - "net/http" - "net/http/httptest" - "strconv" - "strings" - "testing" - "time" - - "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" - "k8s.io/kubernetes/pkg/scheduler" - "k8s.io/kubernetes/pkg/scheduler/factory" - "k8s.io/kubernetes/test/integration/framework" -) - -const ( - affinityLabelKey = "kubernetes.io/hostname" -) - -func TestLocalPVNegativeAffinity(t *testing.T) { - config := setupNodes(t, "volume-scheduling", 3) - defer config.teardown() - - pv := makeHostBoundPV(t, "local-pv", classImmediate, "", "", "node-1") - pvc := makePVC("local-pvc", config.ns, &classImmediate, "") - - // Create PV - if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { - t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) - } - - // Create PVC - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { - t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) - } - - nodeMarkers := []interface{}{ - markNodeAffinity, - markNodeSelector, - } - for i := 0; i < len(nodeMarkers); i++ { - podName := "local-pod-" + strconv.Itoa(i+1) - pod := makePod(podName, config.ns, []string{"local-pvc"}) - nodeMarkers[i].(func(*v1.Pod, string))(pod, "node-2") - // Create Pod - if _, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil { - t.Fatalf("Failed to create Pod %q: %v", pod.Name, err) - } - // Give time to shceduler to attempt to schedule pod - if err := waitForPodToSchedule(config.client, pod); err == nil { - t.Errorf("Failed as Pod %s was scheduled successfully but expected to fail", pod.Name) - } - // Deleting test pod - p, err := 
config.client.CoreV1().Pods(config.ns).Get(podName, metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to access Pod %s status: %v", podName, err) - } - if strings.Compare(string(p.Status.Phase), "Pending") != 0 { - t.Fatalf("Failed as Pod %s was in: %s state and not in expected: Pending state", podName, p.Status.Phase) - } - if strings.Compare(p.Status.Conditions[0].Reason, "Unschedulable") != 0 { - t.Fatalf("Failed as Pod %s reason was: %s but expected: Unschedulable", podName, p.Status.Conditions[0].Reason) - } - if !strings.Contains(p.Status.Conditions[0].Message, "node(s) didn't match node selector") || !strings.Contains(p.Status.Conditions[0].Message, "node(s) had volume node affinity conflict") { - t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match node selector, node(s) had volume node affinity conflict", podName) - } - if err := config.client.CoreV1().Pods(config.ns).Delete(podName, &metav1.DeleteOptions{}); err != nil { - t.Fatalf("Failed to delete Pod %s: %v", podName, err) - } - } -} - -func setupNodes(t *testing.T, nsName string, numberOfNodes int) *testConfig { - h := &framework.MasterHolder{Initialized: make(chan struct{})} - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - <-h.Initialized - h.M.GenericAPIServer.Handler.ServeHTTP(w, req) - })) - - // Enable feature gates - utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,PersistentLocalVolumes=true") - - // Build clientset and informers for controllers. - clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}}) - informers := informers.NewSharedInformerFactory(clientset, time.Second) - - // Start master - masterConfig := framework.NewIntegrationTestMasterConfig() - - _, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h) - ns := framework.CreateTestingNamespace(nsName, s, t).Name - - controllerCh := make(chan struct{}) - - // Start PV controller for volume binding. - params := persistentvolume.ControllerParameters{ - KubeClient: clientset, - SyncPeriod: time.Hour, // test shouldn't need to resync - VolumePlugins: nil, // TODO; need later for dynamic provisioning - Cloud: nil, - ClusterName: "volume-test-cluster", - VolumeInformer: informers.Core().V1().PersistentVolumes(), - ClaimInformer: informers.Core().V1().PersistentVolumeClaims(), - ClassInformer: informers.Storage().V1().StorageClasses(), - PodInformer: informers.Core().V1().Pods(), - EventRecorder: nil, // TODO: add one so we can test PV events - EnableDynamicProvisioning: true, - } - ctrl, err := persistentvolume.NewController(params) - if err != nil { - t.Fatalf("Failed to create PV controller: %v", err) - } - go ctrl.Run(controllerCh) - - // Start scheduler - configurator := factory.NewConfigFactory( - v1.DefaultSchedulerName, - clientset, - informers.Core().V1().Nodes(), - informers.Core().V1().Pods(), - informers.Core().V1().PersistentVolumes(), - informers.Core().V1().PersistentVolumeClaims(), - informers.Core().V1().ReplicationControllers(), - informers.Extensions().V1beta1().ReplicaSets(), - informers.Apps().V1beta1().StatefulSets(), - informers.Core().V1().Services(), - informers.Policy().V1beta1().PodDisruptionBudgets(), - informers.Storage().V1().StorageClasses(), - v1.DefaultHardPodAffinitySymmetricWeight, - true, // Enable EqualCache by default. 
- ) - - eventBroadcaster := record.NewBroadcaster() - sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) { - cfg.StopEverything = controllerCh - cfg.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientset.CoreV1().RESTClient()).Events("")}) - }) - if err != nil { - t.Fatalf("Failed to create scheduler: %v.", err) - } - - go sched.Run() - - // Waiting for all controller sync. - informers.Start(controllerCh) - informers.WaitForCacheSync(controllerCh) - - // Create shared objects - // Create nodes - for i := 0; i < numberOfNodes; i++ { - testNode := &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("node-%d", i+1), - Labels: map[string]string{affinityLabelKey: fmt.Sprintf("node-%d", i+1)}, - }, - Spec: v1.NodeSpec{Unschedulable: false}, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{ - v1.ResourcePods: *resource.NewQuantity(podLimit, resource.DecimalSI), - }, - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeReady, - Status: v1.ConditionTrue, - Reason: fmt.Sprintf("schedulable condition"), - LastHeartbeatTime: metav1.Time{Time: time.Now()}, - }, - }, - }, - } - if _, err := clientset.CoreV1().Nodes().Create(testNode); err != nil { - t.Fatalf("Failed to create Node %q: %v", testNode.Name, err) - } - } - - // Create SCs - scs := []*storagev1.StorageClass{ - makeStorageClass(classImmediate, &modeImmediate), - } - for _, sc := range scs { - if _, err := clientset.StorageV1().StorageClasses().Create(sc); err != nil { - t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) - } - } - - return &testConfig{ - client: clientset, - ns: ns, - stop: controllerCh, - teardown: func() { - clientset.CoreV1().Pods(ns).DeleteCollection(nil, metav1.ListOptions{}) - clientset.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(nil, metav1.ListOptions{}) - clientset.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) - clientset.StorageV1().StorageClasses().DeleteCollection(nil, metav1.ListOptions{}) - clientset.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) - close(controllerCh) - closeFn() - utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,LocalPersistentVolumes=false") - }, - } -} - -func makeHostBoundPV(t *testing.T, name, scName, pvcName, ns string, node string) *v1.PersistentVolume { - pv := &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Annotations: map[string]string{}, - }, - Spec: v1.PersistentVolumeSpec{ - Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse("5Gi"), - }, - AccessModes: []v1.PersistentVolumeAccessMode{ - v1.ReadWriteOnce, - }, - StorageClassName: scName, - PersistentVolumeSource: v1.PersistentVolumeSource{ - Local: &v1.LocalVolumeSource{ - Path: "/tmp/" + node + "/test-path", - }, - }, - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: affinityLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{node}, - }, - }, - }, - }, - }, - }, - }, - } - - if pvcName != "" { - pv.Spec.ClaimRef = &v1.ObjectReference{Name: pvcName, Namespace: ns} - } - - return pv -} - -func markNodeAffinity(pod *v1.Pod, node string) { - affinity := &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - 
NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "kubernetes.io/hostname", - Operator: v1.NodeSelectorOpIn, - Values: []string{node}, - }, - }, - }, - }, - }, - }, - } - pod.Spec.Affinity = affinity -} - -func markNodeSelector(pod *v1.Pod, node string) { - ns := map[string]string{ - "kubernetes.io/hostname": node, - } - pod.Spec.NodeSelector = ns -} diff --git a/test/integration/scheduler/volume_binding_test.go b/test/integration/scheduler/volume_binding_test.go index ce76645a32a..0cebad462e2 100644 --- a/test/integration/scheduler/volume_binding_test.go +++ b/test/integration/scheduler/volume_binding_test.go @@ -20,7 +20,12 @@ package scheduler import ( "fmt" + "net/http" + "net/http/httptest" + "strconv" + "strings" "testing" + "time" "github.com/golang/glog" @@ -28,7 +33,18 @@ import ( storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" + "k8s.io/kubernetes/pkg/scheduler" + "k8s.io/kubernetes/pkg/scheduler/factory" + "k8s.io/kubernetes/test/integration/framework" ) type testConfig struct { @@ -51,15 +67,14 @@ var ( ) const ( - labelKey = "kubernetes.io/hostname" - labelValue = "node-1" - nodeName = "node1" - podLimit = 100 - volsPerPod = 5 + node1 = "node-1" + podLimit = 100 + volsPerPod = 5 + affinityLabelKey = "kubernetes.io/hostname" ) func TestVolumeBinding(t *testing.T) { - config := setupNodes(t, "volume-scheduling", 1) + config := setupNodes(t, "volume-scheduling", 2) defer config.teardown() cases := map[string]struct { @@ -69,39 +84,39 @@ func TestVolumeBinding(t *testing.T) { }{ "immediate can bind": { pod: makePod("pod-i-canbind", config.ns, []string{"pvc-i-canbind"}), - pvs: []*v1.PersistentVolume{makePV(t, "pv-i-canbind", classImmediate, "", "")}, + pvs: []*v1.PersistentVolume{makePV(t, "pv-i-canbind", classImmediate, "", "", node1)}, pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-canbind", config.ns, &classImmediate, "")}, }, "immediate pvc prebound": { pod: makePod("pod-i-pvc-prebound", config.ns, []string{"pvc-i-prebound"}), - pvs: []*v1.PersistentVolume{makePV(t, "pv-i-pvc-prebound", classImmediate, "", "")}, + pvs: []*v1.PersistentVolume{makePV(t, "pv-i-pvc-prebound", classImmediate, "", "", node1)}, pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-prebound", config.ns, &classImmediate, "pv-i-pvc-prebound")}, }, "immediate pv prebound": { pod: makePod("pod-i-pv-prebound", config.ns, []string{"pvc-i-pv-prebound"}), - pvs: []*v1.PersistentVolume{makePV(t, "pv-i-prebound", classImmediate, "pvc-i-pv-prebound", config.ns)}, + pvs: []*v1.PersistentVolume{makePV(t, "pv-i-prebound", classImmediate, "pvc-i-pv-prebound", config.ns, node1)}, pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-pv-prebound", config.ns, &classImmediate, "")}, }, "wait can bind": { pod: makePod("pod-w-canbind", config.ns, []string{"pvc-w-canbind"}), - pvs: []*v1.PersistentVolume{makePV(t, "pv-w-canbind", classWait, "", "")}, + pvs: []*v1.PersistentVolume{makePV(t, "pv-w-canbind", classWait, "", "", node1)}, pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-canbind", config.ns, &classWait, "")}, }, 
"wait pvc prebound": { pod: makePod("pod-w-pvc-prebound", config.ns, []string{"pvc-w-prebound"}), - pvs: []*v1.PersistentVolume{makePV(t, "pv-w-pvc-prebound", classWait, "", "")}, + pvs: []*v1.PersistentVolume{makePV(t, "pv-w-pvc-prebound", classWait, "", "", node1)}, pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-prebound", config.ns, &classWait, "pv-w-pvc-prebound")}, }, "wait pv prebound": { pod: makePod("pod-w-pv-prebound", config.ns, []string{"pvc-w-pv-prebound"}), - pvs: []*v1.PersistentVolume{makePV(t, "pv-w-prebound", classWait, "pvc-w-pv-prebound", config.ns)}, + pvs: []*v1.PersistentVolume{makePV(t, "pv-w-prebound", classWait, "pvc-w-pv-prebound", config.ns, node1)}, pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-pv-prebound", config.ns, &classWait, "")}, }, "wait can bind two": { pod: makePod("pod-w-canbind-2", config.ns, []string{"pvc-w-canbind-2", "pvc-w-canbind-3"}), pvs: []*v1.PersistentVolume{ - makePV(t, "pv-w-canbind-2", classWait, "", ""), - makePV(t, "pv-w-canbind-3", classWait, "", ""), + makePV(t, "pv-w-canbind-2", classWait, "", "", node1), + makePV(t, "pv-w-canbind-3", classWait, "", "", node1), }, pvcs: []*v1.PersistentVolumeClaim{ makePVC("pvc-w-canbind-2", config.ns, &classWait, ""), @@ -111,8 +126,8 @@ func TestVolumeBinding(t *testing.T) { "mix immediate and wait": { pod: makePod("pod-mix-bound", config.ns, []string{"pvc-w-canbind-4", "pvc-i-canbind-2"}), pvs: []*v1.PersistentVolume{ - makePV(t, "pv-w-canbind-4", classWait, "", ""), - makePV(t, "pv-i-canbind-2", classImmediate, "", ""), + makePV(t, "pv-w-canbind-4", classWait, "", "", node1), + makePV(t, "pv-i-canbind-2", classImmediate, "", "", node1), }, pvcs: []*v1.PersistentVolumeClaim{ makePVC("pvc-w-canbind-4", config.ns, &classWait, ""), @@ -123,6 +138,7 @@ func TestVolumeBinding(t *testing.T) { // immediate mode - PVC cannot bound // wait mode - PVC cannot bind // wait mode - 2 PVCs, 1 cannot bind + // wait mode - 2 pvcs, multiple nodes } for name, test := range cases { @@ -175,7 +191,7 @@ func TestVolumeBindingStress(t *testing.T) { pvs := []*v1.PersistentVolume{} pvcs := []*v1.PersistentVolumeClaim{} for i := 0; i < podLimit*volsPerPod; i++ { - pv := makePV(t, fmt.Sprintf("pv-stress-%v", i), classWait, "", "") + pv := makePV(t, fmt.Sprintf("pv-stress-%v", i), classWait, "", "", node1) pvc := makePVC(fmt.Sprintf("pvc-stress-%v", i), config.ns, &classWait, "") if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { @@ -222,6 +238,191 @@ func TestVolumeBindingStress(t *testing.T) { // TODO: validate events on Pods and PVCs } +func TestPVAffinityConflict(t *testing.T) { + config := setupNodes(t, "volume-scheduling", 3) + defer config.teardown() + + pv := makePV(t, "local-pv", classImmediate, "", "", node1) + pvc := makePVC("local-pvc", config.ns, &classImmediate, "") + + // Create PV + if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { + t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) + } + + // Create PVC + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) + } + + nodeMarkers := []interface{}{ + markNodeAffinity, + markNodeSelector, + } + for i := 0; i < len(nodeMarkers); i++ { + podName := "local-pod-" + strconv.Itoa(i+1) + pod := makePod(podName, config.ns, []string{"local-pvc"}) + nodeMarkers[i].(func(*v1.Pod, string))(pod, "node-2") + // Create Pod + if _, err := 
config.client.CoreV1().Pods(config.ns).Create(pod); err != nil { + t.Fatalf("Failed to create Pod %q: %v", pod.Name, err) + } + // Give time to shceduler to attempt to schedule pod + if err := waitForPodToSchedule(config.client, pod); err == nil { + t.Errorf("Failed as Pod %s was scheduled successfully but expected to fail", pod.Name) + } + // Deleting test pod + p, err := config.client.CoreV1().Pods(config.ns).Get(podName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to access Pod %s status: %v", podName, err) + } + if strings.Compare(string(p.Status.Phase), "Pending") != 0 { + t.Fatalf("Failed as Pod %s was in: %s state and not in expected: Pending state", podName, p.Status.Phase) + } + if strings.Compare(p.Status.Conditions[0].Reason, "Unschedulable") != 0 { + t.Fatalf("Failed as Pod %s reason was: %s but expected: Unschedulable", podName, p.Status.Conditions[0].Reason) + } + if !strings.Contains(p.Status.Conditions[0].Message, "node(s) didn't match node selector") || !strings.Contains(p.Status.Conditions[0].Message, "node(s) had volume node affinity conflict") { + t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match node selector, node(s) had volume node affinity conflict", podName) + } + if err := config.client.CoreV1().Pods(config.ns).Delete(podName, &metav1.DeleteOptions{}); err != nil { + t.Fatalf("Failed to delete Pod %s: %v", podName, err) + } + } +} + +func setupNodes(t *testing.T, nsName string, numberOfNodes int) *testConfig { + h := &framework.MasterHolder{Initialized: make(chan struct{})} + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + <-h.Initialized + h.M.GenericAPIServer.Handler.ServeHTTP(w, req) + })) + + // Enable feature gates + utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,PersistentLocalVolumes=true") + + // Build clientset and informers for controllers. + clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}}) + informers := informers.NewSharedInformerFactory(clientset, time.Second) + + // Start master + masterConfig := framework.NewIntegrationTestMasterConfig() + + _, _, closeFn := framework.RunAMasterUsingServer(masterConfig, s, h) + ns := framework.CreateTestingNamespace(nsName, s, t).Name + + controllerCh := make(chan struct{}) + + // Start PV controller for volume binding. 
+ params := persistentvolume.ControllerParameters{ + KubeClient: clientset, + SyncPeriod: time.Hour, // test shouldn't need to resync + VolumePlugins: nil, // TODO; need later for dynamic provisioning + Cloud: nil, + ClusterName: "volume-test-cluster", + VolumeInformer: informers.Core().V1().PersistentVolumes(), + ClaimInformer: informers.Core().V1().PersistentVolumeClaims(), + ClassInformer: informers.Storage().V1().StorageClasses(), + PodInformer: informers.Core().V1().Pods(), + EventRecorder: nil, // TODO: add one so we can test PV events + EnableDynamicProvisioning: true, + } + ctrl, err := persistentvolume.NewController(params) + if err != nil { + t.Fatalf("Failed to create PV controller: %v", err) + } + go ctrl.Run(controllerCh) + + // Start scheduler + configurator := factory.NewConfigFactory( + v1.DefaultSchedulerName, + clientset, + informers.Core().V1().Nodes(), + informers.Core().V1().Pods(), + informers.Core().V1().PersistentVolumes(), + informers.Core().V1().PersistentVolumeClaims(), + informers.Core().V1().ReplicationControllers(), + informers.Extensions().V1beta1().ReplicaSets(), + informers.Apps().V1beta1().StatefulSets(), + informers.Core().V1().Services(), + informers.Policy().V1beta1().PodDisruptionBudgets(), + informers.Storage().V1().StorageClasses(), + v1.DefaultHardPodAffinitySymmetricWeight, + true, // Enable EqualCache by default. + ) + + eventBroadcaster := record.NewBroadcaster() + sched, err := scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) { + cfg.StopEverything = controllerCh + cfg.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(clientset.CoreV1().RESTClient()).Events("")}) + }) + if err != nil { + t.Fatalf("Failed to create scheduler: %v.", err) + } + + go sched.Run() + + // Waiting for all controller sync. 
+ informers.Start(controllerCh) + informers.WaitForCacheSync(controllerCh) + + // Create shared objects + // Create nodes + for i := 0; i < numberOfNodes; i++ { + testNode := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("node-%d", i+1), + Labels: map[string]string{affinityLabelKey: fmt.Sprintf("node-%d", i+1)}, + }, + Spec: v1.NodeSpec{Unschedulable: false}, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourcePods: *resource.NewQuantity(podLimit, resource.DecimalSI), + }, + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionTrue, + Reason: fmt.Sprintf("schedulable condition"), + LastHeartbeatTime: metav1.Time{Time: time.Now()}, + }, + }, + }, + } + if _, err := clientset.CoreV1().Nodes().Create(testNode); err != nil { + t.Fatalf("Failed to create Node %q: %v", testNode.Name, err) + } + } + + // Create SCs + scs := []*storagev1.StorageClass{ + makeStorageClass(classImmediate, &modeImmediate), + makeStorageClass(classWait, &modeWait), + } + for _, sc := range scs { + if _, err := clientset.StorageV1().StorageClasses().Create(sc); err != nil { + t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) + } + } + + return &testConfig{ + client: clientset, + ns: ns, + stop: controllerCh, + teardown: func() { + clientset.CoreV1().Pods(ns).DeleteCollection(nil, metav1.ListOptions{}) + clientset.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(nil, metav1.ListOptions{}) + clientset.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + clientset.StorageV1().StorageClasses().DeleteCollection(nil, metav1.ListOptions{}) + clientset.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) + close(controllerCh) + closeFn() + utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,LocalPersistentVolumes=false") + }, + } +} + func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass { return &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ @@ -232,7 +433,7 @@ func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1 } } -func makePV(t *testing.T, name, scName, pvcName, ns string) *v1.PersistentVolume { +func makePV(t *testing.T, name, scName, pvcName, ns, node string) *v1.PersistentVolume { pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -257,9 +458,9 @@ func makePV(t *testing.T, name, scName, pvcName, ns string) *v1.PersistentVolume { MatchExpressions: []v1.NodeSelectorRequirement{ { - Key: labelKey, + Key: affinityLabelKey, Operator: v1.NodeSelectorOpIn, - Values: []string{labelValue}, + Values: []string{node}, }, }, }, @@ -350,3 +551,31 @@ func validatePVPhase(t *testing.T, client clientset.Interface, pv *v1.Persistent t.Errorf("PV %v phase not %v, got %v", pv.Name, phase, pv.Status.Phase) } } + +func markNodeAffinity(pod *v1.Pod, node string) { + affinity := &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: affinityLabelKey, + Operator: v1.NodeSelectorOpIn, + Values: []string{node}, + }, + }, + }, + }, + }, + }, + } + pod.Spec.Affinity = affinity +} + +func markNodeSelector(pod *v1.Pod, node string) { + ns := map[string]string{ + affinityLabelKey: node, + } + pod.Spec.NodeSelector = ns +} From 7167c4741112dc1cb0e9965a6314a2a77f4dfee8 Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Thu, 22 Feb 2018 17:42:58 -0800 Subject: [PATCH 2/2] Add more 
test cases for volume binding in the scheduler --- test/integration/scheduler/BUILD | 1 - test/integration/scheduler/util.go | 12 ++ .../scheduler/volume_binding_test.go | 118 ++++++++++++++---- 3 files changed, 105 insertions(+), 26 deletions(-) diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD index 3e6e766b987..1906eaebd58 100644 --- a/test/integration/scheduler/BUILD +++ b/test/integration/scheduler/BUILD @@ -11,7 +11,6 @@ go_test( size = "large", srcs = [ "extender_test.go", - "local-pv-neg-affinity_test.go", "main_test.go", "predicates_test.go", "preemption_test.go", diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index f3d63843086..68e40eaf814 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -362,6 +362,18 @@ func waitForPodToSchedule(cs clientset.Interface, pod *v1.Pod) error { return waitForPodToScheduleWithTimeout(cs, pod, 30*time.Second) } +// waitForPodUnscheduleWithTimeout waits for a pod to fail scheduling and returns +// an error if it does not become unschedulable within the given timeout. +func waitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error { + return wait.Poll(100*time.Millisecond, timeout, podUnschedulable(cs, pod.Namespace, pod.Name)) +} + +// waitForPodUnschedule waits for a pod to fail scheduling and returns +// an error if it does not become unschedulable within the timeout duration (30 seconds). +func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error { + return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second) +} + // deletePod deletes the given pod in the given namespace. func deletePod(cs clientset.Interface, podName string, nsName string) error { return cs.CoreV1().Pods(nsName).Delete(podName, metav1.NewDeleteOptions(0)) diff --git a/test/integration/scheduler/volume_binding_test.go b/test/integration/scheduler/volume_binding_test.go index 0cebad462e2..0c5c919b333 100644 --- a/test/integration/scheduler/volume_binding_test.go +++ b/test/integration/scheduler/volume_binding_test.go @@ -33,6 +33,7 @@ import ( storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" @@ -67,26 +68,36 @@ var ( ) const ( - node1 = "node-1" - podLimit = 100 - volsPerPod = 5 - affinityLabelKey = "kubernetes.io/hostname" + node1 = "node-1" + node2 = "node-2" + podLimit = 100 + volsPerPod = 5 + nodeAffinityLabelKey = "kubernetes.io/hostname" ) func TestVolumeBinding(t *testing.T) { - config := setupNodes(t, "volume-scheduling", 2) + config := setupCluster(t, "volume-scheduling", 2) defer config.teardown() cases := map[string]struct { pod *v1.Pod pvs []*v1.PersistentVolume pvcs []*v1.PersistentVolumeClaim + // Create these, but they should not be bound in the end + unboundPvcs []*v1.PersistentVolumeClaim + unboundPvs []*v1.PersistentVolume + shouldFail bool }{ "immediate can bind": { pod: makePod("pod-i-canbind", config.ns, []string{"pvc-i-canbind"}), pvs: []*v1.PersistentVolume{makePV(t, "pv-i-canbind", classImmediate, "", "", node1)}, pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-i-canbind", config.ns, &classImmediate, "")}, }, + "immediate cannot bind": { + pod: makePod("pod-i-cannotbind", config.ns, []string{"pvc-i-cannotbind"}), + unboundPvcs: 
[]*v1.PersistentVolumeClaim{makePVC("pvc-i-cannotbind", config.ns, &classImmediate, "")}, + shouldFail: true, + }, "immediate pvc prebound": { pod: makePod("pod-i-pvc-prebound", config.ns, []string{"pvc-i-prebound"}), pvs: []*v1.PersistentVolume{makePV(t, "pv-i-pvc-prebound", classImmediate, "", "", node1)}, @@ -102,6 +113,11 @@ func TestVolumeBinding(t *testing.T) { pvs: []*v1.PersistentVolume{makePV(t, "pv-w-canbind", classWait, "", "", node1)}, pvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-canbind", config.ns, &classWait, "")}, }, + "wait cannot bind": { + pod: makePod("pod-w-cannotbind", config.ns, []string{"pvc-w-cannotbind"}), + unboundPvcs: []*v1.PersistentVolumeClaim{makePVC("pvc-w-cannotbind", config.ns, &classWait, "")}, + shouldFail: true, + }, "wait pvc prebound": { pod: makePod("pod-w-pvc-prebound", config.ns, []string{"pvc-w-prebound"}), pvs: []*v1.PersistentVolume{makePV(t, "pv-w-pvc-prebound", classWait, "", "", node1)}, @@ -115,13 +131,28 @@ func TestVolumeBinding(t *testing.T) { "wait can bind two": { pod: makePod("pod-w-canbind-2", config.ns, []string{"pvc-w-canbind-2", "pvc-w-canbind-3"}), pvs: []*v1.PersistentVolume{ - makePV(t, "pv-w-canbind-2", classWait, "", "", node1), - makePV(t, "pv-w-canbind-3", classWait, "", "", node1), + makePV(t, "pv-w-canbind-2", classWait, "", "", node2), + makePV(t, "pv-w-canbind-3", classWait, "", "", node2), }, pvcs: []*v1.PersistentVolumeClaim{ makePVC("pvc-w-canbind-2", config.ns, &classWait, ""), makePVC("pvc-w-canbind-3", config.ns, &classWait, ""), }, + unboundPvs: []*v1.PersistentVolume{ + makePV(t, "pv-w-canbind-5", classWait, "", "", node1), + }, + }, + "wait cannot bind two": { + pod: makePod("pod-w-cannotbind-2", config.ns, []string{"pvc-w-cannotbind-1", "pvc-w-cannotbind-2"}), + unboundPvcs: []*v1.PersistentVolumeClaim{ + makePVC("pvc-w-cannotbind-1", config.ns, &classWait, ""), + makePVC("pvc-w-cannotbind-2", config.ns, &classWait, ""), + }, + unboundPvs: []*v1.PersistentVolume{ + makePV(t, "pv-w-cannotbind-1", classWait, "", "", node2), + makePV(t, "pv-w-cannotbind-2", classWait, "", "", node1), + }, + shouldFail: true, }, "mix immediate and wait": { pod: makePod("pod-mix-bound", config.ns, []string{"pvc-w-canbind-4", "pvc-i-canbind-2"}), @@ -134,11 +165,6 @@ func TestVolumeBinding(t *testing.T) { makePVC("pvc-i-canbind-2", config.ns, &classImmediate, ""), }, }, - // TODO: - // immediate mode - PVC cannot bound - // wait mode - PVC cannot bind - // wait mode - 2 PVCs, 1 cannot bind - // wait mode - 2 pvcs, multiple nodes } for name, test := range cases { @@ -151,28 +177,51 @@ func TestVolumeBinding(t *testing.T) { } } + for _, pv := range test.unboundPvs { + if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { + t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) + } + } + // Create PVCs for _, pvc := range test.pvcs { if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } + for _, pvc := range test.unboundPvcs { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) + } + } // Create Pod if _, err := config.client.CoreV1().Pods(config.ns).Create(test.pod); err != nil { t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err) } - if err := waitForPodToSchedule(config.client, test.pod); err != nil { - t.Errorf("Failed to schedule Pod %q: %v", 
test.pod.Name, err) + if test.shouldFail { + if err := waitForPodUnschedulable(config.client, test.pod); err != nil { + t.Errorf("Pod %q was not unschedulable: %v", test.pod.Name, err) + } + } else { + if err := waitForPodToSchedule(config.client, test.pod); err != nil { + t.Errorf("Failed to schedule Pod %q: %v", test.pod.Name, err) + } } // Validate PVC/PV binding for _, pvc := range test.pvcs { validatePVCPhase(t, config.client, pvc, v1.ClaimBound) } + for _, pvc := range test.unboundPvcs { + validatePVCPhase(t, config.client, pvc, v1.ClaimPending) + } for _, pv := range test.pvs { validatePVPhase(t, config.client, pv, v1.VolumeBound) } + for _, pv := range test.unboundPvs { + validatePVPhase(t, config.client, pv, v1.VolumeAvailable) + } // TODO: validate events on Pods and PVCs @@ -184,7 +233,7 @@ func TestVolumeBinding(t *testing.T) { // TestVolumeBindingStress creates pods, each with unbound PVCs. func TestVolumeBindingStress(t *testing.T) { - config := setupNodes(t, "volume-binding-stress", 1) + config := setupCluster(t, "volume-binding-stress", 1) defer config.teardown() // Create enough PVs and PVCs for all the pods @@ -239,7 +288,7 @@ func TestVolumeBindingStress(t *testing.T) { } func TestPVAffinityConflict(t *testing.T) { - config := setupNodes(t, "volume-scheduling", 3) + config := setupCluster(t, "volume-scheduling", 3) defer config.teardown() pv := makePV(t, "local-pv", classImmediate, "", "", node1) @@ -255,6 +304,11 @@ func TestPVAffinityConflict(t *testing.T) { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } + // Wait for PVC bound + if err := waitForPVCBound(config.client, pvc); err != nil { + t.Fatalf("PVC %q failed to bind: %v", pvc.Name, err) + } + nodeMarkers := []interface{}{ markNodeAffinity, markNodeSelector, @@ -268,10 +322,10 @@ func TestPVAffinityConflict(t *testing.T) { t.Fatalf("Failed to create Pod %q: %v", pod.Name, err) } // Give time to shceduler to attempt to schedule pod - if err := waitForPodToSchedule(config.client, pod); err == nil { - t.Errorf("Failed as Pod %s was scheduled successfully but expected to fail", pod.Name) + if err := waitForPodUnschedulable(config.client, pod); err != nil { + t.Errorf("Failed as Pod %s was not unschedulable: %v", pod.Name, err) } - // Deleting test pod + // Check pod conditions p, err := config.client.CoreV1().Pods(config.ns).Get(podName, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to access Pod %s status: %v", podName, err) @@ -283,15 +337,16 @@ func TestPVAffinityConflict(t *testing.T) { t.Fatalf("Failed as Pod %s reason was: %s but expected: Unschedulable", podName, p.Status.Conditions[0].Reason) } if !strings.Contains(p.Status.Conditions[0].Message, "node(s) didn't match node selector") || !strings.Contains(p.Status.Conditions[0].Message, "node(s) had volume node affinity conflict") { - t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match node selector, node(s) had volume node affinity conflict", podName) + t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match node selector, node(s) had volume node affinity conflict. 
Got message %q", podName, p.Status.Conditions[0].Message) } + // Deleting test pod if err := config.client.CoreV1().Pods(config.ns).Delete(podName, &metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete Pod %s: %v", podName, err) } } } -func setupNodes(t *testing.T, nsName string, numberOfNodes int) *testConfig { +func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig { h := &framework.MasterHolder{Initialized: make(chan struct{})} s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { <-h.Initialized @@ -373,7 +428,7 @@ func setupNodes(t *testing.T, nsName string, numberOfNodes int) *testConfig { testNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("node-%d", i+1), - Labels: map[string]string{affinityLabelKey: fmt.Sprintf("node-%d", i+1)}, + Labels: map[string]string{nodeAffinityLabelKey: fmt.Sprintf("node-%d", i+1)}, }, Spec: v1.NodeSpec{Unschedulable: false}, Status: v1.NodeStatus{ @@ -458,7 +513,7 @@ func makePV(t *testing.T, name, scName, pvcName, ns, node string) *v1.Persistent { MatchExpressions: []v1.NodeSelectorRequirement{ { - Key: affinityLabelKey, + Key: nodeAffinityLabelKey, Operator: v1.NodeSelectorOpIn, Values: []string{node}, }, @@ -552,6 +607,19 @@ func validatePVPhase(t *testing.T, client clientset.Interface, pv *v1.Persistent } } +func waitForPVCBound(client clientset.Interface, pvc *v1.PersistentVolumeClaim) error { + return wait.Poll(time.Second, 30*time.Second, func() (bool, error) { + claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if claim.Status.Phase == v1.ClaimBound { + return true, nil + } + return false, nil + }) +} + func markNodeAffinity(pod *v1.Pod, node string) { affinity := &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ @@ -560,7 +628,7 @@ func markNodeAffinity(pod *v1.Pod, node string) { { MatchExpressions: []v1.NodeSelectorRequirement{ { - Key: affinityLabelKey, + Key: nodeAffinityLabelKey, Operator: v1.NodeSelectorOpIn, Values: []string{node}, }, @@ -575,7 +643,7 @@ func markNodeAffinity(pod *v1.Pod, node string) { func markNodeSelector(pod *v1.Pod, node string) { ns := map[string]string{ - affinityLabelKey: node, + nodeAffinityLabelKey: node, } pod.Spec.NodeSelector = ns }
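
For context on the helpers added to util.go in the second commit: waitForPodUnschedulable and waitForPodUnschedulableWithTimeout poll an existing podUnschedulable condition function whose body is not part of this diff. The sketch below is an assumption of roughly what that condition checks, written against the client-go API vintage used elsewhere in this patch; the actual helper in test/integration/scheduler/util.go may differ (for example in how it handles lookup errors), so treat this as an illustration rather than the upstream implementation.

package scheduler

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// podUnschedulable returns a wait.ConditionFunc that reports true once the
// scheduler has recorded a PodScheduled=False condition with reason
// "Unschedulable" on the pod, i.e. it has tried and failed to place it.
// (Sketch only: error handling and helper names are assumptions.)
func podUnschedulable(cs clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := cs.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			// Treat transient lookup errors as "not yet unschedulable" so wait.Poll retries.
			return false, nil
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodScheduled {
				return cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable", nil
			}
		}
		return false, nil
	}
}

With a condition of this shape, waitForPodUnschedulable(config.client, pod) in TestPVAffinityConflict and in the "shouldFail" cases of TestVolumeBinding returns only after the scheduler has written the Unschedulable condition that the tests then assert on, which is why the tests no longer need to interpret a timed-out waitForPodToSchedule as a scheduling failure.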