Merge pull request #48130 from aleksandra-malinowska/autoscaling-volumes-scale-up-test
Automatic merge from submit-queue (batch tested with PRs 48004, 48205, 48130, 48207)

Add e2e tests for CA scale up when pending pod requests volume

Test verifying that pending pods with a PVC don't interfere with scale up (issue: kubernetes/autoscaler#22). The tests first saturate the cluster with anti-affinity pods (one per node), then create one more pod that mounts a volume and check that Cluster Autoscaler still adds a node for it.
commit 39fa1cb29e
@@ -32,6 +32,7 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
@@ -33,6 +33,8 @@ import (
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
@@ -252,6 +254,100 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+newPods, scaleUpTimeout))
	})

It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp", func() {
|
||||
By("creating pods")
|
||||
pods := nodeCount
|
||||
newPods := 1
|
||||
labels := map[string]string{
|
||||
"anti-affinity": "yes",
|
||||
}
|
||||
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod")
|
||||
|
||||
By("waiting for all pods before triggering scale up")
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
|
||||
By("creating a pod requesting EmptyDir")
|
||||
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes))
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "extra-pod")
|
||||
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+newPods, scaleUpTimeout))
|
||||
})
|
||||
|
||||
It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp", func() {
|
||||
		framework.SkipUnlessProviderIs("gce", "gke")

		volumeLabels := labels.Set{
			framework.VolumeSelectorKey: f.Namespace.Name,
		}
		selector := metav1.SetAsLabelSelector(volumeLabels)

		By("creating volume & pvc")
		diskName, err := framework.CreatePDWithRetry()
		framework.ExpectNoError(err)
		pvConfig := framework.PersistentVolumeConfig{
			NamePrefix: "gce-",
			Labels:     volumeLabels,
			PVSource: v1.PersistentVolumeSource{
				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
					PDName:   diskName,
					FSType:   "ext3",
					ReadOnly: false,
				},
			},
			Prebind: nil,
		}
		pvcConfig := framework.PersistentVolumeClaimConfig{
			Annotations: map[string]string{
				v1.BetaStorageClassAnnotation: "",
			},
			Selector: selector,
		}

		pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
		framework.ExpectNoError(err)
		framework.ExpectNoError(framework.WaitOnPVandPVC(c, f.Namespace.Name, pv, pvc))

		defer func() {
			errs := framework.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
			if len(errs) > 0 {
				framework.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
			}
			pv, pvc = nil, nil
			if diskName != "" {
				framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
			}
		}()

		By("creating pods")
		pods := nodeCount
		labels := map[string]string{
			"anti-affinity": "yes",
		}
		framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
		defer func() {
			framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod")
			glog.Infof("RC and pods not using volume deleted")
		}()

		By("waiting for all pods before triggering scale up")
		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))

		By("creating a pod requesting PVC")
		pvcPodName := "pvc-pod"
		newPods := 1
		volumes := buildVolumes(pv, pvc)
		framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes))
		defer func() {
			framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, pvcPodName)
			framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
		}()

		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+newPods, scaleUpTimeout))
	})

	It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
		labelKey := "cluster-autoscaling-test.special-node"
		labelValue := "true"
@@ -955,6 +1051,53 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id
	return nil
}

func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string, volumes []v1.Volume) error {
	config := &testutils.RCConfig{
		Affinity:       buildAntiAffinity(antiAffinityLabels),
		Volumes:        volumes,
		Client:         f.ClientSet,
		InternalClient: f.InternalClientset,
		Name:           id,
		Namespace:      namespace,
		Timeout:        scaleUpTimeout,
		Image:          framework.GetPauseImageName(f.ClientSet),
		Replicas:       pods,
		Labels:         podLabels,
	}
	err := framework.RunRC(*config)
	if err != nil {
		return err
	}
	_, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
	if err != nil {
		return err
	}
	return nil
}

var emptyDirVolumes = []v1.Volume{
	{
		Name: "empty-volume",
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{},
		},
	},
}

func buildVolumes(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []v1.Volume {
	return []v1.Volume{
		{
			Name: pv.Name,
			VolumeSource: v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
					ClaimName: pvc.Name,
					ReadOnly:  false,
				},
			},
		},
	}
}

func buildAntiAffinity(labels map[string]string) *v1.Affinity {
	return &v1.Affinity{
		PodAntiAffinity: &v1.PodAntiAffinity{
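
(The rendered diff is cut off above, mid-way through buildAntiAffinity. For readability, a likely completion follows, assuming the standard required pod anti-affinity keyed on the node hostname; this reconstruction is an inference from the surrounding code, not part of the rendered diff.)

func buildAntiAffinity(labels map[string]string) *v1.Affinity {
	return &v1.Affinity{
		PodAntiAffinity: &v1.PodAntiAffinity{
			// Assumed completion: never co-schedule two pods carrying these
			// labels on the same node, so each replica occupies its own node.
			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
				{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: labels,
					},
					TopologyKey: "kubernetes.io/hostname",
				},
			},
		},
	}
}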