Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)
Merge pull request #52523 from NickrenREN/ephemeral-storage-e2e
Automatic merge from submit-queue (batch tested with PRs 54773, 52523, 47497, 55356, 49429). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Add ephemeral storage e2e tests

Add e2e tests of limitrange/quota/downward_api for local ephemeral storage.

**Which issue this PR fixes**: part of #52463

**Special notes for your reviewer**: Add e2e tests of limitrange/quota/downward_api for local ephemeral storage.

**Release note**:
```release-note
Add limitrange/resourcequota/downward_api e2e tests for local ephemeral storage
```

/assign @jingxu97
Commit: b616dff2e6
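Context for the diff below: all three new test suites exercise the same resource stanza. A minimal sketch (the helper name `ephemeralStorageRequirements` is ours, not part of this commit; the quantities are the ones the downward API test uses) of how a container requests and limits local ephemeral storage, keyed by `v1.ResourceEphemeralStorage` and gated on the alpha LocalStorageCapacityIsolation feature:

```go
package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// ephemeralStorageRequirements sketches the resource stanza the new e2e tests
// exercise: local ephemeral storage is requested and limited like CPU or
// memory, under the v1.ResourceEphemeralStorage key.
func ephemeralStorageRequirements() v1.ResourceRequirements {
	return v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("32Mi")},
		Limits:   v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("64Mi")},
	}
}
```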
@@ -192,7 +192,7 @@ func MergeContainerResourceLimits(container *v1.Container,
 	if container.Resources.Limits == nil {
 		container.Resources.Limits = make(v1.ResourceList)
 	}
-	for _, resource := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} {
+	for _, resource := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory, v1.ResourceEphemeralStorage} {
 		if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() {
 			if cap, exists := allocatable[resource]; exists {
 				container.Resources.Limits[resource] = *cap.Copy()
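This one-line change is what makes the "default from node allocatable" downward API case below work: when a container declares no ephemeral-storage limit, the node-allocatable value is merged in before resourceFieldRefs are resolved. A rough sketch of the effect (the import path for the helper is our assumption based on where this hunk appears, not stated in the diff):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	// assumed location of the MergeContainerResourceLimits helper shown above
	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
)

func main() {
	container := &v1.Container{} // no limits declared
	allocatable := v1.ResourceList{
		v1.ResourceEphemeralStorage: resource.MustParse("10Gi"), // node allocatable
	}
	// With this PR, ephemeral-storage is merged alongside CPU and memory,
	// so the container's missing limit falls back to the allocatable value.
	resourcehelper.MergeContainerResourceLimits(container, allocatable)
	q := container.Resources.Limits[v1.ResourceEphemeralStorage]
	fmt.Println(q.String()) // "10Gi"
}
```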
@@ -225,6 +225,81 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
 	})
 })
 
+var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive]", func() {
+	f := framework.NewDefaultFramework("downward-api")
+
+	Context("Downward API tests for local ephemeral storage", func() {
+		BeforeEach(func() {
+			framework.SkipUnlessLocalEphemeralStorageEnabled()
+		})
+
+		It("should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars", func() {
+			podName := "downward-api-" + string(uuid.NewUUID())
+			env := []v1.EnvVar{
+				{
+					Name: "EPHEMERAL_STORAGE_LIMIT",
+					ValueFrom: &v1.EnvVarSource{
+						ResourceFieldRef: &v1.ResourceFieldSelector{
+							Resource: "limits.ephemeral-storage",
+						},
+					},
+				},
+				{
+					Name: "EPHEMERAL_STORAGE_REQUEST",
+					ValueFrom: &v1.EnvVarSource{
+						ResourceFieldRef: &v1.ResourceFieldSelector{
+							Resource: "requests.ephemeral-storage",
+						},
+					},
+				},
+			}
+			expectations := []string{
+				fmt.Sprintf("EPHEMERAL_STORAGE_LIMIT=%d", 64*1024*1024),
+				fmt.Sprintf("EPHEMERAL_STORAGE_REQUEST=%d", 32*1024*1024),
+			}
+
+			testDownwardAPIForEphemeralStorage(f, podName, env, expectations)
+		})
+
+		It("should provide default limits.ephemeral-storage from node allocatable", func() {
+			podName := "downward-api-" + string(uuid.NewUUID())
+			env := []v1.EnvVar{
+				{
+					Name: "EPHEMERAL_STORAGE_LIMIT",
+					ValueFrom: &v1.EnvVarSource{
+						ResourceFieldRef: &v1.ResourceFieldSelector{
+							Resource: "limits.ephemeral-storage",
+						},
+					},
+				},
+			}
+			expectations := []string{
+				"EPHEMERAL_STORAGE_LIMIT=[1-9]",
+			}
+			pod := &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   podName,
+					Labels: map[string]string{"name": podName},
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:    "dapi-container",
+							Image:   busyboxImage,
+							Command: []string{"sh", "-c", "env"},
+							Env:     env,
+						},
+					},
+					RestartPolicy: v1.RestartPolicyNever,
+				},
+			}
+
+			testDownwardAPIUsingPod(f, pod, env, expectations)
+		})
+	})
+})
+
 func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -257,6 +332,36 @@ func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, ex
 	testDownwardAPIUsingPod(f, pod, env, expectations)
 }
 
+func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
+	pod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   podName,
+			Labels: map[string]string{"name": podName},
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:    "dapi-container",
+					Image:   busyboxImage,
+					Command: []string{"sh", "-c", "env"},
+					Resources: v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceEphemeralStorage: resource.MustParse("32Mi"),
+						},
+						Limits: v1.ResourceList{
+							v1.ResourceEphemeralStorage: resource.MustParse("64Mi"),
+						},
+					},
+					Env: env,
+				},
+			},
+			RestartPolicy: v1.RestartPolicyNever,
+		},
+	}
+
+	testDownwardAPIUsingPod(f, pod, env, expectations)
+}
+
 func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
 	f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
 }
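A note on the expected values in the first test: a ResourceFieldRef with no explicit Divisor defaults to "1", so the env vars surface raw byte counts, and 64Mi resolves to 64*1024*1024 = 67108864 bytes. A quick check (a sketch, not part of the diff):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	limit := resource.MustParse("64Mi")
	request := resource.MustParse("32Mi")
	fmt.Println(limit.Value())   // 67108864 == 64*1024*1024
	fmt.Println(request.Value()) // 33554432 == 32*1024*1024
}
```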
@@ -58,6 +58,7 @@ go_library(
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
         "//pkg/controller/node:go_default_library",
+        "//pkg/features:go_default_library",
         "//pkg/kubectl:go_default_library",
         "//pkg/kubelet/apis/kubeletconfig:go_default_library",
         "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
@@ -129,6 +130,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
+        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//vendor/k8s.io/client-go/discovery:go_default_library",
         "//vendor/k8s.io/client-go/dynamic:go_default_library",
         "//vendor/k8s.io/client-go/informers:go_default_library",
@@ -73,6 +73,7 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
@@ -87,6 +88,7 @@ import (
 	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	"k8s.io/kubernetes/pkg/controller"
 	nodectlr "k8s.io/kubernetes/pkg/controller/node"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/master/ports"
@@ -311,6 +313,12 @@ func SkipIfProviderIs(unsupportedProviders ...string) {
 	}
 }
 
+func SkipUnlessLocalEphemeralStorageEnabled() {
+	if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+		Skipf("Only supported when %v feature is enabled", features.LocalStorageCapacityIsolation)
+	}
+}
+
 func SkipUnlessSSHKeyPresent() {
 	if _, err := GetSigner(TestContext.Provider); err != nil {
 		Skipf("No SSH Key for provider %s: '%v'", TestContext.Provider, err)
@@ -108,6 +108,137 @@ var _ = SIGDescribe("LimitRange", func() {
 
 })
 
+var _ = framework.KubeDescribe("LimitRange", func() {
+	f := framework.NewDefaultFramework("limitrange")
+
+	BeforeEach(func() {
+		// only run the tests when the LocalStorageCapacityIsolation feature is enabled
+		framework.SkipUnlessLocalEphemeralStorageEnabled()
+	})
+
+	It("should create a LimitRange with default ephemeral storage and ensure pod has the default applied.", func() {
+		By("Creating a LimitRange")
+
+		min := getEphemeralStorageResourceList("100Mi")
+		max := getEphemeralStorageResourceList("500Mi")
+		defaultLimit := getEphemeralStorageResourceList("500Mi")
+		defaultRequest := getEphemeralStorageResourceList("200Mi")
+		maxLimitRequestRatio := v1.ResourceList{}
+		limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
+			min, max,
+			defaultLimit, defaultRequest,
+			maxLimitRequestRatio)
+		limitRange, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
+		Expect(err).NotTo(HaveOccurred())
+
+		defer func() {
+			By("Removing limitrange")
+			// delete through the LimitRanges client, not the Pods client
+			err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, nil)
+			Expect(err).NotTo(HaveOccurred())
+		}()
+
+		By("Fetching the LimitRange to ensure it has proper values")
+		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+		expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
+		actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
+		err = equalResourceRequirement(expected, actual)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Creating a Pod with no resource requirements")
+		pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
+		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		Expect(err).NotTo(HaveOccurred())
+
+		// capture the pod name now; pod is reassigned below
+		defer func(name string) {
+			By("Removing pod")
+			err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(name, nil)
+			Expect(err).NotTo(HaveOccurred())
+		}(pod.Name)
+
+		By("Ensuring Pod has resource requirements applied from LimitRange")
+		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+		for i := range pod.Spec.Containers {
+			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
+			if err != nil {
+				// Print the pod to help in debugging.
+				framework.Logf("Pod %+v does not have the expected requirements", pod)
+				Expect(err).NotTo(HaveOccurred())
+			}
+		}
+
+		By("Creating a Pod with request")
+		pod = f.NewTestPod("pod-partial-resources", getEphemeralStorageResourceList("150Mi"), v1.ResourceList{})
+		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		Expect(err).NotTo(HaveOccurred())
+
+		defer func(name string) {
+			By("Removing pod")
+			err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(name, nil)
+			Expect(err).NotTo(HaveOccurred())
+		}(pod.Name)
+
+		By("Ensuring Pod has merged resource requirements applied from LimitRange")
+		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+		// If you specify a Request and no Limit, the Limit is set to the default limit.
+		expected = v1.ResourceRequirements{Requests: getEphemeralStorageResourceList("150Mi"), Limits: defaultLimit}
+		for i := range pod.Spec.Containers {
+			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
+			if err != nil {
+				// Print the pod to help in debugging.
+				framework.Logf("Pod %+v does not have the expected requirements", pod)
+				Expect(err).NotTo(HaveOccurred())
+			}
+		}
+
+		By("Creating a Pod with limit")
+		pod = f.NewTestPod("pod-partial-resources2", v1.ResourceList{}, getEphemeralStorageResourceList("300Mi"))
+		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		Expect(err).NotTo(HaveOccurred())
+
+		defer func(name string) {
+			By("Removing pod")
+			err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(name, nil)
+			Expect(err).NotTo(HaveOccurred())
+		}(pod.Name)
+
+		By("Ensuring Pod has merged resource requirements applied from LimitRange")
+		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+		// If you specify a Limit and no Request, the Request defaults to the Limit.
+		// This means LimitRange.DefaultRequest only takes effect when container.resources.limits is not supplied.
+		expected = v1.ResourceRequirements{Requests: getEphemeralStorageResourceList("300Mi"), Limits: getEphemeralStorageResourceList("300Mi")}
+		for i := range pod.Spec.Containers {
+			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
+			if err != nil {
+				// Print the pod to help in debugging.
+				framework.Logf("Pod %+v does not have the expected requirements", pod)
+				Expect(err).NotTo(HaveOccurred())
+			}
+		}
+
+		By("Failing to create a Pod with less than min resources")
+		pod = f.NewTestPod(podName, getEphemeralStorageResourceList("50Mi"), v1.ResourceList{})
+		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		Expect(err).To(HaveOccurred())
+
+		By("Failing to create a Pod with more than max resources")
+		pod = f.NewTestPod(podName, getEphemeralStorageResourceList("600Mi"), v1.ResourceList{})
+		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		Expect(err).To(HaveOccurred())
+	})
+
+})
+
+func getEphemeralStorageResourceList(ephemeralStorage string) v1.ResourceList {
+	res := v1.ResourceList{}
+	if ephemeralStorage != "" {
+		res[v1.ResourceEphemeralStorage] = resource.MustParse(ephemeralStorage)
+	}
+	return res
+}
+
 func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
 	framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
 	err := equalResourceList(expected.Requests, actual.Requests)
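The newLimitRange helper already exists in this file and is not shown in the diff. For reference, the object it builds for the values above looks roughly like the sketch below; the function names (newEphemeralStorageLimitRange, ephemeralStorageList) are ours, the field names come from the v1 API:

```go
package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ephemeralStorageList(s string) v1.ResourceList {
	return v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse(s)}
}

// newEphemeralStorageLimitRange sketches the LimitRange the test creates via
// newLimitRange, with the quantities used in the test above.
func newEphemeralStorageLimitRange() *v1.LimitRange {
	return &v1.LimitRange{
		ObjectMeta: metav1.ObjectMeta{Name: "limit-range"},
		Spec: v1.LimitRangeSpec{
			Limits: []v1.LimitRangeItem{{
				Type:           v1.LimitTypeContainer,
				Min:            ephemeralStorageList("100Mi"), // pods requesting less are rejected
				Max:            ephemeralStorageList("500Mi"), // pods limiting more are rejected
				Default:        ephemeralStorageList("500Mi"), // applied when no limit is set
				DefaultRequest: ephemeralStorageList("200Mi"), // applied when no request is set
			}},
		},
	}
}
```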
@@ -42,6 +42,83 @@ const (
 
 var classGold string = "gold"
 
+var _ = SIGDescribe("ResourceQuota", func() {
+	f := framework.NewDefaultFramework("resourcequota")
+
+	BeforeEach(func() {
+		// only run the tests when the LocalStorageCapacityIsolation feature is enabled
+		framework.SkipUnlessLocalEphemeralStorageEnabled()
+	})
+
+	It("should create a ResourceQuota and capture the life of a pod.", func() {
+		By("Creating a ResourceQuota")
+		quotaName := "test-quota"
+		resourceQuota := newTestResourceQuotaForEphemeralStorage(quotaName)
+		resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
+		Expect(err).NotTo(HaveOccurred())
+
+		defer func() {
+			By("Removing resourceQuota")
+			err := deleteResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota.Name)
+			Expect(err).NotTo(HaveOccurred())
+		}()
+
+		By("Ensuring resource quota status is calculated")
+		usedResources := v1.ResourceList{}
+		usedResources[v1.ResourceQuotas] = resource.MustParse("1")
+		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Creating a Pod that fits quota")
+		podName := "test-pod"
+		requests := v1.ResourceList{}
+		requests[v1.ResourceEphemeralStorage] = resource.MustParse("300Mi")
+		pod := newTestPodForQuota(f, podName, requests, v1.ResourceList{})
+		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		Expect(err).NotTo(HaveOccurred())
+		podToUpdate := pod
+
+		By("Ensuring ResourceQuota status captures the pod usage")
+		usedResources[v1.ResourceQuotas] = resource.MustParse("1")
+		usedResources[v1.ResourcePods] = resource.MustParse("1")
+		usedResources[v1.ResourceEphemeralStorage] = requests[v1.ResourceEphemeralStorage]
+		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Not allowing a pod to be created that exceeds remaining quota")
+		requests = v1.ResourceList{}
+		requests[v1.ResourceEphemeralStorage] = resource.MustParse("300Mi")
+		pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
+		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		Expect(err).To(HaveOccurred())
+
+		By("Ensuring a pod cannot update its resource requirements")
+		// a pod cannot dynamically update its resource requirements.
+		requests = v1.ResourceList{}
+		requests[v1.ResourceEphemeralStorage] = resource.MustParse("100Mi")
+		podToUpdate.Spec.Containers[0].Resources.Requests = requests
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(podToUpdate)
+		Expect(err).To(HaveOccurred())
+
+		By("Ensuring attempts to update pod resource requirements did not change quota usage")
+		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
+		Expect(err).NotTo(HaveOccurred())
+
+		// delete the pod inline (not in a defer) so that the release of its
+		// quota usage can be observed before the test ends
+		By("Deleting the pod")
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Ensuring resource quota status released the pod usage")
+		usedResources[v1.ResourceQuotas] = resource.MustParse("1")
+		usedResources[v1.ResourcePods] = resource.MustParse("0")
+		usedResources[v1.ResourceEphemeralStorage] = resource.MustParse("0")
+		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
+		Expect(err).NotTo(HaveOccurred())
+	})
+})
+
 var _ = SIGDescribe("ResourceQuota", func() {
 	f := framework.NewDefaultFramework("resourcequota")
 
@@ -727,6 +804,16 @@ func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1
 	}
 }
 
+// newTestResourceQuotaForEphemeralStorage returns a quota that enforces default constraints for testing the alpha feature LocalStorageCapacityIsolation
+func newTestResourceQuotaForEphemeralStorage(name string) *v1.ResourceQuota {
+	hard := v1.ResourceList{}
+	hard[v1.ResourceEphemeralStorage] = resource.MustParse("500Mi")
+	return &v1.ResourceQuota{
+		ObjectMeta: metav1.ObjectMeta{Name: name},
+		Spec:       v1.ResourceQuotaSpec{Hard: hard},
+	}
+}
+
 // newTestResourceQuota returns a quota that enforces default constraints for testing
 func newTestResourceQuota(name string) *v1.ResourceQuota {
 	hard := v1.ResourceList{}
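createResourceQuota, deleteResourceQuota, and waitForResourceQuota are pre-existing helpers in resource_quota.go and are not shown in this diff. The check the quota test leans on amounts to polling the quota's status.used until every expected value matches; a condensed sketch of that comparison (the function name checkQuotaUsed and its exact shape are ours, not the real helper, which wraps this in a poll loop):

```go
package sketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// checkQuotaUsed compares a quota's reported usage against expected values.
func checkQuotaUsed(c clientset.Interface, ns, name string, want v1.ResourceList) error {
	quota, err := c.CoreV1().ResourceQuotas(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	for resourceName, expected := range want {
		used, ok := quota.Status.Used[resourceName]
		if !ok || used.Cmp(expected) != 0 {
			return fmt.Errorf("quota %s: used %v is %v, want %v",
				name, resourceName, used.String(), expected.String())
		}
	}
	return nil
}
```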