DRA E2E: clean up class creation for extended resource tests
The initial testing of DRAExtendedResource changed the common setup so that
every test ran with seven DeviceClasses. This is unnecessary and confusing,
for example when looking at the endpoint stats for the simple conformance
test ("Are the stats correct? The test should only create one class.").
This update changes that: each test which needs more than the default class
now creates the additional classes explicitly itself. For clarity, the special
-1 index gets a named constant (SingletonIndex), and the naming of the
DeviceClasses gets updated so that the default class keeps a simple name
(no additional suffix).
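
To illustrate the naming scheme, here is a standalone sketch (not the actual
test helper; the base name and the indices are made up) of how the updated
DeviceClass names come out, mirroring the switch in Builder.Class in the diff
below:

package main

import (
	"fmt"
	"strconv"
)

// SingletonIndex mirrors the named -1 index introduced by this change.
const SingletonIndex = -1

// className sketches the updated naming: the default class (index 0) keeps
// the plain base name, SingletonIndex gets a "-singleton" suffix, and every
// other index gets a numeric suffix.
func className(base string, i int) string {
	switch i {
	case SingletonIndex:
		return base + "-singleton"
	case 0:
		return base
	default:
		return base + "-" + strconv.Itoa(i)
	}
}

func main() {
	for _, i := range []int{SingletonIndex, 0, 1, 5} {
		// Prints e2e-dra-class-singleton, e2e-dra-class, e2e-dra-class-1, e2e-dra-class-5.
		fmt.Println(className("e2e-dra-class", i))
	}
}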
Using the same "example.com/resource0" extended resource name in tests which
run in parallel might explain
https://github.com/kubernetes/kubernetes/issues/133653. Those shared names get
replaced with extended resource names based on the driver name, which is
unique in each parallel test.
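
Similarly, a minimal sketch (standalone; the driver names here are invented,
the real helper is Builder.ExtendedResourceName in the diff below) of why
driver-based names cannot collide across parallel tests:

package main

import "fmt"

// SingletonIndex is the named -1 index; it keeps the fixed resource name
// that the example device plugin hard-codes.
const SingletonIndex = -1

// extendedResourceName sketches the new scheme: every index except
// SingletonIndex is prefixed with the per-test driver name, so two tests
// running in parallel no longer share "example.com/resource0".
func extendedResourceName(driverName string, i int) string {
	if i == SingletonIndex {
		return "example.com/resource"
	}
	return fmt.Sprintf("%s/resource-%d", driverName, i)
}

func main() {
	fmt.Println(extendedResourceName("dra-0001.example.com", 0)) // unique to test 1
	fmt.Println(extendedResourceName("dra-0002.example.com", 0)) // unique to test 2
	fmt.Println(extendedResourceName("dra-0001.example.com", SingletonIndex))
}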
@@ -1918,7 +1918,7 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {
 	ginkgo.It("must run a pod with extended resource with one container one resource", func(ctx context.Context) {
 		pod := b.Pod()
 		res := v1.ResourceList{}
-		res[v1.ResourceName(drautils.ExtendedResourceName(0))] = resource.MustParse("1")
+		res[v1.ResourceName(b.ExtendedResourceName(0))] = resource.MustParse("1")
 		pod.Spec.Containers[0].Resources.Requests = res
 		pod.Spec.Containers[0].Resources.Limits = res
@@ -1932,15 +1932,20 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {
 	})

 	ginkgo.It("must run a pod with extended resource with one container three resources", func(ctx context.Context) {
+		var objects []klog.KMetadata
 		pod := b.Pod()
 		res := v1.ResourceList{}
 		for i := range 3 {
-			res[v1.ResourceName(drautils.ExtendedResourceName(i))] = resource.MustParse("1")
+			res[v1.ResourceName(b.ExtendedResourceName(i))] = resource.MustParse("1")
+			if i > 0 {
+				objects = append(objects, b.Class(i))
+			}
 		}
 		pod.Spec.Containers[0].Resources.Requests = res
 		pod.Spec.Containers[0].Resources.Limits = res
+		objects = append(objects, pod)

-		b.Create(ctx, pod)
+		b.Create(ctx, objects...)
 		err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)
 		framework.ExpectNoError(err, "start pod")
 		containerEnv := []string{
@@ -1951,21 +1956,26 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {
 		drautils.TestContainerEnv(ctx, f, pod, pod.Spec.Containers[0].Name, false, containerEnv...)
 	})
 	ginkgo.It("must run a pod with extended resource with three containers one resource each", func(ctx context.Context) {
+		var objects []klog.KMetadata
 		pod := b.Pod()
 		pod.Spec.Containers = append(pod.Spec.Containers, *pod.Spec.Containers[0].DeepCopy())
 		pod.Spec.Containers = append(pod.Spec.Containers, *pod.Spec.Containers[0].DeepCopy())
 		pod.Spec.Containers[0].Name = "container0"
 		pod.Spec.Containers[1].Name = "container1"
 		pod.Spec.Containers[2].Name = "container2"
+		objects = append(objects, pod)

 		for i := range 3 {
 			res := v1.ResourceList{}
-			res[v1.ResourceName(drautils.ExtendedResourceName(i))] = resource.MustParse("1")
+			res[v1.ResourceName(b.ExtendedResourceName(i))] = resource.MustParse("1")
 			pod.Spec.Containers[i].Resources.Requests = res
 			pod.Spec.Containers[i].Resources.Limits = res
+			if i > 0 {
+				objects = append(objects, b.Class(i))
+			}
 		}

-		b.Create(ctx, pod)
+		b.Create(ctx, objects...)
 		err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)
 		framework.ExpectNoError(err, "start pod")
 		for i := range 3 {
@@ -1976,6 +1986,7 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {
 		}
 	})
 	ginkgo.It("must run a pod with extended resource with three containers multiple resources each", func(ctx context.Context) {
+		var objects []klog.KMetadata
 		pod := b.Pod()
 		pod.Spec.Containers = append(pod.Spec.Containers, *pod.Spec.Containers[0].DeepCopy())
 		pod.Spec.Containers = append(pod.Spec.Containers, *pod.Spec.Containers[0].DeepCopy())
@@ -1984,22 +1995,26 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {
 		pod.Spec.Containers[2].Name = "container2"

 		res := v1.ResourceList{}
-		res[v1.ResourceName(drautils.ExtendedResourceName(0))] = resource.MustParse("1")
+		res[v1.ResourceName(b.ExtendedResourceName(0))] = resource.MustParse("1")
 		pod.Spec.Containers[0].Resources.Requests = res
 		pod.Spec.Containers[0].Resources.Limits = res
 		res = v1.ResourceList{}
-		res[v1.ResourceName(drautils.ExtendedResourceName(1))] = resource.MustParse("1")
-		res[v1.ResourceName(drautils.ExtendedResourceName(2))] = resource.MustParse("1")
+		res[v1.ResourceName(b.ExtendedResourceName(1))] = resource.MustParse("1")
+		res[v1.ResourceName(b.ExtendedResourceName(2))] = resource.MustParse("1")
 		pod.Spec.Containers[1].Resources.Requests = res
 		pod.Spec.Containers[1].Resources.Limits = res
 		res = v1.ResourceList{}
-		res[v1.ResourceName(drautils.ExtendedResourceName(3))] = resource.MustParse("1")
-		res[v1.ResourceName(drautils.ExtendedResourceName(4))] = resource.MustParse("1")
-		res[v1.ResourceName(drautils.ExtendedResourceName(5))] = resource.MustParse("1")
+		res[v1.ResourceName(b.ExtendedResourceName(3))] = resource.MustParse("1")
+		res[v1.ResourceName(b.ExtendedResourceName(4))] = resource.MustParse("1")
+		res[v1.ResourceName(b.ExtendedResourceName(5))] = resource.MustParse("1")
 		pod.Spec.Containers[2].Resources.Requests = res
 		pod.Spec.Containers[2].Resources.Limits = res
+		for i := 1; i < 6; i++ {
+			objects = append(objects, b.Class(i))
+		}
+		objects = append(objects, pod)

-		b.Create(ctx, pod)
+		b.Create(ctx, objects...)
 		err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)
 		framework.ExpectNoError(err, "start pod")
 		containerEnv := []string{
@@ -2032,23 +2047,26 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {
 	// The test runs two pods, one pod request extended resource backed by DRA,
 	// the other pod requests extended resource by device plugin.
 	f.It("must run pods with extended resource on dra nodes and device plugin nodes", f.WithSerial(), func(ctx context.Context) {
+		var objects []klog.KMetadata
 		extendedResourceName := deployDevicePlugin(ctx, f, nodes.ExtraNodeNames)
-		// drautils.ExtendedResourceName(-1) must be the same as the returned extendedResourceName
-		// drautils.ExtendedResourceName(-1) is used for DRA drivers
+		// b.ExtendedResourceName(SingletonIndex) must be the same as the returned extendedResourceName.
+		// b.ExtendedResourceName(SingletonIndex) is used for DRA drivers whereas
 		// extendedResourceName is used for device plugin.
-		gomega.Expect(string(extendedResourceName)).To(gomega.Equal(drautils.ExtendedResourceName(-1)))
+		gomega.Expect(string(extendedResourceName)).To(gomega.Equal(b.ExtendedResourceName(drautils.SingletonIndex)))

 		pod1 := b.Pod()
 		res := v1.ResourceList{}
-		res[v1.ResourceName(drautils.ExtendedResourceName(-1))] = resource.MustParse("2")
+		res[v1.ResourceName(b.ExtendedResourceName(drautils.SingletonIndex))] = resource.MustParse("2")
 		pod1.Spec.Containers[0].Resources.Requests = res
 		pod1.Spec.Containers[0].Resources.Limits = res
-		b.Create(ctx, pod1)
+		objects = append(objects, b.Class(drautils.SingletonIndex), pod1)

 		pod2 := b.Pod()
 		pod2.Spec.Containers[0].Resources.Requests = res
 		pod2.Spec.Containers[0].Resources.Limits = res
-		b.Create(ctx, pod2)
+		objects = append(objects, pod2)

+		b.Create(ctx, objects...)
+
 		err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod1)
 		framework.ExpectNoError(err, "start pod1")
@@ -53,14 +53,15 @@ import (
 // "example.com/resource" is not special, any valid extended resource name can be used
 // instead, except when using example device plugin in the test, which hard coded it,
 // see test/e2e/dra/deploy_device_plugin.go.
-// i == -1 is special, the extended resource name has no extra suffix, it is
-// used in the test where a cluster has both DRA driver and device plugin.
-func ExtendedResourceName(i int) string {
-	suffix := ""
-	if i >= 0 {
-		suffix = strconv.Itoa(i)
+// i == -1 == SingletonIndex is special, the extended resource name has no extra suffix
+// and matches the one used by the example device plugin.
+func (b *Builder) ExtendedResourceName(i int) string {
+	switch i {
+	case SingletonIndex:
+		return "example.com/resource"
+	default:
+		return b.driver.Name + "/resource" + fmt.Sprintf("-%d", i)
 	}
-	return "example.com/resource" + suffix
 }

 // Builder contains a running counter to make objects unique within thir
@@ -80,17 +81,26 @@ func (b *Builder) ClassName() string {
 	return b.f.UniqueName + b.driver.NameSuffix + "-class"
 }

+// SingletonIndex causes Builder.Class and ExtendedResourceName to create a
+// DeviceClass where the the extended resource name has no %d
+// suffix and matches the name as used by the example device plugin.
+const SingletonIndex = -1
+
 // Class returns the device Class that the builder's other objects
 // reference.
 // The input i is used to pick the extended resource name whose suffix has the
 // same i for the device class.
-// i == -1 is special, the extended resource name has no extra suffix, it is
-// used in the test where a cluster has both DRA driver and device plugin.
+// i == -1 == SingletonIndex is special, the extended resource name has no extra suffix.
 func (b *Builder) Class(i int) *resourceapi.DeviceClass {
-	ern := ExtendedResourceName(i)
+	ern := b.ExtendedResourceName(i)
 	name := b.ClassName()
-	if i >= 0 {
-		name = b.ClassName() + strconv.Itoa(i)
+	switch i {
+	case SingletonIndex:
+		name += "-singleton"
+	case 0:
+		// No numeric suffix. This is what most tests use.
+	default:
+		name += "-" + strconv.Itoa(i)
 	}
 	class := &resourceapi.DeviceClass{
 		ObjectMeta: metav1.ObjectMeta{
@@ -467,9 +477,7 @@ func NewBuilderNow(ctx context.Context, f *framework.Framework, driver *Driver)
 func (b *Builder) setUp(ctx context.Context) {
 	b.podCounter = 0
 	b.claimCounter = 0
-	for i := -1; i < 6; i++ {
-		b.Create(ctx, b.Class(i))
-	}
+	b.Create(ctx, b.Class(0))
 	ginkgo.DeferCleanup(b.tearDown)
 }
