Use Mock CSI Driver for MutableCSINodeAllocatableCount storage e2e test (#132373)

* Refactor MutableCSINodeAllocatableCount storage e2e test

Signed-off-by: Eddie Torres <torredil@amazon.com>

* Update var names and formatting

Signed-off-by: Eddie Torres <torredil@amazon.com>

* Update function names

Signed-off-by: Eddie Torres <torredil@amazon.com>

---------

Signed-off-by: Eddie Torres <torredil@amazon.com>
Eddie Torres
2025-06-18 17:02:57 -04:00
committed by GitHub
parent 11533d9732
commit 6f97f5bbf8
4 changed files with 147 additions and 151 deletions


@@ -0,0 +1,144 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csimock

import (
	"context"
	"fmt"
	"strings"
	"sync/atomic"
	"time"

	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/drivers"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

const (
	initialMaxVolumesPerNode = int64(5)
	updatedMaxVolumesPerNode = int64(8)
	updatePeriodSeconds      = int64(10)
	timeout                  = 30 * time.Second
)

var _ = utils.SIGDescribe("MutableCSINodeAllocatableCount", framework.WithFeatureGate(features.MutableCSINodeAllocatableCount), func() {
	f := framework.NewDefaultFramework("mutable-allocatable-mock")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	var (
		driver     drivers.MockCSITestDriver
		cfg        *storageframework.PerTestConfig
		clientSet  clientset.Interface
		nodeName   string
		driverName string
	)
	ginkgo.BeforeEach(func(ctx context.Context) {
		// Report a lower volume limit on the first NodeGetInfo call and a higher
		// one on every subsequent call, so the kubelet observes a changed value
		// when it periodically refreshes the CSINode allocatable count.
		var calls int32
		hook := drivers.Hooks{
			Post: func(_ context.Context, method string, _ interface{}, reply interface{}, err error) (interface{}, error) {
				if strings.Contains(method, "NodeGetInfo") {
					if r, ok := reply.(*csipbv1.NodeGetInfoResponse); ok && err == nil {
						if atomic.AddInt32(&calls, 1) == 1 {
							r.MaxVolumesPerNode = initialMaxVolumesPerNode
						} else {
							r.MaxVolumesPerNode = updatedMaxVolumesPerNode
						}
						framework.Logf("NodeGetInfo called, setting MaxVolumesPerNode to %d", r.MaxVolumesPerNode)
						return r, nil
					}
				}
				return reply, err
			},
		}

		opts := drivers.CSIMockDriverOpts{
			Embedded:       true,
			RegisterDriver: true,
			Hooks:          hook,
		}
		driver = drivers.InitMockCSIDriver(opts)
		cfg = driver.PrepareTest(ctx, f)
		clientSet = f.ClientSet
		driverName = cfg.GetUniqueDriverName()
		nodeName = cfg.ClientNodeSelection.Name

		updateCSIDriverWithNodeAllocatableUpdatePeriodSeconds(ctx, clientSet, driverName, updatePeriodSeconds)
		err := drivers.WaitForCSIDriverRegistrationOnNode(ctx, nodeName, driverName, clientSet)
		framework.ExpectNoError(err)
	})

	f.It("should observe dynamic changes in CSINode allocatable count", func(ctx context.Context) {
		framework.Logf("Testing dynamic changes in CSINode allocatable count")

		initVal, err := readCSINodeLimit(ctx, clientSet, nodeName, driverName)
		framework.ExpectNoError(err)
		framework.Logf("Initial MaxVolumesPerNode limit: %d", initVal)

		err = wait.PollUntilContextTimeout(ctx, time.Duration(updatePeriodSeconds)*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
			cur, err := readCSINodeLimit(ctx, clientSet, nodeName, driverName)
			if err != nil {
				return false, nil
			}
			return int64(cur) == updatedMaxVolumesPerNode, nil
		})
		framework.ExpectNoError(err, "CSINode allocatable count was not updated to %d in time", updatedMaxVolumesPerNode)
		framework.Logf("SUCCESS: MaxVolumesPerNode limit updated to %d", updatedMaxVolumesPerNode)
	})
})

// updateCSIDriverWithNodeAllocatableUpdatePeriodSeconds sets
// spec.nodeAllocatableUpdatePeriodSeconds on the CSIDriver object, retrying
// until the update is persisted or the timeout expires.
func updateCSIDriverWithNodeAllocatableUpdatePeriodSeconds(ctx context.Context, cs clientset.Interface, driverName string, period int64) {
	err := wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
		obj, err := cs.StorageV1().CSIDrivers().Get(ctx, driverName, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		if obj.Spec.NodeAllocatableUpdatePeriodSeconds != nil && *obj.Spec.NodeAllocatableUpdatePeriodSeconds == period {
			return true, nil
		}
		obj.Spec.NodeAllocatableUpdatePeriodSeconds = &period
		_, err = cs.StorageV1().CSIDrivers().Update(ctx, obj, metav1.UpdateOptions{})
		return err == nil, nil
	})
	framework.ExpectNoError(err, "enabling periodic CSINode allocatable updates failed")
}

// readCSINodeLimit returns the allocatable volume count reported for the given
// driver on the node's CSINode object.
func readCSINodeLimit(ctx context.Context, cs clientset.Interface, node, drv string) (int32, error) {
	c, err := cs.StorageV1().CSINodes().Get(ctx, node, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	for _, d := range c.Spec.Drivers {
		if d.Name == drv && d.Allocatable != nil && d.Allocatable.Count != nil {
			return *d.Allocatable.Count, nil
		}
	}
	return 0, fmt.Errorf("driver %q not present on CSINode", drv)
}


@@ -94,10 +94,9 @@ const (
 // hostpathCSI
 type hostpathCSIDriver struct {
-	driverInfo               storageframework.DriverInfo
-	manifests                []string
-	volumeAttributes         []map[string]string
-	enableDynamicAllocatable bool
+	driverInfo       storageframework.DriverInfo
+	manifests        []string
+	volumeAttributes []map[string]string
 }
 
 func initHostPathCSIDriver(name string, capabilities map[storageframework.Capability]bool, volumeAttributes []map[string]string, manifests ...string) storageframework.TestDriver {
@@ -186,13 +185,6 @@ func InitHostPathCSIDriver() storageframework.TestDriver {
 	)
 }
-
-func InitHostPathCSIDriverWithDynamicAllocatable() storageframework.TestDriver {
-	driver := InitHostPathCSIDriver()
-	hostpathDriver := driver.(*hostpathCSIDriver)
-	hostpathDriver.enableDynamicAllocatable = true
-	return driver
-}
 
 func (h *hostpathCSIDriver) GetDriverInfo() *storageframework.DriverInfo {
 	return &h.driverInfo
 }
@@ -281,16 +273,6 @@ func (h *hostpathCSIDriver) PrepareTest(ctx context.Context, f *framework.Framew
 		NodeName: node.Name,
 	})
 
-	if h.enableDynamicAllocatable {
-		patches = append(patches, utils.PatchCSIOptions{
-			NodeAllocatableUpdatePeriodSeconds: &[]int64{10}[0],
-		})
-		patches = append(patches, utils.PatchCSIOptions{
-			DriverContainerName:      "hostpath",
-			DriverContainerArguments: []string{"--attach-limit=-1"},
-		})
-	}
-
 	// VAC E2E HostPath patch
 	// Enables ModifyVolume support in the hostpath CSI driver, and adds an enabled parameter name
 	patches = append(patches, utils.PatchCSIOptions{


@@ -1,124 +0,0 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	"k8s.io/kubernetes/test/e2e/storage/drivers"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

var _ = utils.SIGDescribe("MutableCSINodeAllocatableCount", framework.WithFeatureGate(features.MutableCSINodeAllocatableCount), func() {
	f := framework.NewDefaultFramework("dynamic-allocatable")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	var (
		driver     storageframework.DynamicPVTestDriver
		testConfig *storageframework.PerTestConfig
	)

	ginkgo.BeforeEach(func(ctx context.Context) {
		driver = drivers.InitHostPathCSIDriverWithDynamicAllocatable().(storageframework.DynamicPVTestDriver)
		testConfig = driver.PrepareTest(ctx, f)
	})

	f.It("should observe dynamic changes in CSINode allocatable count", func(ctx context.Context) {
		cs := f.ClientSet

		ginkgo.By("Retrieving node for testing")
		nodeName := testConfig.ClientNodeSelection.Name
		if nodeName == "" {
			node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs)
			framework.ExpectNoError(err)
			nodeName = node.Name
		}

		ginkgo.By("Retrieving driver details")
		sc := driver.GetDynamicProvisionStorageClass(ctx, testConfig, "")
		driverName := sc.Provisioner

		ginkgo.By("Retrieving initial allocatable value")
		initialLimit, err := getCSINodeLimits(ctx, cs, testConfig, nodeName, driverName)
		framework.ExpectNoError(err, "error retrieving initial CSINode limit")
		framework.Logf("Initial allocatable count: %d", initialLimit)

		ginkgo.By("Polling until value changes")
		err = wait.PollUntilContextTimeout(ctx, 10*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
			currentLimit, err := getCSINodeLimits(ctx, cs, testConfig, nodeName, driverName)
			if err != nil {
				framework.Logf("Error getting CSINode limits: %v", err)
				return false, nil
			}
			framework.Logf("Current allocatable count: %d", currentLimit)
			if currentLimit != initialLimit {
				framework.Logf("Detected change in allocatable count from %d to %d", initialLimit, currentLimit)
				return true, nil
			}
			return false, nil
		})
		framework.ExpectNoError(err, "CSINode allocatable count did not change within timeout")
		framework.Logf("Successfully verified that CSINode allocatable count was updated")
	})
})

func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName, driverName string) (int, error) {
	var limit int
	err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
		csiNode, err := cs.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{})
		if err != nil {
			framework.Logf("%s", err)
			return false, nil
		}
		var csiDriver *storagev1.CSINodeDriver
		for i, c := range csiNode.Spec.Drivers {
			if c.Name == driverName || c.Name == config.GetUniqueDriverName() {
				csiDriver = &csiNode.Spec.Drivers[i]
				break
			}
		}
		if csiDriver == nil {
			framework.Logf("CSINodeInfo does not have driver %s yet", driverName)
			return false, nil
		}
		if csiDriver.Allocatable == nil {
			return false, fmt.Errorf("CSINodeInfo does not have Allocatable for driver %s", driverName)
		}
		if csiDriver.Allocatable.Count == nil {
			return false, fmt.Errorf("CSINodeInfo does not have Allocatable.Count for driver %s", driverName)
		}
		limit = int(*csiDriver.Allocatable.Count)
		return true, nil
	})
	if err != nil {
		return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverName, err)
	}
	return limit, nil
}


@@ -161,9 +161,6 @@ func PatchCSIDeployment(f *e2eframework.Framework, o PatchCSIOptions, object int
 		if o.SELinuxMount != nil {
 			object.Spec.SELinuxMount = o.SELinuxMount
 		}
-		if o.NodeAllocatableUpdatePeriodSeconds != nil {
-			object.Spec.NodeAllocatableUpdatePeriodSeconds = o.NodeAllocatableUpdatePeriodSeconds
-		}
 	}
 
 	return nil
@@ -227,9 +224,6 @@ type PatchCSIOptions struct {
 	// field *if* the driver deploys a CSIDriver object. Ignored
 	// otherwise.
 	SELinuxMount *bool
-	// If not nil, the value to use for the CSIDriver.Spec.NodeAllocatableUpdatePeriodSeconds
-	// field *if* the driver deploys a CSIDriver object. Ignored otherwise.
-	NodeAllocatableUpdatePeriodSeconds *int64
 	// If not nil, the values will be used for setting feature arguments to
 	// specific sidecar.
 	// Feature is a map - where key is sidecar name such as: