Merge pull request #88059 from msau42/refactor-e2e-node-selection

Refactor e2e node selection

Commit: 4ab8c5393f
@@ -86,10 +86,7 @@ func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.P
 // CreateSecPodWithNodeSelection creates security pod with given claims
 func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
 	pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
-	// Setting node
-	pod.Spec.NodeName = node.Name
-	pod.Spec.NodeSelector = node.Selector
-	pod.Spec.Affinity = node.Affinity
+	SetNodeSelection(pod, node)
 
 	pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
 	if err != nil {
@@ -87,3 +87,19 @@ func SetNodeAffinity(pod *v1.Pod, nodeName string) {
 	SetAffinity(nodeSelection, nodeName)
 	pod.Spec.Affinity = nodeSelection.Affinity
 }
+
+// SetNodeSelection modifies the given pod object with
+// the specified NodeSelection
+func SetNodeSelection(pod *v1.Pod, nodeSelection NodeSelection) {
+	pod.Spec.NodeSelector = nodeSelection.Selector
+	pod.Spec.Affinity = nodeSelection.Affinity
+	// pod.Spec.NodeName should not be set directly because
+	// it will bypass the scheduler, potentially causing
+	// kubelet to Fail the pod immediately if it's out of
+	// resources. Instead, we want the pod to remain
+	// pending in the scheduler until the node has resources
+	// freed up.
+	if nodeSelection.Name != "" {
+		SetNodeAffinity(pod, nodeSelection.Name)
+	}
+}
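For orientation, a minimal sketch (not part of the diff) of how a test can drive the new helper. NodeSelection and SetNodeSelection are the framework pieces shown above; the wrapper function and the example label key below are hypothetical:

package example // hypothetical package, for illustration only

import (
	v1 "k8s.io/api/core/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// scheduleTestPod applies a NodeSelection to a pod spec. The node name is
// expressed as node affinity rather than pod.Spec.NodeName, so the scheduler
// stays in the loop and the pod waits as Pending while the node is full.
func scheduleTestPod(pod *v1.Pod, nodeName string) {
	e2epod.SetNodeSelection(pod, e2epod.NodeSelection{
		Name: nodeName,
		// Optional label constraint; the key/value here are placeholders.
		Selector: map[string]string{"example.io/pool": "storage-e2e"},
	})
}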
@@ -128,12 +128,8 @@ type TestConfig struct {
 	// Wait for the pod to terminate successfully
 	// False indicates that the pod is long running
 	WaitForCompletion bool
-	// ServerNodeName is the spec.nodeName to run server pod on. Default is any node.
-	ServerNodeName string
-	// ClientNodeName is the spec.nodeName to run client pod on. Default is any node.
-	ClientNodeName string
-	// NodeSelector to use in pod spec (server, client and injector pods).
-	NodeSelector map[string]string
+	// ClientNodeSelection restricts where the client pod runs on. Default is any node.
+	ClientNodeSelection e2epod.NodeSelection
 }
 
 // Test contains a volume to mount into a client pod and its
@@ -297,8 +293,6 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
 			},
 			Volumes: volumes,
 			RestartPolicy: restartPolicy,
-			NodeName: config.ServerNodeName,
-			NodeSelector: config.NodeSelector,
 		},
 	}
 
@@ -389,10 +383,9 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix
 			TerminationGracePeriodSeconds: &gracePeriod,
 			SecurityContext: GeneratePodSecurityContext(fsGroup, seLinuxOptions),
 			Volumes: []v1.Volume{},
-			NodeName: config.ClientNodeName,
-			NodeSelector: config.NodeSelector,
 		},
 	}
+	e2epod.SetNodeSelection(clientPod, config.ClientNodeSelection)
 
 	for i, test := range tests {
 		volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
@@ -117,7 +117,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		m.provisioner = config.GetUniqueDriverName()
 
 		if tp.nodeSelectorKey != "" {
-			framework.AddOrUpdateLabelOnNode(m.cs, m.config.ClientNodeName, tp.nodeSelectorKey, f.Namespace.Name)
+			framework.AddOrUpdateLabelOnNode(m.cs, m.config.ClientNodeSelection.Name, tp.nodeSelectorKey, f.Namespace.Name)
 			m.nodeLabel = map[string]string{
 				tp.nodeSelectorKey: f.Namespace.Name,
 			}
@@ -138,7 +138,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
 			sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
 		}
-		nodeName := m.config.ClientNodeName
+		nodeName := m.config.ClientNodeSelection.Name
 		scTest := testsuites.StorageClassTest{
 			Name: m.driver.GetDriverInfo().Name,
 			Provisioner: sc.Provisioner,
@@ -184,7 +184,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 	}
 
 	createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
-		nodeName := m.config.ClientNodeName
+		nodeName := m.config.ClientNodeSelection.Name
 		nodeSelection := e2epod.NodeSelection{
 			Name: nodeName,
 		}
@@ -230,7 +230,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		}
 
 		if len(m.nodeLabel) > 0 && len(m.tp.nodeSelectorKey) > 0 {
-			framework.RemoveLabelOffNode(m.cs, m.config.ClientNodeName, m.tp.nodeSelectorKey)
+			framework.RemoveLabelOffNode(m.cs, m.config.ClientNodeSelection.Name, m.tp.nodeSelectorKey)
 		}
 
 		err := utilerrors.NewAggregate(errs)
@@ -274,7 +274,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 
 		ginkgo.By("Checking if VolumeAttachment was created for the pod")
 		handle := getVolumeHandle(m.cs, claim)
-		attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeName)))
+		attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeSelection.Name)))
 		attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
 		_, err = m.cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
 		if err != nil {
@@ -390,7 +390,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		nodeSelectorKey := fmt.Sprintf("attach-limit-csi-%s", f.Namespace.Name)
 		init(testParameters{nodeSelectorKey: nodeSelectorKey, attachLimit: 2})
 		defer cleanup()
-		nodeName := m.config.ClientNodeName
+		nodeName := m.config.ClientNodeSelection.Name
 		driverName := m.config.GetUniqueDriverName()
 
 		csiNodeAttachLimit, err := checkCSINodeForLimits(nodeName, driverName, m.cs)
@@ -53,6 +53,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -175,10 +176,10 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
 	node, err := e2enode.GetRandomReadySchedulableNode(cs)
 	framework.ExpectNoError(err)
 	config := &testsuites.PerTestConfig{
 		Driver: h,
 		Prefix: "hostpath",
 		Framework: f,
-		ClientNodeName: node.Name,
+		ClientNodeSelection: e2epod.NodeSelection{Name: node.Name},
 	}
 
 	o := utils.PatchCSIOptions{
@@ -299,10 +300,10 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
 	node, err := e2enode.GetRandomReadySchedulableNode(cs)
 	framework.ExpectNoError(err)
 	config := &testsuites.PerTestConfig{
 		Driver: m,
 		Prefix: "mock",
 		Framework: f,
-		ClientNodeName: node.Name,
+		ClientNodeSelection: e2epod.NodeSelection{Name: node.Name},
 	}
 
 	containerArgs := []string{"--name=csi-mock-" + f.UniqueName}
@@ -324,7 +325,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
 		DriverContainerName: "mock",
 		DriverContainerArguments: containerArgs,
 		ProvisionerContainerName: "csi-provisioner",
-		NodeName: config.ClientNodeName,
+		NodeName: node.Name,
 		PodInfo: m.podInfo,
 		CanAttach: &m.attachable,
 		VolumeLifecycleModes: &[]storagev1beta1.VolumeLifecycleMode{
@@ -442,7 +442,7 @@ func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
 
 	c, serverPod, serverIP, iqn := newISCSIServer(cs, ns.Name)
 	config.ServerConfig = &c
-	config.ClientNodeName = c.ClientNodeName
+	config.ClientNodeSelection = c.ClientNodeSelection
 	return &iSCSIVolume{
 		serverPod: serverPod,
 		serverIP: serverIP,
@@ -473,7 +473,7 @@ func newISCSIServer(cs clientset.Interface, namespace string) (config volume.Tes
 	}
 	pod, ip = volume.CreateStorageServer(cs, config)
 	// Make sure the client runs on the same node as server so we don't need to open any firewalls.
-	config.ClientNodeName = pod.Spec.NodeName
+	config.ClientNodeSelection = e2epod.NodeSelection{Name: pod.Spec.NodeName}
 	return config, pod, ip, iqn
 }
 
@@ -820,7 +820,7 @@ func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType
 	// pods should be scheduled on the node
 	node, err := e2enode.GetRandomReadySchedulableNode(cs)
 	framework.ExpectNoError(err)
-	config.ClientNodeName = node.Name
+	config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}
 	return nil
 }
 
@@ -902,7 +902,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v
 	// pods should be scheduled on the node
 	node, err := e2enode.GetRandomReadySchedulableNode(cs)
 	framework.ExpectNoError(err)
-	config.ClientNodeName = node.Name
+	config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}
 
 	cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", sourcePath, sourcePath, targetPath)
 	privileged := true
@@ -1317,8 +1317,10 @@ func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestCo
 		Framework: f,
 	}
 	if framework.NodeOSDistroIs("windows") {
-		config.ClientNodeSelector = map[string]string{
-			"beta.kubernetes.io/os": "windows",
+		config.ClientNodeSelection = e2epod.NodeSelection{
+			Selector: map[string]string{
+				"beta.kubernetes.io/os": "windows",
+			},
 		}
 	}
 	return config, func() {}
@@ -1329,8 +1331,10 @@ func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
 	if volType == testpatterns.InlineVolume {
 		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
 		// so pods should be also scheduled there.
-		config.ClientNodeSelector = map[string]string{
-			v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
+		config.ClientNodeSelection = e2epod.NodeSelection{
+			Selector: map[string]string{
+				v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
+			},
 		}
 	}
 	ginkgo.By("creating a test gce pd volume")
@@ -1710,8 +1714,10 @@ func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf
 		Framework: f,
 	}
 	if framework.NodeOSDistroIs("windows") {
-		config.ClientNodeSelector = map[string]string{
-			"beta.kubernetes.io/os": "windows",
+		config.ClientNodeSelection = e2epod.NodeSelection{
+			Selector: map[string]string{
+				"beta.kubernetes.io/os": "windows",
+			},
 		}
 	}
 	return config, func() {}
@@ -1721,8 +1727,10 @@ func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
 	if volType == testpatterns.InlineVolume {
 		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
 		// so pods should be also scheduled there.
-		config.ClientNodeSelector = map[string]string{
-			v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
+		config.ClientNodeSelection = e2epod.NodeSelection{
+			Selector: map[string]string{
+				v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
+			},
 		}
 	}
 	ginkgo.By("creating a test aws volume")
@@ -1858,10 +1866,10 @@ func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestCo
 	}
 
 	return &testsuites.PerTestConfig{
 		Driver: l,
 		Prefix: "local",
 		Framework: f,
-		ClientNodeName: l.node.Name,
+		ClientNodeSelection: e2epod.NodeSelection{Name: l.node.Name},
 	}, func() {
 		l.hostExec.Cleanup()
 	}
@@ -1872,7 +1880,7 @@ func (l *localDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
 	case testpatterns.PreprovisionedPV:
 		node := l.node
 		// assign this to schedule pod on this node
-		config.ClientNodeName = node.Name
+		config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}
 		return &localVolume{
 			ltrMgr: l.ltrMgr,
 			ltr: l.ltrMgr.Create(node, l.volumeType, nil),
test/e2e/storage/external/BUILD (vendored, 1 change)
@@ -15,6 +15,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/config:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/skipper:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
         "//test/e2e/storage/testpatterns:go_default_library",
test/e2e/storage/external/external.go (vendored, 9 changes)
@@ -32,6 +32,7 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/config"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -330,10 +331,10 @@ func (d *driverDefinition) GetCSIDriverName(config *testsuites.PerTestConfig) st
 
 func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	config := &testsuites.PerTestConfig{
 		Driver: d,
 		Prefix: "external",
 		Framework: f,
-		ClientNodeName: d.ClientNodeName,
+		ClientNodeSelection: e2epod.NodeSelection{Name: d.ClientNodeName},
 	}
 	return config, func() {}
 }
@@ -29,6 +29,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -177,9 +178,9 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
 		framework.ExpectNoError(err)
 		config = volume.TestConfig{
 			Namespace: ns.Name,
 			Prefix: "flex",
-			ClientNodeName: node.Name,
+			ClientNodeSelection: e2epod.NodeSelection{Name: node.Name},
 		}
 		suffix = ns.Name
 	})
@@ -988,7 +988,8 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume,
 	if pod == nil {
 		return
 	}
-	pod.Spec.NodeName = nodeName
+
+	e2epod.SetNodeAffinity(pod, nodeName)
 	return
 }
 
@@ -429,10 +429,9 @@ func convertTestConfig(in *PerTestConfig) volume.TestConfig {
 	}
 
 	return volume.TestConfig{
 		Namespace: in.Framework.Namespace.Name,
 		Prefix: in.Prefix,
-		ClientNodeName: in.ClientNodeName,
-		NodeSelector: in.ClientNodeSelector,
+		ClientNodeSelection: in.ClientNodeSelection,
 	}
 }
 
@@ -160,7 +160,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
 			pvcs = append(pvcs, l.resource.Pvc)
 		}
 		ginkgo.By("Creating a pod with pvc")
-		l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+		l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
 		framework.ExpectNoError(err, "While creating pods for kubelet restart test")
 
 		if pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil {
@@ -101,7 +101,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
 			Client: l.config.Framework.ClientSet,
 			Namespace: f.Namespace.Name,
 			DriverName: eDriver.GetCSIDriverName(l.config),
-			Node: e2epod.NodeSelection{Name: l.config.ClientNodeName},
+			Node: l.config.ClientNodeSelection,
 			GetVolume: func(volumeNumber int) (map[string]string, bool, bool) {
 				return eDriver.GetVolume(l.config, volumeNumber)
 			},
@@ -291,9 +291,6 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
 			},
 		},
 		Spec: v1.PodSpec{
-			NodeName: node.Name,
-			NodeSelector: node.Selector,
-			Affinity: node.Affinity,
 			Containers: []v1.Container{
 				{
 					Name: "csi-volume-tester",
@@ -304,6 +301,7 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
 			RestartPolicy: v1.RestartPolicyNever,
 		},
 	}
+	e2epod.SetNodeSelection(pod, node)
 
 	for i, csiVolume := range csiVolumes {
 		name := fmt.Sprintf("my-volume-%d", i)
@@ -150,7 +150,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		}
 
 		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
-			e2epod.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */)
+			l.config.ClientNodeSelection, pvcs, true /* sameNode */)
 	})
 
 	// This tests below configuration:
@@ -178,14 +178,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		if len(nodes.Items) < 2 {
 			e2eskipper.Skipf("Number of available nodes is less than 2 - skipping")
 		}
-		if l.config.ClientNodeName != "" {
+		if l.config.ClientNodeSelection.Name != "" {
 			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
 		}
 		// For multi-node tests there must be enough nodes with the same toopology to schedule the pods
-		nodeSelection := e2epod.NodeSelection{Name: l.config.ClientNodeName}
 		topologyKeys := dInfo.TopologyKeys
 		if len(topologyKeys) != 0 {
-			if err = ensureTopologyRequirements(&nodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
+			if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
 				framework.Failf("Error setting topology requirements: %v", err)
 			}
 		}
@@ -201,7 +200,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		}
 
 		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
-			nodeSelection, pvcs, false /* sameNode */)
+			l.config.ClientNodeSelection, pvcs, false /* sameNode */)
 	})
 
 	// This tests below configuration (only <block, filesystem> pattern is tested):
@@ -240,7 +239,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		}
 
 		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
-			e2epod.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */)
+			l.config.ClientNodeSelection, pvcs, true /* sameNode */)
 	})
 
 	// This tests below configuration (only <block, filesystem> pattern is tested):
@@ -272,14 +271,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		if len(nodes.Items) < 2 {
 			e2eskipper.Skipf("Number of available nodes is less than 2 - skipping")
 		}
-		if l.config.ClientNodeName != "" {
+		if l.config.ClientNodeSelection.Name != "" {
 			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
 		}
 		// For multi-node tests there must be enough nodes with the same toopology to schedule the pods
-		nodeSelection := e2epod.NodeSelection{Name: l.config.ClientNodeName}
 		topologyKeys := dInfo.TopologyKeys
 		if len(topologyKeys) != 0 {
-			if err = ensureTopologyRequirements(&nodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
+			if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
 				framework.Failf("Error setting topology requirements: %v", err)
 			}
 		}
@@ -300,7 +298,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		}
 
 		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
-			nodeSelection, pvcs, false /* sameNode */)
+			l.config.ClientNodeSelection, pvcs, false /* sameNode */)
 	})
 
 	// This tests below configuration:
@@ -325,7 +323,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 
 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			e2epod.NodeSelection{Name: l.config.ClientNodeName}, resource.Pvc, numPods, true /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */)
 	})
 
 	// This tests below configuration:
@@ -349,14 +347,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		if len(nodes.Items) < numPods {
 			e2eskipper.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", numPods))
 		}
-		if l.config.ClientNodeName != "" {
+		if l.config.ClientNodeSelection.Name != "" {
 			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
 		}
 		// For multi-node tests there must be enough nodes with the same toopology to schedule the pods
-		nodeSelection := e2epod.NodeSelection{Name: l.config.ClientNodeName}
 		topologyKeys := dInfo.TopologyKeys
 		if len(topologyKeys) != 0 {
-			if err = ensureTopologyRequirements(&nodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
+			if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
 				framework.Failf("Error setting topology requirements: %v", err)
 			}
 		}
@@ -368,7 +365,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 
 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			nodeSelection, resource.Pvc, numPods, false /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */)
 	})
 }
 
@@ -181,7 +181,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
 
 		l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
 		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
-			PVWriteReadSingleNodeCheck(l.cs, claim, e2epod.NodeSelection{Name: l.config.ClientNodeName})
+			PVWriteReadSingleNodeCheck(l.cs, claim, l.config.ClientNodeSelection)
 		}
 		l.testCase.TestDynamicProvisioning()
 	})
@@ -201,14 +201,14 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
 
 		dc := l.config.Framework.DynamicClient
 		vsc := sDriver.GetSnapshotClass(l.config)
-		dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(e2epod.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
+		dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(l.config.ClientNodeSelection, l.cs, dc, l.pvc, l.sc, vsc)
 		defer cleanupFunc()
 
 		l.pvc.Spec.DataSource = dataSource
 		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
 			ginkgo.By("checking whether the created volume has the pre-populated data")
 			command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
-			RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, e2epod.NodeSelection{Name: l.config.ClientNodeName})
+			RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, l.config.ClientNodeSelection)
 		}
 		l.testCase.TestDynamicProvisioning()
 	})
@@ -221,14 +221,14 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
 		defer cleanup()
 
 		dc := l.config.Framework.DynamicClient
-		dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(e2epod.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.sourcePVC, l.sc)
+		dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(l.config.ClientNodeSelection, l.cs, dc, l.sourcePVC, l.sc)
 		defer dataSourceCleanup()
 
 		l.pvc.Spec.DataSource = dataSource
 		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
 			ginkgo.By("checking whether the created volume has the pre-populated data")
 			command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
-			RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-datasource-tester", command, e2epod.NodeSelection{Name: l.config.ClientNodeName})
+			RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-datasource-tester", command, l.config.ClientNodeSelection)
 		}
 		l.testCase.TestDynamicProvisioning()
 	})
@@ -567,9 +567,6 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
 			},
 		},
 		Spec: v1.PodSpec{
-			NodeName: node.Name,
-			NodeSelector: node.Selector,
-			Affinity: node.Affinity,
 			Containers: []v1.Container{
 				{
 					Name: "volume-tester",
@@ -598,6 +595,7 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
 		},
 	}
 
+	e2epod.SetNodeSelection(pod, node)
 	pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "Failed to create pod: %v", err)
 	return pod
@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
@@ -152,7 +151,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
 
 		ginkgo.By("starting a pod to use the claim")
 		command := "echo 'hello world' > /mnt/test/data"
-		pod := StartInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, e2epod.NodeSelection{Name: config.ClientNodeName})
+		pod := StartInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
 		defer StopPod(cs, pod)
 
 		err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
@@ -151,12 +151,10 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
 
 		subPath := f.Namespace.Name
 		l.pod = SubpathTestPod(f, subPath, string(volType), l.resource.VolSource, true)
-		l.pod.Spec.NodeName = l.config.ClientNodeName
-		l.pod.Spec.NodeSelector = l.config.ClientNodeSelector
+		e2epod.SetNodeSelection(l.pod, l.config.ClientNodeSelection)
 
 		l.formatPod = volumeFormatPod(f, l.resource.VolSource)
-		l.formatPod.Spec.NodeName = l.config.ClientNodeName
-		l.formatPod.Spec.NodeSelector = l.config.ClientNodeSelector
+		e2epod.SetNodeSelection(l.formatPod, l.config.ClientNodeSelection)
 
 		l.subPathDir = filepath.Join(volumePath, subPath)
 		l.filePathInSubpath = filepath.Join(volumePath, fileName)
@@ -22,6 +22,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )
@@ -205,16 +206,10 @@ type PerTestConfig struct {
 	// The framework instance allocated for the current test.
 	Framework *framework.Framework
 
-	// If non-empty, then pods using a volume will be scheduled
-	// onto the node with this name. Otherwise Kubernetes will
+	// If non-empty, Pods using a volume will be scheduled
+	// according to the NodeSelection. Otherwise Kubernetes will
 	// pick a node.
-	ClientNodeName string
-
-	// Some tests also support scheduling pods onto nodes with
-	// these label/value pairs. As not all tests use this field,
-	// a driver that absolutely needs the pods on a specific
-	// node must use ClientNodeName.
-	ClientNodeSelector map[string]string
+	ClientNodeSelection e2epod.NodeSelection
 
 	// Some test drivers initialize a storage server. This is
 	// the configuration that then has to be used to run tests.
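To make the new PerTestConfig field concrete, here is a small sketch (not part of the diff) of how a driver can populate ClientNodeSelection; the package, function name, and prefix are hypothetical, while the types and the Name/Selector usage follow the hunks earlier in this diff (hostpath/local drivers pin by Name, the GCE PD and AWS drivers restrict by Selector):

package example // hypothetical package, for illustration only

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// prepareConfig pins client pods to a single node by name. A driver that only
// needs a class of nodes would instead set the Selector field, e.g.
// e2epod.NodeSelection{Selector: map[string]string{"beta.kubernetes.io/os": "windows"}}.
func prepareConfig(d testsuites.TestDriver, f *framework.Framework, nodeName string) *testsuites.PerTestConfig {
	return &testsuites.PerTestConfig{
		Driver:              d,
		Prefix:              "example", // hypothetical prefix
		Framework:           f,
		ClientNodeSelection: e2epod.NodeSelection{Name: nodeName},
	}
}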
@@ -166,7 +166,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
 
 		var err error
 		ginkgo.By("Creating a pod with dynamically provisioned volume")
-		l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+		l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
 		defer func() {
 			err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
 			framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@@ -203,7 +203,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
 		l.resource.Pvc = npvc
 
 		ginkgo.By("Creating a new pod with same volume")
-		l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+		l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
 		defer func() {
 			err = e2epod.DeletePodWithWait(f.ClientSet, l.pod2)
 			framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
@@ -224,7 +224,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
 
 		var err error
 		ginkgo.By("Creating a pod with dynamically provisioned volume")
-		l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
+		l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
 		defer func() {
 			err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
 			framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@@ -183,7 +183,7 @@ func createFileSizes(maxFileSize int64) []int64 {
 func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
 	var gracePeriod int64 = 1
 	volName := fmt.Sprintf("io-volume-%s", config.Namespace)
-	return &v1.Pod{
+	pod := &v1.Pod{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "Pod",
 			APIVersion: "v1",
@@ -238,10 +238,11 @@ func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSourc
 				},
 			},
 			RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails
-			NodeName: config.ClientNodeName,
-			NodeSelector: config.NodeSelector,
 		},
 	}
+
+	e2epod.SetNodeSelection(pod, config.ClientNodeSelection)
+	return pod
 }
 
 // Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file.
|
@ -128,7 +128,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
|||||||
ginkgo.By("Picking a node")
|
ginkgo.By("Picking a node")
|
||||||
// Some CSI drivers are deployed to a single node (e.g csi-hostpath),
|
// Some CSI drivers are deployed to a single node (e.g csi-hostpath),
|
||||||
// so we use that node instead of picking a random one.
|
// so we use that node instead of picking a random one.
|
||||||
nodeName := l.config.ClientNodeName
|
nodeName := l.config.ClientNodeSelection.Name
|
||||||
if nodeName == "" {
|
if nodeName == "" {
|
||||||
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
|
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
@@ -215,7 +215,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
 			ginkgo.By("Creating pod")
 			pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
 			// Setting node
-			pod.Spec.NodeName = l.config.ClientNodeName
+			e2epod.SetNodeSelection(pod, l.config.ClientNodeSelection)
 			pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
 			framework.ExpectNoError(err, "Failed to create pod")
 			defer func() {
@@ -245,10 +245,9 @@ func testScriptInPod(
 				},
 			},
 			RestartPolicy: v1.RestartPolicyNever,
-			NodeSelector: config.ClientNodeSelector,
-			NodeName: config.ClientNodeName,
 		},
 	}
+	e2epod.SetNodeSelection(pod, config.ClientNodeSelection)
 	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
 	f.TestContainerOutput("exec-volume-test", pod, 0, []string{fileName})
 