Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-10-30 21:30:16 +00:00)

Commit: [e2e] move test/e2e/framework/volume_util.go -> test/e2e/framework/volume/fixtures.go
This commit is contained in:
		| @@ -78,6 +78,7 @@ go_library( | ||||
|         "//staging/src/k8s.io/client-go/tools/watch:go_default_library", | ||||
|         "//test/e2e/framework:go_default_library", | ||||
|         "//test/e2e/framework/replicaset:go_default_library", | ||||
|         "//test/e2e/framework/volume:go_default_library", | ||||
|         "//test/utils:go_default_library", | ||||
|         "//test/utils/image:go_default_library", | ||||
|         "//vendor/github.com/onsi/ginkgo:go_default_library", | ||||
|   | ||||
| @@ -46,6 +46,7 @@ import ( | ||||
| 	"k8s.io/api/core/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
|  | ||||
| 	. "github.com/onsi/ginkgo" | ||||
| 	. "github.com/onsi/gomega" | ||||
| @@ -73,10 +74,10 @@ var _ = Describe("[sig-storage] GCP Volumes", func() { | ||||
| 	//////////////////////////////////////////////////////////////////////// | ||||
| 	Describe("NFSv4", func() { | ||||
| 		It("should be mountable for NFSv4", func() { | ||||
| 			config, _, serverIP := framework.NewNFSServer(c, namespace.Name, []string{}) | ||||
| 			defer framework.VolumeTestCleanup(f, config) | ||||
| 			config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{}) | ||||
| 			defer volume.TestCleanup(f, config) | ||||
|  | ||||
| 			tests := []framework.VolumeTest{ | ||||
| 			tests := []volume.Test{ | ||||
| 				{ | ||||
| 					Volume: v1.VolumeSource{ | ||||
| 						NFS: &v1.NFSVolumeSource{ | ||||
| @@ -91,16 +92,16 @@ var _ = Describe("[sig-storage] GCP Volumes", func() { | ||||
| 			} | ||||
|  | ||||
| 			// Must match content of test/images/volumes-tester/nfs/index.html | ||||
| 			framework.TestVolumeClient(c, config, nil, "" /* fsType */, tests) | ||||
| 			volume.TestVolumeClient(c, config, nil, "" /* fsType */, tests) | ||||
| 		}) | ||||
| 	}) | ||||
|  | ||||
| 	Describe("NFSv3", func() { | ||||
| 		It("should be mountable for NFSv3", func() { | ||||
| 			config, _, serverIP := framework.NewNFSServer(c, namespace.Name, []string{}) | ||||
| 			defer framework.VolumeTestCleanup(f, config) | ||||
| 			config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{}) | ||||
| 			defer volume.TestCleanup(f, config) | ||||
|  | ||||
| 			tests := []framework.VolumeTest{ | ||||
| 			tests := []volume.Test{ | ||||
| 				{ | ||||
| 					Volume: v1.VolumeSource{ | ||||
| 						NFS: &v1.NFSVolumeSource{ | ||||
| @@ -114,7 +115,7 @@ var _ = Describe("[sig-storage] GCP Volumes", func() { | ||||
| 				}, | ||||
| 			} | ||||
| 			// Must match content of test/images/volume-tester/nfs/index.html | ||||
| 			framework.TestVolumeClient(c, config, nil, "" /* fsType */, tests) | ||||
| 			volume.TestVolumeClient(c, config, nil, "" /* fsType */, tests) | ||||
| 		}) | ||||
| 	}) | ||||
|  | ||||
| @@ -124,15 +125,15 @@ var _ = Describe("[sig-storage] GCP Volumes", func() { | ||||
| 	Describe("GlusterFS", func() { | ||||
| 		It("should be mountable", func() { | ||||
| 			// create gluster server and endpoints | ||||
| 			config, _, _ := framework.NewGlusterfsServer(c, namespace.Name) | ||||
| 			config, _, _ := volume.NewGlusterfsServer(c, namespace.Name) | ||||
| 			name := config.Prefix + "-server" | ||||
| 			defer func() { | ||||
| 				framework.VolumeTestCleanup(f, config) | ||||
| 				volume.TestCleanup(f, config) | ||||
| 				err := c.CoreV1().Endpoints(namespace.Name).Delete(name, nil) | ||||
| 				Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed") | ||||
| 			}() | ||||
|  | ||||
| 			tests := []framework.VolumeTest{ | ||||
| 			tests := []volume.Test{ | ||||
| 				{ | ||||
| 					Volume: v1.VolumeSource{ | ||||
| 						Glusterfs: &v1.GlusterfsVolumeSource{ | ||||
| @@ -147,7 +148,7 @@ var _ = Describe("[sig-storage] GCP Volumes", func() { | ||||
| 					ExpectedContent: "Hello from GlusterFS!", | ||||
| 				}, | ||||
| 			} | ||||
| 			framework.TestVolumeClient(c, config, nil, "" /* fsType */, tests) | ||||
| 			volume.TestVolumeClient(c, config, nil, "" /* fsType */, tests) | ||||
| 		}) | ||||
| 	}) | ||||
| }) | ||||
|   | ||||
| @@ -34,7 +34,6 @@ go_library( | ||||
|         "test_context.go", | ||||
|         "upgrade_util.go", | ||||
|         "util.go", | ||||
|         "volume_util.go", | ||||
|     ], | ||||
|     importpath = "k8s.io/kubernetes/test/e2e/framework", | ||||
|     deps = [ | ||||
| @@ -166,6 +165,7 @@ filegroup( | ||||
|         "//test/e2e/framework/testfiles:all-srcs", | ||||
|         "//test/e2e/framework/timer:all-srcs", | ||||
|         "//test/e2e/framework/viperconfig:all-srcs", | ||||
|         "//test/e2e/framework/volume:all-srcs", | ||||
|     ], | ||||
|     tags = ["automanaged"], | ||||
| ) | ||||
|   | ||||
New file (33 lines): test/e2e/framework/volume/BUILD
							| @@ -0,0 +1,33 @@ | ||||
| package(default_visibility = ["//visibility:public"]) | ||||
|  | ||||
| load("@io_bazel_rules_go//go:def.bzl", "go_library") | ||||
|  | ||||
| go_library( | ||||
|     name = "go_default_library", | ||||
|     srcs = ["fixtures.go"], | ||||
|     importpath = "k8s.io/kubernetes/test/e2e/framework/volume", | ||||
|     deps = [ | ||||
|         "//staging/src/k8s.io/api/core/v1:go_default_library", | ||||
|         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", | ||||
|         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", | ||||
|         "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", | ||||
|         "//staging/src/k8s.io/client-go/kubernetes:go_default_library", | ||||
|         "//test/e2e/framework:go_default_library", | ||||
|         "//test/utils/image:go_default_library", | ||||
|         "//vendor/github.com/onsi/ginkgo:go_default_library", | ||||
|         "//vendor/github.com/onsi/gomega:go_default_library", | ||||
|     ], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "package-srcs", | ||||
|     srcs = glob(["**"]), | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:private"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "all-srcs", | ||||
|     srcs = [":package-srcs"], | ||||
|     tags = ["automanaged"], | ||||
| ) | ||||
| @@ -37,7 +37,7 @@ limitations under the License. | ||||
|  * and checks, that Kubernetes can use it as a volume. | ||||
|  */ | ||||
| 
 | ||||
| package framework | ||||
| package volume | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| @@ -51,6 +51,7 @@ import ( | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/util/rand" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	imageutils "k8s.io/kubernetes/test/utils/image" | ||||
| 
 | ||||
| 	"github.com/onsi/ginkgo" | ||||
| @@ -86,10 +87,10 @@ const ( | ||||
| 	iSCSIIQNTemplate = "iqn.2003-01.io.k8s:e2e.%s" | ||||
| ) | ||||
| 
 | ||||
| // VolumeTestConfig is a struct for configuration of one tests. The test consist of: | ||||
| // TestConfig is a struct for configuration of one tests. The test consist of: | ||||
| // - server pod - runs serverImage, exports ports[] | ||||
| // - client pod - does not need any special configuration | ||||
| type VolumeTestConfig struct { | ||||
| type TestConfig struct { | ||||
| 	Namespace string | ||||
| 	// Prefix of all pods. Typically the test name. | ||||
| 	Prefix string | ||||
| @@ -120,17 +121,17 @@ type VolumeTestConfig struct { | ||||
| 	NodeSelector map[string]string | ||||
| } | ||||
| 
 | ||||
| // VolumeTest contains a volume to mount into a client pod and its | ||||
| // Test contains a volume to mount into a client pod and its | ||||
| // expected content. | ||||
| type VolumeTest struct { | ||||
| type Test struct { | ||||
| 	Volume          v1.VolumeSource | ||||
| 	File            string | ||||
| 	ExpectedContent string | ||||
| } | ||||
| 
 | ||||
| // NewNFSServer is a NFS-specific wrapper for CreateStorageServer. | ||||
| func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) { | ||||
| 	config = VolumeTestConfig{ | ||||
| func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, ip string) { | ||||
| 	config = TestConfig{ | ||||
| 		Namespace:          namespace, | ||||
| 		Prefix:             "nfs", | ||||
| 		ServerImage:        imageutils.GetE2EImage(imageutils.VolumeNFSServer), | ||||
| @@ -146,8 +147,8 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf | ||||
| } | ||||
| 
 | ||||
| // NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object. | ||||
| func NewGlusterfsServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) { | ||||
| 	config = VolumeTestConfig{ | ||||
| func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip string) { | ||||
| 	config = TestConfig{ | ||||
| 		Namespace:   namespace, | ||||
| 		Prefix:      "gluster", | ||||
| 		ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer), | ||||
| @@ -182,17 +183,16 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config Volume | ||||
| 		}, | ||||
| 	} | ||||
| 	endpoints, err := cs.CoreV1().Endpoints(namespace).Create(endpoints) | ||||
| 	ExpectNoError(err, "failed to create endpoints for Gluster server") | ||||
| 	framework.ExpectNoError(err, "failed to create endpoints for Gluster server") | ||||
| 
 | ||||
| 	return config, pod, ip | ||||
| } | ||||
| 
 | ||||
| // NewISCSIServer is an iSCSI-specific wrapper for CreateStorageServer. | ||||
| func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip, iqn string) { | ||||
| func NewISCSIServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip, iqn string) { | ||||
| 	// Generate cluster-wide unique IQN | ||||
| 	iqn = fmt.Sprintf(iSCSIIQNTemplate, namespace) | ||||
| 
 | ||||
| 	config = VolumeTestConfig{ | ||||
| 	config = TestConfig{ | ||||
| 		Namespace:   namespace, | ||||
| 		Prefix:      "iscsi", | ||||
| 		ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer), | ||||
| @@ -215,8 +215,8 @@ func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTest | ||||
| } | ||||
| 
 | ||||
| // NewRBDServer is a CephRBD-specific wrapper for CreateStorageServer. | ||||
| func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, secret *v1.Secret, ip string) { | ||||
| 	config = VolumeTestConfig{ | ||||
| func NewRBDServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) { | ||||
| 	config = TestConfig{ | ||||
| 		Namespace:   namespace, | ||||
| 		Prefix:      "rbd", | ||||
| 		ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer), | ||||
| @@ -245,7 +245,7 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo | ||||
| 
 | ||||
| 	secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret) | ||||
| 	if err != nil { | ||||
| 		Failf("Failed to create secrets for Ceph RBD: %v", err) | ||||
| 		framework.Failf("Failed to create secrets for Ceph RBD: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return config, pod, secret, ip | ||||
| @@ -254,19 +254,19 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo | ||||
| // CreateStorageServer is a wrapper for StartVolumeServer(). A storage server config is passed in, and a pod pointer | ||||
| // and ip address string are returned. | ||||
| // Note: Expect() is called so no error is returned. | ||||
| func CreateStorageServer(cs clientset.Interface, config VolumeTestConfig) (pod *v1.Pod, ip string) { | ||||
| func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) { | ||||
| 	pod = StartVolumeServer(cs, config) | ||||
| 	gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil") | ||||
| 	ip = pod.Status.PodIP | ||||
| 	gomega.Expect(len(ip)).NotTo(gomega.BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name)) | ||||
| 	Logf("%s server pod IP address: %s", config.Prefix, ip) | ||||
| 	framework.Logf("%s server pod IP address: %s", config.Prefix, ip) | ||||
| 	return pod, ip | ||||
| } | ||||
| 
 | ||||
| // StartVolumeServer starts a container specified by config.serverImage and exports all | ||||
| // config.serverPorts from it. The returned pod should be used to get the server | ||||
| // IP address and create appropriate VolumeSource. | ||||
| func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod { | ||||
| func StartVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod { | ||||
| 	podClient := client.CoreV1().Pods(config.Namespace) | ||||
| 
 | ||||
| 	portCount := len(config.ServerPorts) | ||||
| @@ -353,85 +353,85 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1. | ||||
| 	// ok if the server pod already exists. TODO: make this controllable by callers | ||||
| 	if err != nil { | ||||
| 		if apierrs.IsAlreadyExists(err) { | ||||
| 			Logf("Ignore \"already-exists\" error, re-get pod...") | ||||
| 			framework.Logf("Ignore \"already-exists\" error, re-get pod...") | ||||
| 			ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName)) | ||||
| 			serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{}) | ||||
| 			ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err) | ||||
| 			framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err) | ||||
| 			pod = serverPod | ||||
| 		} else { | ||||
| 			ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err) | ||||
| 			framework.ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err) | ||||
| 		} | ||||
| 	} | ||||
| 	if config.WaitForCompletion { | ||||
| 		ExpectNoError(WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace)) | ||||
| 		ExpectNoError(podClient.Delete(serverPod.Name, nil)) | ||||
| 		framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace)) | ||||
| 		framework.ExpectNoError(podClient.Delete(serverPod.Name, nil)) | ||||
| 	} else { | ||||
| 		ExpectNoError(WaitForPodRunningInNamespace(client, serverPod)) | ||||
| 		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, serverPod)) | ||||
| 		if pod == nil { | ||||
| 			ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName)) | ||||
| 			pod, err = podClient.Get(serverPodName, metav1.GetOptions{}) | ||||
| 			ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err) | ||||
| 			framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err) | ||||
| 		} | ||||
| 	} | ||||
| 	if config.ServerReadyMessage != "" { | ||||
| 		_, err := LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout) | ||||
| 		ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err) | ||||
| 		_, err := framework.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout) | ||||
| 		framework.ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err) | ||||
| 	} | ||||
| 	return pod | ||||
| } | ||||
| 
 | ||||
| // CleanUpVolumeServer is a wrapper of cleanup function for volume server without secret created by specific CreateStorageServer function. | ||||
| func CleanUpVolumeServer(f *Framework, serverPod *v1.Pod) { | ||||
| func CleanUpVolumeServer(f *framework.Framework, serverPod *v1.Pod) { | ||||
| 	CleanUpVolumeServerWithSecret(f, serverPod, nil) | ||||
| } | ||||
| 
 | ||||
| // CleanUpVolumeServerWithSecret is a wrapper of cleanup function for volume server with secret created by specific CreateStorageServer function. | ||||
| func CleanUpVolumeServerWithSecret(f *Framework, serverPod *v1.Pod, secret *v1.Secret) { | ||||
| func CleanUpVolumeServerWithSecret(f *framework.Framework, serverPod *v1.Pod, secret *v1.Secret) { | ||||
| 	cs := f.ClientSet | ||||
| 	ns := f.Namespace | ||||
| 
 | ||||
| 	if secret != nil { | ||||
| 		Logf("Deleting server secret %q...", secret.Name) | ||||
| 		framework.Logf("Deleting server secret %q...", secret.Name) | ||||
| 		err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{}) | ||||
| 		if err != nil { | ||||
| 			Logf("Delete secret failed: %v", err) | ||||
| 			framework.Logf("Delete secret failed: %v", err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	Logf("Deleting server pod %q...", serverPod.Name) | ||||
| 	err := DeletePodWithWait(f, cs, serverPod) | ||||
| 	framework.Logf("Deleting server pod %q...", serverPod.Name) | ||||
| 	err := framework.DeletePodWithWait(f, cs, serverPod) | ||||
| 	if err != nil { | ||||
| 		Logf("Server pod delete failed: %v", err) | ||||
| 		framework.Logf("Server pod delete failed: %v", err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // VolumeTestCleanup cleans both server and client pods. | ||||
| func VolumeTestCleanup(f *Framework, config VolumeTestConfig) { | ||||
| // TestCleanup cleans both server and client pods. | ||||
| func TestCleanup(f *framework.Framework, config TestConfig) { | ||||
| 	ginkgo.By(fmt.Sprint("cleaning the environment after ", config.Prefix)) | ||||
| 
 | ||||
| 	defer ginkgo.GinkgoRecover() | ||||
| 
 | ||||
| 	cs := f.ClientSet | ||||
| 
 | ||||
| 	err := DeletePodWithWaitByName(f, cs, config.Prefix+"-client", config.Namespace) | ||||
| 	err := framework.DeletePodWithWaitByName(f, cs, config.Prefix+"-client", config.Namespace) | ||||
| 	gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-client", config.Namespace) | ||||
| 
 | ||||
| 	if config.ServerImage != "" { | ||||
| 		err := DeletePodWithWaitByName(f, cs, config.Prefix+"-server", config.Namespace) | ||||
| 		err := framework.DeletePodWithWaitByName(f, cs, config.Prefix+"-server", config.Namespace) | ||||
| 		gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // TestVolumeClient start a client pod using given VolumeSource (exported by startVolumeServer()) | ||||
| // and check that the pod sees expected data, e.g. from the server pod. | ||||
| // Multiple VolumeTests can be specified to mount multiple volumes to a single | ||||
| // Multiple Tests can be specified to mount multiple volumes to a single | ||||
| // pod. | ||||
| func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, fsType string, tests []VolumeTest) { | ||||
| func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *int64, fsType string, tests []Test) { | ||||
| 	ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-client")) | ||||
| 	var gracePeriod int64 = 1 | ||||
| 	var command string | ||||
| 
 | ||||
| 	if !NodeOSDistroIs("windows") { | ||||
| 	if !framework.NodeOSDistroIs("windows") { | ||||
| 		command = "while true ; do cat /opt/0/index.html ; sleep 2 ; ls -altrh /opt/  ; sleep 2 ; done " | ||||
| 	} else { | ||||
| 		command = "while(1) {cat /opt/0/index.html ; sleep 2 ; ls /opt/; sleep 2}" | ||||
| @@ -452,7 +452,7 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro | ||||
| 			Containers: []v1.Container{ | ||||
| 				{ | ||||
| 					Name:       config.Prefix + "-client", | ||||
| 					Image:      GetTestImage(BusyBoxImage), | ||||
| 					Image:      GetTestImage(framework.BusyBoxImage), | ||||
| 					WorkingDir: "/opt", | ||||
| 					// An imperative and easily debuggable container which reads vol contents for | ||||
| 					// us to scan in the tests or by eye. | ||||
| @@ -483,29 +483,29 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro | ||||
| 	} | ||||
| 	clientPod, err := podsNamespacer.Create(clientPod) | ||||
| 	if err != nil { | ||||
| 		Failf("Failed to create %s pod: %v", clientPod.Name, err) | ||||
| 		framework.Failf("Failed to create %s pod: %v", clientPod.Name, err) | ||||
| 
 | ||||
| 	} | ||||
| 	ExpectNoError(WaitForPodRunningInNamespace(client, clientPod)) | ||||
| 	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, clientPod)) | ||||
| 
 | ||||
| 	ginkgo.By("Checking that text file contents are perfect.") | ||||
| 	for i, test := range tests { | ||||
| 		fileName := fmt.Sprintf("/opt/%d/%s", i, test.File) | ||||
| 		commands := GenerateReadFileCmd(fileName) | ||||
| 		_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, commands, test.ExpectedContent, time.Minute) | ||||
| 		ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName) | ||||
| 		_, err = framework.LookForStringInPodExec(config.Namespace, clientPod.Name, commands, test.ExpectedContent, time.Minute) | ||||
| 		framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName) | ||||
| 	} | ||||
| 	if !NodeOSDistroIs("windows") { | ||||
| 	if !framework.NodeOSDistroIs("windows") { | ||||
| 		if fsGroup != nil { | ||||
| 			ginkgo.By("Checking fsGroup is correct.") | ||||
| 			_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute) | ||||
| 			ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup)) | ||||
| 			_, err = framework.LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute) | ||||
| 			framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup)) | ||||
| 		} | ||||
| 
 | ||||
| 		if fsType != "" { | ||||
| 			ginkgo.By("Checking fsType is correct.") | ||||
| 			_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"grep", " /opt/0 ", "/proc/mounts"}, fsType, time.Minute) | ||||
| 			ExpectNoError(err, "failed: getting the right fsType %s", fsType) | ||||
| 			_, err = framework.LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"grep", " /opt/0 ", "/proc/mounts"}, fsType, time.Minute) | ||||
| 			framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| @@ -513,7 +513,7 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro | ||||
| // InjectHTML inserts index.html with given content into given volume. It does so by | ||||
| // starting and auxiliary pod which writes the file there. | ||||
| // The volume must be writable. | ||||
| func InjectHTML(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, volume v1.VolumeSource, content string) { | ||||
| func InjectHTML(client clientset.Interface, config TestConfig, fsGroup *int64, volume v1.VolumeSource, content string) { | ||||
| 	ginkgo.By(fmt.Sprint("starting ", config.Prefix, " injector")) | ||||
| 	podClient := client.CoreV1().Pods(config.Namespace) | ||||
| 	podName := fmt.Sprintf("%s-injector-%s", config.Prefix, rand.String(4)) | ||||
| @@ -535,7 +535,7 @@ func InjectHTML(client clientset.Interface, config VolumeTestConfig, fsGroup *in | ||||
| 			Containers: []v1.Container{ | ||||
| 				{ | ||||
| 					Name:    config.Prefix + "-injector", | ||||
| 					Image:   GetTestImage(BusyBoxImage), | ||||
| 					Image:   GetTestImage(framework.BusyBoxImage), | ||||
| 					Command: GenerateWriteFileCmd(content, fileName), | ||||
| 					VolumeMounts: []v1.VolumeMount{ | ||||
| 						{ | ||||
| @@ -566,15 +566,15 @@ func InjectHTML(client clientset.Interface, config VolumeTestConfig, fsGroup *in | ||||
| 	}() | ||||
| 
 | ||||
| 	injectPod, err := podClient.Create(injectPod) | ||||
| 	ExpectNoError(err, "Failed to create injector pod: %v", err) | ||||
| 	err = WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace) | ||||
| 	ExpectNoError(err) | ||||
| 	framework.ExpectNoError(err, "Failed to create injector pod: %v", err) | ||||
| 	err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace) | ||||
| 	framework.ExpectNoError(err) | ||||
| } | ||||
| 
 | ||||
| // CreateGCEVolume creates PersistentVolumeSource for GCEVolume. | ||||
| func CreateGCEVolume() (*v1.PersistentVolumeSource, string) { | ||||
| 	diskName, err := CreatePDWithRetry() | ||||
| 	ExpectNoError(err) | ||||
| 	diskName, err := framework.CreatePDWithRetry() | ||||
| 	framework.ExpectNoError(err) | ||||
| 	return &v1.PersistentVolumeSource{ | ||||
| 		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ | ||||
| 			PDName:   diskName, | ||||
| @@ -588,7 +588,7 @@ func CreateGCEVolume() (*v1.PersistentVolumeSource, string) { | ||||
| // Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh | ||||
| func GenerateScriptCmd(command string) []string { | ||||
| 	var commands []string | ||||
| 	if !NodeOSDistroIs("windows") { | ||||
| 	if !framework.NodeOSDistroIs("windows") { | ||||
| 		commands = []string{"/bin/sh", "-c", command} | ||||
| 	} else { | ||||
| 		commands = []string{"powershell", "/c", command} | ||||
| @@ -600,7 +600,7 @@ func GenerateScriptCmd(command string) []string { | ||||
| // Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh | ||||
| func GenerateWriteFileCmd(content, fullPath string) []string { | ||||
| 	var commands []string | ||||
| 	if !NodeOSDistroIs("windows") { | ||||
| 	if !framework.NodeOSDistroIs("windows") { | ||||
| 		commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + fullPath} | ||||
| 	} else { | ||||
| 		commands = []string{"powershell", "/c", "echo '" + content + "' > " + fullPath} | ||||
| @@ -612,7 +612,7 @@ func GenerateWriteFileCmd(content, fullPath string) []string { | ||||
| // Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh | ||||
| func GenerateReadFileCmd(fullPath string) []string { | ||||
| 	var commands []string | ||||
| 	if !NodeOSDistroIs("windows") { | ||||
| 	if !framework.NodeOSDistroIs("windows") { | ||||
| 		commands = []string{"cat", fullPath} | ||||
| 	} else { | ||||
| 		commands = []string{"powershell", "/c", "type " + fullPath} | ||||
| @@ -625,12 +625,12 @@ func GenerateReadFileCmd(fullPath string) []string { | ||||
| // Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh | ||||
| func GenerateWriteandExecuteScriptFileCmd(content, fileName, filePath string) []string { | ||||
| 	// for windows cluster, modify the Pod spec. | ||||
| 	if NodeOSDistroIs("windows") { | ||||
| 	if framework.NodeOSDistroIs("windows") { | ||||
| 		scriptName := fmt.Sprintf("%s.ps1", fileName) | ||||
| 		fullPath := filepath.Join(filePath, scriptName) | ||||
| 
 | ||||
| 		cmd := "echo \"" + content + "\" > " + fullPath + "; .\\" + fullPath | ||||
| 		Logf("generated pod command %s", cmd) | ||||
| 		framework.Logf("generated pod command %s", cmd) | ||||
| 		return []string{"powershell", "/c", cmd} | ||||
| 	} | ||||
| 	scriptName := fmt.Sprintf("%s.sh", fileName) | ||||
| @@ -643,7 +643,7 @@ func GenerateWriteandExecuteScriptFileCmd(content, fileName, filePath string) [] | ||||
| // If the Node OS is windows, currently we will ignore the inputs and return nil. | ||||
| // TODO: Will modify it after windows has its own security context | ||||
| func GenerateSecurityContext(privileged bool) *v1.SecurityContext { | ||||
| 	if NodeOSDistroIs("windows") { | ||||
| 	if framework.NodeOSDistroIs("windows") { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return &v1.SecurityContext{ | ||||
| @@ -655,7 +655,7 @@ func GenerateSecurityContext(privileged bool) *v1.SecurityContext { | ||||
| // If the Node OS is windows, currently we will ignore the inputs and return nil. | ||||
| // TODO: Will modify it after windows has its own security context | ||||
| func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOptions) *v1.PodSecurityContext { | ||||
| 	if NodeOSDistroIs("windows") { | ||||
| 	if framework.NodeOSDistroIs("windows") { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return &v1.PodSecurityContext{ | ||||
| @@ -668,7 +668,7 @@ func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOption | ||||
| // If the Node OS is windows, currently we return Nettest image for Windows node | ||||
| // due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35. | ||||
| func GetTestImage(image string) string { | ||||
| 	if NodeOSDistroIs("windows") { | ||||
| 	if framework.NodeOSDistroIs("windows") { | ||||
| 		return imageutils.GetE2EImage(imageutils.Nettest) | ||||
| 	} | ||||
| 	return image | ||||
| @@ -37,6 +37,7 @@ go_library( | ||||
|         "//test/e2e/common:go_default_library", | ||||
|         "//test/e2e/framework:go_default_library", | ||||
|         "//test/e2e/framework/job:go_default_library", | ||||
|         "//test/e2e/framework/volume:go_default_library", | ||||
|         "//test/utils:go_default_library", | ||||
|         "//test/utils/image:go_default_library", | ||||
|         "//vendor/github.com/onsi/ginkgo:go_default_library", | ||||
|   | ||||
| @@ -29,6 +29,7 @@ import ( | ||||
| 	"k8s.io/apimachinery/pkg/util/wait" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	testutils "k8s.io/kubernetes/test/utils" | ||||
| 	imageutils "k8s.io/kubernetes/test/utils/image" | ||||
|  | ||||
| @@ -388,7 +389,7 @@ var _ = SIGDescribe("kubelet", func() { | ||||
|  | ||||
| 			BeforeEach(func() { | ||||
| 				framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) | ||||
| 				_, nfsServerPod, nfsIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) | ||||
| 				_, nfsServerPod, nfsIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) | ||||
| 			}) | ||||
|  | ||||
| 			AfterEach(func() { | ||||
|   | ||||
| @@ -69,6 +69,7 @@ go_library( | ||||
|         "//test/e2e/framework/metrics:go_default_library", | ||||
|         "//test/e2e/framework/providers/gce:go_default_library", | ||||
|         "//test/e2e/framework/testfiles:go_default_library", | ||||
|         "//test/e2e/framework/volume:go_default_library", | ||||
|         "//test/e2e/storage/drivers:go_default_library", | ||||
|         "//test/e2e/storage/testpatterns:go_default_library", | ||||
|         "//test/e2e/storage/testsuites:go_default_library", | ||||
|   | ||||
| @@ -22,6 +22,7 @@ go_library( | ||||
|         "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", | ||||
|         "//staging/src/k8s.io/client-go/kubernetes:go_default_library", | ||||
|         "//test/e2e/framework:go_default_library", | ||||
|         "//test/e2e/framework/volume:go_default_library", | ||||
|         "//test/e2e/storage/testpatterns:go_default_library", | ||||
|         "//test/e2e/storage/testsuites:go_default_library", | ||||
|         "//test/e2e/storage/utils:go_default_library", | ||||
|   | ||||
| @@ -54,6 +54,7 @@ import ( | ||||
| 	"k8s.io/apimachinery/pkg/util/sets" | ||||
| 	"k8s.io/apiserver/pkg/authentication/serviceaccount" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/testpatterns" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/testsuites" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| @@ -186,7 +187,7 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp | ||||
| 	case testpatterns.InlineVolume: | ||||
| 		fallthrough | ||||
| 	case testpatterns.PreprovisionedPV: | ||||
| 		c, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{}) | ||||
| 		c, serverPod, serverIP := volume.NewNFSServer(cs, ns.Name, []string{}) | ||||
| 		config.ServerConfig = &c | ||||
| 		return &nfsVolume{ | ||||
| 			serverIP:  serverIP, | ||||
| @@ -202,7 +203,7 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp | ||||
| } | ||||
|  | ||||
| func (v *nfsVolume) DeleteVolume() { | ||||
| 	framework.CleanUpVolumeServer(v.f, v.serverPod) | ||||
| 	volume.CleanUpVolumeServer(v.f, v.serverPod) | ||||
| } | ||||
|  | ||||
| // Gluster | ||||
| @@ -290,7 +291,7 @@ func (g *glusterFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType | ||||
| 	cs := f.ClientSet | ||||
| 	ns := f.Namespace | ||||
|  | ||||
| 	c, serverPod, _ := framework.NewGlusterfsServer(cs, ns.Name) | ||||
| 	c, serverPod, _ := volume.NewGlusterfsServer(cs, ns.Name) | ||||
| 	config.ServerConfig = &c | ||||
| 	return &glusterVolume{ | ||||
| 		prefix:    config.Prefix, | ||||
| @@ -418,7 +419,7 @@ func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes | ||||
| 	cs := f.ClientSet | ||||
| 	ns := f.Namespace | ||||
|  | ||||
| 	c, serverPod, serverIP, iqn := framework.NewISCSIServer(cs, ns.Name) | ||||
| 	c, serverPod, serverIP, iqn := volume.NewISCSIServer(cs, ns.Name) | ||||
| 	config.ServerConfig = &c | ||||
| 	config.ClientNodeName = c.ClientNodeName | ||||
| 	return &iSCSIVolume{ | ||||
| @@ -430,7 +431,7 @@ func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes | ||||
| } | ||||
|  | ||||
| func (v *iSCSIVolume) DeleteVolume() { | ||||
| 	framework.CleanUpVolumeServer(v.f, v.serverPod) | ||||
| 	volume.CleanUpVolumeServer(v.f, v.serverPod) | ||||
| } | ||||
|  | ||||
| // Ceph RBD | ||||
| @@ -542,7 +543,7 @@ func (r *rbdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp | ||||
| 	cs := f.ClientSet | ||||
| 	ns := f.Namespace | ||||
|  | ||||
| 	c, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) | ||||
| 	c, serverPod, secret, serverIP := volume.NewRBDServer(cs, ns.Name) | ||||
| 	config.ServerConfig = &c | ||||
| 	return &rbdVolume{ | ||||
| 		serverPod: serverPod, | ||||
| @@ -553,7 +554,7 @@ func (r *rbdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp | ||||
| } | ||||
|  | ||||
| func (v *rbdVolume) DeleteVolume() { | ||||
| 	framework.CleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret) | ||||
| 	volume.CleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret) | ||||
| } | ||||
|  | ||||
| // Ceph | ||||
| @@ -651,7 +652,7 @@ func (c *cephFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType te | ||||
| 	cs := f.ClientSet | ||||
| 	ns := f.Namespace | ||||
|  | ||||
| 	cfg, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) | ||||
| 	cfg, serverPod, secret, serverIP := volume.NewRBDServer(cs, ns.Name) | ||||
| 	config.ServerConfig = &cfg | ||||
| 	return &cephVolume{ | ||||
| 		serverPod: serverPod, | ||||
| @@ -662,7 +663,7 @@ func (c *cephFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType te | ||||
| } | ||||
|  | ||||
| func (v *cephVolume) DeleteVolume() { | ||||
| 	framework.CleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret) | ||||
| 	volume.CleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret) | ||||
| } | ||||
|  | ||||
| // Hostpath | ||||
|   | ||||
| @@ -33,6 +33,7 @@ import ( | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/testfiles" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| ) | ||||
|  | ||||
| @@ -50,8 +51,8 @@ const ( | ||||
|  | ||||
| // testFlexVolume tests that a client pod using a given flexvolume driver | ||||
| // successfully mounts it and runs | ||||
| func testFlexVolume(driver string, cs clientset.Interface, config framework.VolumeTestConfig, f *framework.Framework) { | ||||
| 	tests := []framework.VolumeTest{ | ||||
| func testFlexVolume(driver string, cs clientset.Interface, config volume.TestConfig, f *framework.Framework) { | ||||
| 	tests := []volume.Test{ | ||||
| 		{ | ||||
| 			Volume: v1.VolumeSource{ | ||||
| 				FlexVolume: &v1.FlexVolumeSource{ | ||||
| @@ -63,9 +64,9 @@ func testFlexVolume(driver string, cs clientset.Interface, config framework.Volu | ||||
| 			ExpectedContent: "Hello from flexvolume!", | ||||
| 		}, | ||||
| 	} | ||||
| 	framework.TestVolumeClient(cs, config, nil, "" /* fsType */, tests) | ||||
| 	volume.TestVolumeClient(cs, config, nil, "" /* fsType */, tests) | ||||
|  | ||||
| 	framework.VolumeTestCleanup(f, config) | ||||
| 	volume.TestCleanup(f, config) | ||||
| } | ||||
|  | ||||
| // installFlex installs the driver found at filePath on the node, and restarts | ||||
| @@ -180,7 +181,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { | ||||
| 	var cs clientset.Interface | ||||
| 	var ns *v1.Namespace | ||||
| 	var node v1.Node | ||||
| 	var config framework.VolumeTestConfig | ||||
| 	var config volume.TestConfig | ||||
| 	var suffix string | ||||
|  | ||||
| 	BeforeEach(func() { | ||||
| @@ -193,7 +194,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { | ||||
| 		ns = f.Namespace | ||||
| 		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) | ||||
| 		node = nodes.Items[rand.Intn(len(nodes.Items))] | ||||
| 		config = framework.VolumeTestConfig{ | ||||
| 		config = volume.TestConfig{ | ||||
| 			Namespace:      ns.Name, | ||||
| 			Prefix:         "flex", | ||||
| 			ClientNodeName: node.Name, | ||||
|   | ||||
| @@ -28,6 +28,7 @@ import ( | ||||
| 	utilerrors "k8s.io/apimachinery/pkg/util/errors" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| ) | ||||
|  | ||||
| @@ -66,7 +67,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { | ||||
| 		volLabel = labels.Set{framework.VolumeSelectorKey: ns} | ||||
| 		selector = metav1.SetAsLabelSelector(volLabel) | ||||
| 		// Start the NFS server pod. | ||||
| 		_, nfsServerPod, nfsServerIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) | ||||
| 		_, nfsServerPod, nfsServerIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) | ||||
| 		nfsPVconfig = framework.PersistentVolumeConfig{ | ||||
| 			NamePrefix: "nfs-", | ||||
| 			Labels:     volLabel, | ||||
| @@ -120,7 +121,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { | ||||
| 			framework.SkipUnlessSSHKeyPresent() | ||||
|  | ||||
| 			By("Initializing first PD with PVPVC binding") | ||||
| 			pvSource1, diskName1 = framework.CreateGCEVolume() | ||||
| 			pvSource1, diskName1 = volume.CreateGCEVolume() | ||||
| 			framework.ExpectNoError(err) | ||||
| 			pvConfig1 = framework.PersistentVolumeConfig{ | ||||
| 				NamePrefix: "gce-", | ||||
| @@ -133,7 +134,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { | ||||
| 			framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv1, pvc1)) | ||||
|  | ||||
| 			By("Initializing second PD with PVPVC binding") | ||||
| 			pvSource2, diskName2 = framework.CreateGCEVolume() | ||||
| 			pvSource2, diskName2 = volume.CreateGCEVolume() | ||||
| 			framework.ExpectNoError(err) | ||||
| 			pvConfig2 = framework.PersistentVolumeConfig{ | ||||
| 				NamePrefix: "gce-", | ||||
|   | ||||
| @@ -29,6 +29,7 @@ import ( | ||||
| 	utilerrors "k8s.io/apimachinery/pkg/util/errors" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	imageutils "k8s.io/kubernetes/test/utils/image" | ||||
| ) | ||||
| @@ -121,7 +122,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { | ||||
| 		) | ||||
|  | ||||
| 		BeforeEach(func() { | ||||
| 			_, nfsServerPod, serverIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) | ||||
| 			_, nfsServerPod, serverIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) | ||||
| 			pvConfig = framework.PersistentVolumeConfig{ | ||||
| 				NamePrefix: "nfs-", | ||||
| 				Labels:     volLabel, | ||||
|   | ||||
| @@ -7,7 +7,7 @@ go_library( | ||||
|     visibility = ["//visibility:public"], | ||||
|     deps = [ | ||||
|         "//staging/src/k8s.io/api/core/v1:go_default_library", | ||||
|         "//test/e2e/framework:go_default_library", | ||||
|         "//test/e2e/framework/volume:go_default_library", | ||||
|     ], | ||||
| ) | ||||
|  | ||||
|   | ||||
| @@ -18,19 +18,19 @@ package testpatterns | ||||
|  | ||||
| import ( | ||||
| 	"k8s.io/api/core/v1" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	// MinFileSize represents minimum file size (1 MiB) for testing | ||||
| 	MinFileSize = 1 * framework.MiB | ||||
| 	MinFileSize = 1 * volume.MiB | ||||
|  | ||||
| 	// FileSizeSmall represents small file size (1 MiB) for testing | ||||
| 	FileSizeSmall = 1 * framework.MiB | ||||
| 	FileSizeSmall = 1 * volume.MiB | ||||
| 	// FileSizeMedium represents medium file size (100 MiB) for testing | ||||
| 	FileSizeMedium = 100 * framework.MiB | ||||
| 	FileSizeMedium = 100 * volume.MiB | ||||
| 	// FileSizeLarge represents large file size (1 GiB) for testing | ||||
| 	FileSizeLarge = 1 * framework.GiB | ||||
| 	FileSizeLarge = 1 * volume.GiB | ||||
| ) | ||||
|  | ||||
| // TestVolType represents a volume type to be tested in a TestSuite | ||||
|   | ||||
| @@ -34,6 +34,7 @@ go_library( | ||||
|         "//staging/src/k8s.io/client-go/kubernetes:go_default_library", | ||||
|         "//test/e2e/framework:go_default_library", | ||||
|         "//test/e2e/framework/podlogs:go_default_library", | ||||
|         "//test/e2e/framework/volume:go_default_library", | ||||
|         "//test/e2e/storage/testpatterns:go_default_library", | ||||
|         "//test/e2e/storage/utils:go_default_library", | ||||
|         "//test/utils/image:go_default_library", | ||||
|   | ||||
| @@ -34,6 +34,7 @@ import ( | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/podlogs" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/testpatterns" | ||||
| ) | ||||
|  | ||||
| @@ -388,15 +389,15 @@ func deleteStorageClass(cs clientset.Interface, className string) { | ||||
| // dynamically created config for the volume server. | ||||
| // | ||||
| // This is done because TestConfig is the public API for | ||||
| // the testsuites package whereas framework.VolumeTestConfig is merely | ||||
| // the testsuites package whereas volume.TestConfig is merely | ||||
| // an implementation detail. It contains fields that have no effect, | ||||
| // which makes it unsuitable for use in the testsuits public API. | ||||
| func convertTestConfig(in *PerTestConfig) framework.VolumeTestConfig { | ||||
| func convertTestConfig(in *PerTestConfig) volume.TestConfig { | ||||
| 	if in.ServerConfig != nil { | ||||
| 		return *in.ServerConfig | ||||
| 	} | ||||
|  | ||||
| 	return framework.VolumeTestConfig{ | ||||
| 	return volume.TestConfig{ | ||||
| 		Namespace:      in.Framework.Namespace.Name, | ||||
| 		Prefix:         in.Prefix, | ||||
| 		ClientNodeName: in.ClientNodeName, | ||||
|   | ||||
| @@ -34,6 +34,7 @@ import ( | ||||
| 	"k8s.io/client-go/dynamic" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/testpatterns" | ||||
| ) | ||||
|  | ||||
| @@ -549,8 +550,8 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command | ||||
| 			Containers: []v1.Container{ | ||||
| 				{ | ||||
| 					Name:    "volume-tester", | ||||
| 					Image:   framework.GetTestImage(framework.BusyBoxImage), | ||||
| 					Command: framework.GenerateScriptCmd(command), | ||||
| 					Image:   volume.GetTestImage(framework.BusyBoxImage), | ||||
| 					Command: volume.GenerateScriptCmd(command), | ||||
| 					VolumeMounts: []v1.VolumeMount{ | ||||
| 						{ | ||||
| 							Name:      "my-volume", | ||||
|   | ||||
| @@ -22,6 +22,7 @@ import ( | ||||
| 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" | ||||
| 	"k8s.io/apimachinery/pkg/util/sets" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/testpatterns" | ||||
| ) | ||||
|  | ||||
| @@ -167,7 +168,7 @@ type PerTestConfig struct { | ||||
| 	// Some test drivers initialize a storage server. This is | ||||
| 	// the configuration that then has to be used to run tests. | ||||
| 	// The values above are ignored for such tests. | ||||
| 	ServerConfig *framework.VolumeTestConfig | ||||
| 	ServerConfig *volume.TestConfig | ||||
| } | ||||
|  | ||||
| // GetUniqueDriverName returns unique driver name that can be used parallelly in tests | ||||
|   | ||||
| @@ -34,6 +34,7 @@ import ( | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/testpatterns" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| ) | ||||
| @@ -154,7 +155,7 @@ func createFileSizes(maxFileSize int64) []int64 { | ||||
| } | ||||
|  | ||||
| // Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env. | ||||
| func makePodSpec(config framework.VolumeTestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod { | ||||
| func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod { | ||||
| 	var gracePeriod int64 = 1 | ||||
| 	volName := fmt.Sprintf("io-volume-%s", config.Namespace) | ||||
| 	return &v1.Pod{ | ||||
| @@ -278,7 +279,7 @@ func deleteFile(pod *v1.Pod, fpath string) { | ||||
| // Note: nil can be passed for the podSecContext parm, in which case it is ignored. | ||||
| // Note: `fsizes` values are enforced to each be at least `MinFileSize` and a multiple of `MinFileSize` | ||||
| //   bytes. | ||||
| func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { | ||||
| func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { | ||||
| 	ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace)) | ||||
| 	writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value | ||||
| 	loopCnt := testpatterns.MinFileSize / int64(len(writeBlk)) | ||||
| @@ -306,7 +307,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framewo | ||||
| 			} | ||||
| 		} else { | ||||
| 			framework.Logf("sleeping a bit so kubelet can unmount and detach the volume") | ||||
| 			time.Sleep(framework.PodCleanupTimeout) | ||||
| 			time.Sleep(volume.PodCleanupTimeout) | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
|   | ||||
| @@ -29,6 +29,7 @@ import ( | ||||
| 	"k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/testpatterns" | ||||
| 	imageutils "k8s.io/kubernetes/test/utils/image" | ||||
| ) | ||||
| @@ -136,11 +137,11 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T | ||||
| 		skipPersistenceTest(driver) | ||||
| 		init() | ||||
| 		defer func() { | ||||
| 			framework.VolumeTestCleanup(f, convertTestConfig(l.config)) | ||||
| 			volume.TestCleanup(f, convertTestConfig(l.config)) | ||||
| 			cleanup() | ||||
| 		}() | ||||
|  | ||||
| 		tests := []framework.VolumeTest{ | ||||
| 		tests := []volume.Test{ | ||||
| 			{ | ||||
| 				Volume: *l.resource.volSource, | ||||
| 				File:   "index.html", | ||||
| @@ -159,8 +160,8 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T | ||||
| 		// local), plugin skips setting fsGroup if volume is already mounted | ||||
| 		// and we don't have reliable way to detect volumes are unmounted or | ||||
| 		// not before starting the second pod. | ||||
| 		framework.InjectHTML(f.ClientSet, config, fsGroup, tests[0].Volume, tests[0].ExpectedContent) | ||||
| 		framework.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests) | ||||
| 		volume.InjectHTML(f.ClientSet, config, fsGroup, tests[0].Volume, tests[0].ExpectedContent) | ||||
| 		volume.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests) | ||||
| 	}) | ||||
|  | ||||
| 	It("should allow exec of files on the volume", func() { | ||||
| @@ -190,7 +191,7 @@ func testScriptInPod( | ||||
| 	} else { | ||||
| 		content = fmt.Sprintf("ls %s", volPath) | ||||
| 	} | ||||
| 	command := framework.GenerateWriteandExecuteScriptFileCmd(content, fileName, volPath) | ||||
| 	command := volume.GenerateWriteandExecuteScriptFileCmd(content, fileName, volPath) | ||||
| 	pod := &v1.Pod{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:      fmt.Sprintf("exec-volume-test-%s", suffix), | ||||
| @@ -200,7 +201,7 @@ func testScriptInPod( | ||||
| 			Containers: []v1.Container{ | ||||
| 				{ | ||||
| 					Name:    fmt.Sprintf("exec-container-%s", suffix), | ||||
| 					Image:   framework.GetTestImage(imageutils.GetE2EImage(imageutils.Nginx)), | ||||
| 					Image:   volume.GetTestImage(imageutils.GetE2EImage(imageutils.Nginx)), | ||||
| 					Command: command, | ||||
| 					VolumeMounts: []v1.VolumeMount{ | ||||
| 						{ | ||||
|   | ||||
| @@ -23,6 +23,7 @@ import ( | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| ) | ||||
|  | ||||
| @@ -42,12 +43,12 @@ var _ = utils.SIGDescribe("Volumes", func() { | ||||
|  | ||||
| 	Describe("ConfigMap", func() { | ||||
| 		It("should be mountable", func() { | ||||
| 			config := framework.VolumeTestConfig{ | ||||
| 			config := volume.TestConfig{ | ||||
| 				Namespace: namespace.Name, | ||||
| 				Prefix:    "configmap", | ||||
| 			} | ||||
|  | ||||
| 			defer framework.VolumeTestCleanup(f, config) | ||||
| 			defer volume.TestCleanup(f, config) | ||||
| 			configMap := &v1.ConfigMap{ | ||||
| 				TypeMeta: metav1.TypeMeta{ | ||||
| 					Kind:       "ConfigMap", | ||||
| @@ -70,7 +71,7 @@ var _ = utils.SIGDescribe("Volumes", func() { | ||||
| 			}() | ||||
|  | ||||
| 			// Test one ConfigMap mounted several times to test #28502 | ||||
| 			tests := []framework.VolumeTest{ | ||||
| 			tests := []volume.Test{ | ||||
| 				{ | ||||
| 					Volume: v1.VolumeSource{ | ||||
| 						ConfigMap: &v1.ConfigMapVolumeSource{ | ||||
| @@ -106,7 +107,7 @@ var _ = utils.SIGDescribe("Volumes", func() { | ||||
| 					ExpectedContent: "this is the second file", | ||||
| 				}, | ||||
| 			} | ||||
| 			framework.TestVolumeClient(cs, config, nil, "" /* fsType */, tests) | ||||
| 			volume.TestVolumeClient(cs, config, nil, "" /* fsType */, tests) | ||||
| 		}) | ||||
| 	}) | ||||
| }) | ||||
|   | ||||
| @@ -18,6 +18,7 @@ go_library( | ||||
|         "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", | ||||
|         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", | ||||
|         "//test/e2e/framework:go_default_library", | ||||
|         "//test/e2e/framework/volume:go_default_library", | ||||
|         "//test/e2e/storage/utils:go_default_library", | ||||
|         "//test/e2e/upgrades:go_default_library", | ||||
|         "//vendor/github.com/onsi/ginkgo:go_default_library", | ||||
|   | ||||
| @@ -20,6 +20,7 @@ import ( | ||||
| 	"k8s.io/api/core/v1" | ||||
| 	utilerrors "k8s.io/apimachinery/pkg/util/errors" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo" | ||||
| 	"github.com/onsi/gomega" | ||||
| @@ -57,7 +58,7 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) { | ||||
| 	ns := f.Namespace.Name | ||||
|  | ||||
| 	ginkgo.By("Initializing PV source") | ||||
| 	t.pvSource, _ = framework.CreateGCEVolume() | ||||
| 	t.pvSource, _ = volume.CreateGCEVolume() | ||||
| 	pvConfig := framework.PersistentVolumeConfig{ | ||||
| 		NamePrefix: "pv-upgrade", | ||||
| 		PVSource:   *t.pvSource, | ||||
|   | ||||
| @@ -158,6 +158,7 @@ go_test( | ||||
|         "//test/e2e/common:go_default_library", | ||||
|         "//test/e2e/framework:go_default_library", | ||||
|         "//test/e2e/framework/metrics:go_default_library", | ||||
|         "//test/e2e/framework/volume:go_default_library", | ||||
|         "//test/e2e_node/perf/workloads:go_default_library", | ||||
|         "//test/e2e_node/services:go_default_library", | ||||
|         "//test/utils:go_default_library", | ||||
|   | ||||
| @@ -24,6 +24,7 @@ import ( | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/metrics" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
|  | ||||
| 	"github.com/prometheus/common/model" | ||||
|  | ||||
| @@ -74,7 +75,7 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() { | ||||
| 					"": boundedSample(1, 1E6), | ||||
| 				}), | ||||
| 				"node_memory_working_set_bytes": gstruct.MatchAllElements(nodeId, gstruct.Elements{ | ||||
| 					"": boundedSample(10*framework.Mb, memoryLimit), | ||||
| 					"": boundedSample(10*volume.Mb, memoryLimit), | ||||
| 				}), | ||||
|  | ||||
| 				"container_cpu_usage_seconds_total": gstruct.MatchElements(containerId, gstruct.IgnoreExtras, gstruct.Elements{ | ||||
| @@ -83,8 +84,8 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() { | ||||
| 				}), | ||||
|  | ||||
| 				"container_memory_working_set_bytes": gstruct.MatchAllElements(containerId, gstruct.Elements{ | ||||
| 					fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod0, "busybox-container"): boundedSample(10*framework.Kb, 80*framework.Mb), | ||||
| 					fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod1, "busybox-container"): boundedSample(10*framework.Kb, 80*framework.Mb), | ||||
| 					fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod0, "busybox-container"): boundedSample(10*volume.Kb, 80*volume.Mb), | ||||
| 					fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod1, "busybox-container"): boundedSample(10*volume.Kb, 80*volume.Mb), | ||||
| 				}), | ||||
| 			}) | ||||
| 			By("Giving pods a minute to start up and produce metrics") | ||||
|   | ||||
| @@ -28,6 +28,7 @@ import ( | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework/volume" | ||||
|  | ||||
| 	systemdutil "github.com/coreos/go-systemd/util" | ||||
| 	. "github.com/onsi/ginkgo" | ||||
| @@ -80,7 +81,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 			node := getLocalNode(f) | ||||
| 			memoryCapacity := node.Status.Capacity["memory"] | ||||
| 			memoryLimit := memoryCapacity.Value() | ||||
| 			fsCapacityBounds := bounded(100*framework.Mb, 10*framework.Tb) | ||||
| 			fsCapacityBounds := bounded(100*volume.Mb, 10*volume.Tb) | ||||
| 			// Expectations for system containers. | ||||
| 			sysContExpectations := func() types.GomegaMatcher { | ||||
| 				return gstruct.MatchAllFields(gstruct.Fields{ | ||||
| @@ -95,10 +96,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 						"Time": recent(maxStatsAge), | ||||
| 						// We don't limit system container memory. | ||||
| 						"AvailableBytes":  BeNil(), | ||||
| 						"UsageBytes":      bounded(1*framework.Mb, memoryLimit), | ||||
| 						"WorkingSetBytes": bounded(1*framework.Mb, memoryLimit), | ||||
| 						"UsageBytes":      bounded(1*volume.Mb, memoryLimit), | ||||
| 						"WorkingSetBytes": bounded(1*volume.Mb, memoryLimit), | ||||
| 						// this now returns /sys/fs/cgroup/memory.stat total_rss | ||||
| 						"RSSBytes":        bounded(1*framework.Mb, memoryLimit), | ||||
| 						"RSSBytes":        bounded(1*volume.Mb, memoryLimit), | ||||
| 						"PageFaults":      bounded(1000, 1E9), | ||||
| 						"MajorPageFaults": bounded(0, 100000), | ||||
| 					}), | ||||
| @@ -112,10 +113,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 			podsContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{ | ||||
| 				"Time": recent(maxStatsAge), | ||||
| 				// Pods are limited by Node Allocatable | ||||
| 				"AvailableBytes":  bounded(1*framework.Kb, memoryLimit), | ||||
| 				"UsageBytes":      bounded(10*framework.Kb, memoryLimit), | ||||
| 				"WorkingSetBytes": bounded(10*framework.Kb, memoryLimit), | ||||
| 				"RSSBytes":        bounded(1*framework.Kb, memoryLimit), | ||||
| 				"AvailableBytes":  bounded(1*volume.Kb, memoryLimit), | ||||
| 				"UsageBytes":      bounded(10*volume.Kb, memoryLimit), | ||||
| 				"WorkingSetBytes": bounded(10*volume.Kb, memoryLimit), | ||||
| 				"RSSBytes":        bounded(1*volume.Kb, memoryLimit), | ||||
| 				"PageFaults":      bounded(0, 1000000), | ||||
| 				"MajorPageFaults": bounded(0, 10), | ||||
| 			}) | ||||
| @@ -157,9 +158,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 					"Time": recent(maxStatsAge), | ||||
| 					// We don't limit system container memory. | ||||
| 					"AvailableBytes":  BeNil(), | ||||
| 					"UsageBytes":      bounded(100*framework.Kb, memoryLimit), | ||||
| 					"WorkingSetBytes": bounded(100*framework.Kb, memoryLimit), | ||||
| 					"RSSBytes":        bounded(100*framework.Kb, memoryLimit), | ||||
| 					"UsageBytes":      bounded(100*volume.Kb, memoryLimit), | ||||
| 					"WorkingSetBytes": bounded(100*volume.Kb, memoryLimit), | ||||
| 					"RSSBytes":        bounded(100*volume.Kb, memoryLimit), | ||||
| 					"PageFaults":      bounded(1000, 1E9), | ||||
| 					"MajorPageFaults": bounded(0, 100000), | ||||
| 				}) | ||||
| @@ -180,10 +181,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 						}), | ||||
| 						"Memory": ptrMatchAllFields(gstruct.Fields{ | ||||
| 							"Time":            recent(maxStatsAge), | ||||
| 							"AvailableBytes":  bounded(1*framework.Kb, 80*framework.Mb), | ||||
| 							"UsageBytes":      bounded(10*framework.Kb, 80*framework.Mb), | ||||
| 							"WorkingSetBytes": bounded(10*framework.Kb, 80*framework.Mb), | ||||
| 							"RSSBytes":        bounded(1*framework.Kb, 80*framework.Mb), | ||||
| 							"AvailableBytes":  bounded(1*volume.Kb, 80*volume.Mb), | ||||
| 							"UsageBytes":      bounded(10*volume.Kb, 80*volume.Mb), | ||||
| 							"WorkingSetBytes": bounded(10*volume.Kb, 80*volume.Mb), | ||||
| 							"RSSBytes":        bounded(1*volume.Kb, 80*volume.Mb), | ||||
| 							"PageFaults":      bounded(100, 1000000), | ||||
| 							"MajorPageFaults": bounded(0, 10), | ||||
| 						}), | ||||
| @@ -192,7 +193,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 							"Time":           recent(maxStatsAge), | ||||
| 							"AvailableBytes": fsCapacityBounds, | ||||
| 							"CapacityBytes":  fsCapacityBounds, | ||||
| 							"UsedBytes":      bounded(framework.Kb, 10*framework.Mb), | ||||
| 							"UsedBytes":      bounded(volume.Kb, 10*volume.Mb), | ||||
| 							"InodesFree":     bounded(1E4, 1E8), | ||||
| 							"Inodes":         bounded(1E4, 1E8), | ||||
| 							"InodesUsed":     bounded(0, 1E8), | ||||
| @@ -201,7 +202,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 							"Time":           recent(maxStatsAge), | ||||
| 							"AvailableBytes": fsCapacityBounds, | ||||
| 							"CapacityBytes":  fsCapacityBounds, | ||||
| 							"UsedBytes":      bounded(framework.Kb, 10*framework.Mb), | ||||
| 							"UsedBytes":      bounded(volume.Kb, 10*volume.Mb), | ||||
| 							"InodesFree":     bounded(1E4, 1E8), | ||||
| 							"Inodes":         bounded(1E4, 1E8), | ||||
| 							"InodesUsed":     bounded(0, 1E8), | ||||
| @@ -213,9 +214,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 					"Time": recent(maxStatsAge), | ||||
| 					"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{ | ||||
| 						"Name":     Equal("eth0"), | ||||
| 						"RxBytes":  bounded(10, 10*framework.Mb), | ||||
| 						"RxBytes":  bounded(10, 10*volume.Mb), | ||||
| 						"RxErrors": bounded(0, 1000), | ||||
| 						"TxBytes":  bounded(10, 10*framework.Mb), | ||||
| 						"TxBytes":  bounded(10, 10*volume.Mb), | ||||
| 						"TxErrors": bounded(0, 1000), | ||||
| 					}), | ||||
| 					"Interfaces": Not(BeNil()), | ||||
| @@ -227,10 +228,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 				}), | ||||
| 				"Memory": ptrMatchAllFields(gstruct.Fields{ | ||||
| 					"Time":            recent(maxStatsAge), | ||||
| 					"AvailableBytes":  bounded(1*framework.Kb, 80*framework.Mb), | ||||
| 					"UsageBytes":      bounded(10*framework.Kb, 80*framework.Mb), | ||||
| 					"WorkingSetBytes": bounded(10*framework.Kb, 80*framework.Mb), | ||||
| 					"RSSBytes":        bounded(1*framework.Kb, 80*framework.Mb), | ||||
| 					"AvailableBytes":  bounded(1*volume.Kb, 80*volume.Mb), | ||||
| 					"UsageBytes":      bounded(10*volume.Kb, 80*volume.Mb), | ||||
| 					"WorkingSetBytes": bounded(10*volume.Kb, 80*volume.Mb), | ||||
| 					"RSSBytes":        bounded(1*volume.Kb, 80*volume.Mb), | ||||
| 					"PageFaults":      bounded(0, 1000000), | ||||
| 					"MajorPageFaults": bounded(0, 10), | ||||
| 				}), | ||||
| @@ -242,7 +243,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 							"Time":           recent(maxStatsAge), | ||||
| 							"AvailableBytes": fsCapacityBounds, | ||||
| 							"CapacityBytes":  fsCapacityBounds, | ||||
| 							"UsedBytes":      bounded(framework.Kb, 1*framework.Mb), | ||||
| 							"UsedBytes":      bounded(volume.Kb, 1*volume.Mb), | ||||
| 							"InodesFree":     bounded(1E4, 1E8), | ||||
| 							"Inodes":         bounded(1E4, 1E8), | ||||
| 							"InodesUsed":     bounded(0, 1E8), | ||||
| @@ -253,7 +254,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 					"Time":           recent(maxStatsAge), | ||||
| 					"AvailableBytes": fsCapacityBounds, | ||||
| 					"CapacityBytes":  fsCapacityBounds, | ||||
| 					"UsedBytes":      bounded(framework.Kb, 21*framework.Mb), | ||||
| 					"UsedBytes":      bounded(volume.Kb, 21*volume.Mb), | ||||
| 					"InodesFree":     bounded(1E4, 1E8), | ||||
| 					"Inodes":         bounded(1E4, 1E8), | ||||
| 					"InodesUsed":     bounded(0, 1E8), | ||||
| @@ -272,11 +273,11 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 					}), | ||||
| 					"Memory": ptrMatchAllFields(gstruct.Fields{ | ||||
| 						"Time":            recent(maxStatsAge), | ||||
| 						"AvailableBytes":  bounded(100*framework.Mb, memoryLimit), | ||||
| 						"UsageBytes":      bounded(10*framework.Mb, memoryLimit), | ||||
| 						"WorkingSetBytes": bounded(10*framework.Mb, memoryLimit), | ||||
| 						"AvailableBytes":  bounded(100*volume.Mb, memoryLimit), | ||||
| 						"UsageBytes":      bounded(10*volume.Mb, memoryLimit), | ||||
| 						"WorkingSetBytes": bounded(10*volume.Mb, memoryLimit), | ||||
| 						// this now returns /sys/fs/cgroup/memory.stat total_rss | ||||
| 						"RSSBytes":        bounded(1*framework.Kb, memoryLimit), | ||||
| 						"RSSBytes":        bounded(1*volume.Kb, memoryLimit), | ||||
| 						"PageFaults":      bounded(1000, 1E9), | ||||
| 						"MajorPageFaults": bounded(0, 100000), | ||||
| 					}), | ||||
| @@ -285,9 +286,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 						"Time": recent(maxStatsAge), | ||||
| 						"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{ | ||||
| 							"Name":     Or(BeEmpty(), Equal("eth0")), | ||||
| 							"RxBytes":  Or(BeNil(), bounded(1*framework.Mb, 100*framework.Gb)), | ||||
| 							"RxBytes":  Or(BeNil(), bounded(1*volume.Mb, 100*volume.Gb)), | ||||
| 							"RxErrors": Or(BeNil(), bounded(0, 100000)), | ||||
| 							"TxBytes":  Or(BeNil(), bounded(10*framework.Kb, 10*framework.Gb)), | ||||
| 							"TxBytes":  Or(BeNil(), bounded(10*volume.Kb, 10*volume.Gb)), | ||||
| 							"TxErrors": Or(BeNil(), bounded(0, 100000)), | ||||
| 						}), | ||||
| 						"Interfaces": Not(BeNil()), | ||||
| @@ -297,7 +298,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 						"AvailableBytes": fsCapacityBounds, | ||||
| 						"CapacityBytes":  fsCapacityBounds, | ||||
| 						// we assume we are not running tests on machines < 10tb of disk | ||||
| 						"UsedBytes":  bounded(framework.Kb, 10*framework.Tb), | ||||
| 						"UsedBytes":  bounded(volume.Kb, 10*volume.Tb), | ||||
| 						"InodesFree": bounded(1E4, 1E8), | ||||
| 						"Inodes":     bounded(1E4, 1E8), | ||||
| 						"InodesUsed": bounded(0, 1E8), | ||||
| @@ -308,7 +309,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { | ||||
| 							"AvailableBytes": fsCapacityBounds, | ||||
| 							"CapacityBytes":  fsCapacityBounds, | ||||
| 							// we assume we are not running tests on machines < 10tb of disk | ||||
| 							"UsedBytes":  bounded(framework.Kb, 10*framework.Tb), | ||||
| 							"UsedBytes":  bounded(volume.Kb, 10*volume.Tb), | ||||
| 							"InodesFree": bounded(1E4, 1E8), | ||||
| 							"Inodes":     bounded(1E4, 1E8), | ||||
| 							"InodesUsed": bounded(0, 1E8), | ||||
|   | ||||
		Reference in New Issue
	
	Block a user