k3s-io/kubernetes (mirror of https://github.com/k3s-io/kubernetes.git)

Commit a113d8ac41 (parent ebf24c14a9): volume i/o tests for storage plugins
@@ -44,11 +44,11 @@ package common

 import (
   "k8s.io/api/core/v1"
-  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
   clientset "k8s.io/client-go/kubernetes"
   "k8s.io/kubernetes/test/e2e/framework"

   . "github.com/onsi/ginkgo"
+  . "github.com/onsi/gomega"
 )

 // These tests need privileged containers, which are disabled by default. Run
@@ -73,27 +73,15 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
   ////////////////////////////////////////////////////////////////////////
   // NFS
   ////////////////////////////////////////////////////////////////////////

   framework.KubeDescribe("NFSv4", func() {
     It("should be mountable for NFSv4 [sig-storage]", func() {
-      config := framework.VolumeTestConfig{
-        Namespace:   namespace.Name,
-        Prefix:      "nfs",
-        ServerImage: framework.NfsServerImage,
-        ServerPorts: []int{2049},
-      }
+      config, _, serverIP := framework.NewNFSServer(c, namespace.Name, []string{})

       defer func() {
         if clean {
           framework.VolumeTestCleanup(f, config)
         }
       }()

-      pod := framework.StartVolumeServer(c, config)
-      serverIP := pod.Status.PodIP
-      framework.Logf("NFS server IP address: %v", serverIP)

       tests := []framework.VolumeTest{
         {
           Volume: v1.VolumeSource{
@@ -115,21 +103,13 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {

   framework.KubeDescribe("NFSv3", func() {
     It("should be mountable for NFSv3 [sig-storage]", func() {
-      config := framework.VolumeTestConfig{
-        Namespace:   namespace.Name,
-        Prefix:      "nfs",
-        ServerImage: framework.NfsServerImage,
-        ServerPorts: []int{2049},
-      }
+      config, _, serverIP := framework.NewNFSServer(c, namespace.Name, []string{})

       defer func() {
         if clean {
           framework.VolumeTestCleanup(f, config)
         }
       }()
-      pod := framework.StartVolumeServer(c, config)
-      serverIP := pod.Status.PodIP
-      framework.Logf("NFS server IP address: %v", serverIP)
       tests := []framework.VolumeTest{
         {
           Volume: v1.VolumeSource{
@@ -151,70 +131,24 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
   ////////////////////////////////////////////////////////////////////////
   // Gluster
   ////////////////////////////////////////////////////////////////////////

   framework.KubeDescribe("GlusterFS", func() {
     It("should be mountable [sig-storage]", func() {
-      config := framework.VolumeTestConfig{
-        Namespace:   namespace.Name,
-        Prefix:      "gluster",
-        ServerImage: framework.GlusterfsServerImage,
-        ServerPorts: []int{24007, 24008, 49152},
-      }
+      // create gluster server and endpoints
+      config, _, _ := framework.NewGlusterfsServer(c, namespace.Name)
+      name := config.Prefix + "-server"

       defer func() {
         if clean {
           framework.VolumeTestCleanup(f, config)
+          err := c.Core().Endpoints(namespace.Name).Delete(name, nil)
+          Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
         }
       }()

-      pod := framework.StartVolumeServer(c, config)
-      serverIP := pod.Status.PodIP
-      framework.Logf("Gluster server IP address: %v", serverIP)

-      // create Endpoints for the server
-      endpoints := v1.Endpoints{
-        TypeMeta: metav1.TypeMeta{
-          Kind:       "Endpoints",
-          APIVersion: "v1",
-        },
-        ObjectMeta: metav1.ObjectMeta{
-          Name: config.Prefix + "-server",
-        },
-        Subsets: []v1.EndpointSubset{
-          {
-            Addresses: []v1.EndpointAddress{
-              {
-                IP: serverIP,
-              },
-            },
-            Ports: []v1.EndpointPort{
-              {
-                Name:     "gluster",
-                Port:     24007,
-                Protocol: v1.ProtocolTCP,
-              },
-            },
-          },
-        },
-      }

-      endClient := f.ClientSet.CoreV1().Endpoints(config.Namespace)

-      defer func() {
-        if clean {
-          endClient.Delete(config.Prefix+"-server", nil)
-        }
-      }()

-      if _, err := endClient.Create(&endpoints); err != nil {
-        framework.Failf("Failed to create endpoints for Gluster server: %v", err)
-      }

       tests := []framework.VolumeTest{
         {
           Volume: v1.VolumeSource{
             Glusterfs: &v1.GlusterfsVolumeSource{
-              EndpointsName: config.Prefix + "-server",
+              EndpointsName: name,
               // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
               Path:     "test_vol",
               ReadOnly: true,
@@ -64,6 +64,17 @@ const (
   BusyBoxImage string = "gcr.io/google_containers/busybox:1.24"
 )

+const (
+  Kb  int64 = 1000
+  Mb  int64 = 1000 * Kb
+  Gb  int64 = 1000 * Mb
+  Tb  int64 = 1000 * Gb
+  KiB int64 = 1024
+  MiB int64 = 1024 * KiB
+  GiB int64 = 1024 * MiB
+  TiB int64 = 1024 * GiB
+)

 // Configuration of one tests. The test consist of:
 // - server pod - runs serverImage, exports ports[]
 // - client pod - does not need any special configuration
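A quick illustration of the decimal (Kb/Mb/Gb/Tb) versus binary (KiB/MiB/GiB/TiB) constants added above. This is a standalone sketch that re-declares two of them locally, purely to make the relationship concrete; the names are not part of the framework package.

package main

import "fmt"

// Local copies of two of the framework constants added above,
// shown only to compare decimal and binary sizing.
const (
	Mb  int64 = 1000 * 1000 // decimal megabyte
	MiB int64 = 1024 * 1024 // binary mebibyte
)

func main() {
	// 100 MiB is slightly larger than 100 Mb.
	fmt.Println(100*Mb, 100*MiB) // 100000000 104857600
}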
@@ -97,6 +108,106 @@ type VolumeTest struct {
   ExpectedContent string
 }

+// NFS-specific wrapper for CreateStorageServer.
+func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
+  config = VolumeTestConfig{
+    Namespace:   namespace,
+    Prefix:      "nfs",
+    ServerImage: NfsServerImage,
+    ServerPorts: []int{2049},
+  }
+  if len(args) > 0 {
+    config.ServerArgs = args
+  }
+  pod, ip = CreateStorageServer(cs, config)
+  return config, pod, ip
+}
+
+// GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
+func NewGlusterfsServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
+  config = VolumeTestConfig{
+    Namespace:   namespace,
+    Prefix:      "gluster",
+    ServerImage: GlusterfsServerImage,
+    ServerPorts: []int{24007, 24008, 49152},
+  }
+  pod, ip = CreateStorageServer(cs, config)
+
+  By("creating Gluster endpoints")
+  endpoints := &v1.Endpoints{
+    TypeMeta: metav1.TypeMeta{
+      Kind:       "Endpoints",
+      APIVersion: "v1",
+    },
+    ObjectMeta: metav1.ObjectMeta{
+      Name: config.Prefix + "-server",
+    },
+    Subsets: []v1.EndpointSubset{
+      {
+        Addresses: []v1.EndpointAddress{
+          {
+            IP: ip,
+          },
+        },
+        Ports: []v1.EndpointPort{
+          {
+            Name:     "gluster",
+            Port:     24007,
+            Protocol: v1.ProtocolTCP,
+          },
+        },
+      },
+    },
+  }
+  endpoints, err := cs.Core().Endpoints(namespace).Create(endpoints)
+  Expect(err).NotTo(HaveOccurred(), "failed to create endpoints for Gluster server")
+
+  return config, pod, ip
+}
+
+// iSCSI-specific wrapper for CreateStorageServer.
+func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
+  config = VolumeTestConfig{
+    Namespace:   namespace,
+    Prefix:      "iscsi",
+    ServerImage: IscsiServerImage,
+    ServerPorts: []int{3260},
+    ServerVolumes: map[string]string{
+      // iSCSI container needs to insert modules from the host
+      "/lib/modules": "/lib/modules",
+    },
+  }
+  pod, ip = CreateStorageServer(cs, config)
+  return config, pod, ip
+}
+
+// CephRBD-specific wrapper for CreateStorageServer.
+func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
+  config = VolumeTestConfig{
+    Namespace:   namespace,
+    Prefix:      "rbd",
+    ServerImage: RbdServerImage,
+    ServerPorts: []int{6789},
+    ServerVolumes: map[string]string{
+      "/lib/modules": "/lib/modules",
+    },
+  }
+  pod, ip = CreateStorageServer(cs, config)
+  return config, pod, ip
+}
+
+// Wrapper for StartVolumeServer(). A storage server config is passed in, and a pod pointer
+// and ip address string are returned.
+// Note: Expect() is called so no error is returned.
+func CreateStorageServer(cs clientset.Interface, config VolumeTestConfig) (pod *v1.Pod, ip string) {
+  pod = StartVolumeServer(cs, config)
+  Expect(pod).NotTo(BeNil(), "storage server pod should not be nil")
+  ip = pod.Status.PodIP
+  Expect(len(ip)).NotTo(BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
+  Logf("%s server pod IP address: %s", config.Prefix, ip)
+  return pod, ip
+}
+
 // Starts a container specified by config.serverImage and exports all
 // config.serverPorts from it. The returned pod should be used to get the server
 // IP address and create appropriate VolumeSource.
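A minimal sketch of how the new wrappers are meant to be called from a test. The helper name exampleNFSUsage is hypothetical, and f, cs, and ns are assumed to come from the usual e2e framework setup, as they do in the refactored tests elsewhere in this commit.

package storage

import (
	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// exampleNFSUsage is illustrative only; it mirrors the call pattern the
// refactored tests in this commit use.
func exampleNFSUsage(f *framework.Framework, cs clientset.Interface, ns string) {
	// One call starts the NFS server pod and returns its config, pod, and IP.
	config, serverPod, serverIP := framework.NewNFSServer(cs, ns, []string{"-G", "777", "/exports"})
	_ = config

	// The returned IP plugs directly into an inline VolumeSource.
	volSrc := v1.VolumeSource{
		NFS: &v1.NFSVolumeSource{Server: serverIP, Path: "/"},
	}
	_ = volSrc

	// Cleanup mirrors the AfterEach blocks used by the tests.
	_ = framework.DeletePodWithWait(f, cs, serverPod)
}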
@@ -130,19 +130,6 @@ func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRem
     }
   }

-// Calls startVolumeServer to create and run a nfs-server pod. Returns server pod and its
-// ip address.
-// Note: startVolumeServer() waits for the nfs-server pod to be Running and sleeps some
-// so that the nfs server can start up.
-func createNfsServerPod(c clientset.Interface, config framework.VolumeTestConfig) (*v1.Pod, string) {
-  pod := framework.StartVolumeServer(c, config)
-  Expect(pod).NotTo(BeNil())
-  ip := pod.Status.PodIP
-  Expect(len(ip)).NotTo(BeZero())
-  framework.Logf("NFS server IP address: %v", ip)
-  return pod, ip
-}

 // Restart the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 1` command in the
 // pod's (only) container. This command changes the number of nfs server threads from
 // (presumably) zero back to 1, and therefore allows nfs to open connections again.
@@ -431,14 +418,7 @@ var _ = framework.KubeDescribe("kubelet", func() {

     BeforeEach(func() {
       framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-      NFSconfig = framework.VolumeTestConfig{
-        Namespace:   ns,
-        Prefix:      "nfs",
-        ServerImage: framework.NfsServerImage,
-        ServerPorts: []int{2049},
-        ServerArgs:  []string{"-G", "777", "/exports"},
-      }
-      nfsServerPod, nfsIP = createNfsServerPod(c, NFSconfig)
+      NFSconfig, nfsServerPod, nfsIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
     })

     AfterEach(func() {
@@ -20,6 +20,7 @@ go_library(
     "persistent_volumes-vsphere.go",
     "pv_reclaimpolicy.go",
     "pvc_label_selector.go",
+    "volume_io.go",
     "volume_provisioning.go",
     "volumes.go",
     "vsphere_utils.go",
@@ -53,7 +53,7 @@ const (
 var _ = SIGDescribe("EmptyDir wrapper volumes", func() {
   f := framework.NewDefaultFramework("emptydir-wrapper")

-  It("should not conflict [Volume]", func() {
+  It("should not conflict", func() {
     name := "emptydir-wrapper-test-" + string(uuid.NewUUID())
     volumeName := "secret-volume"
     volumeMountPath := "/etc/secret-volume"
@@ -152,7 +152,7 @@ var _ = SIGDescribe("EmptyDir wrapper volumes", func() {
   // but these cases are harder because tmpfs-based emptyDir
   // appears to be less prone to the race problem.

-  It("should not cause race condition when used for configmaps [Serial] [Slow] [Volume]", func() {
+  It("should not cause race condition when used for configmaps [Serial] [Slow]", func() {
     configMapNames := createConfigmapsForRace(f)
     defer deleteConfigMaps(f, configMapNames)
     volumes, volumeMounts := makeConfigMapVolumes(configMapNames)
@@ -161,7 +161,7 @@ var _ = SIGDescribe("EmptyDir wrapper volumes", func() {
     }
   })

-  It("should not cause race condition when used for git_repo [Serial] [Slow] [Volume]", func() {
+  It("should not cause race condition when used for git_repo [Serial] [Slow]", func() {
     gitURL, gitRepo, cleanup := createGitServer(f)
     defer cleanup()
     volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo)
@@ -60,6 +60,7 @@ var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
     volLabel labels.Set
     selector *metav1.LabelSelector
   )

   BeforeEach(func() {
     // To protect the NFS volume pod from the kubelet restart, we isolate it on its own node.
     framework.SkipUnlessNodeCountIsAtLeast(MinNodes)
@@ -69,14 +70,8 @@ var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
     ns = f.Namespace.Name
     volLabel = labels.Set{framework.VolumeSelectorKey: ns}
     selector = metav1.SetAsLabelSelector(volLabel)

     // Start the NFS server pod.
-    framework.Logf("[BeforeEach] Creating NFS Server Pod")
-    nfsServerPod = initNFSserverPod(c, ns)
-    framework.Logf("NFS server Pod %q created on Node %q", nfsServerPod.Name, nfsServerPod.Spec.NodeName)
-    framework.Logf("[BeforeEach] Configuring PersistentVolume")
-    nfsServerIP = nfsServerPod.Status.PodIP
-    Expect(nfsServerIP).NotTo(BeEmpty())
+    _, nfsServerPod, nfsServerIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
     nfsPVconfig = framework.PersistentVolumeConfig{
       NamePrefix: "nfs-",
       Labels:     volLabel,
@@ -108,25 +103,29 @@ var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
       Expect(clientNodeIP).NotTo(BeEmpty())
     }
   })

   AfterEach(func() {
     framework.DeletePodWithWait(f, c, nfsServerPod)
   })
-  Context("when kubelet restarts", func() {

+  Context("when kubelet restarts", func() {
     var (
       clientPod *v1.Pod
       pv        *v1.PersistentVolume
       pvc       *v1.PersistentVolumeClaim
     )

     BeforeEach(func() {
       framework.Logf("Initializing test spec")
       clientPod, pv, pvc = initTestCase(f, c, nfsPVconfig, pvcConfig, ns, clientNode.Name)
     })

     AfterEach(func() {
       framework.Logf("Tearing down test spec")
       tearDownTestCase(c, f, ns, clientPod, pvc, pv)
       pv, pvc, clientPod = nil, nil, nil
     })

     // Test table housing the It() title string and test spec. runTest is type testBody, defined at
     // the start of this file. To add tests, define a function mirroring the testBody signature and assign
     // to runTest.
@@ -140,6 +139,7 @@ var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
         runTest: testVolumeUnmountsFromDeletedPod,
       },
     }

     // Test loop executes each disruptiveTest iteratively.
     for _, test := range disruptiveTestTable {
       func(t disruptiveTest) {
@@ -85,18 +85,6 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
   return nil
 }

-// initNFSserverPod wraps volumes.go's startVolumeServer to return a running nfs host pod
-// commonly used by persistent volume testing
-func initNFSserverPod(c clientset.Interface, ns string) *v1.Pod {
-  return framework.StartVolumeServer(c, framework.VolumeTestConfig{
-    Namespace:   ns,
-    Prefix:      "nfs",
-    ServerImage: framework.NfsServerImage,
-    ServerPorts: []int{2049},
-    ServerArgs:  []string{"-G", "777", "/exports"},
-  })
-}

 var _ = SIGDescribe("PersistentVolumes", func() {

   // global vars for the Context()s and It()'s below
@@ -131,10 +119,7 @@ var _ = SIGDescribe("PersistentVolumes", func() {
   )

   BeforeEach(func() {
-    framework.Logf("[BeforeEach] Creating NFS Server Pod")
-    nfsServerPod = initNFSserverPod(c, ns)
-    serverIP = nfsServerPod.Status.PodIP
-    framework.Logf("[BeforeEach] Configuring PersistentVolume")
+    _, nfsServerPod, serverIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
     pvConfig = framework.PersistentVolumeConfig{
       NamePrefix: "nfs-",
       Labels:     volLabel,
@@ -218,6 +203,10 @@ var _ = SIGDescribe("PersistentVolumes", func() {
   // a) pre-binding, b) create pvcs before pvs, c) create pvcs and pods
   // in different namespaces.
   Context("with multiple PVs and PVCs all in same ns", func() {

+    // define the maximum number of PVs and PVCs supported by these tests
+    const maxNumPVs = 10
+    const maxNumPVCs = 10
     // scope the pv and pvc maps to be available in the AfterEach
     // note: these maps are created fresh in CreatePVsPVCs()
     var pvols framework.PVMap

new file: test/e2e/storage/volume_io.go (436 lines)
@@ -0,0 +1,436 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
 * This test checks that the plugin VolumeSources are working when pseudo-streaming
 * various write sizes to mounted files. Note that the plugin is defined inline in
 * the pod spec, not via a persistent volume and claim.
 *
 * These tests work only when privileged containers are allowed, exporting various
 * filesystems (NFS, GlusterFS, ...) usually needs some mounting or other privileged
 * magic in the server pod. Note that the server containers are for testing purposes
 * only and should not be used in production.
 */

package storage

import (
  "fmt"
  "math"
  "path"
  "strconv"
  "strings"

  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
  "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
)

const minFileSize = 1 * framework.MiB

// Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env.
func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
  volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume")

  return &v1.Pod{
    TypeMeta: metav1.TypeMeta{
      Kind:       "Pod",
      APIVersion: "v1",
    },
    ObjectMeta: metav1.ObjectMeta{
      Name: config.Prefix + "-io-client",
      Labels: map[string]string{
        "role": config.Prefix + "-io-client",
      },
    },
    Spec: v1.PodSpec{
      InitContainers: []v1.Container{
        {
          Name:  config.Prefix + "-io-init",
          Image: framework.BusyBoxImage,
          Command: []string{
            "/bin/sh",
            "-c",
            initCmd,
          },
          VolumeMounts: []v1.VolumeMount{
            {
              Name:      volName,
              MountPath: dir,
            },
          },
        },
      },
      Containers: []v1.Container{
        {
          Name:  config.Prefix + "-io-client",
          Image: framework.BusyBoxImage,
          Command: []string{
            "/bin/sh",
            "-c",
            "sleep 3600", // keep pod alive until explicitly deleted
          },
          VolumeMounts: []v1.VolumeMount{
            {
              Name:      volName,
              MountPath: dir,
            },
          },
        },
      },
      SecurityContext: podSecContext,
      Volumes: []v1.Volume{
        {
          Name:         volName,
          VolumeSource: volsrc,
        },
      },
      RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails
    },
  }
}

// Write `fsize` bytes to `fpath` in the pod, using dd and the `dd_input` file.
func writeToFile(pod *v1.Pod, fpath, dd_input string, fsize int64) error {
  By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
  loopCnt := fsize / minFileSize
  writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, dd_input, minFileSize, fpath)
  _, err := podExec(pod, writeCmd)

  return err
}

// Verify that the test file is the expected size and contains the expected content.
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) error {
  By("verifying file size")
  rtnstr, err := podExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
  if err != nil || rtnstr == "" {
    return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
  }
  size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
  if err != nil {
    return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
  }
  if int64(size) != expectSize {
    return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
  }

  By("verifying file content")
  // use `grep ... -f` rather than the expected content in a variable to reduce logging
  rtnstr, err = podExec(pod, fmt.Sprintf("grep -c -m1 -f %s %s", dd_input, fpath))
  if err != nil {
    return fmt.Errorf("unable to test file content via `grep %s`: %v", fpath, err)
  }
  foundCnt, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
  if err != nil {
    return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
  }
  if foundCnt == 0 {
    rtnstr, err = podExec(pod, fmt.Sprintf("cat %s", dd_input))
    if err != nil || len(rtnstr) == 0 {
      return fmt.Errorf("string not found in file %s and unable to read dd's input file %s: %v", fpath, dd_input, err)
    }
    return fmt.Errorf("string %q not found in file %s", rtnstr, fpath)
  }

  return nil
}

// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
func deleteFile(pod *v1.Pod, fpath string) {
  By(fmt.Sprintf("deleting test file %s...", fpath))
  _, err := podExec(pod, fmt.Sprintf("rm -f %s", fpath))
  if err != nil {
    // keep going, the test dir will be deleted when the volume is unmounted
    framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
  }
}

// Create the client pod and create files of the sizes passed in by the `fsizes` parameter. Delete the
// client pod and the new files when done.
// Note: the file name is appended to "/opt/<Prefix>/<namespace>", eg. "/opt/nfs/e2e-.../<file>".
// Note: nil can be passed for the podSecContext parm, in which case it is ignored.
// Note: `fsizes` values are enforced to each be at least `minFileSize` and a multiple of `minFileSize`
// bytes.
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
  dir := path.Join("/opt", config.Prefix, config.Namespace)
  dd_input := path.Join(dir, "dd_if")
  writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
  loopCnt := minFileSize / int64(len(writeBlk))
  // initContainer cmd to create and fill dd's input file. The initContainer is used to create
  // the `dd` input file which is currently 1MiB. Rather than store a 1MiB go value, a loop is
  // used to create a 1MiB file in the target directory.
  initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, dd_input)

  clientPod := makePodSpec(config, dir, initCmd, volsrc, podSecContext)

  By(fmt.Sprintf("starting %s", clientPod.Name))
  podsNamespacer := cs.CoreV1().Pods(config.Namespace)
  clientPod, err = podsNamespacer.Create(clientPod)
  if err != nil {
    return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
  }
  defer func() {
    // note the test dir will be removed when the kubelet unmounts it
    By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
    e := framework.DeletePodWithWait(f, cs, clientPod)
    if e != nil {
      framework.Logf("client pod failed to delete: %v", e)
      if err == nil { // delete err is returned if err is not set
        err = e
      }
    }
  }()

  err = framework.WaitForPodRunningInNamespace(cs, clientPod)
  if err != nil {
    return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
  }

  // create files of the passed-in file sizes and verify test file size and content
  for _, fsize := range fsizes {
    // file sizes must be a multiple of `minFileSize`
    if math.Mod(float64(fsize), float64(minFileSize)) != 0 {
      fsize = fsize/minFileSize + minFileSize
    }
    fpath := path.Join(dir, fmt.Sprintf("%s-%d", file, fsize))
    if err = writeToFile(clientPod, fpath, dd_input, fsize); err != nil {
      return err
    }
    if err = verifyFile(clientPod, fpath, fsize, dd_input); err != nil {
      return err
    }
    deleteFile(clientPod, fpath)
  }

  return
}

// These tests need privileged containers which are disabled by default.
// TODO: support all of the plugins tested in storage/volumes.go
var _ = SIGDescribe("Volume plugin streaming [Slow]", func() {
  f := framework.NewDefaultFramework("volume-io")
  var (
    config    framework.VolumeTestConfig
    cs        clientset.Interface
    ns        string
    serverIP  string
    serverPod *v1.Pod
    volSource v1.VolumeSource
  )

  BeforeEach(func() {
    cs = f.ClientSet
    ns = f.Namespace.Name
  })

  ////////////////////////////////////////////////////////////////////////
  // NFS
  ////////////////////////////////////////////////////////////////////////
  SIGDescribe("NFS", func() {
    testFile := "nfs_io_test"
    // client pod uses selinux
    podSec := v1.PodSecurityContext{
      SELinuxOptions: &v1.SELinuxOptions{
        Level: "s0:c0,c1",
      },
    }

    BeforeEach(func() {
      config, serverPod, serverIP = framework.NewNFSServer(cs, ns, []string{})
      volSource = v1.VolumeSource{
        NFS: &v1.NFSVolumeSource{
          Server:   serverIP,
          Path:     "/",
          ReadOnly: false,
        },
      }
    })

    AfterEach(func() {
      framework.Logf("AfterEach: deleting NFS server pod %q...", serverPod.Name)
      err := framework.DeletePodWithWait(f, cs, serverPod)
      Expect(err).NotTo(HaveOccurred(), "AfterEach: NFS server pod failed to delete")
    })

    It("should write files of various sizes, verify size, validate content", func() {
      fileSizes := []int64{1 * framework.MiB, 100 * framework.MiB, 1 * framework.GiB}
      err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
      Expect(err).NotTo(HaveOccurred())
    })
  })

  ////////////////////////////////////////////////////////////////////////
  // Gluster
  ////////////////////////////////////////////////////////////////////////
  SIGDescribe("GlusterFS", func() {
    var name string
    testFile := "gluster_io_test"

    BeforeEach(func() {
      framework.SkipUnlessNodeOSDistroIs("gci")
      // create gluster server and endpoints
      config, serverPod, serverIP = framework.NewGlusterfsServer(cs, ns)
      name = config.Prefix + "-server"
      volSource = v1.VolumeSource{
        Glusterfs: &v1.GlusterfsVolumeSource{
          EndpointsName: name,
          // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
          Path:     "test_vol",
          ReadOnly: false,
        },
      }
    })

    AfterEach(func() {
      framework.Logf("AfterEach: deleting Gluster endpoints %q...", name)
      epErr := cs.Core().Endpoints(ns).Delete(name, nil)
      framework.Logf("AfterEach: deleting Gluster server pod %q...", serverPod.Name)
      err := framework.DeletePodWithWait(f, cs, serverPod)
      if epErr != nil || err != nil {
        if epErr != nil {
          framework.Logf("AfterEach: Gluster delete endpoints failed: %v", err)
        }
        if err != nil {
          framework.Logf("AfterEach: Gluster server pod delete failed: %v", err)
        }
        framework.Failf("AfterEach: cleanup failed")
      }
    })

    It("should write files of various sizes, verify size, validate content", func() {
      fileSizes := []int64{1 * framework.MiB, 100 * framework.MiB}
      err := testVolumeIO(f, cs, config, volSource, nil /*no secContext*/, testFile, fileSizes)
      Expect(err).NotTo(HaveOccurred())
    })
  })

  ////////////////////////////////////////////////////////////////////////
  // iSCSI
  // The iscsiadm utility and iscsi target kernel modules must be installed on all nodes.
  ////////////////////////////////////////////////////////////////////////
  SIGDescribe("iSCSI [Feature:Volumes]", func() {
    testFile := "iscsi_io_test"

    BeforeEach(func() {
      config, serverPod, serverIP = framework.NewISCSIServer(cs, ns)
      volSource = v1.VolumeSource{
        ISCSI: &v1.ISCSIVolumeSource{
          TargetPortal: serverIP + ":3260",
          // from test/images/volumes-tester/iscsi/initiatorname.iscsi
          IQN:      "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
          Lun:      0,
          FSType:   "ext2",
          ReadOnly: false,
        },
      }
    })

    AfterEach(func() {
      framework.Logf("AfterEach: deleting iSCSI server pod %q...", serverPod.Name)
      err := framework.DeletePodWithWait(f, cs, serverPod)
      Expect(err).NotTo(HaveOccurred(), "AfterEach: iSCSI server pod failed to delete")
    })

    It("should write files of various sizes, verify size, validate content", func() {
      fileSizes := []int64{1 * framework.MiB, 100 * framework.MiB}
      fsGroup := int64(1234)
      podSec := v1.PodSecurityContext{
        FSGroup: &fsGroup,
      }
      err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
      Expect(err).NotTo(HaveOccurred())
    })
  })

  ////////////////////////////////////////////////////////////////////////
  // Ceph RBD
  ////////////////////////////////////////////////////////////////////////
  SIGDescribe("Ceph-RBD [Feature:Volumes]", func() {
    var (
      secret *v1.Secret
      name   string
    )
    testFile := "ceph-rbd_io_test"

    BeforeEach(func() {
      config, serverPod, serverIP = framework.NewRBDServer(cs, ns)
      name = config.Prefix + "-server"

      // create server secret
      secret = &v1.Secret{
        TypeMeta: metav1.TypeMeta{
          Kind:       "Secret",
          APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
          Name: name,
        },
        Data: map[string][]byte{
          // from test/images/volumes-tester/rbd/keyring
          "key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
        },
        Type: "kubernetes.io/rbd",
      }
      var err error
      secret, err = cs.Core().Secrets(ns).Create(secret)
      Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("BeforeEach: failed to create secret %q for Ceph-RBD: %v", name, err))

      volSource = v1.VolumeSource{
        RBD: &v1.RBDVolumeSource{
          CephMonitors: []string{serverIP},
          RBDPool:      "rbd",
          RBDImage:     "foo",
          RadosUser:    "admin",
          SecretRef: &v1.LocalObjectReference{
            Name: name,
          },
          FSType:   "ext2",
          ReadOnly: true,
        },
      }
    })

    AfterEach(func() {
      framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", name)
      secErr := cs.Core().Secrets(ns).Delete(name, &metav1.DeleteOptions{})
      framework.Logf("AfterEach: deleting Ceph-RDB server pod %q...", serverPod.Name)
      err := framework.DeletePodWithWait(f, cs, serverPod)
      if secErr != nil || err != nil {
        if secErr != nil {
          framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", err)
        }
        if err != nil {
          framework.Logf("AfterEach: Ceph-RDB server pod delete failed: %v", err)
        }
        framework.Failf("AfterEach: cleanup failed")
      }
    })

    It("should write files of various sizes, verify size, validate content", func() {
      fileSizes := []int64{1 * framework.MiB, 100 * framework.MiB}
      fsGroup := int64(1234)
      podSec := v1.PodSecurityContext{
        FSGroup: &fsGroup,
      }
      err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
      Expect(err).NotTo(HaveOccurred())
    })
  })
})
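For reference, the shell command that writeToFile builds above can be reproduced standalone. In this sketch the two paths are made-up examples and the local miB constant stands in for framework.MiB/minFileSize; it only shows how the dd loop is assembled, not the real test setup.

package main

import "fmt"

const miB int64 = 1024 * 1024 // stands in for framework.MiB / minFileSize above

func main() {
	// Assemble the same dd loop writeToFile uses, here for a 100MiB target file.
	fsize := 100 * miB
	ddInput := "/opt/nfs/example-ns/dd_if"            // hypothetical dd input file
	fpath := "/opt/nfs/example-ns/nfs_io_test-104857600" // hypothetical test file
	loopCnt := fsize / miB
	writeCmd := fmt.Sprintf(
		"i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done",
		loopCnt, ddInput, miB, fpath)
	fmt.Println(writeCmd)
}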
@@ -85,7 +85,7 @@ var _ = SIGDescribe("Volumes", func() {
   // If 'false', the test won't clear its volumes upon completion. Useful for debugging,
   // note that namespace deletion is handled by delete-namespace flag
   clean := true
-  // filled in BeforeEach
+  // filled inside BeforeEach
   var cs clientset.Interface
   var namespace *v1.Namespace
@@ -100,21 +100,12 @@ var _ = SIGDescribe("Volumes", func() {

   SIGDescribe("NFS", func() {
     It("should be mountable", func() {
-      config := framework.VolumeTestConfig{
-        Namespace:   namespace.Name,
-        Prefix:      "nfs",
-        ServerImage: framework.NfsServerImage,
-        ServerPorts: []int{2049},
-      }
+      config, _, serverIP := framework.NewNFSServer(cs, namespace.Name, []string{})

       defer func() {
         if clean {
           framework.VolumeTestCleanup(f, config)
         }
       }()
-      pod := framework.StartVolumeServer(cs, config)
-      serverIP := pod.Status.PodIP
-      framework.Logf("NFS server IP address: %v", serverIP)

       tests := []framework.VolumeTest{
         {
@@ -138,71 +129,26 @@ var _ = SIGDescribe("Volumes", func() {
   // Gluster
   ////////////////////////////////////////////////////////////////////////

-  SIGDescribe("GlusterFS [Feature:Volumes]", func() {
+  SIGDescribe("GlusterFS", func() {
     It("should be mountable", func() {
       //TODO (copejon) GFS is not supported on debian image.
       framework.SkipUnlessNodeOSDistroIs("gci")
-      config := framework.VolumeTestConfig{
-        Namespace:   namespace.Name,
-        Prefix:      "gluster",
-        ServerImage: framework.GlusterfsServerImage,
-        ServerPorts: []int{24007, 24008, 49152},
-      }
+      // create gluster server and endpoints
+      config, _, _ := framework.NewGlusterfsServer(cs, namespace.Name)
+      name := config.Prefix + "-server"

       defer func() {
         if clean {
           framework.VolumeTestCleanup(f, config)
+          err := cs.Core().Endpoints(namespace.Name).Delete(name, nil)
+          Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
         }
       }()
-      pod := framework.StartVolumeServer(cs, config)
-      serverIP := pod.Status.PodIP
-      framework.Logf("Gluster server IP address: %v", serverIP)

-      // create Endpoints for the server
-      endpoints := v1.Endpoints{
-        TypeMeta: metav1.TypeMeta{
-          Kind:       "Endpoints",
-          APIVersion: "v1",
-        },
-        ObjectMeta: metav1.ObjectMeta{
-          Name: config.Prefix + "-server",
-        },
-        Subsets: []v1.EndpointSubset{
-          {
-            Addresses: []v1.EndpointAddress{
-              {
-                IP: serverIP,
-              },
-            },
-            Ports: []v1.EndpointPort{
-              {
-                Name:     "gluster",
-                Port:     24007,
-                Protocol: v1.ProtocolTCP,
-              },
-            },
-          },
-        },
-      }

-      endClient := cs.Core().Endpoints(config.Namespace)

-      defer func() {
-        if clean {
-          endClient.Delete(config.Prefix+"-server", nil)
-        }
-      }()

-      if _, err := endClient.Create(&endpoints); err != nil {
-        framework.Failf("Failed to create endpoints for Gluster server: %v", err)
-      }

       tests := []framework.VolumeTest{
         {
           Volume: v1.VolumeSource{
             Glusterfs: &v1.GlusterfsVolumeSource{
-              EndpointsName: config.Prefix + "-server",
+              EndpointsName: name,
               // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
               Path:     "test_vol",
               ReadOnly: true,
@@ -228,25 +174,12 @@ var _ = SIGDescribe("Volumes", func() {

   SIGDescribe("iSCSI [Feature:Volumes]", func() {
     It("should be mountable", func() {
-      config := framework.VolumeTestConfig{
-        Namespace:   namespace.Name,
-        Prefix:      "iscsi",
-        ServerImage: framework.IscsiServerImage,
-        ServerPorts: []int{3260},
-        ServerVolumes: map[string]string{
-          // iSCSI container needs to insert modules from the host
-          "/lib/modules": "/lib/modules",
-        },
-      }
+      config, _, serverIP := framework.NewISCSIServer(cs, namespace.Name)

       defer func() {
         if clean {
           framework.VolumeTestCleanup(f, config)
         }
       }()
-      pod := framework.StartVolumeServer(cs, config)
-      serverIP := pod.Status.PodIP
-      framework.Logf("iSCSI server IP address: %v", serverIP)

       tests := []framework.VolumeTest{
         {
@@ -275,26 +208,12 @@ var _ = SIGDescribe("Volumes", func() {

   SIGDescribe("Ceph RBD [Feature:Volumes]", func() {
     It("should be mountable", func() {
-      config := framework.VolumeTestConfig{
-        Namespace:   namespace.Name,
-        Prefix:      "rbd",
-        ServerImage: framework.RbdServerImage,
-        ServerPorts: []int{6789},
-        ServerVolumes: map[string]string{
-          // iSCSI container needs to insert modules from the host
-          "/lib/modules": "/lib/modules",
-          "/sys":         "/sys",
-        },
-      }
+      config, _, serverIP := framework.NewRBDServer(cs, namespace.Name)

       defer func() {
         if clean {
           framework.VolumeTestCleanup(f, config)
         }
       }()
-      pod := framework.StartVolumeServer(cs, config)
-      serverIP := pod.Status.PodIP
-      framework.Logf("Ceph server IP address: %v", serverIP)

       // create secrets for the server
       secret := v1.Secret{
@@ -347,10 +266,10 @@ var _ = SIGDescribe("Volumes", func() {
       framework.TestVolumeClient(cs, config, &fsGroup, tests)
     })
   })

   ////////////////////////////////////////////////////////////////////////
   // Ceph
   ////////////////////////////////////////////////////////////////////////

   SIGDescribe("CephFS [Feature:Volumes]", func() {
     It("should be mountable", func() {
       config := framework.VolumeTestConfig{
@@ -365,9 +284,7 @@ var _ = SIGDescribe("Volumes", func() {
           framework.VolumeTestCleanup(f, config)
         }
       }()
-      pod := framework.StartVolumeServer(cs, config)
-      serverIP := pod.Status.PodIP
-      framework.Logf("Ceph server IP address: %v", serverIP)
+      _, serverIP := framework.CreateStorageServer(cs, config)
       By("sleeping a bit to give ceph server time to initialize")
       time.Sleep(20 * time.Second)

@@ -428,7 +345,6 @@ var _ = SIGDescribe("Volumes", func() {
   // (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
   // and that the usual OpenStack authentication env. variables are set
   // (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).

   SIGDescribe("Cinder [Feature:Volumes]", func() {
     It("should be mountable", func() {
       framework.SkipUnlessProviderIs("openstack")
@@ -504,7 +420,6 @@ var _ = SIGDescribe("Volumes", func() {
   ////////////////////////////////////////////////////////////////////////
   // GCE PD
   ////////////////////////////////////////////////////////////////////////

   SIGDescribe("PD", func() {
     // Flaky issue: #43977
     It("should be mountable [Flaky]", func() {
@@ -558,7 +473,6 @@ var _ = SIGDescribe("Volumes", func() {
   ////////////////////////////////////////////////////////////////////////
   // ConfigMap
   ////////////////////////////////////////////////////////////////////////

   SIGDescribe("ConfigMap", func() {
     It("should be mountable", func() {
       config := framework.VolumeTestConfig{
@@ -636,7 +550,6 @@ var _ = SIGDescribe("Volumes", func() {
   ////////////////////////////////////////////////////////////////////////
   // vSphere
   ////////////////////////////////////////////////////////////////////////

   SIGDescribe("vsphere [Feature:Volumes]", func() {
     It("should be mountable", func() {
       framework.SkipUnlessProviderIs("vsphere")
@@ -686,6 +599,7 @@ var _ = SIGDescribe("Volumes", func() {
       framework.TestVolumeClient(cs, config, &fsGroup, tests)
     })
   })

   ////////////////////////////////////////////////////////////////////////
   // Azure Disk
   ////////////////////////////////////////////////////////////////////////
@@ -72,15 +72,10 @@ var _ = framework.KubeDescribe("Summary API", func() {

// Setup expectations.
const (
-kb int64 = 1000
-mb int64 = 1000 * kb
-gb int64 = 1000 * mb
-tb int64 = 1000 * gb
-
maxStartAge = time.Hour * 24 * 365 // 1 year
maxStatsAge = time.Minute
)
-fsCapacityBounds := bounded(100*mb, 100*gb)
+fsCapacityBounds := bounded(100*framework.Mb, 100*framework.Gb)
// Expectations for system containers.
sysContExpectations := func() types.GomegaMatcher {
return gstruct.MatchAllFields(gstruct.Fields{
@@ -95,8 +90,8 @@ var _ = framework.KubeDescribe("Summary API", func() {
"Time": recent(maxStatsAge),
// We don't limit system container memory.
"AvailableBytes": BeNil(),
-"UsageBytes": bounded(1*mb, 10*gb),
-"WorkingSetBytes": bounded(1*mb, 10*gb),
+"UsageBytes": bounded(1*framework.Mb, 10*framework.Gb),
+"WorkingSetBytes": bounded(1*framework.Mb, 10*framework.Gb),
// today, this returns the value reported
// in /sys/fs/cgroup/memory.stat for rss
// this value should really return /sys/fs/cgroup/memory.stat total_rss
@@ -104,7 +99,7 @@ var _ = framework.KubeDescribe("Summary API", func() {
// for now, i am updating the bounding box to the value as coded, but the
// value reported needs to change.
// rss only makes sense if you are leaf cgroup
-"RSSBytes": bounded(0, 1*gb),
+"RSSBytes": bounded(0, 1*framework.Gb),
"PageFaults": bounded(1000, 1E9),
"MajorPageFaults": bounded(0, 100000),
}),
@@ -126,9 +121,9 @@ var _ = framework.KubeDescribe("Summary API", func() {
"Time": recent(maxStatsAge),
// We don't limit system container memory.
"AvailableBytes": BeNil(),
-"UsageBytes": bounded(100*kb, 10*gb),
-"WorkingSetBytes": bounded(100*kb, 10*gb),
-"RSSBytes": bounded(100*kb, 1*gb),
+"UsageBytes": bounded(100*framework.Kb, 10*framework.Gb),
+"WorkingSetBytes": bounded(100*framework.Kb, 10*framework.Gb),
+"RSSBytes": bounded(100*framework.Kb, 1*framework.Gb),
"PageFaults": bounded(1000, 1E9),
"MajorPageFaults": bounded(0, 100000),
})
@@ -149,10 +144,10 @@ var _ = framework.KubeDescribe("Summary API", func() {
}),
"Memory": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
-"AvailableBytes": bounded(10*kb, 10*mb),
-"UsageBytes": bounded(10*kb, 20*mb),
-"WorkingSetBytes": bounded(10*kb, 20*mb),
-"RSSBytes": bounded(1*kb, mb),
+"AvailableBytes": bounded(1*framework.Kb, 10*framework.Mb),
+"UsageBytes": bounded(10*framework.Kb, 20*framework.Mb),
+"WorkingSetBytes": bounded(10*framework.Kb, 20*framework.Mb),
+"RSSBytes": bounded(1*framework.Kb, framework.Mb),
"PageFaults": bounded(100, 1000000),
"MajorPageFaults": bounded(0, 10),
}),
@@ -160,7 +155,7 @@ var _ = framework.KubeDescribe("Summary API", func() {
"Time": recent(maxStatsAge),
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
-"UsedBytes": bounded(kb, 10*mb),
+"UsedBytes": bounded(framework.Kb, 10*framework.Mb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
@@ -169,7 +164,7 @@ var _ = framework.KubeDescribe("Summary API", func() {
"Time": recent(maxStatsAge),
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
-"UsedBytes": bounded(kb, 10*mb),
+"UsedBytes": bounded(framework.Kb, 10*framework.Mb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
@@ -179,9 +174,9 @@ var _ = framework.KubeDescribe("Summary API", func() {
}),
"Network": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
-"RxBytes": bounded(10, 10*mb),
+"RxBytes": bounded(10, 10*framework.Mb),
"RxErrors": bounded(0, 1000),
-"TxBytes": bounded(10, 10*mb),
+"TxBytes": bounded(10, 10*framework.Mb),
"TxErrors": bounded(0, 1000),
}),
"VolumeStats": gstruct.MatchAllElements(summaryObjectID, gstruct.Elements{
@@ -191,7 +186,7 @@ var _ = framework.KubeDescribe("Summary API", func() {
"Time": recent(maxStatsAge),
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
-"UsedBytes": bounded(kb, 1*mb),
+"UsedBytes": bounded(framework.Kb, 1*framework.Mb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
@@ -211,9 +206,9 @@ var _ = framework.KubeDescribe("Summary API", func() {
}),
"Memory": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
-"AvailableBytes": bounded(100*mb, 100*gb),
-"UsageBytes": bounded(10*mb, 10*gb),
-"WorkingSetBytes": bounded(10*mb, 10*gb),
+"AvailableBytes": bounded(100*framework.Mb, 100*framework.Gb),
+"UsageBytes": bounded(10*framework.Mb, 10*framework.Gb),
+"WorkingSetBytes": bounded(10*framework.Mb, 10*framework.Gb),
// today, this returns the value reported
// in /sys/fs/cgroup/memory.stat for rss
// this value should really return /sys/fs/cgroup/memory.stat total_rss
@@ -221,16 +216,16 @@ var _ = framework.KubeDescribe("Summary API", func() {
// for now, i am updating the bounding box to the value as coded, but the
// value reported needs to change.
// rss only makes sense if you are leaf cgroup
-"RSSBytes": bounded(0, 1*gb),
+"RSSBytes": bounded(0, 1*framework.Gb),
"PageFaults": bounded(1000, 1E9),
"MajorPageFaults": bounded(0, 100000),
}),
// TODO(#28407): Handle non-eth0 network interface names.
"Network": Or(BeNil(), ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
-"RxBytes": bounded(1*mb, 100*gb),
+"RxBytes": bounded(1*framework.Mb, 100*framework.Gb),
"RxErrors": bounded(0, 100000),
-"TxBytes": bounded(10*kb, 10*gb),
+"TxBytes": bounded(10*framework.Kb, 10*framework.Gb),
"TxErrors": bounded(0, 100000),
})),
"Fs": ptrMatchAllFields(gstruct.Fields{
@@ -238,7 +233,7 @@ var _ = framework.KubeDescribe("Summary API", func() {
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
// we assume we are not running tests on machines < 10tb of disk
-"UsedBytes": bounded(kb, 10*tb),
+"UsedBytes": bounded(framework.Kb, 10*framework.Tb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
@@ -249,7 +244,7 @@ var _ = framework.KubeDescribe("Summary API", func() {
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
// we assume we are not running tests on machines < 10tb of disk
-"UsedBytes": bounded(kb, 10*tb),
+"UsedBytes": bounded(framework.Kb, 10*framework.Tb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
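The hunks above replace the test's local kb/mb/gb/tb constants with shared framework values (framework.Kb and friends). As a rough sketch of what those identifiers stand for, the snippet below shows plausible definitions of the decimal byte-size constants and of the bounded helper used throughout the expectations; the exact definitions in the tree may differ.

package sketch

import (
	"github.com/onsi/gomega"
	"github.com/onsi/gomega/types"
)

// Plausible decimal byte-size constants, equivalent to the removed local
// kb/mb/gb/tb values (the shared ones live in the e2e framework package).
const (
	Kb int64 = 1000
	Mb int64 = 1000 * Kb
	Gb int64 = 1000 * Mb
	Tb int64 = 1000 * Gb
)

// bounded matches any numeric value in the closed interval [lower, upper],
// which is how ranges such as bounded(100*Mb, 100*Gb) read above.
func bounded(lower, upper interface{}) types.GomegaMatcher {
	return gomega.And(
		gomega.BeNumerically(">=", lower),
		gomega.BeNumerically("<=", upper))
}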