Merge pull request #78916 from mkimuram/rem-dot-import

Remove . import from e2e test
Authored by Kubernetes Prow Robot on 2019-06-15 16:34:08 -07:00, committed by GitHub
commit c5087f25c0
28 changed files with 424 additions and 424 deletions
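Every file in the diff follows the same mechanical pattern: the `.` (dot) imports of Ginkgo and Gomega become ordinary named imports, and each call site picks up an explicit ginkgo. or gomega. qualifier. A minimal sketch of the resulting style, assuming Ginkgo/Gomega v1 and an illustrative package, suite, and spec name (none of these appear in the actual PR):

package example_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestExample wires Gomega's failure handler into Ginkgo and runs the suite.
func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Example Suite")
}

// With the dot imports gone, every Ginkgo/Gomega identifier carries its
// package qualifier instead of being injected into the file's namespace.
var _ = ginkgo.Describe("qualified imports", func() {
	ginkgo.It("uses package-qualified helpers", func() {
		ginkgo.By("asserting with gomega.Expect instead of a bare Expect")
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})

Keeping the qualifiers makes each identifier's origin explicit and avoids the namespace pollution that Go style guidance warns about for dot imports.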

View File

@ -27,7 +27,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)
const (
@ -139,7 +139,7 @@ done`, testCmd)
// Verify Pod affinity colocated the Pods.
loader := getRunningLoaderPod(f)
Expect(pod.Spec.NodeName).To(Equal(loader.Spec.NodeName))
gomega.Expect(pod.Spec.NodeName).To(gomega.Equal(loader.Spec.NodeName))
return pod
}

View File

@ -37,7 +37,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework/replicaset"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
scaleclient "k8s.io/client-go/scale"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -188,7 +188,7 @@ func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
}
func (rc *ResourceConsumer) makeConsumeCPURequests() {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
rc.stopWaitGroup.Add(1)
defer rc.stopWaitGroup.Done()
sleepTime := time.Duration(0)
@ -209,7 +209,7 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() {
}
func (rc *ResourceConsumer) makeConsumeMemRequests() {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
rc.stopWaitGroup.Add(1)
defer rc.stopWaitGroup.Done()
sleepTime := time.Duration(0)
@ -230,7 +230,7 @@ func (rc *ResourceConsumer) makeConsumeMemRequests() {
}
func (rc *ResourceConsumer) makeConsumeCustomMetric() {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
rc.stopWaitGroup.Add(1)
defer rc.stopWaitGroup.Done()
sleepTime := time.Duration(0)
@ -406,7 +406,7 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, max
// Pause stops background goroutines responsible for consuming resources.
func (rc *ResourceConsumer) Pause() {
By(fmt.Sprintf("HPA pausing RC %s", rc.name))
ginkgo.By(fmt.Sprintf("HPA pausing RC %s", rc.name))
rc.stopCPU <- 0
rc.stopMem <- 0
rc.stopCustomMetric <- 0
@ -415,14 +415,14 @@ func (rc *ResourceConsumer) Pause() {
// Pause starts background goroutines responsible for consuming resources.
func (rc *ResourceConsumer) Resume() {
By(fmt.Sprintf("HPA resuming RC %s", rc.name))
ginkgo.By(fmt.Sprintf("HPA resuming RC %s", rc.name))
go rc.makeConsumeCPURequests()
go rc.makeConsumeMemRequests()
go rc.makeConsumeCustomMetric()
}
func (rc *ResourceConsumer) CleanUp() {
By(fmt.Sprintf("Removing consuming RC %s", rc.name))
ginkgo.By(fmt.Sprintf("Removing consuming RC %s", rc.name))
close(rc.stopCPU)
close(rc.stopMem)
close(rc.stopCustomMetric)
@ -437,7 +437,7 @@ func (rc *ResourceConsumer) CleanUp() {
}
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string) {
By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
_, err := c.CoreV1().Services(ns).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -484,14 +484,14 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
rsConfig := testutils.ReplicaSetConfig{
RCConfig: rcConfig,
}
By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace))
ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace))
framework.ExpectNoError(replicaset.RunReplicaSet(rsConfig))
break
default:
framework.Failf(invalidKind)
}
By(fmt.Sprintf("Running controller"))
ginkgo.By(fmt.Sprintf("Running controller"))
controllerName := name + "-ctrl"
_, err = c.CoreV1().Services(ns).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{

View File

@ -25,10 +25,10 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
var _ = Describe("[sig-node] ConfigMap", func() {
var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
/*
@ -39,7 +39,7 @@ var _ = Describe("[sig-node] ConfigMap", func() {
framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
@ -87,7 +87,7 @@ var _ = Describe("[sig-node] ConfigMap", func() {
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newEnvFromConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
@ -161,6 +161,6 @@ func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) {
},
}
By(fmt.Sprintf("Creating configMap that has name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap that has name %s", configMap.Name))
return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
}

View File

@ -20,8 +20,8 @@ import (
"fmt"
"path"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@ -30,7 +30,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = Describe("[sig-storage] ConfigMap", func() {
var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
/*
@ -53,7 +53,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
doConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode)
})
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
doConfigMapE2EWithoutMappings(f, 1000, 1001, &defaultMode)
})
@ -68,7 +68,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
doConfigMapE2EWithoutMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", func() {
doConfigMapE2EWithoutMappings(f, 1000, 1001, nil)
})
@ -102,7 +102,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
doConfigMapE2EWithMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", func() {
doConfigMapE2EWithMappings(f, 1000, 1001, nil)
})
@ -130,7 +130,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
}
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
@ -170,23 +170,23 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
By(fmt.Sprintf("Updating configmap %v", configMap.Name))
ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
By("waiting to observe update in volume")
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2"))
})
/*
@ -217,7 +217,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
}
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
@ -269,7 +269,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
pollLogs1 := func() (string, error) {
@ -279,10 +279,10 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName2)
}
By("Waiting for pod with text data")
Eventually(pollLogs1, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By("Waiting for pod with binary data")
Eventually(pollLogs2, podLogTimeout, framework.Poll).Should(ContainSubstring("de ca fe ba d0 fe ff"))
ginkgo.By("Waiting for pod with text data")
gomega.Eventually(pollLogs1, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By("Waiting for pod with binary data")
gomega.Eventually(pollLogs2, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("de ca fe ba d0 fe ff"))
})
/*
@ -335,13 +335,13 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
}
By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
@ -427,45 +427,45 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1"))
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
By("waiting to observe update in volume")
ginkgo.By("waiting to observe update in volume")
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1"))
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1"))
})
/*
@ -483,7 +483,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
@ -548,7 +548,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
//The pod is in pending during volume creation until the configMap objects are available
//or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timout exception unless it is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
volumeMountPath := "/etc/configmap-volumes"
podName := "pod-configmaps-" + string(uuid.NewUUID())
err := createNonOptionalConfigMapPod(f, volumeMountPath, podName)
@ -558,7 +558,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
//ConfigMap object defined for the pod, If a key is specified which is not present in the ConfigMap,
// the volume setup will error unless it is marked optional, during the pod creation.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
volumeMountPath := "/etc/configmap-volumes"
podName := "pod-configmaps-" + string(uuid.NewUUID())
err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath, podName)
@ -591,7 +591,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
@ -667,7 +667,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
@ -788,7 +788,7 @@ func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath, podN
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
ginkgo.By("Creating the pod")
pod = f.PodClient().Create(pod)
return f.WaitForPodRunning(pod.Name)
}
@ -803,7 +803,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
createVolumeName := "createcm-volume"
configMap := newConfigMap(f, createName)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
@ -850,7 +850,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
ginkgo.By("Creating the pod")
pod = f.PodClient().Create(pod)
return f.WaitForPodRunning(pod.Name)
}

View File

@ -33,8 +33,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -49,7 +49,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
var podClient *framework.PodClient
probe := webserverProbeBuilder{}
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
@ -67,7 +67,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
Expect(isReady).To(BeTrue(), "pod should be ready")
gomega.Expect(isReady).To(gomega.BeTrue(), "pod should be ready")
// We assume the pod became ready when the container became ready. This
// is true for a single container pod.
@ -83,7 +83,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
}
restartCount := getRestartCount(p)
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
gomega.Expect(restartCount == 0).To(gomega.BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
})
/*
@ -94,22 +94,22 @@ var _ = framework.KubeDescribe("Probing container", func() {
*/
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() {
p := podClient.Create(testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80))
Consistently(func() (bool, error) {
gomega.Consistently(func() (bool, error) {
p, err := podClient.Get(p.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return podutil.IsPodReady(p), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready")
}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod should not be ready")
p, err := podClient.Get(p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
Expect(isReady).NotTo(BeTrue(), "pod should be not ready")
gomega.Expect(isReady).NotTo(gomega.BeTrue(), "pod should be not ready")
restartCount := getRestartCount(p)
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
gomega.Expect(restartCount == 0).To(gomega.BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
})
/*
@ -164,7 +164,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Testname: Pod liveness probe, using tcp socket, no restart
Description: A Pod is created with liveness probe on tcp socket 8080. The http handler on port 8080 will return http errors after 10 seconds, but socket will remain open. Liveness probe MUST not fail to check health and the restart count should remain 0.
*/
It("should *not* be restarted with a tcp:8080 liveness probe [NodeConformance]", func() {
ginkgo.It("should *not* be restarted with a tcp:8080 liveness probe [NodeConformance]", func() {
livenessProbe := &v1.Probe{
Handler: tcpSocketHandler(8080),
InitialDelaySeconds: 15,
@ -210,7 +210,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Testname: Pod liveness probe, docker exec, restart
Description: A Pod is created with liveness probe with a Exec action on the Pod. If the liveness probe call does not return within the timeout specified, liveness probe MUST restart the Pod.
*/
It("should be restarted with a docker exec liveness probe with timeout ", func() {
ginkgo.It("should be restarted with a docker exec liveness probe with timeout ", func() {
// TODO: enable this test once the default exec handler supports timeout.
framework.Skipf("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
cmd := []string{"/bin/sh", "-c", "sleep 600"}
@ -229,7 +229,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Testname: Pod http liveness probe, redirected to a local address
Description: A Pod is created with liveness probe on http endpoint /redirect?loc=healthz. The http handler on the /redirect will redirect to the /healthz endpoint, which will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1.
*/
It("should be restarted with a local redirect http liveness probe", func() {
ginkgo.It("should be restarted with a local redirect http liveness probe", func() {
livenessProbe := &v1.Probe{
Handler: httpGetHandler("/redirect?loc="+url.QueryEscape("/healthz"), 8080),
InitialDelaySeconds: 15,
@ -244,7 +244,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Testname: Pod http liveness probe, redirected to a non-local address
Description: A Pod is created with liveness probe on http endpoint /redirect with a redirect to http://0.0.0.0/. The http handler on the /redirect should not follow the redirect, but instead treat it as a success and generate an event.
*/
It("should *not* be restarted with a non-local redirect http liveness probe", func() {
ginkgo.It("should *not* be restarted with a non-local redirect http liveness probe", func() {
livenessProbe := &v1.Probe{
Handler: httpGetHandler("/redirect?loc="+url.QueryEscape("http://0.0.0.0/"), 8080),
InitialDelaySeconds: 15,
@ -407,14 +407,14 @@ func (b webserverProbeBuilder) build() *v1.Probe {
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
podClient := f.PodClient()
ns := f.Namespace.Name
Expect(pod.Spec.Containers).NotTo(BeEmpty())
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
containerName := pod.Spec.Containers[0].Name
// At the end of the test, clean up by removing the pod.
defer func() {
By("deleting the pod")
ginkgo.By("deleting the pod")
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)
// Wait until the pod is not pending. (Here we need to check for something other than
@ -425,7 +425,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
e2elog.Logf("Started pod %s in namespace %s", pod.Name, ns)
// Check the pod's current state and verify that restartCount is present.
By("checking the pod's current state and verifying that restartCount is present")
ginkgo.By("checking the pod's current state and verifying that restartCount is present")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount

View File

@ -26,10 +26,10 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
var _ = Describe("[sig-node] Downward API", func() {
var _ = ginkgo.Describe("[sig-node] Downward API", func() {
f := framework.NewDefaultFramework("downward-api")
/*
@ -235,12 +235,12 @@ var _ = Describe("[sig-node] Downward API", func() {
var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive] [NodeFeature:EphemeralStorage]", func() {
f := framework.NewDefaultFramework("downward-api")
Context("Downward API tests for local ephemeral storage", func() {
BeforeEach(func() {
ginkgo.Context("Downward API tests for local ephemeral storage", func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessLocalEphemeralStorageEnabled()
})
It("should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars", func() {
ginkgo.It("should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
@ -268,7 +268,7 @@ var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive] [NodeFeature:
testDownwardAPIForEphemeralStorage(f, podName, env, expectations)
})
It("should provide default limits.ephemeral-storage from node allocatable", func() {
ginkgo.It("should provide default limits.ephemeral-storage from node allocatable", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{

View File

@ -28,16 +28,16 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = Describe("[sig-storage] Downward API volume", func() {
var _ = ginkgo.Describe("[sig-storage] Downward API volume", func() {
// How long to wait for a log pod to be displayed
const podLogTimeout = 3 * time.Minute
f := framework.NewDefaultFramework("downward-api")
var podClient *framework.PodClient
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
@ -87,7 +87,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
})
It("should provide podname as non-root with fsgroup [NodeFeature:FSGroup]", func() {
ginkgo.It("should provide podname as non-root with fsgroup [NodeFeature:FSGroup]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
@ -101,7 +101,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
})
It("should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]", func() {
ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
@ -129,23 +129,23 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
podName := "labelsupdate" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels")
containerName := "client-container"
By("Creating the pod")
ginkgo.By("Creating the pod")
podClient.CreateSync(pod)
Eventually(func() (string, error) {
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n"))
//modify labels
podClient.Update(podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3"
})
Eventually(func() (string, error) {
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n"))
})
/*
@ -160,26 +160,26 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
pod := downwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/podinfo/annotations")
containerName := "client-container"
By("Creating the pod")
ginkgo.By("Creating the pod")
podClient.CreateSync(pod)
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
Eventually(func() (string, error) {
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n"))
//modify annotations
podClient.Update(podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo"
})
Eventually(func() (string, error) {
gomega.Eventually(func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n"))
})
/*

View File

@ -21,7 +21,7 @@ import (
"path"
"time"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@ -38,31 +38,31 @@ var (
testImageNonRootUid = imageutils.GetE2EImage(imageutils.MounttestUser)
)
var _ = Describe("[sig-storage] EmptyDir volumes", func() {
var _ = ginkgo.Describe("[sig-storage] EmptyDir volumes", func() {
f := framework.NewDefaultFramework("emptydir")
Context("when FSGroup is specified [NodeFeature:FSGroup]", func() {
It("new files should be created with FSGroup ownership when container is root", func() {
ginkgo.Context("when FSGroup is specified [NodeFeature:FSGroup]", func() {
ginkgo.It("new files should be created with FSGroup ownership when container is root", func() {
doTestSetgidFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
It("new files should be created with FSGroup ownership when container is non-root", func() {
ginkgo.It("new files should be created with FSGroup ownership when container is non-root", func() {
doTestSetgidFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func() {
ginkgo.It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func() {
doTestSubPathFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("files with FSGroup ownership should support (root,0644,tmpfs)", func() {
ginkgo.It("files with FSGroup ownership should support (root,0644,tmpfs)", func() {
doTest0644FSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
It("volume on default medium should have the correct mode using FSGroup", func() {
ginkgo.It("volume on default medium should have the correct mode using FSGroup", func() {
doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumDefault)
})
It("volume on tmpfs should have the correct mode using FSGroup", func() {
ginkgo.It("volume on tmpfs should have the correct mode using FSGroup", func() {
doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
})
@ -272,18 +272,18 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
}
var err error
By("Creating Pod")
ginkgo.By("Creating Pod")
pod = f.PodClient().CreateSync(pod)
By("Waiting for the pod running")
ginkgo.By("Waiting for the pod running")
err = f.WaitForPodRunning(pod.Name)
framework.ExpectNoError(err, "failed to deploy pod %s", pod.Name)
By("Geting the pod")
ginkgo.By("Geting the pod")
pod, err = f.PodClient().Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %s", pod.Name)
By("Reading file content from the nginx-container")
ginkgo.By("Reading file content from the nginx-container")
resultString, err = framework.LookForStringInFile(f.Namespace.Name, pod.Name, busyBoxMainContainerName, busyBoxMainVolumeFilePath, message, 30*time.Second)
framework.ExpectNoError(err, "failed to match expected string %s with %s", message, resultString)
})

View File

@ -30,8 +30,8 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
type Action func() error
@ -64,7 +64,7 @@ func ObserveNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodeP
cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) {
n, ok := newObj.(*v1.Node)
Expect(ok).To(Equal(true))
gomega.Expect(ok).To(gomega.Equal(true))
if nodePredicate(n) {
observedMatchingNode = true
}
@ -120,8 +120,8 @@ func ObserveEventAfterAction(f *framework.Framework, eventPredicate func(*v1.Eve
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
e, ok := obj.(*v1.Event)
By(fmt.Sprintf("Considering event: \nType = [%s], Name = [%s], Reason = [%s], Message = [%s]", e.Type, e.Name, e.Reason, e.Message))
Expect(ok).To(Equal(true))
ginkgo.By(fmt.Sprintf("Considering event: \nType = [%s], Name = [%s], Reason = [%s], Message = [%s]", e.Type, e.Name, e.Reason, e.Message))
gomega.Expect(ok).To(gomega.Equal(true))
if ok && eventPredicate(e) {
observedMatchingEvent = true
}

View File

@ -29,8 +29,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
)
// These tests exercise the Kubernetes expansion syntax $(VAR).
@ -160,7 +159,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Description: Make sure a container's subpath can be set using an
expansion of environment variables.
*/
It("should allow substituting values in a volume subpath [sig-storage][NodeFeature:VolumeSubpathEnvExpansion]", func() {
ginkgo.It("should allow substituting values in a volume subpath [sig-storage][NodeFeature:VolumeSubpathEnvExpansion]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -220,7 +219,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Description: Make sure a container's subpath can not be set using an
expansion of environment variables when backticks are supplied.
*/
It("should fail substituting values in a volume subpath with backticks [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() {
ginkgo.It("should fail substituting values in a volume subpath with backticks [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
@ -269,7 +268,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Description: Make sure a container's subpath can not be set using an
expansion of environment variables when absolute path is supplied.
*/
It("should fail substituting values in a volume subpath with absolute path [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() {
ginkgo.It("should fail substituting values in a volume subpath with absolute path [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
@ -317,7 +316,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Testname: var-expansion-subpath-ready-from-failed-state
Description: Verify that a failing subpath expansion can be modified during the lifecycle of a container.
*/
It("should verify that a failing subpath expansion can be modified during the lifecycle of a container [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() {
ginkgo.It("should verify that a failing subpath expansion can be modified during the lifecycle of a container [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
containerName := "dapi-container"
@ -378,7 +377,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
},
}
By("creating the pod with failed condition")
ginkgo.By("creating the pod with failed condition")
var podClient *framework.PodClient
podClient = f.PodClient()
pod = podClient.Create(pod)
@ -386,16 +385,16 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectError(err, "while waiting for pod to be running")
By("updating the pod")
ginkgo.By("updating the pod")
podClient.Update(podName, func(pod *v1.Pod) {
pod.ObjectMeta.Annotations = map[string]string{"mysubpath": "mypath"}
})
By("waiting for pod running")
ginkgo.By("waiting for pod running")
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for pod to be running")
By("deleting the pod gracefully")
ginkgo.By("deleting the pod gracefully")
err = framework.DeletePodWithWait(f, f.ClientSet, pod)
framework.ExpectNoError(err, "failed to delete pod")
})
@ -408,7 +407,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
3. successful expansion of the subpathexpr isn't required for volume cleanup
*/
It("should succeed in writing subpaths in container [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() {
ginkgo.It("should succeed in writing subpaths in container [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
containerName := "dapi-container"
@ -470,39 +469,39 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
},
}
By("creating the pod")
ginkgo.By("creating the pod")
var podClient *framework.PodClient
podClient = f.PodClient()
pod = podClient.Create(pod)
By("waiting for pod running")
ginkgo.By("waiting for pod running")
err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for pod to be running")
By("creating a file in subpath")
ginkgo.By("creating a file in subpath")
cmd := "touch /volume_mount/mypath/foo/test.log"
_, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
if err != nil {
framework.Failf("expected to be able to write to subpath")
}
By("test for file in mounted path")
ginkgo.By("test for file in mounted path")
cmd = "test -f /subpath_mount/test.log"
_, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
if err != nil {
framework.Failf("expected to be able to verify file")
}
By("updating the annotation value")
ginkgo.By("updating the annotation value")
podClient.Update(podName, func(pod *v1.Pod) {
pod.ObjectMeta.Annotations["mysubpath"] = "mynewpath"
})
By("waiting for annotated pod running")
ginkgo.By("waiting for annotated pod running")
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectNoError(err, "while waiting for annotated pod to be running")
By("deleting the pod gracefully")
ginkgo.By("deleting the pod gracefully")
err = framework.DeletePodWithWait(f, f.ClientSet, pod)
framework.ExpectNoError(err, "failed to delete pod")
})
@ -517,7 +516,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
*/
It("should not change the subpath mount on a container restart if the environment variable changes [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() {
ginkgo.It("should not change the subpath mount on a container restart if the environment variable changes [sig-storage][NodeFeature:VolumeSubpathEnvExpansion][Slow]", func() {
suffix := string(uuid.NewUUID())
podName := fmt.Sprintf("var-expansion-%s", suffix)
@ -608,7 +607,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
}
// Start pod
By(fmt.Sprintf("Creating pod %s", pod.Name))
ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
var podClient *framework.PodClient
podClient = f.PodClient()
pod = podClient.Create(pod)
@ -616,17 +615,17 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
err := e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
framework.ExpectNoError(err, "while waiting for pod to be running")
By("updating the pod")
ginkgo.By("updating the pod")
podClient.Update(podName, func(pod *v1.Pod) {
pod.ObjectMeta.Annotations = map[string]string{"mysubpath": "newsubpath"}
})
By("waiting for pod and container restart")
ginkgo.By("waiting for pod and container restart")
waitForPodContainerRestart(f, pod, "/volume_mount/foo/test.log")
By("test for subpath mounted with old value")
ginkgo.By("test for subpath mounted with old value")
cmd := "test -f /volume_mount/foo/test.log"
_, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
if err != nil {
@ -658,14 +657,14 @@ func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
// Tests that the existing subpath mount is detected when a container restarts
func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount string) {
By("Failing liveness probe")
ginkgo.By("Failing liveness probe")
stdout, stderr, err := f.ExecShellInPodWithFullOutput(pod.Name, fmt.Sprintf("rm %v", volumeMount))
e2elog.Logf("Pod exec output: %v / %v", stdout, stderr)
Expect(err).ToNot(HaveOccurred(), "while failing liveness probe")
framework.ExpectNoError(err, "while failing liveness probe")
// Check that container has restarted
By("Waiting for container to restart")
ginkgo.By("Waiting for container to restart")
restarts := int32(0)
err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
@ -684,16 +683,16 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
}
return false, nil
})
Expect(err).ToNot(HaveOccurred(), "while waiting for container to restart")
framework.ExpectNoError(err, "while waiting for container to restart")
// Fix liveness probe
By("Rewriting the file")
ginkgo.By("Rewriting the file")
stdout, _, err = f.ExecShellInPodWithFullOutput(pod.Name, fmt.Sprintf("echo test-after > %v", volumeMount))
e2elog.Logf("Pod exec output: %v", stdout)
Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file")
framework.ExpectNoError(err, "while rewriting the probe file")
// Wait for container restarts to stabilize
By("Waiting for container to stop restarting")
ginkgo.By("Waiting for container to stop restarting")
stableCount := int(0)
stableThreshold := int(time.Minute / framework.Poll)
err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) {
@ -719,5 +718,5 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
}
return false, nil
})
Expect(err).ToNot(HaveOccurred(), "while waiting for container to stabilize")
framework.ExpectNoError(err, "while waiting for container to stabilize")
}

View File

@ -26,15 +26,15 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
//TODO : Consolidate this code with the code for emptyDir.
//This will require some smart.
var _ = Describe("[sig-storage] HostPath", func() {
var _ = ginkgo.Describe("[sig-storage] HostPath", func() {
f := framework.NewDefaultFramework("hostpath")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
// TODO permission denied cleanup failures
//cleanup before running the test.
_ = os.Remove("/tmp/test-file")
@ -62,7 +62,7 @@ var _ = Describe("[sig-storage] HostPath", func() {
})
// This test requires mounting a folder into a container with write privileges.
It("should support r/w [NodeConformance]", func() {
ginkgo.It("should support r/w [NodeConformance]", func() {
filePath := path.Join(volumePath, "test-file")
retryDuration := 180
source := &v1.HostPathVolumeSource{
@ -88,7 +88,7 @@ var _ = Describe("[sig-storage] HostPath", func() {
})
})
It("should support subPath [NodeConformance]", func() {
ginkgo.It("should support subPath [NodeConformance]", func() {
subPath := "sub-path"
fileName := "test-file"
retryDuration := 180

View File

@ -34,14 +34,14 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
f := framework.NewDefaultFramework("init-container")
var podClient *framework.PodClient
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
@ -54,7 +54,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
when Pod has restart policy as RestartNever.
*/
framework.ConformanceIt("should invoke init containers on a RestartNever pod", func() {
By("creating the pod")
ginkgo.By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
@ -96,19 +96,19 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(ctx, wr, conditions.PodCompleted)
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodSucceeded))
gomega.Expect(endPod.Status.Phase).To(gomega.Equal(v1.PodSucceeded))
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionTrue))
gomega.Expect(init).NotTo(gomega.BeNil())
gomega.Expect(init.Status).To(gomega.Equal(v1.ConditionTrue))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
gomega.Expect(len(endPod.Status.InitContainerStatuses)).To(gomega.Equal(2))
for _, status := range endPod.Status.InitContainerStatuses {
Expect(status.Ready).To(BeTrue())
Expect(status.State.Terminated).NotTo(BeNil())
Expect(status.State.Terminated.ExitCode).To(BeZero())
gomega.Expect(status.Ready).To(gomega.BeTrue())
gomega.Expect(status.State.Terminated).NotTo(gomega.BeNil())
gomega.Expect(status.State.Terminated.ExitCode).To(gomega.BeZero())
}
})
@ -121,7 +121,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
when Pod has restart policy as RestartAlways.
*/
framework.ConformanceIt("should invoke init containers on a RestartAlways pod", func() {
By("creating the pod")
ginkgo.By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
@ -167,19 +167,19 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(ctx, wr, conditions.PodRunning)
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodRunning))
gomega.Expect(endPod.Status.Phase).To(gomega.Equal(v1.PodRunning))
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionTrue))
gomega.Expect(init).NotTo(gomega.BeNil())
gomega.Expect(init.Status).To(gomega.Equal(v1.ConditionTrue))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
gomega.Expect(len(endPod.Status.InitContainerStatuses)).To(gomega.Equal(2))
for _, status := range endPod.Status.InitContainerStatuses {
Expect(status.Ready).To(BeTrue())
Expect(status.State.Terminated).NotTo(BeNil())
Expect(status.State.Terminated.ExitCode).To(BeZero())
gomega.Expect(status.Ready).To(gomega.BeTrue())
gomega.Expect(status.State.Terminated).NotTo(gomega.BeNil())
gomega.Expect(status.State.Terminated.ExitCode).To(gomega.BeZero())
}
})
@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
and pod has restart policy as RestartAlways.
*/
framework.ConformanceIt("should not start app containers if init containers fail on a RestartAlways pod", func() {
By("creating the pod")
ginkgo.By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -289,16 +289,16 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
}
},
)
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodPending))
gomega.Expect(endPod.Status.Phase).To(gomega.Equal(v1.PodPending))
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init1 init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
gomega.Expect(init).NotTo(gomega.BeNil())
gomega.Expect(init.Status).To(gomega.Equal(v1.ConditionFalse))
gomega.Expect(init.Reason).To(gomega.Equal("ContainersNotInitialized"))
gomega.Expect(init.Message).To(gomega.Equal("containers with incomplete status: [init1 init2]"))
gomega.Expect(len(endPod.Status.InitContainerStatuses)).To(gomega.Equal(2))
})
/*
@ -308,7 +308,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
when at least one InitContainer fails to start and Pod has restart policy as RestartNever.
*/
framework.ConformanceIt("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
By("creating the pod")
ginkgo.By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
@ -398,17 +398,17 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
},
conditions.PodCompleted,
)
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodFailed))
gomega.Expect(endPod.Status.Phase).To(gomega.Equal(v1.PodFailed))
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
Expect(endPod.Status.ContainerStatuses[0].State.Waiting).ToNot(BeNil())
gomega.Expect(init).NotTo(gomega.BeNil())
gomega.Expect(init.Status).To(gomega.Equal(v1.ConditionFalse))
gomega.Expect(init.Reason).To(gomega.Equal("ContainersNotInitialized"))
gomega.Expect(init.Message).To(gomega.Equal("containers with incomplete status: [init2]"))
gomega.Expect(len(endPod.Status.InitContainerStatuses)).To(gomega.Equal(2))
gomega.Expect(endPod.Status.ContainerStatuses[0].State.Waiting).ToNot(gomega.BeNil())
})
})

View File

@ -27,17 +27,17 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Kubelet", func() {
f := framework.NewDefaultFramework("kubelet-test")
var podClient *framework.PodClient
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
Context("when scheduling a busybox command in a pod", func() {
ginkgo.Context("when scheduling a busybox command in a pod", func() {
podName := "busybox-scheduling-" + string(uuid.NewUUID())
/*
@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
},
},
})
Eventually(func() string {
gomega.Eventually(func() string {
sinceTime := metav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream()
if err != nil {
@ -72,13 +72,13 @@ var _ = framework.KubeDescribe("Kubelet", func() {
buf := new(bytes.Buffer)
buf.ReadFrom(rc)
return buf.String()
}, time.Minute, time.Second*4).Should(Equal("Hello World\n"))
}, time.Minute, time.Second*4).Should(gomega.Equal("Hello World\n"))
})
})
Context("when scheduling a busybox command that always fails in a pod", func() {
ginkgo.Context("when scheduling a busybox command that always fails in a pod", func() {
var podName string
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podName = "bin-false" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
Description: Create a Pod with terminated state. Pod MUST have only one container. Container MUST be in terminated state and MUST have an terminated reason.
*/
framework.ConformanceIt("should have an terminated reason [NodeConformance]", func() {
Eventually(func() error {
gomega.Eventually(func() error {
podData, err := podClient.Get(podName, metav1.GetOptions{})
if err != nil {
return err
@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
return fmt.Errorf("expected non-zero exitCode and non-empty terminated state reason. Got exitCode: %+v and terminated state reason: %+v", contTerminatedState.ExitCode, contTerminatedState.Reason)
}
return nil
}, time.Minute, time.Second*4).Should(BeNil())
}, time.Minute, time.Second*4).Should(gomega.BeNil())
})
/*
@ -130,10 +130,10 @@ var _ = framework.KubeDescribe("Kubelet", func() {
*/
framework.ConformanceIt("should be possible to delete [NodeConformance]", func() {
err := podClient.Delete(podName, &metav1.DeleteOptions{})
Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
})
})
Context("when scheduling a busybox Pod with hostAliases", func() {
ginkgo.Context("when scheduling a busybox Pod with hostAliases", func() {
podName := "busybox-host-aliases" + string(uuid.NewUUID())
/*
@ -166,7 +166,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
},
})
Eventually(func() error {
gomega.Eventually(func() error {
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream()
defer rc.Close()
if err != nil {
@ -181,10 +181,10 @@ var _ = framework.KubeDescribe("Kubelet", func() {
}
return nil
}, time.Minute, time.Second*4).Should(BeNil())
}, time.Minute, time.Second*4).Should(gomega.BeNil())
})
})
Context("when scheduling a read only busybox container", func() {
ginkgo.Context("when scheduling a read only busybox container", func() {
podName := "busybox-readonly-fs" + string(uuid.NewUUID())
/*
@ -214,7 +214,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
},
},
})
Eventually(func() string {
gomega.Eventually(func() string {
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream()
if err != nil {
return ""
@ -223,7 +223,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
buf := new(bytes.Buffer)
buf.ReadFrom(rc)
return buf.String()
}, time.Minute, time.Second*4).Should(Equal("/bin/sh: can't create /file: Read-only file system\n"))
}, time.Minute, time.Second*4).Should(gomega.Equal("/bin/sh: can't create /file: Read-only file system\n"))
})
})
})
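
Every hunk in this file follows the same mechanical rewrite: the dot imports are removed and each Ginkgo/Gomega identifier is qualified with its package name. A minimal, self-contained sketch of the resulting style (the package, spec text, and expected output below are illustrative stand-ins, not part of this change):

package example

import (
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = ginkgo.Describe("qualified ginkgo and gomega", func() {
	ginkgo.BeforeEach(func() {
		ginkgo.By("setting up the spec")
	})

	ginkgo.It("polls until the expected output appears", func() {
		gomega.Eventually(func() string {
			// The real tests read container logs here; a constant keeps the sketch self-contained.
			return "Hello World\n"
		}, time.Minute, 4*time.Second).Should(gomega.Equal("Hello World\n"))
	})
})
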


@ -20,7 +20,7 @@ import (
"strings"
"time"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
@ -60,32 +60,32 @@ var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
This test is marked LinuxOnly since Windows cannot mount individual files in Containers.
*/
framework.ConformanceIt("should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance]", func() {
By("Setting up the test")
ginkgo.By("Setting up the test")
config.setup()
By("Running the test")
ginkgo.By("Running the test")
config.verifyEtcHosts()
})
})
func (config *KubeletManagedHostConfig) verifyEtcHosts() {
By("Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false")
ginkgo.By("Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false")
assertManagedStatus(config, etcHostsPodName, true, "busybox-1")
assertManagedStatus(config, etcHostsPodName, true, "busybox-2")
By("Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount")
ginkgo.By("Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount")
assertManagedStatus(config, etcHostsPodName, false, "busybox-3")
By("Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true")
ginkgo.By("Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true")
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-1")
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-2")
}
func (config *KubeletManagedHostConfig) setup() {
By("Creating hostNetwork=false pod")
ginkgo.By("Creating hostNetwork=false pod")
config.createPodWithoutHostNetwork()
By("Creating hostNetwork=true pod")
ginkgo.By("Creating hostNetwork=true pod")
config.createPodWithHostNetwork()
}


@ -27,8 +27,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
postStartWaitTimeout = 2 * time.Minute
preStopWaitTimeout = 30 * time.Second
)
Context("when create a pod with lifecycle hook", func() {
ginkgo.Context("when create a pod with lifecycle hook", func() {
var targetIP, targetURL string
podHandleHookRequest := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -60,9 +60,9 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
},
},
}
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
By("create the container to handle the HTTPGet hook request.")
ginkgo.By("create the container to handle the HTTPGet hook request.")
newPod := podClient.CreateSync(podHandleHookRequest)
targetIP = newPod.Status.PodIP
targetURL = targetIP
@ -71,23 +71,23 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
}
})
testPodWithHook := func(podWithHook *v1.Pod) {
By("create the pod with lifecycle hook")
ginkgo.By("create the pod with lifecycle hook")
podClient.CreateSync(podWithHook)
if podWithHook.Spec.Containers[0].Lifecycle.PostStart != nil {
By("check poststart hook")
Eventually(func() error {
ginkgo.By("check poststart hook")
gomega.Eventually(func() error {
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
`GET /echo\?msg=poststart`)
}, postStartWaitTimeout, podCheckInterval).Should(BeNil())
}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
}
By("delete the pod with lifecycle hook")
ginkgo.By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
By("check prestop hook")
Eventually(func() error {
ginkgo.By("check prestop hook")
gomega.Eventually(func() error {
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
`GET /echo\?msg=prestop`)
}, preStopWaitTimeout, podCheckInterval).Should(BeNil())
}, preStopWaitTimeout, podCheckInterval).Should(gomega.BeNil())
}
}
/*


@ -17,15 +17,15 @@ limitations under the License.
package common
import (
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = Describe("[sig-network] Networking", func() {
var _ = ginkgo.Describe("[sig-network] Networking", func() {
f := framework.NewDefaultFramework("pod-network-test")
Describe("Granular Checks: Pods", func() {
ginkgo.Describe("Granular Checks: Pods", func() {
// Try to hit all endpoints through a test container, retry 5 times,
// expect exactly one unique hostname. Each of these endpoints reports


@ -31,40 +31,40 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("NodeLease", func() {
var nodeName string
f := framework.NewDefaultFramework("node-lease-test")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).NotTo(BeZero())
gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
nodeName = nodes.Items[0].ObjectMeta.Name
})
Context("when the NodeLease feature is enabled", func() {
It("the kubelet should create and update a lease in the kube-node-lease namespace", func() {
ginkgo.Context("when the NodeLease feature is enabled", func() {
ginkgo.It("the kubelet should create and update a lease in the kube-node-lease namespace", func() {
leaseClient := f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
var (
err error
lease *coordv1beta1.Lease
)
By("check that lease for this Kubelet exists in the kube-node-lease namespace")
Eventually(func() error {
ginkgo.By("check that lease for this Kubelet exists in the kube-node-lease namespace")
gomega.Eventually(func() error {
lease, err = leaseClient.Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}, 5*time.Minute, 5*time.Second).Should(BeNil())
}, 5*time.Minute, 5*time.Second).Should(gomega.BeNil())
// check basic expectations for the lease
Expect(expectLease(lease, nodeName)).To(BeNil())
gomega.Expect(expectLease(lease, nodeName)).To(gomega.BeNil())
By("check that node lease is updated at least once within the lease duration")
Eventually(func() error {
ginkgo.By("check that node lease is updated at least once within the lease duration")
gomega.Eventually(func() error {
newLease, err := leaseClient.Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
@ -84,25 +84,25 @@ var _ = framework.KubeDescribe("NodeLease", func() {
time.Duration(*lease.Spec.LeaseDurationSeconds/4)*time.Second)
})
It("the kubelet should report node status infrequently", func() {
By("wait until node is ready")
ginkgo.It("the kubelet should report node status infrequently", func() {
ginkgo.By("wait until node is ready")
framework.WaitForNodeToBeReady(f.ClientSet, nodeName, 5*time.Minute)
By("wait until there is node lease")
ginkgo.By("wait until there is node lease")
var err error
var lease *coordv1beta1.Lease
Eventually(func() error {
gomega.Eventually(func() error {
lease, err = f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}, 5*time.Minute, 5*time.Second).Should(BeNil())
}, 5*time.Minute, 5*time.Second).Should(gomega.BeNil())
// check basic expectations for the lease
Expect(expectLease(lease, nodeName)).To(BeNil())
gomega.Expect(expectLease(lease, nodeName)).To(gomega.BeNil())
leaseDuration := time.Duration(*lease.Spec.LeaseDurationSeconds) * time.Second
By("verify NodeStatus report period is longer than lease duration")
ginkgo.By("verify NodeStatus report period is longer than lease duration")
// NodeStatus is reported from node to master when there is some change or
// enough time has passed. So here, keep checking the time diff
// between two NodeStatus reports until it is longer than the lease duration
@ -147,23 +147,23 @@ var _ = framework.KubeDescribe("NodeLease", func() {
framework.ExpectNoError(err, "error waiting for infrequent nodestatus update")
}
By("verify node is still in ready status even though node status report is infrequent")
ginkgo.By("verify node is still in ready status even though node status report is infrequent")
// This check on node status is only meaningful when this e2e test is
// running as a cluster e2e test, because the node e2e test does not create and
// run the controller manager, i.e., there is no node lifecycle controller.
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
_, readyCondition := testutils.GetNodeCondition(&node.Status, corev1.NodeReady)
Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue))
gomega.Expect(readyCondition.Status).To(gomega.Equal(corev1.ConditionTrue))
})
})
})
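
The report-period check above amounts to polling until the gap between two consecutive heartbeats exceeds the lease duration. A stripped-down sketch of that polling idea in the qualified style, where getHeartbeat is a hypothetical stand-in for a closure over the getHeartbeatTimeAndStatus helper below:

package example

import (
	"time"

	"github.com/onsi/gomega"
)

// waitForInfrequentHeartbeat polls getHeartbeat until the interval between two
// consecutive heartbeat timestamps is longer than leaseDuration.
func waitForInfrequentHeartbeat(getHeartbeat func() time.Time, leaseDuration time.Duration) {
	last := getHeartbeat()
	gomega.Eventually(func() time.Duration {
		current := getHeartbeat()
		if current.Equal(last) {
			return 0 // no new report yet
		}
		gap := current.Sub(last)
		last = current
		return gap
	}, 10*time.Minute, 5*time.Second).Should(gomega.BeNumerically(">", leaseDuration))
}
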
func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, corev1.NodeStatus) {
node, err := clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
_, readyCondition := testutils.GetNodeCondition(&node.Status, corev1.NodeReady)
Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue))
gomega.Expect(readyCondition.Status).To(gomega.Equal(corev1.ConditionTrue))
heartbeatTime := readyCondition.LastHeartbeatTime.Time
readyCondition.LastHeartbeatTime = metav1.Time{}
return heartbeatTime, node.Status


@ -25,10 +25,10 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
var _ = Describe("[sig-api-machinery] Secrets", func() {
var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() {
f := framework.NewDefaultFramework("secrets")
/*
@ -40,7 +40,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
name := "secret-test-" + string(uuid.NewUUID())
secret := secretForTest(f.Namespace.Name, name)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
@ -88,7 +88,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
name := "secret-test-" + string(uuid.NewUUID())
secret := newEnvFromSecret(f.Namespace.Name, name)
By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
@ -161,6 +161,6 @@ func createEmptyKeySecretForTest(f *framework.Framework) (*v1.Secret, error) {
"": []byte("value-1\n"),
},
}
By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
}


@ -27,11 +27,11 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = Describe("[sig-storage] Secrets", func() {
var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
f := framework.NewDefaultFramework("secrets")
/*
@ -131,7 +131,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
@ -242,13 +242,13 @@ var _ = Describe("[sig-storage] Secrets", func() {
},
}
By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
@ -328,51 +328,51 @@ var _ = Describe("[sig-storage] Secrets", func() {
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
pollCreateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/create/data-1"))
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/update/data-3"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
By("waiting to observe update in volume")
ginkgo.By("waiting to observe update in volume")
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1"))
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1"))
})
//The pod stays pending during volume creation until the secret objects are available
//or until mounting the secret volume times out. There is no secret object defined for the pod, so pod creation should fail with a timeout unless the secret is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPod(f, volumeMountPath, podName)
@ -382,7 +382,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
//A secret object is defined for the pod, but if a key is specified that is not present in the secret,
// the volume setup will fail during pod creation unless the key is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName)
@ -412,7 +412,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
secret = secretForTest(f.Namespace.Name, secretName)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
@ -481,7 +481,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
@ -583,7 +583,7 @@ func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
ginkgo.By("Creating the pod")
pod = f.PodClient().Create(pod)
return f.WaitForPodRunning(pod.Name)
}
@ -599,7 +599,7 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
secret := secretForTest(f.Namespace.Name, createName)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
@ -644,7 +644,7 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
ginkgo.By("Creating the pod")
pod = f.PodClient().Create(pod)
return f.WaitForPodRunning(pod.Name)
}
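
The create/update/delete assertions above all share one shape: a poll function that re-reads container logs, wrapped in gomega.Eventually with a ContainSubstring matcher. A small sketch of that shape, where getLogs and readPodLogs are hypothetical stand-ins for a closure over e2epod.GetPodLogs:

package example

import (
	"time"

	"github.com/onsi/gomega"
)

// eventuallyLogsContain re-reads the logs via getLogs until the expected
// substring shows up or the timeout expires; a non-nil error from getLogs
// simply prevents that attempt from matching.
func eventuallyLogsContain(getLogs func() (string, error), substring string, timeout, poll time.Duration) {
	gomega.Eventually(getLogs, timeout, poll).Should(gomega.ContainSubstring(substring))
}

// Example usage (readPodLogs is hypothetical):
//   eventuallyLogsContain(func() (string, error) { return readPodLogs() }, "value-1", 2*time.Minute, 2*time.Second)
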


@ -30,18 +30,18 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Security Context", func() {
f := framework.NewDefaultFramework("security-context-test")
var podClient *framework.PodClient
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
Context("When creating a container with runAsUser", func() {
ginkgo.Context("When creating a container with runAsUser", func() {
makeUserPod := func(podName, image string, command []string, userid int64) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -90,12 +90,12 @@ var _ = framework.KubeDescribe("Security Context", func() {
This e2e cannot be promoted to Conformance because a Conformant platform may not allow running containers with 'uid 0' or performing privileged operations.
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID.
*/
It("should run the container with uid 0 [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should run the container with uid 0 [LinuxOnly] [NodeConformance]", func() {
createAndWaitUserPod(0)
})
})
Context("When creating a container with runAsNonRoot", func() {
ginkgo.Context("When creating a container with runAsNonRoot", func() {
rootImage := imageutils.GetE2EImage(imageutils.BusyBox)
nonRootImage := imageutils.GetE2EImage(imageutils.NonRoot)
makeNonRootPod := func(podName, image string, userid *int64) *v1.Pod {
@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
}
It("should run with an explicit non-root user ID", func() {
ginkgo.It("should run with an explicit non-root user ID", func() {
name := "explicit-nonroot-uid"
pod := makeNonRootPod(name, rootImage, pointer.Int64Ptr(1234))
pod = podClient.Create(pod)
@ -128,17 +128,17 @@ var _ = framework.KubeDescribe("Security Context", func() {
podClient.WaitForSuccess(name, framework.PodStartTimeout)
framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1234"))
})
It("should not run with an explicit root user ID", func() {
ginkgo.It("should not run with an explicit root user ID", func() {
name := "explicit-root-uid"
pod := makeNonRootPod(name, nonRootImage, pointer.Int64Ptr(0))
pod = podClient.Create(pod)
ev, err := podClient.WaitForErrorEventOrSuccess(pod)
framework.ExpectNoError(err)
Expect(ev).NotTo(BeNil())
Expect(ev.Reason).To(Equal(events.FailedToCreateContainer))
gomega.Expect(ev).NotTo(gomega.BeNil())
gomega.Expect(ev.Reason).To(gomega.Equal(events.FailedToCreateContainer))
})
It("should run with an image specified user ID", func() {
ginkgo.It("should run with an image specified user ID", func() {
name := "implicit-nonroot-uid"
pod := makeNonRootPod(name, nonRootImage, nil)
pod = podClient.Create(pod)
@ -146,19 +146,19 @@ var _ = framework.KubeDescribe("Security Context", func() {
podClient.WaitForSuccess(name, framework.PodStartTimeout)
framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1234"))
})
It("should not run without a specified user ID", func() {
ginkgo.It("should not run without a specified user ID", func() {
name := "implicit-root-uid"
pod := makeNonRootPod(name, rootImage, nil)
pod = podClient.Create(pod)
ev, err := podClient.WaitForErrorEventOrSuccess(pod)
framework.ExpectNoError(err)
Expect(ev).NotTo(BeNil())
Expect(ev.Reason).To(Equal(events.FailedToCreateContainer))
gomega.Expect(ev).NotTo(gomega.BeNil())
gomega.Expect(ev.Reason).To(gomega.Equal(events.FailedToCreateContainer))
})
})
Context("When creating a pod with readOnlyRootFilesystem", func() {
ginkgo.Context("When creating a pod with readOnlyRootFilesystem", func() {
makeUserPod := func(podName, image string, command []string, readOnlyRootFilesystem bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -204,7 +204,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
At this moment we are not considering this test for Conformance due to use of SecurityContext.
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support creating containers with read-only access.
*/
It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]", func() {
createAndWaitUserPod(true)
})
@ -219,7 +219,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
})
})
Context("When creating a pod with privileged", func() {
ginkgo.Context("When creating a pod with privileged", func() {
makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -270,7 +270,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
})
})
Context("when creating containers with AllowPrivilegeEscalation", func() {
ginkgo.Context("when creating containers with AllowPrivilegeEscalation", func() {
makeAllowPrivilegeEscalationPod := func(podName string, allowPrivilegeEscalation *bool, uid int64) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -309,7 +309,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
This e2e cannot be promoted to Conformance as it is Container Runtime dependent and not all conformant platforms will require this behavior.
[LinuxOnly]: This test is marked LinuxOnly since Windows does not support running as UID / GID, or privilege escalation.
*/
It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func() {
podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
@ -341,7 +341,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
This e2e cannot be promoted to Conformance as it is Container Runtime dependent and the runtime may not allow it to run.
[LinuxOnly]: This test is marked LinuxOnly since Windows does not support running as UID / GID.
*/
It("should allow privilege escalation when true [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should allow privilege escalation when true [LinuxOnly] [NodeConformance]", func() {
podName := "alpine-nnp-true-" + string(uuid.NewUUID())
apeTrue := true
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {


@ -25,8 +25,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
@ -54,11 +54,11 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
return &pod
}
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
It("should support sysctls", func() {
ginkgo.It("should support sysctls", func() {
pod := testPod()
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
@ -70,10 +70,10 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
}
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
By("Creating a pod with the kernel.shm_rmid_forced sysctl")
ginkgo.By("Creating a pod with the kernel.shm_rmid_forced sysctl")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
ginkgo.By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
@ -82,26 +82,27 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
Expect(ev).To(BeNil())
gomega.Expect(ev).To(gomega.BeNil())
By("Waiting for pod completion")
ginkgo.By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
framework.ExpectNoError(err)
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
ginkgo.By("Checking that the pod succeeded")
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodSucceeded))
By("Getting logs from the pod")
ginkgo.By("Getting logs from the pod")
ginkgo.By("Getting logs from the pod")
log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
framework.ExpectNoError(err)
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
ginkgo.By("Checking that the sysctl is actually updated")
gomega.Expect(log).To(gomega.ContainSubstring("kernel.shm_rmid_forced = 1"))
})
It("should support unsafe sysctls which are actually whitelisted", func() {
ginkgo.It("should support unsafe sysctls which are actually whitelisted", func() {
pod := testPod()
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
@ -113,10 +114,10 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
}
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
By("Creating a pod with the kernel.shm_rmid_forced sysctl")
ginkgo.By("Creating a pod with the kernel.shm_rmid_forced sysctl")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
ginkgo.By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
@ -125,26 +126,26 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
Expect(ev).To(BeNil())
gomega.Expect(ev).To(gomega.BeNil())
By("Waiting for pod completion")
ginkgo.By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
framework.ExpectNoError(err)
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
ginkgo.By("Checking that the pod succeeded")
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodSucceeded))
By("Getting logs from the pod")
ginkgo.By("Getting logs from the pod")
log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
framework.ExpectNoError(err)
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
ginkgo.By("Checking that the sysctl is actually updated")
gomega.Expect(log).To(gomega.ContainSubstring("kernel.shm_rmid_forced = 1"))
})
It("should reject invalid sysctls", func() {
ginkgo.It("should reject invalid sysctls", func() {
pod := testPod()
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
@ -168,18 +169,18 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
},
}
By("Creating a pod with one valid and two invalid sysctls")
ginkgo.By("Creating a pod with one valid and two invalid sysctls")
client := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
_, err := client.Create(pod)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(ContainSubstring(`Invalid value: "foo-"`))
Expect(err.Error()).To(ContainSubstring(`Invalid value: "bar.."`))
Expect(err.Error()).NotTo(ContainSubstring(`safe-and-unsafe`))
Expect(err.Error()).NotTo(ContainSubstring("kernel.shmmax"))
gomega.Expect(err).NotTo(gomega.BeNil())
gomega.Expect(err.Error()).To(gomega.ContainSubstring(`Invalid value: "foo-"`))
gomega.Expect(err.Error()).To(gomega.ContainSubstring(`Invalid value: "bar.."`))
gomega.Expect(err.Error()).NotTo(gomega.ContainSubstring(`safe-and-unsafe`))
gomega.Expect(err.Error()).NotTo(gomega.ContainSubstring("kernel.shmmax"))
})
It("should not launch unsafe, but not explicitly enabled sysctls on the node", func() {
ginkgo.It("should not launch unsafe, but not explicitly enabled sysctls on the node", func() {
pod := testPod()
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
@ -190,10 +191,10 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
},
}
By("Creating a pod with a greylisted, but not whitelisted sysctl on the node")
ginkgo.By("Creating a pod with a greylisted, but not whitelisted sysctl on the node")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
ginkgo.By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
@ -203,8 +204,8 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
framework.Skipf("No sysctl support in Docker <1.12")
}
By("Checking that the pod was rejected")
Expect(ev).ToNot(BeNil())
Expect(ev.Reason).To(Equal("SysctlForbidden"))
ginkgo.By("Checking that the pod was rejected")
gomega.Expect(ev).ToNot(gomega.BeNil())
gomega.Expect(ev.Reason).To(gomega.Equal("SysctlForbidden"))
})
})


@ -31,7 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
type Suite string
@ -135,7 +135,7 @@ func NewSVCByName(c clientset.Interface, ns, name string) error {
// NewRCByName creates a replication controller whose selector matches the given name.
func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*v1.ReplicationController, error) {
By(fmt.Sprintf("creating replication controller %s", name))
ginkgo.By(fmt.Sprintf("creating replication controller %s", name))
return c.CoreV1().ReplicationControllers(ns).Create(framework.RcByNamePort(
name, replicas, framework.ServeHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
}


@ -48,12 +48,12 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/volume"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
// These tests need privileged containers, which are disabled by default. Run
// the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
var _ = Describe("[sig-storage] GCP Volumes", func() {
var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
f := framework.NewDefaultFramework("gcp-volume")
// note that namespace deletion is handled by delete-namespace flag
@ -61,7 +61,7 @@ var _ = Describe("[sig-storage] GCP Volumes", func() {
var namespace *v1.Namespace
var c clientset.Interface
BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
namespace = f.Namespace
@ -71,8 +71,8 @@ var _ = Describe("[sig-storage] GCP Volumes", func() {
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
Describe("NFSv4", func() {
It("should be mountable for NFSv4", func() {
ginkgo.Describe("NFSv4", func() {
ginkgo.It("should be mountable for NFSv4", func() {
config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{})
defer volume.TestCleanup(f, config)
@ -95,8 +95,8 @@ var _ = Describe("[sig-storage] GCP Volumes", func() {
})
})
Describe("NFSv3", func() {
It("should be mountable for NFSv3", func() {
ginkgo.Describe("NFSv3", func() {
ginkgo.It("should be mountable for NFSv3", func() {
config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{})
defer volume.TestCleanup(f, config)
@ -121,8 +121,8 @@ var _ = Describe("[sig-storage] GCP Volumes", func() {
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
Describe("GlusterFS", func() {
It("should be mountable", func() {
ginkgo.Describe("GlusterFS", func() {
ginkgo.It("should be mountable", func() {
// create gluster server and endpoints
config, _, _ := volume.NewGlusterfsServer(c, namespace.Name)
name := config.Prefix + "-server"


@ -20,7 +20,7 @@ import (
"testing"
"time"
. "github.com/onsi/gomega"
"github.com/onsi/gomega"
)
var currentTime time.Time
@ -40,7 +40,7 @@ func testUsageWithDefer(timer *TestPhaseTimer) {
}
func TestTimer(t *testing.T) {
RegisterTestingT(t)
gomega.RegisterTestingT(t)
timer := NewTestPhaseTimer()
setCurrentTimeSinceEpoch(1 * time.Second)
@ -48,7 +48,7 @@ func TestTimer(t *testing.T) {
setCurrentTimeSinceEpoch(3 * time.Second)
testUsageWithDefer(timer)
Expect(timer.PrintJSON()).To(MatchJSON(`{
gomega.Expect(timer.PrintJSON()).To(gomega.MatchJSON(`{
"version": "v1",
"dataItems": [
{
@ -64,14 +64,14 @@ func TestTimer(t *testing.T) {
}
]
}`))
Expect(timer.PrintHumanReadable()).To(Equal(`Phase 001-one: 5.5s so far
gomega.Expect(timer.PrintHumanReadable()).To(gomega.Equal(`Phase 001-one: 5.5s so far
Phase 033-two: 3.5s
`))
setCurrentTimeSinceEpoch(7*time.Second + 500*time.Millisecond)
phaseOne.End()
Expect(timer.PrintJSON()).To(MatchJSON(`{
gomega.Expect(timer.PrintJSON()).To(gomega.MatchJSON(`{
"version": "v1",
"dataItems": [
{
@ -86,7 +86,7 @@ Phase 033-two: 3.5s
}
]
}`))
Expect(timer.PrintHumanReadable()).To(Equal(`Phase 001-one: 6.5s
gomega.Expect(timer.PrintHumanReadable()).To(gomega.Equal(`Phase 001-one: 6.5s
Phase 033-two: 3.5s
`))
}
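
TestTimer above also shows how the qualified style works outside a Ginkgo suite: gomega.RegisterTestingT wires Gomega failures into *testing.T, after which gomega.Expect is used as usual. A minimal sketch (the test name and values are illustrative):

package example

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestQualifiedGomega(t *testing.T) {
	// Route Gomega assertion failures to this testing.T.
	gomega.RegisterTestingT(t)

	gomega.Expect([]string{"001-one", "033-two"}).To(gomega.HaveLen(2))
	gomega.Expect("Phase 001-one: 6.5s").To(gomega.ContainSubstring("6.5s"))
}
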


@ -17,8 +17,8 @@ limitations under the License.
package bootstrap
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
@ -37,20 +37,20 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
var c clientset.Interface
f := framework.NewDefaultFramework("bootstrap-signer")
AfterEach(func() {
ginkgo.AfterEach(func() {
if len(secretNeedClean) > 0 {
By("delete the bootstrap token secret")
ginkgo.By("delete the bootstrap token secret")
err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(secretNeedClean, &metav1.DeleteOptions{})
framework.ExpectNoError(err)
secretNeedClean = ""
}
})
BeforeEach(func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
})
It("should sign the new added bootstrap tokens", func() {
By("create a new bootstrap token secret")
ginkgo.It("should sign the new added bootstrap tokens", func() {
ginkgo.By("create a new bootstrap token secret")
tokenId, err := GenerateTokenId()
framework.ExpectNoError(err)
secret := newTokenSecret(tokenId, "tokenSecret")
@ -59,28 +59,28 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
framework.ExpectNoError(err)
By("wait for the bootstrap token secret be signed")
ginkgo.By("wait for the bootstrap token secret be signed")
err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId)
framework.ExpectNoError(err)
})
It("should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial][Disruptive]", func() {
By("create a new bootstrap token secret")
ginkgo.It("should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial][Disruptive]", func() {
ginkgo.By("create a new bootstrap token secret")
tokenId, err := GenerateTokenId()
framework.ExpectNoError(err)
secret := newTokenSecret(tokenId, "tokenSecret")
secret, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)
secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenId
By("wait for the bootstrap token secret be signed")
ginkgo.By("wait for the bootstrap token secret be signed")
err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId)
cfgMap, err := f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
framework.ExpectNoError(err)
signedToken, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenId]
Expect(ok).Should(Equal(true))
gomega.Expect(ok).Should(gomega.Equal(true))
By("update the cluster-info ConfigMap")
ginkgo.By("update the cluster-info ConfigMap")
originalData := cfgMap.Data[bootstrapapi.KubeConfigKey]
updatedKubeConfig, err := randBytes(20)
framework.ExpectNoError(err)
@ -88,7 +88,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
_, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(cfgMap)
framework.ExpectNoError(err)
defer func() {
By("update back the cluster-info ConfigMap")
ginkgo.By("update back the cluster-info ConfigMap")
cfgMap, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
framework.ExpectNoError(err)
cfgMap.Data[bootstrapapi.KubeConfigKey] = originalData
@ -96,28 +96,28 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
framework.ExpectNoError(err)
}()
By("wait for signed bootstrap token updated")
ginkgo.By("wait for signed bootstrap token updated")
err = WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c, tokenId, signedToken)
framework.ExpectNoError(err)
})
It("should delete the signed bootstrap tokens from clusterInfo ConfigMap when bootstrap token is deleted", func() {
By("create a new bootstrap token secret")
ginkgo.It("should delete the signed bootstrap tokens from clusterInfo ConfigMap when bootstrap token is deleted", func() {
ginkgo.By("create a new bootstrap token secret")
tokenId, err := GenerateTokenId()
framework.ExpectNoError(err)
secret := newTokenSecret(tokenId, "tokenSecret")
_, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)
framework.ExpectNoError(err)
By("wait for the bootstrap secret be signed")
ginkgo.By("wait for the bootstrap secret be signed")
err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId)
framework.ExpectNoError(err)
By("delete the bootstrap token secret")
ginkgo.By("delete the bootstrap token secret")
err = c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(bootstrapapi.BootstrapTokenSecretPrefix+tokenId, &metav1.DeleteOptions{})
framework.ExpectNoError(err)
By("wait for the bootstrap token removed from cluster-info ConfigMap")
ginkgo.By("wait for the bootstrap token removed from cluster-info ConfigMap")
err = WaitForSignedClusterInfoByBootstrapTokenToDisappear(c, tokenId)
framework.ExpectNoError(err)
})


@ -19,7 +19,7 @@ package bootstrap
import (
"time"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
@ -35,20 +35,20 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
f := framework.NewDefaultFramework("bootstrap-token-cleaner")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
})
AfterEach(func() {
ginkgo.AfterEach(func() {
if len(secretNeedClean) > 0 {
By("delete the bootstrap token secret")
ginkgo.By("delete the bootstrap token secret")
err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(secretNeedClean, &metav1.DeleteOptions{})
secretNeedClean = ""
framework.ExpectNoError(err)
}
})
It("should delete the token secret when the secret expired", func() {
By("create a new expired bootstrap token secret")
ginkgo.It("should delete the token secret when the secret expired", func() {
ginkgo.By("create a new expired bootstrap token secret")
tokenId, err := GenerateTokenId()
framework.ExpectNoError(err)
tokenSecret, err := GenerateTokenSecret()
@ -60,13 +60,13 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
framework.ExpectNoError(err)
By("wait for the bootstrap token secret be deleted")
ginkgo.By("wait for the bootstrap token secret be deleted")
err = WaitForBootstrapTokenSecretToDisappear(c, tokenId)
framework.ExpectNoError(err)
})
It("should not delete the token secret when the secret is not expired", func() {
By("create a new expired bootstrap token secret")
ginkgo.It("should not delete the token secret when the secret is not expired", func() {
ginkgo.By("create a new expired bootstrap token secret")
tokenId, err := GenerateTokenId()
framework.ExpectNoError(err)
tokenSecret, err := GenerateTokenSecret()
@ -77,7 +77,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenId
framework.ExpectNoError(err)
By("wait for the bootstrap token secret not be deleted")
ginkgo.By("wait for the bootstrap token secret not be deleted")
err = WaitForBootstrapTokenSecretNotDisappear(c, tokenId, 20*time.Second)
framework.ExpectNoError(err)
})


@ -49,8 +49,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@ -251,8 +251,8 @@ func computeAverage(sample []float64) float64 {
}
func computeQuantile(sample []float64, quantile float64) float64 {
Expect(sort.Float64sAreSorted(sample)).To(Equal(true))
Expect(quantile >= 0.0 && quantile <= 1.0).To(Equal(true))
gomega.Expect(sort.Float64sAreSorted(sample)).To(gomega.Equal(true))
gomega.Expect(quantile >= 0.0 && quantile <= 1.0).To(gomega.Equal(true))
index := int(quantile*float64(len(sample))) - 1
if index < 0 {
return math.NaN()
@ -296,7 +296,7 @@ func logPodStartupStatus(
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer, scheduleThroughputs *[]float64) time.Duration {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
// Create all secrets, configmaps and daemons.
dtc.runSecretConfigs(testPhaseDurations.StartPhase(250, "secrets creation"))
@ -317,7 +317,7 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
for i := range dtc.Configs {
config := dtc.Configs[i]
go func() {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
// Call wg.Done() in a defer to avoid blocking the whole test
// in case of an error from RunRC.
defer wg.Done()
@ -342,7 +342,7 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
printPodAllocationPhase := testPhaseDurations.StartPhase(400, "printing pod allocation")
defer printPodAllocationPhase.End()
// Print some data about Pod to Node allocation
By("Printing Pod to Node allocation data")
ginkgo.By("Printing Pod to Node allocation data")
podList, err := dtc.ClientSets[0].CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
framework.ExpectNoError(err)
pausePodAllocation := make(map[string]int)
@ -367,17 +367,17 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
}
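
One pattern worth noting in these hunks: every goroutine spawned from a spec defers ginkgo.GinkgoRecover first and wg.Done second, so a failing assertion in the goroutine is reported to Ginkgo while the WaitGroup is still released. A minimal sketch of that pattern (the work slice is illustrative):

package example

import (
	"sync"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// runInParallel runs each work item on its own goroutine and waits for all of them.
func runInParallel(work []func() error) {
	wg := sync.WaitGroup{}
	wg.Add(len(work))
	for i := range work {
		w := work[i]
		go func() {
			// Recover Ginkgo failure panics raised on this goroutine.
			defer ginkgo.GinkgoRecover()
			// Deferred after GinkgoRecover, so it runs first during unwinding and
			// the WaitGroup is released even if the assertion below fails.
			defer wg.Done()
			gomega.Expect(w()).To(gomega.BeNil())
		}()
	}
	wg.Wait()
}
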
func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
podCleanupPhase := testPhaseDurations.StartPhase(900, "latency pods deletion")
defer podCleanupPhase.End()
By("Deleting created Collections")
ginkgo.By("Deleting created Collections")
numberOfClients := len(dtc.ClientSets)
// We explicitly delete all pods so that the API calls needed for deletion are accounted for in the metrics.
for i := range dtc.Configs {
name := dtc.Configs[i].GetName()
namespace := dtc.Configs[i].GetNamespace()
kind := dtc.Configs[i].GetKind()
By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
ginkgo.By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name)
framework.ExpectNoError(err)
}
@ -414,7 +414,7 @@ var _ = SIGDescribe("Density", func() {
var etcdMetricsCollector *framework.EtcdMetricsCollector
// Gathers data prior to framework namespace teardown
AfterEach(func() {
ginkgo.AfterEach(func() {
// Stop apiserver CPU profile gatherer and gather memory allocations profile.
close(profileGathererStopCh)
wg := sync.WaitGroup{}
@ -426,7 +426,7 @@ var _ = SIGDescribe("Density", func() {
if saturationThreshold < MinSaturationThreshold {
saturationThreshold = MinSaturationThreshold
}
Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
gomega.Expect(e2eStartupTime).NotTo(gomega.BeNumerically(">", saturationThreshold))
saturationData := saturationTime{
TimeToSaturate: e2eStartupTime,
NumberOfNodes: nodeCount,
@ -472,9 +472,9 @@ var _ = SIGDescribe("Density", func() {
framework.PrintSummaries(summaries, testCaseBaseName)
// Fail if there were some high-latency requests.
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
gomega.Expect(highLatencyRequests).NotTo(gomega.BeNumerically(">", 0), "There should be no high-latency requests")
// Fail if more than the allowed threshold of measurements were missing in the latencyTest.
Expect(missingMeasurements <= MaxMissingPodStartupMeasurements).To(Equal(true))
gomega.Expect(missingMeasurements <= MaxMissingPodStartupMeasurements).To(gomega.Equal(true))
})
options := framework.Options{
@ -486,7 +486,7 @@ var _ = SIGDescribe("Density", func() {
f := framework.NewFramework(testCaseBaseName, options, nil)
f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
testPhaseDurations = timer.NewTestPhaseTimer()
@ -505,7 +505,7 @@ var _ = SIGDescribe("Density", func() {
_, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
gomega.Expect(nodeCount).NotTo(gomega.BeZero())
// Compute node capacity, leaving some slack for addon pods.
nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue() - 100
@ -611,7 +611,7 @@ var _ = SIGDescribe("Density", func() {
name += " with quotas"
}
itArg := testArg
It(name, func() {
ginkgo.It(name, func() {
nodePrepPhase := testPhaseDurations.StartPhase(100, "node preparation")
defer nodePrepPhase.End()
nodePreparer := framework.NewE2ETestNodePreparer(
@ -757,7 +757,7 @@ var _ = SIGDescribe("Density", func() {
// Pick latencyPodsIterations so that:
// latencyPodsIterations * nodeCount >= MinPodStartupMeasurements.
latencyPodsIterations := (MinPodStartupMeasurements + nodeCount - 1) / nodeCount
By(fmt.Sprintf("Scheduling additional %d Pods to measure startup latencies", latencyPodsIterations*nodeCount))
ginkgo.By(fmt.Sprintf("Scheduling additional %d Pods to measure startup latencies", latencyPodsIterations*nodeCount))
createTimes := make(map[string]metav1.Time, 0)
nodeNames := make(map[string]string, 0)
@ -769,7 +769,7 @@ var _ = SIGDescribe("Density", func() {
checkPod := func(p *v1.Pod) {
mutex.Lock()
defer mutex.Unlock()
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
if p.Status.Phase == v1.PodRunning {
if _, found := watchTimes[p.Name]; !found {
@ -819,7 +819,7 @@ var _ = SIGDescribe("Density", func() {
if !ok {
e2elog.Logf("Failed to cast observed object to *v1.Pod.")
}
Expect(ok).To(Equal(true))
gomega.Expect(ok).To(gomega.Equal(true))
go checkPod(p)
},
UpdateFunc: func(oldObj, newObj interface{}) {
@ -827,7 +827,7 @@ var _ = SIGDescribe("Density", func() {
if !ok {
e2elog.Logf("Failed to cast observed object to *v1.Pod.")
}
Expect(ok).To(Equal(true))
gomega.Expect(ok).To(gomega.Equal(true))
go checkPod(p)
},
},
@ -872,7 +872,7 @@ var _ = SIGDescribe("Density", func() {
latencyMeasurementPhase := testPhaseDurations.StartPhase(801+latencyPodsIteration*10, "pod startup latencies measurement")
defer latencyMeasurementPhase.End()
By("Waiting for all Pods begin observed by the watch...")
ginkgo.By("Waiting for all Pods begin observed by the watch...")
waitTimeout := 10 * time.Minute
for start := time.Now(); len(watchTimes) < watchTimesLen+nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) < waitTimeout {
@ -894,11 +894,11 @@ var _ = SIGDescribe("Density", func() {
}
latencyMeasurementPhase.End()
By("Removing additional replication controllers")
ginkgo.By("Removing additional replication controllers")
podDeletionPhase := testPhaseDurations.StartPhase(802+latencyPodsIteration*10, "latency pods deletion")
defer podDeletionPhase.End()
deleteRC := func(i int) {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
name := additionalPodsPrefix + "-" + strconv.Itoa(podIndexOffset+i+1)
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
}
@ -999,7 +999,7 @@ var _ = SIGDescribe("Density", func() {
})
func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
defer wg.Done()
labels := map[string]string{
"type": podType,


@ -54,8 +54,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework/timer"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@ -102,7 +102,7 @@ var _ = SIGDescribe("Load capacity", func() {
// Gathers metrics before teardown
// TODO add flag that allows to skip cleanup on failure
AfterEach(func() {
ginkgo.AfterEach(func() {
// Stop apiserver CPU profile gatherer and gather memory allocations profile.
close(profileGathererStopCh)
wg := sync.WaitGroup{}
@ -118,7 +118,7 @@ var _ = SIGDescribe("Load capacity", func() {
summaries = append(summaries, metrics)
summaries = append(summaries, testPhaseDurations)
framework.PrintSummaries(summaries, testCaseBaseName)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
gomega.Expect(highLatencyRequests).NotTo(gomega.BeNumerically(">", 0), "There should be no high-latency requests")
}
})
@ -141,14 +141,14 @@ var _ = SIGDescribe("Load capacity", func() {
f := framework.NewFramework(testCaseBaseName, options, nil)
f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
ginkgo.BeforeEach(func() {
testPhaseDurations = timer.NewTestPhaseTimer()
clientset = f.ClientSet
ns = f.Namespace.Name
nodes := framework.GetReadySchedulableNodesOrDie(clientset)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
gomega.Expect(nodeCount).NotTo(gomega.BeZero())
// Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all
@ -219,7 +219,7 @@ var _ = SIGDescribe("Load capacity", func() {
itArg := testArg
itArg.services = os.Getenv("CREATE_SERVICES") != "false"
It(name, func() {
ginkgo.It(name, func() {
// Create a number of namespaces.
namespaceCount := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode), testPhaseDurations.StartPhase(110, "namespace creation"))
@ -240,7 +240,7 @@ var _ = SIGDescribe("Load capacity", func() {
e2elog.Logf("Creating services")
services := generateServicesForConfigs(configs)
createService := func(i int) {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
framework.ExpectNoError(testutils.CreateServiceWithRetries(clientset, services[i].Namespace, services[i]))
}
workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), createService)
@ -250,7 +250,7 @@ var _ = SIGDescribe("Load capacity", func() {
defer serviceCleanupPhase.End()
e2elog.Logf("Starting to delete services...")
deleteService := func(i int) {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
framework.ExpectNoError(testutils.DeleteResourceWithRetries(clientset, api.Kind("Service"), services[i].Namespace, services[i].Name, nil))
}
workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), deleteService)
@ -318,7 +318,7 @@ var _ = SIGDescribe("Load capacity", func() {
creatingTime := time.Duration(totalPods/throughput) * time.Second
createAllResources(configs, creatingTime, testPhaseDurations.StartPhase(200, "load pods creation"))
By("============================================================================")
ginkgo.By("============================================================================")
// We would like to spread scaling replication controllers over time
// to make it possible to create/schedule & delete them in the meantime.
@ -329,7 +329,7 @@ var _ = SIGDescribe("Load capacity", func() {
scalingTime := time.Duration(totalPods/(4*throughput)) * time.Second
e2elog.Logf("Starting to scale %v objects first time...", itArg.kind)
scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(300, "scaling first time"))
By("============================================================================")
ginkgo.By("============================================================================")
// Cleanup all created replication controllers.
// Currently we assume <throughput> pods/second average deletion throughput.
@ -643,7 +643,7 @@ func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.D
}
func createResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, creatingTime time.Duration) {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
defer wg.Done()
sleepUpTo(creatingTime)
@ -663,7 +663,7 @@ func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Dur
// Scales the RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
// Scaling always happens based on the original size, not the current size.
func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scalingTime time.Duration) {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
defer wg.Done()
sleepUpTo(scalingTime)
@ -711,7 +711,7 @@ func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.D
}
func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deletingTime time.Duration) {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
defer wg.Done()
sleepUpTo(deletingTime)