Merge pull request #77715 from danielqsj/t2

Fix golint errors and make test error checking more readable in test/e2e/node
Committed by Kubernetes Prow Robot on 2019-05-13 19:47:09 -07:00 via GitHub
commit a60d2126ad
15 changed files with 209 additions and 209 deletions
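
Every file change below follows the same golint-driven pattern: drop the ginkgo/gomega dot-imports, import the packages under their own names, and qualify each call site (By becomes ginkgo.By, Expect becomes gomega.Expect, and so on). A minimal before/after sketch of the pattern, using an illustrative test body rather than code from this PR:

// Before (flagged by golint: "should not use dot imports"):
//
//     . "github.com/onsi/ginkgo"
//     . "github.com/onsi/gomega"
//
// After: named imports, every call site qualified.
package node

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = ginkgo.Describe("example", func() {
	ginkgo.It("reads without dot imports", func() {
		err := doSomething() // hypothetical helper, not part of the PR
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	})
})

func doSomething() error { return nil }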

View File

@@ -604,7 +604,6 @@ test/e2e/chaosmonkey
 test/e2e/common
 test/e2e/framework
 test/e2e/lifecycle/bootstrap
-test/e2e/node
 test/e2e/scalability
 test/e2e/scheduling
 test/e2e/storage/drivers

View File

@@ -21,29 +21,29 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
 )
 var _ = SIGDescribe("AppArmor", func() {
 f := framework.NewDefaultFramework("apparmor")
-Context("load AppArmor profiles", func() {
-BeforeEach(func() {
+ginkgo.Context("load AppArmor profiles", func() {
+ginkgo.BeforeEach(func() {
 common.SkipIfAppArmorNotSupported()
 common.LoadAppArmorProfiles(f)
 })
-AfterEach(func() {
-if !CurrentGinkgoTestDescription().Failed {
+ginkgo.AfterEach(func() {
+if !ginkgo.CurrentGinkgoTestDescription().Failed {
 return
 }
 framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
 })
-It("should enforce an AppArmor profile", func() {
+ginkgo.It("should enforce an AppArmor profile", func() {
 common.CreateAppArmorTestPod(f, false, true)
 })
-It("can disable an AppArmor profile, using unconfined", func() {
+ginkgo.It("can disable an AppArmor profile, using unconfined", func() {
 common.CreateAppArmorTestPod(f, true, true)
 })
 })

View File

@@ -24,22 +24,22 @@ import (
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
 )
 var _ = SIGDescribe("crictl", func() {
 f := framework.NewDefaultFramework("crictl")
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 // `crictl` is not available on all cloud providers.
 framework.SkipUnlessProviderIs("gce", "gke")
 // The test requires $HOME/.ssh/id_rsa key to be present.
 framework.SkipUnlessSSHKeyPresent()
 })
-It("should be able to run crictl on the node", func() {
+ginkgo.It("should be able to run crictl on the node", func() {
 // Get all nodes' external IPs.
-By("Getting all nodes' SSH-able IP addresses")
+ginkgo.By("Getting all nodes' SSH-able IP addresses")
 hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
 if err != nil {
 framework.Failf("Error getting node hostnames: %v", err)
@@ -55,7 +55,7 @@ var _ = SIGDescribe("crictl", func() {
 for _, testCase := range testCases {
 // Choose an arbitrary node to test.
 host := hosts[0]
-By(fmt.Sprintf("SSH'ing to node %q to run %q", host, testCase.cmd))
+ginkgo.By(fmt.Sprintf("SSH'ing to node %q to run %q", host, testCase.cmd))
 result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
 stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)

View File

@@ -29,8 +29,8 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )
 var _ = SIGDescribe("Events", func() {
@@ -45,7 +45,7 @@ var _ = SIGDescribe("Events", func() {
 podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
-By("creating the pod")
+ginkgo.By("creating the pod")
 name := "send-events-" + string(uuid.NewUUID())
 value := strconv.Itoa(time.Now().Nanosecond())
 pod := &v1.Pod{
@@ -67,9 +67,9 @@ var _ = SIGDescribe("Events", func() {
 },
 }
-By("submitting the pod to kubernetes")
+ginkgo.By("submitting the pod to kubernetes")
 defer func() {
-By("deleting the pod")
+ginkgo.By("deleting the pod")
 podClient.Delete(pod.Name, nil)
 }()
 if _, err := podClient.Create(pod); err != nil {
@@ -78,25 +78,25 @@ var _ = SIGDescribe("Events", func() {
 framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
-By("verifying the pod is in kubernetes")
+ginkgo.By("verifying the pod is in kubernetes")
 selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 options := metav1.ListOptions{LabelSelector: selector.String()}
 pods, err := podClient.List(options)
-Expect(len(pods.Items)).To(Equal(1))
-By("retrieving the pod")
-podWithUid, err := podClient.Get(pod.Name, metav1.GetOptions{})
+gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
+ginkgo.By("retrieving the pod")
+podWithUID, err := podClient.Get(pod.Name, metav1.GetOptions{})
 if err != nil {
 framework.Failf("Failed to get pod: %v", err)
 }
-e2elog.Logf("%+v\n", podWithUid)
+e2elog.Logf("%+v\n", podWithUID)
 var events *v1.EventList
 // Check for scheduler event about the pod.
-By("checking for scheduler event about the pod")
+ginkgo.By("checking for scheduler event about the pod")
 framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
 selector := fields.Set{
 "involvedObject.kind": "Pod",
-"involvedObject.uid": string(podWithUid.UID),
+"involvedObject.uid": string(podWithUID.UID),
 "involvedObject.namespace": f.Namespace.Name,
 "source": v1.DefaultSchedulerName,
 }.AsSelector().String()
@@ -112,10 +112,10 @@ var _ = SIGDescribe("Events", func() {
 return false, nil
 }))
 // Check for kubelet event about the pod.
-By("checking for kubelet event about the pod")
+ginkgo.By("checking for kubelet event about the pod")
 framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
 selector := fields.Set{
-"involvedObject.uid": string(podWithUid.UID),
+"involvedObject.uid": string(podWithUID.UID),
 "involvedObject.kind": "Pod",
 "involvedObject.namespace": f.Namespace.Name,
 "source": "kubelet",

View File

@@ -18,6 +18,7 @@ package node
 import "k8s.io/kubernetes/test/e2e/framework"
+// SIGDescribe annotates the test with the SIG label.
 func SIGDescribe(text string, body func()) bool {
 return framework.KubeDescribe("[sig-node] "+text, body)
 }

View File

@@ -35,8 +35,8 @@ import (
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )
 const (
@@ -119,7 +119,7 @@ func stopNfsServer(serverPod *v1.Pod) {
 // will execute the passed in shell cmd. Waits for the pod to start.
 // Note: the nfs plugin is defined inline, no PV or PVC.
 func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, cmd string) *v1.Pod {
-By("create pod using nfs volume")
+ginkgo.By("create pod using nfs volume")
 isPrivileged := true
 cmdLine := []string{"-c", cmd}
@@ -166,13 +166,13 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP,
 },
 }
 rtnPod, err := c.CoreV1().Pods(ns).Create(pod)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 err = f.WaitForPodReady(rtnPod.Name) // running & ready
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 rtnPod, err = c.CoreV1().Pods(ns).Get(rtnPod.Name, metav1.GetOptions{}) // return fresh pod
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 return rtnPod
 }
@@ -189,7 +189,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs")
 // use ip rather than hostname in GCE
 nodeIP, err := framework.GetHostExternalAddress(c, pod)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 condMsg := "deleted"
 if !expectClean {
@@ -216,7 +216,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
 err = wait.Poll(poll, timeout, func() (bool, error) {
 result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 e2essh.LogResult(result)
 ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
 if expectClean && ok { // keep trying
@@ -227,7 +227,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 }
 return true, nil // done, host is as expected
 })
-Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
 }
 if expectClean {
@@ -244,7 +244,7 @@ var _ = SIGDescribe("kubelet", func() {
 )
 f := framework.NewDefaultFramework("kubelet")
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 c = f.ClientSet
 ns = f.Namespace.Name
 })
@@ -265,14 +265,14 @@ var _ = SIGDescribe("kubelet", func() {
 {podsPerNode: 10, timeout: 1 * time.Minute},
 }
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 // Use node labels to restrict the pods to be assigned only to the
 // nodes we observe initially.
 nodeLabels = make(map[string]string)
 nodeLabels["kubelet_cleanup"] = "true"
 nodes := framework.GetReadySchedulableNodesOrDie(c)
 numNodes = len(nodes.Items)
-Expect(numNodes).NotTo(BeZero())
+gomega.Expect(numNodes).NotTo(gomega.BeZero())
 nodeNames = sets.NewString()
 // If there are a lot of nodes, we don't want to use all of them
 // (if there are 1000 nodes in the cluster, starting 10 pods/node
@@ -297,7 +297,7 @@ var _ = SIGDescribe("kubelet", func() {
 }
 })
-AfterEach(func() {
+ginkgo.AfterEach(func() {
 if resourceMonitor != nil {
 resourceMonitor.Stop()
 }
@@ -312,30 +312,30 @@ var _ = SIGDescribe("kubelet", func() {
 for _, itArg := range deleteTests {
 name := fmt.Sprintf(
 "kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
-It(name, func() {
+ginkgo.It(name, func() {
 totalPods := itArg.podsPerNode * numNodes
-By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
+ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
 rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
-Expect(framework.RunRC(testutils.RCConfig{
+gomega.Expect(framework.RunRC(testutils.RCConfig{
 Client: f.ClientSet,
 Name: rcName,
 Namespace: f.Namespace.Name,
 Image: imageutils.GetPauseImageName(),
 Replicas: totalPods,
 NodeSelector: nodeLabels,
-})).NotTo(HaveOccurred())
+})).NotTo(gomega.HaveOccurred())
 // Perform a sanity check so that we know all desired pods are
 // running on the nodes according to kubelet. The timeout is set to
 // only 30 seconds here because framework.RunRC already waited for all pods to
 // transition to the running status.
-Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
-time.Second*30)).NotTo(HaveOccurred())
+gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
+time.Second*30)).NotTo(gomega.HaveOccurred())
 if resourceMonitor != nil {
 resourceMonitor.LogLatest()
 }
-By("Deleting the RC")
+ginkgo.By("Deleting the RC")
 framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
 // Check that the pods really are gone by querying /runningpods on the
 // node. The /runningpods handler checks the container runtime (or its
@@ -345,8 +345,8 @@ var _ = SIGDescribe("kubelet", func() {
 // - a bug in graceful termination (if it is enabled)
 // - docker slow to delete pods (or resource problems causing slowness)
 start := time.Now()
-Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
-itArg.timeout)).NotTo(HaveOccurred())
+gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
+itArg.timeout)).NotTo(gomega.HaveOccurred())
 e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
 time.Since(start))
 if resourceMonitor != nil {
@@ -369,7 +369,7 @@ var _ = SIGDescribe("kubelet", func() {
 // If the nfs-server pod is deleted the client pod's mount can not be unmounted.
 // If the nfs-server pod is deleted and re-created, due to having a different ip
 // addr, the client pod's mount still cannot be unmounted.
-Context("Host cleanup after disrupting NFS volume [NFS]", func() {
+ginkgo.Context("Host cleanup after disrupting NFS volume [NFS]", func() {
 // issue #31272
 var (
 nfsServerPod *v1.Pod
@@ -389,38 +389,38 @@ var _ = SIGDescribe("kubelet", func() {
 },
 }
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
 _, nfsServerPod, nfsIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
 })
-AfterEach(func() {
+ginkgo.AfterEach(func() {
 err := framework.DeletePodWithWait(f, c, pod)
-Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
 err = framework.DeletePodWithWait(f, c, nfsServerPod)
-Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
 })
 // execute It blocks from above table of tests
 for _, t := range testTbl {
-It(t.itDescr, func() {
+ginkgo.It(t.itDescr, func() {
 pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd)
-By("Stop the NFS server")
+ginkgo.By("Stop the NFS server")
 stopNfsServer(nfsServerPod)
-By("Delete the pod mounted to the NFS volume -- expect failure")
+ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure")
 err := framework.DeletePodWithWait(f, c, pod)
-Expect(err).To(HaveOccurred())
+framework.ExpectError(err)
 // pod object is now stale, but is intentionally not nil
-By("Check if pod's host has been cleaned up -- expect not")
+ginkgo.By("Check if pod's host has been cleaned up -- expect not")
 checkPodCleanup(c, pod, false)
-By("Restart the nfs server")
+ginkgo.By("Restart the nfs server")
 restartNfsServer(nfsServerPod)
-By("Verify that the deleted client pod is now cleaned up")
+ginkgo.By("Verify that the deleted client pod is now cleaned up")
 checkPodCleanup(c, pod, true)
 })
 }
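
One substitution in the hunk above goes beyond mechanical qualification: Expect(err).To(HaveOccurred()) becomes framework.ExpectError(err), which is the "more readable error checking" the PR title refers to. A minimal sketch of what such a helper wraps, assuming the shape used here rather than quoting the framework's actual source:

// Assumed shape of an ExpectError-style helper: assert that err is non-nil
// and forward any extra context to the failure message.
package framework

import "github.com/onsi/gomega"

func ExpectError(err error, explain ...interface{}) {
	gomega.Expect(err).To(gomega.HaveOccurred(), explain...)
}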

View File

@@ -30,8 +30,8 @@ import (
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )
 const (
@@ -66,23 +66,23 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
 numNodes := nodeNames.Len()
 totalPods := podsPerNode * numNodes
-By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
+ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
 rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))
 // TODO: Use a more realistic workload
-Expect(framework.RunRC(testutils.RCConfig{
+gomega.Expect(framework.RunRC(testutils.RCConfig{
 Client: f.ClientSet,
 Name: rcName,
 Namespace: f.Namespace.Name,
 Image: imageutils.GetPauseImageName(),
 Replicas: totalPods,
-})).NotTo(HaveOccurred())
+})).NotTo(gomega.HaveOccurred())
 // Log once and flush the stats.
 rm.LogLatest()
 rm.Reset()
-By("Start monitoring resource usage")
+ginkgo.By("Start monitoring resource usage")
 // Periodically dump the cpu summary until the deadline is met.
 // Note that without calling framework.ResourceMonitor.Reset(), the stats
 // would occupy increasingly more memory. This should be fine
@@ -100,10 +100,10 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 logPodsOnNodes(f.ClientSet, nodeNames.List())
 }
-By("Reporting overall resource usage")
+ginkgo.By("Reporting overall resource usage")
 logPodsOnNodes(f.ClientSet, nodeNames.List())
 usageSummary, err := rm.GetLatest()
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 // TODO(random-liu): Remove the original log when we migrate to new perfdash
 e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary))
 // Log perf result
@@ -116,7 +116,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
 verifyCPULimits(expectedCPU, cpuSummary)
-By("Deleting the RC")
+ginkgo.By("Deleting the RC")
 framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
 }
@@ -197,7 +197,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 var om *framework.RuntimeOperationMonitor
 var rm *framework.ResourceMonitor
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeNames = sets.NewString()
 for _, node := range nodes.Items {
@@ -208,7 +208,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 rm.Start()
 })
-AfterEach(func() {
+ginkgo.AfterEach(func() {
 rm.Stop()
 result := om.GetLatestRuntimeOperationErrorRate()
 e2elog.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
@@ -260,7 +260,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 podsPerNode := itArg.podsPerNode
 name := fmt.Sprintf(
 "resource tracking for %d pods per node", podsPerNode)
-It(name, func() {
+ginkgo.It(name, func() {
 runResourceTrackingTest(f, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits)
 })
 }
@@ -271,7 +271,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 podsPerNode := density[i]
 name := fmt.Sprintf(
 "resource tracking for %d pods per node", podsPerNode)
-It(name, func() {
+ginkgo.It(name, func() {
 runResourceTrackingTest(f, podsPerNode, nodeNames, rm, nil, nil)
 })
 }

View File

@@ -27,8 +27,8 @@ import (
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 imageutils "k8s.io/kubernetes/test/utils/image"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )
 func preparePod(name string, node *v1.Node, propagation *v1.MountPropagationMode, hostDir string) *v1.Pod {
@@ -80,7 +80,7 @@ func preparePod(name string, node *v1.Node, propagation *v1.MountPropagationMode
 var _ = SIGDescribe("Mount propagation", func() {
 f := framework.NewDefaultFramework("mount-propagation")
-It("should propagate mounts to the host", func() {
+ginkgo.It("should propagate mounts to the host", func() {
 // This test runs two pods: master and slave with respective mount
 // propagation on common /var/lib/kubelet/XXXX directory. Both mount a
 // tmpfs to a subdirectory there. We check that these mounts are
@@ -88,13 +88,13 @@ var _ = SIGDescribe("Mount propagation", func() {
 // Pick a node where all pods will run.
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling")
+gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero(), "No available nodes for scheduling")
 node := &nodes.Items[0]
 // Fail the test if the namespace is not set. We expect that the
 // namespace is unique and we might delete user data if it's not.
 if len(f.Namespace.Name) == 0 {
-Expect(f.Namespace.Name).ToNot(Equal(""))
+gomega.Expect(f.Namespace.Name).ToNot(gomega.Equal(""))
 return
 }
@@ -172,10 +172,10 @@ var _ = SIGDescribe("Mount propagation", func() {
 shouldBeVisible := mounts.Has(mountName)
 if shouldBeVisible {
 framework.ExpectNoError(err, "%s: failed to run %q", msg, cmd)
-Expect(stdout).To(Equal(mountName), msg)
+gomega.Expect(stdout).To(gomega.Equal(mountName), msg)
 } else {
 // We *expect* cat to return error here
-Expect(err).To(HaveOccurred(), msg)
+gomega.Expect(err).To(gomega.HaveOccurred(), msg)
 }
 }
 }

View File

@@ -32,8 +32,8 @@ import (
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 testutils "k8s.io/kubernetes/test/utils"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )
 // This test checks if node-problem-detector (NPD) runs fine without error on
@@ -45,7 +45,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
 )
 f := framework.NewDefaultFramework("node-problem-detector")
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessSSHKeyPresent()
 framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
 framework.SkipUnlessProviderIs("gce", "gke")
@@ -53,10 +53,10 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
 framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute)
 })
-It("should run without error", func() {
-By("Getting all nodes and their SSH-able IP addresses")
+ginkgo.It("should run without error", func() {
+ginkgo.By("Getting all nodes and their SSH-able IP addresses")
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-Expect(len(nodes.Items)).NotTo(BeZero())
+gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
 hosts := []string{}
 for _, node := range nodes.Items {
 for _, addr := range node.Status.Addresses {
@@ -66,7 +66,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
 }
 }
 }
-Expect(len(hosts)).To(Equal(len(nodes.Items)))
+gomega.Expect(len(hosts)).To(gomega.Equal(len(nodes.Items)))
 isStandaloneMode := make(map[string]bool)
 cpuUsageStats := make(map[string][]float64)
@@ -84,52 +84,52 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
 result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
 isStandaloneMode[host] = (err == nil && result.Code == 0)
-By(fmt.Sprintf("Check node %q has node-problem-detector process", host))
+ginkgo.By(fmt.Sprintf("Check node %q has node-problem-detector process", host))
 // Using brackets "[n]" is a trick to prevent grep command itself from
 // showing up, because string text "[n]ode-problem-detector" does not
 // match regular expression "[n]ode-problem-detector".
 psCmd := "ps aux | grep [n]ode-problem-detector"
 result, err = e2essh.SSH(psCmd, host, framework.TestContext.Provider)
 framework.ExpectNoError(err)
-Expect(result.Code).To(BeZero())
-Expect(result.Stdout).To(ContainSubstring("node-problem-detector"))
+gomega.Expect(result.Code).To(gomega.BeZero())
+gomega.Expect(result.Stdout).To(gomega.ContainSubstring("node-problem-detector"))
-By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host))
+ginkgo.By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host))
 journalctlCmd := "sudo journalctl -u node-problem-detector"
 result, err = e2essh.SSH(journalctlCmd, host, framework.TestContext.Provider)
 framework.ExpectNoError(err)
-Expect(result.Code).To(BeZero())
-Expect(result.Stdout).NotTo(ContainSubstring("node-problem-detector.service: Failed"))
+gomega.Expect(result.Code).To(gomega.BeZero())
+gomega.Expect(result.Stdout).NotTo(gomega.ContainSubstring("node-problem-detector.service: Failed"))
 if isStandaloneMode[host] {
-cpuUsage, uptime := getCpuStat(f, host)
+cpuUsage, uptime := getCPUStat(f, host)
 cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage)
 uptimeStats[host] = append(uptimeStats[host], uptime)
 }
-By(fmt.Sprintf("Inject log to trigger AUFSUmountHung on node %q", host))
+ginkgo.By(fmt.Sprintf("Inject log to trigger AUFSUmountHung on node %q", host))
 log := "INFO: task umount.aufs:21568 blocked for more than 120 seconds."
 injectLogCmd := "sudo sh -c \"echo 'kernel: " + log + "' >> /dev/kmsg\""
 _, err = e2essh.SSH(injectLogCmd, host, framework.TestContext.Provider)
 framework.ExpectNoError(err)
-Expect(result.Code).To(BeZero())
+gomega.Expect(result.Code).To(gomega.BeZero())
 }
-By("Check node-problem-detector can post conditions and events to API server")
+ginkgo.By("Check node-problem-detector can post conditions and events to API server")
 for _, node := range nodes.Items {
-By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name))
-Eventually(func() error {
+ginkgo.By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name))
+gomega.Eventually(func() error {
 return verifyNodeCondition(f, "KernelDeadlock", v1.ConditionTrue, "AUFSUmountHung", node.Name)
-}, pollTimeout, pollInterval).Should(Succeed())
+}, pollTimeout, pollInterval).Should(gomega.Succeed())
-By(fmt.Sprintf("Check node-problem-detector posted AUFSUmountHung event on node %q", node.Name))
+ginkgo.By(fmt.Sprintf("Check node-problem-detector posted AUFSUmountHung event on node %q", node.Name))
 eventListOptions := metav1.ListOptions{FieldSelector: fields.Set{"involvedObject.kind": "Node"}.AsSelector().String()}
-Eventually(func() error {
+gomega.Eventually(func() error {
 return verifyEvents(f, eventListOptions, 1, "AUFSUmountHung", node.Name)
-}, pollTimeout, pollInterval).Should(Succeed())
+}, pollTimeout, pollInterval).Should(gomega.Succeed())
 }
-By("Gather node-problem-detector cpu and memory stats")
+ginkgo.By("Gather node-problem-detector cpu and memory stats")
 numIterations := 60
 for i := 1; i <= numIterations; i++ {
 for j, host := range hosts {
@@ -138,7 +138,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
 rssStats[host] = append(rssStats[host], rss)
 workingSetStats[host] = append(workingSetStats[host], workingSet)
 if i == numIterations {
-cpuUsage, uptime := getCpuStat(f, host)
+cpuUsage, uptime := getCPUStat(f, host)
 cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage)
 uptimeStats[host] = append(uptimeStats[host], uptime)
 }
@@ -217,22 +217,22 @@ func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64
 memCmd := "cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.usage_in_bytes && cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.stat"
 result, err := e2essh.SSH(memCmd, host, framework.TestContext.Provider)
 framework.ExpectNoError(err)
-Expect(result.Code).To(BeZero())
+gomega.Expect(result.Code).To(gomega.BeZero())
 lines := strings.Split(result.Stdout, "\n")
 memoryUsage, err := strconv.ParseFloat(lines[0], 64)
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())
 var totalInactiveFile float64
 for _, line := range lines[1:] {
 tokens := strings.Split(line, " ")
 if tokens[0] == "total_rss" {
 rss, err = strconv.ParseFloat(tokens[1], 64)
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())
 }
 if tokens[0] == "total_inactive_file" {
 totalInactiveFile, err = strconv.ParseFloat(tokens[1], 64)
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())
 }
 }
@@ -249,11 +249,11 @@ func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64
 return
 }
-func getCpuStat(f *framework.Framework, host string) (usage, uptime float64) {
+func getCPUStat(f *framework.Framework, host string) (usage, uptime float64) {
 cpuCmd := "cat /sys/fs/cgroup/cpu/system.slice/node-problem-detector.service/cpuacct.usage && cat /proc/uptime | awk '{print $1}'"
 result, err := e2essh.SSH(cpuCmd, host, framework.TestContext.Provider)
 framework.ExpectNoError(err)
-Expect(result.Code).To(BeZero())
+gomega.Expect(result.Code).To(gomega.BeZero())
 lines := strings.Split(result.Stdout, "\n")
 usage, err = strconv.ParseFloat(lines[0], 64)
@@ -279,6 +279,6 @@ func getNpdPodStat(f *framework.Framework, nodeName string) (cpuUsage, rss, work
 hasNpdPod = true
 break
 }
-Expect(hasNpdPod).To(BeTrue())
+gomega.Expect(hasNpdPod).To(gomega.BeTrue())
 return
 }
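
The getCpuStat to getCPUStat rename in this file comes from golint's initialism rule: recognized initialisms such as CPU, ID, or URL keep a consistent case inside identifiers. A tiny illustration with a hypothetical stub, not code from the PR:

// golint reports "func getCpuStat should be getCPUStat"; the initialism
// keeps its upper-case form throughout the identifier.
package node

func getCPUStat(host string) (usage float64) {
	_ = host // hypothetical stub for illustration only
	return 0
}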

View File

@@ -20,7 +20,7 @@ import (
 "fmt"
 "time"
-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,7 +36,7 @@ import (
 // Slow by design (7 min)
 var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]", func() {
 f := framework.NewDefaultFramework("pod-garbage-collector")
-It("should handle the creation of 1000 pods", func() {
+ginkgo.It("should handle the creation of 1000 pods", func() {
 var count int
 for count < 1000 {
 pod, err := createTerminatingPod(f)
@@ -62,7 +62,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]"
 timeout := 2 * time.Minute
 gcThreshold := 100
-By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold))
+ginkgo.By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold))
 pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) {
 pods, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
 if err != nil {

View File

@@ -34,8 +34,8 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -44,7 +44,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 framework.KubeDescribe("Delete Grace Period", func() {
 var podClient *framework.PodClient
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 podClient = f.PodClient()
 })
@@ -54,7 +54,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 Description: Create a pod, make sure it is running. Create a 'kubectl local proxy', capture the port the proxy is listening. Using the http client send a delete with gracePeriodSeconds=30. Pod SHOULD get deleted within 30 seconds.
 */
 framework.ConformanceIt("should be submitted and removed", func() {
-By("creating the pod")
+ginkgo.By("creating the pod")
 name := "pod-submit-remove-" + string(uuid.NewUUID())
 value := strconv.Itoa(time.Now().Nanosecond())
 pod := &v1.Pod{
@@ -75,51 +75,51 @@ var _ = SIGDescribe("Pods Extended", func() {
 },
 }
-By("setting up selector")
+ginkgo.By("setting up selector")
 selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 options := metav1.ListOptions{LabelSelector: selector.String()}
 pods, err := podClient.List(options)
-Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
-Expect(len(pods.Items)).To(Equal(0))
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
+gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
 options = metav1.ListOptions{
 LabelSelector: selector.String(),
 ResourceVersion: pods.ListMeta.ResourceVersion,
 }
-By("submitting the pod to kubernetes")
+ginkgo.By("submitting the pod to kubernetes")
 podClient.Create(pod)
-By("verifying the pod is in kubernetes")
+ginkgo.By("verifying the pod is in kubernetes")
 selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 options = metav1.ListOptions{LabelSelector: selector.String()}
 pods, err = podClient.List(options)
-Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
-Expect(len(pods.Items)).To(Equal(1))
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
+gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
 // We need to wait for the pod to be running, otherwise the deletion
 // may be carried out immediately rather than gracefully.
 framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
 // save the running pod
 pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
 // start local proxy, so we can send graceful deletion over query string, rather than body parameter
 cmd := framework.KubectlCmd("proxy", "-p", "0")
 stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
-Expect(err).NotTo(HaveOccurred(), "failed to start up proxy")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to start up proxy")
 defer stdout.Close()
 defer stderr.Close()
 defer framework.TryKill(cmd)
 buf := make([]byte, 128)
 var n int
 n, err = stdout.Read(buf)
-Expect(err).NotTo(HaveOccurred(), "failed to read from kubectl proxy stdout")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to read from kubectl proxy stdout")
 output := string(buf[:n])
 proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
 match := proxyRegexp.FindStringSubmatch(output)
-Expect(len(match)).To(Equal(2))
+gomega.Expect(len(match)).To(gomega.Equal(2))
 port, err := strconv.Atoi(match[1])
-Expect(err).NotTo(HaveOccurred(), "failed to convert port into string")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to convert port into string")
 endpoint := fmt.Sprintf("http://localhost:%d/api/v1/namespaces/%s/pods/%s?gracePeriodSeconds=30", port, pod.Namespace, pod.Name)
 tr := &http.Transport{
@@ -127,21 +127,21 @@ var _ = SIGDescribe("Pods Extended", func() {
 }
 client := &http.Client{Transport: tr}
 req, err := http.NewRequest("DELETE", endpoint, nil)
-Expect(err).NotTo(HaveOccurred(), "failed to create http request")
-By("deleting the pod gracefully")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create http request")
+ginkgo.By("deleting the pod gracefully")
 rsp, err := client.Do(req)
-Expect(err).NotTo(HaveOccurred(), "failed to use http client to send delete")
-Expect(rsp.StatusCode).Should(Equal(http.StatusOK), "failed to delete gracefully by client request")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to use http client to send delete")
+gomega.Expect(rsp.StatusCode).Should(gomega.Equal(http.StatusOK), "failed to delete gracefully by client request")
 var lastPod v1.Pod
 err = json.NewDecoder(rsp.Body).Decode(&lastPod)
-Expect(err).NotTo(HaveOccurred(), "failed to decode graceful termination proxy response")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to decode graceful termination proxy response")
 defer rsp.Body.Close()
-By("verifying the kubelet observed the termination notice")
-Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
+ginkgo.By("verifying the kubelet observed the termination notice")
+gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
 podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
 if err != nil {
 e2elog.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
@@ -159,23 +159,23 @@ var _ = SIGDescribe("Pods Extended", func() {
 }
 e2elog.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
 return true, nil
-})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
-Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
-Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
+})).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")
+gomega.Expect(lastPod.DeletionTimestamp).ToNot(gomega.BeNil())
+gomega.Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(gomega.BeZero())
 selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 options = metav1.ListOptions{LabelSelector: selector.String()}
 pods, err = podClient.List(options)
-Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
-Expect(len(pods.Items)).To(Equal(0))
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
 })
 })
 framework.KubeDescribe("Pods Set QOS Class", func() {
 var podClient *framework.PodClient
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 podClient = f.PodClient()
 })
 /*
@@ -184,7 +184,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 Description: Create a Pod with CPU and Memory request and limits. Pos status MUST have QOSClass set to PodQOSGuaranteed.
 */
 framework.ConformanceIt("should be submitted and removed ", func() {
-By("creating the pod")
+ginkgo.By("creating the pod")
 name := "pod-qos-class-" + string(uuid.NewUUID())
 pod := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
@@ -213,13 +213,13 @@ var _ = SIGDescribe("Pods Extended", func() {
 },
 }
-By("submitting the pod to kubernetes")
+ginkgo.By("submitting the pod to kubernetes")
 podClient.Create(pod)
-By("verifying QOS class is set on the pod")
+ginkgo.By("verifying QOS class is set on the pod")
 pod, err := podClient.Get(name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
-Expect(pod.Status.QOSClass == v1.PodQOSGuaranteed)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
+gomega.Expect(pod.Status.QOSClass == v1.PodQOSGuaranteed)
 })
 })
 })

View File

@ -32,11 +32,11 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
// partially cloned from webserver.go // State partially cloned from webserver.go
type State struct {
Received map[string]int
}
@@ -57,17 +57,17 @@ func testPreStop(c clientset.Interface, ns string) {
},
},
}
-By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns))
+ginkgo.By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns))
podDescr, err := c.CoreV1().Pods(ns).Create(podDescr)
framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
// At the end of the test, clean up by removing the pod.
defer func() {
-By("Deleting the server pod")
+ginkgo.By("Deleting the server pod")
c.CoreV1().Pods(ns).Delete(podDescr.Name, nil)
}()
-By("Waiting for pods to come up.")
+ginkgo.By("Waiting for pods to come up.")
err = framework.WaitForPodRunningInNamespace(c, podDescr)
framework.ExpectNoError(err, "waiting for server pod to start")
@@ -100,7 +100,7 @@ func testPreStop(c clientset.Interface, ns string) {
},
}
-By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns))
+ginkgo.By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns))
preStopDescr, err = c.CoreV1().Pods(ns).Create(preStopDescr)
framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name))
deletePreStop := true
@@ -108,7 +108,7 @@ func testPreStop(c clientset.Interface, ns string) {
// At the end of the test, clean up by removing the pod.
defer func() {
if deletePreStop {
-By("Deleting the tester pod")
+ginkgo.By("Deleting the tester pod")
c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil)
}
}()
@@ -117,7 +117,7 @@ func testPreStop(c clientset.Interface, ns string) {
framework.ExpectNoError(err, "waiting for tester pod to start")
// Delete the pod with the preStop handler.
-By("Deleting pre-stop pod")
+ginkgo.By("Deleting pre-stop pod")
if err := c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil); err == nil {
deletePreStop = false
}
@@ -144,7 +144,7 @@ func testPreStop(c clientset.Interface, ns string) {
framework.Failf("Error validating prestop: %v", err)
return true, err
}
-By(fmt.Sprintf("Error validating prestop: %v", err))
+ginkgo.By(fmt.Sprintf("Error validating prestop: %v", err))
} else {
e2elog.Logf("Saw: %s", string(body))
state := State{}
@@ -165,7 +165,7 @@ func testPreStop(c clientset.Interface, ns string) {
var _ = SIGDescribe("PreStop", func() {
f := framework.NewDefaultFramework("prestop")
var podClient *framework.PodClient
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
@@ -178,36 +178,36 @@ var _ = SIGDescribe("PreStop", func() {
testPreStop(f.ClientSet, f.Namespace.Name)
})
-It("graceful pod terminated should wait until preStop hook completes the process", func() {
+ginkgo.It("graceful pod terminated should wait until preStop hook completes the process", func() {
gracefulTerminationPeriodSeconds := int64(30)
-By("creating the pod")
+ginkgo.By("creating the pod")
name := "pod-prestop-hook-" + string(uuid.NewUUID())
pod := getPodWithpreStopLifeCycle(name)
-By("submitting the pod to kubernetes")
+ginkgo.By("submitting the pod to kubernetes")
podClient.Create(pod)
-By("waiting for pod running")
+ginkgo.By("waiting for pod running")
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
var err error
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
-By("deleting the pod gracefully")
+ginkgo.By("deleting the pod gracefully")
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds))
-Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod")
//wait up to graceful termination period seconds
time.Sleep(30 * time.Second)
-By("verifying the pod running state after graceful termination")
+ginkgo.By("verifying the pod running state after graceful termination")
result := &v1.PodList{}
err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
client, err := framework.NodeProxyRequest(f.ClientSet, pod.Spec.NodeName, "pods", ports.KubeletPort)
-Expect(err).NotTo(HaveOccurred(), "failed to get the pods of the node")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get the pods of the node")
err = client.Into(result)
-Expect(err).NotTo(HaveOccurred(), "failed to parse the pods of the node")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to parse the pods of the node")
for _, kubeletPod := range result.Items {
if pod.Name != kubeletPod.Name {
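The pre_stop.go hunks above all apply the same mechanical rewrite as the rest of this PR: the dot-imports of ginkgo and gomega are dropped, so every bare By/It/BeforeEach/Expect call site gains an explicit package qualifier while the test behavior stays the same. A minimal sketch of the resulting style is below; the package name, spec text, and setup body are illustrative only and are not taken from the diff.

package node

import (
	"fmt"

	"github.com/onsi/ginkgo"
)

// Illustrative spec written in the post-change, golint-clean style:
// every ginkgo helper is referenced through the package name.
var _ = ginkgo.Describe("PreStop (illustrative)", func() {
	ginkgo.BeforeEach(func() {
		// Shared setup would go here.
	})

	ginkgo.It("qualifies every ginkgo call site", func() {
		ginkgo.By(fmt.Sprintf("running illustrative step %d", 1))
	})
})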

View File

@@ -31,8 +31,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/gomega"
)
func scTestPod(hostIPC bool, hostPID bool) *v1.Pod {
@@ -63,7 +63,7 @@ func scTestPod(hostIPC bool, hostPID bool) *v1.Pod {
var _ = SIGDescribe("Security Context", func() {
f := framework.NewDefaultFramework("security-context")
-It("should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]", func() {
+ginkgo.It("should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]", func() {
pod := scTestPod(false, false)
pod.Spec.Containers[0].Command = []string{"id", "-G"}
pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678}
@@ -71,7 +71,7 @@ var _ = SIGDescribe("Security Context", func() {
f.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups)
})
-It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func() {
+ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func() {
pod := scTestPod(false, false)
userID := int64(1001)
pod.Spec.SecurityContext.RunAsUser = &userID
@@ -83,7 +83,7 @@ var _ = SIGDescribe("Security Context", func() {
})
})
-It("should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly]", func() {
+ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly]", func() {
pod := scTestPod(false, false)
userID := int64(1001)
groupID := int64(2002)
@@ -97,7 +97,7 @@ var _ = SIGDescribe("Security Context", func() {
})
})
-It("should support container.SecurityContext.RunAsUser [LinuxOnly]", func() {
+ginkgo.It("should support container.SecurityContext.RunAsUser [LinuxOnly]", func() {
pod := scTestPod(false, false)
userID := int64(1001)
overrideUserID := int64(1002)
@@ -112,7 +112,7 @@ var _ = SIGDescribe("Security Context", func() {
})
})
-It("should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly]", func() {
+ginkgo.It("should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly]", func() {
pod := scTestPod(false, false)
userID := int64(1001)
groupID := int64(2001)
@@ -131,19 +131,19 @@ var _ = SIGDescribe("Security Context", func() {
})
})
-It("should support volume SELinux relabeling [Flaky] [LinuxOnly]", func() {
+ginkgo.It("should support volume SELinux relabeling [Flaky] [LinuxOnly]", func() {
testPodSELinuxLabeling(f, false, false)
})
-It("should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly]", func() {
+ginkgo.It("should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly]", func() {
testPodSELinuxLabeling(f, true, false)
})
-It("should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly]", func() {
+ginkgo.It("should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly]", func() {
testPodSELinuxLabeling(f, false, true)
})
-It("should support seccomp alpha unconfined annotation on the container [Feature:Seccomp] [LinuxOnly]", func() {
+ginkgo.It("should support seccomp alpha unconfined annotation on the container [Feature:Seccomp] [LinuxOnly]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = "unconfined"
@@ -152,7 +152,7 @@ var _ = SIGDescribe("Security Context", func() {
f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
})
-It("should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp] [LinuxOnly]", func() {
+ginkgo.It("should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp] [LinuxOnly]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
@@ -160,7 +160,7 @@ var _ = SIGDescribe("Security Context", func() {
f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
})
-It("should support seccomp alpha runtime/default annotation [Feature:Seccomp] [LinuxOnly]", func() {
+ginkgo.It("should support seccomp alpha runtime/default annotation [Feature:Seccomp] [LinuxOnly]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = v1.SeccompProfileRuntimeDefault
@@ -168,7 +168,7 @@ var _ = SIGDescribe("Security Context", func() {
f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"2"}) // seccomp filtered
})
-It("should support seccomp default which is unconfined [Feature:Seccomp] [LinuxOnly]", func() {
+ginkgo.It("should support seccomp default which is unconfined [Feature:Seccomp] [LinuxOnly]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
@@ -212,18 +212,18 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
testContent := "hello"
testFilePath := mountPath + "/TEST"
err = f.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent)
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())
content, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath)
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())
-Expect(content).To(ContainSubstring(testContent))
+gomega.Expect(content).To(gomega.ContainSubstring(testContent))
foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Confirm that the file can be accessed from a second
// pod using host_path with the same MCS label
volumeHostPath := fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~empty-dir/%s", framework.TestContext.KubeVolumeDir, foundPod.UID, volumeName)
-By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir))
+ginkgo.By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir))
pod = scTestPod(hostIPC, hostPID)
pod.Spec.NodeName = foundPod.Spec.NodeName
volumeMounts := []v1.VolumeMount{
@@ -266,5 +266,5 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
framework.ExpectNoError(err, "Error waiting for pod to run %v", pod)
content, err = f.ReadFileViaContainer(pod.Name, "test-container", testFilePath)
-Expect(content).NotTo(ContainSubstring(testContent))
+gomega.Expect(content).NotTo(gomega.ContainSubstring(testContent))
}
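The security_context.go hunks touch the gomega side as well: once that dot-import is gone, matchers such as BeNil, ContainSubstring, and HaveOccurred must be qualified along with Expect. A small self-contained sketch of that assertion style follows; the helper function and its arguments are made up for illustration and are not part of the diff, and for plain error checks the framework.ExpectNoError helper seen elsewhere in these files remains an alternative.

package node

import (
	"github.com/onsi/gomega"
)

// checkReadBack is a hypothetical helper showing qualified gomega matchers.
func checkReadBack(content string, err error) {
	// Each matcher is referenced through the gomega package name.
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	gomega.Expect(content).To(gomega.ContainSubstring("hello"))
}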

View File

@@ -24,7 +24,7 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
)
const maxNodes = 100
@@ -33,7 +33,7 @@ var _ = SIGDescribe("SSH", func() {
f := framework.NewDefaultFramework("ssh")
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
// When adding more providers here, also implement their functionality in e2essh.GetSigner(...).
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
@@ -42,9 +42,9 @@ var _ = SIGDescribe("SSH", func() {
framework.SkipUnlessSSHKeyPresent()
})
-It("should SSH to all nodes and run commands", func() {
+ginkgo.It("should SSH to all nodes and run commands", func() {
// Get all nodes' external IPs.
-By("Getting all nodes' SSH-able IP addresses")
+ginkgo.By("Getting all nodes' SSH-able IP addresses")
hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
if err != nil {
framework.Failf("Error getting node hostnames: %v", err)
@@ -76,7 +76,7 @@ var _ = SIGDescribe("SSH", func() {
nodes = maxNodes
}
testhosts := hosts[:nodes]
-By(fmt.Sprintf("SSH'ing to %d nodes and running %s", len(testhosts), testCase.cmd))
+ginkgo.By(fmt.Sprintf("SSH'ing to %d nodes and running %s", len(testhosts), testCase.cmd))
for _, host := range testhosts {
result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
@@ -104,7 +104,7 @@ var _ = SIGDescribe("SSH", func() {
}
// Quickly test that SSH itself errors correctly.
-By("SSH'ing to a nonexistent host")
+ginkgo.By("SSH'ing to a nonexistent host")
if _, err = e2essh.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil {
framework.Failf("Expected error trying to SSH to nonexistent host.")
}
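In every file the import-level change itself is the same single character: the leading dot is removed, which is exactly what golint's "should not use dot imports" warning flags. A sketch of the before/after, using the same import path as the diff; the package clause, the comment block, and the trailing blank-identifier reference are only there to keep the sketch compilable and are not part of the PR.

package node

// Before the change the file pulled ginkgo's identifiers into the package
// scope with a dot import, which golint reports as "should not use dot imports":
//
//	. "github.com/onsi/ginkgo"
//
// After the change the import is plain, so call sites must be qualified
// (ginkgo.It, ginkgo.By, ginkgo.BeforeEach, ...).
import (
	"github.com/onsi/ginkgo"
)

// Reference the package so this illustrative file has no unused import.
var _ = ginkgo.It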

View File

@@ -27,8 +27,8 @@ import (
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/gomega"
)
const dummyFinalizer = "k8s.io/dummy-finalizer"
@@ -36,7 +36,7 @@ const dummyFinalizer = "k8s.io/dummy-finalizer"
var _ = framework.KubeDescribe("[Feature:TTLAfterFinished][NodeAlphaFeature:TTLAfterFinished]", func() {
f := framework.NewDefaultFramework("ttlafterfinished")
-It("job should be deleted once it finishes after TTL seconds", func() {
+ginkgo.It("job should be deleted once it finishes after TTL seconds", func() {
testFinishedJob(f)
})
})
@@ -50,11 +50,11 @@ func cleanupJob(f *framework.Framework, job *batch.Job) {
j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
}
_, err := jobutil.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
jobutil.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)
err = jobutil.WaitForAllJobPodsGone(c, ns, job.Name)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
func testFinishedJob(f *framework.Framework) {
@@ -73,26 +73,26 @@ func testFinishedJob(f *framework.Framework) {
e2elog.Logf("Create a Job %s/%s with TTL", ns, job.Name)
job, err := jobutil.CreateJob(c, ns, job)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Wait for the Job to finish")
err = jobutil.WaitForJobFinish(c, ns, job.Name)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Wait for TTL after finished controller to delete the Job")
err = jobutil.WaitForJobDeleting(c, ns, job.Name)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
job, err = jobutil.GetJob(c, ns, job.Name)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
finishTime := jobutil.FinishTime(job)
finishTimeUTC := finishTime.UTC()
-Expect(finishTime.IsZero()).NotTo(BeTrue())
+gomega.Expect(finishTime.IsZero()).NotTo(gomega.BeTrue())
deleteAtUTC := job.ObjectMeta.DeletionTimestamp.UTC()
-Expect(deleteAtUTC).NotTo(BeNil())
+gomega.Expect(deleteAtUTC).NotTo(gomega.BeNil())
expireAtUTC := finishTimeUTC.Add(time.Duration(ttl) * time.Second)
-Expect(deleteAtUTC.Before(expireAtUTC)).To(BeFalse())
+gomega.Expect(deleteAtUTC.Before(expireAtUTC)).To(gomega.BeFalse())
}
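With the dot-imports removed everywhere, the one piece of boilerplate that still ties the two packages together is the usual gomega-to-ginkgo failure wiring done once per suite. That wiring is not part of this PR; the sketch below only shows what it looks like in the qualified style, with a made-up test name and suite description.

package node

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestNodeSuiteIllustrative shows the qualified form of the standard
// ginkgo/gomega bootstrap: gomega reports assertion failures through ginkgo.Fail.
func TestNodeSuiteIllustrative(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "node e2e (illustrative) suite")
}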