fix kubectl e2e test to avoid alpha versions unless needed

deads2k 2016-10-24 14:39:55 -04:00
parent 33ebe1f18b
commit fde2fedfa6


@@ -169,10 +169,62 @@ func runKubectlRetryOrDie(args ...string) string {
 	return output
 }
 
var _ = framework.KubeDescribe("Kubectl client", func() {
+// duplicated setup to avoid polluting "normal" clients with alpha features which confuses the generated clients
+var _ = framework.KubeDescribe("Kubectl alpha client", func() {
+	defer GinkgoRecover()
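+	// a group-version-scoped framework: clients built here speak batch/v2alpha1 without enabling alpha types for the rest of the suite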
+	f := framework.NewDefaultGroupVersionFramework("kubectl", BatchV2Alpha1GroupVersion)
+
+	var c clientset.Interface
+	var ns string
+	BeforeEach(func() {
+		c = f.ClientSet
+		ns = f.Namespace.Name
+	})
framework.KubeDescribe("Kubectl run ScheduledJob", func() {
var nsFlag string
var sjName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
sjName = "e2e-test-echo-scheduledjob"
})
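+		// delete the ScheduledJob unconditionally so a failed assertion doesn't leak it into later tests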
+		AfterEach(func() {
+			framework.RunKubectlOrDie("delete", "scheduledjobs", sjName, nsFlag)
+		})
+		It("should create a ScheduledJob", func() {
+			framework.SkipIfMissingResource(f.ClientPool, ScheduledJobGroupVersionResource, f.Namespace.Name)
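+			// every five minutes; '?' is accepted in place of '*' in the day-of-week field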
+			schedule := "*/5 * * * ?"
+			framework.RunKubectlOrDie("run", sjName, "--restart=OnFailure", "--generator=scheduledjob/v2alpha1",
+				"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
By("verifying the ScheduledJob " + sjName + " was created")
sj, err := c.Batch().ScheduledJobs(ns).Get(sjName)
if err != nil {
framework.Failf("Failed getting ScheduledJob %s: %v", sjName, err)
}
if sj.Spec.Schedule != schedule {
framework.Failf("Failed creating a ScheduledJob with correct schedule %s", schedule)
}
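+			// the generator should have produced a single container running the requested image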
+			containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
+			if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
+				framework.Failf("Failed creating ScheduledJob %s for 1 pod with expected image %s: %#v", sjName, busyboxImage, containers)
+			}
+			if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure {
+				framework.Failf("Failed creating a ScheduledJob with correct restart policy for --restart=OnFailure")
+			}
+		})
+	})
+})
+
var _ = framework.KubeDescribe("Kubectl client", func() {
defer GinkgoRecover()
f := framework.NewDefaultFramework("kubectl")
// Reustable cluster state function. This won't be adversly affected by lazy initialization of framework.
clusterState := func() *framework.ClusterVerification {
return f.NewClusterVerification(
@@ -1082,43 +1134,6 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
 		})
 	})
framework.KubeDescribe("Kubectl run ScheduledJob", func() {
var nsFlag string
var sjName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
sjName = "e2e-test-echo-scheduledjob"
})
AfterEach(func() {
framework.RunKubectlOrDie("delete", "scheduledjobs", sjName, nsFlag)
})
It("should create a ScheduledJob", func() {
framework.SkipIfMissingResource(f.ClientPool, ScheduledJobGroupVersionResource, f.Namespace.Name)
schedule := "*/5 * * * ?"
framework.RunKubectlOrDie("run", sjName, "--restart=OnFailure", "--generator=scheduledjob/v2alpha1",
"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
By("verifying the ScheduledJob " + sjName + " was created")
sj, err := c.Batch().ScheduledJobs(ns).Get(sjName)
if err != nil {
framework.Failf("Failed getting ScheduledJob %s: %v", sjName, err)
}
if sj.Spec.Schedule != schedule {
framework.Failf("Failed creating a ScheduledJob with correct schedule %s", schedule)
}
containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
framework.Failf("Failed creating ScheduledJob %s for 1 pod with expected image %s: %#v", sjName, busyboxImage, containers)
}
if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure {
framework.Failf("Failed creating a ScheduledJob with correct restart policy for --restart=OnFailure")
}
})
})
framework.KubeDescribe("Kubectl run pod", func() {
var nsFlag string
var podName string