Merge pull request #35697 from shashidharatd/upgrade-tests

Automatic merge from submit-queue

Add more test cases to k8s e2e upgrade tests

**Special notes for your reviewer**: 
Added guestbook, secrets, daemonset, configmap, and job test cases to the e2e upgrade tests, following the discussion in #35078.
These test cases still need to be run in a real setup; this PR is raised for initial comments. Each new check follows the same setup/validate/teardown shape (see the sketch below).
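
Every check registers with chaosmonkey and follows one skeleton: set the resource up, validate it once, signal readiness, optionally keep re-validating while the upgrade runs, then validate again after the disruption ends. Here is a minimal sketch of that skeleton, assuming the helpers in this PR; the `resource*` functions are hypothetical stand-ins for the per-resource Setup/Validate/Teardown helpers:

```go
package e2e

import (
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/chaosmonkey"
	"k8s.io/kubernetes/test/e2e/framework"
)

// Hypothetical stand-ins for the per-resource helpers added in this PR
// (e.g. common.DoSecretE2EMultipleVolumesSetup / ...Validate).
func resourceSetup(f *framework.Framework) interface{}       { return nil }
func resourceValidate(f *framework.Framework, s interface{}) {}
func resourceTeardown(f *framework.Framework, s interface{}) {}

func testResource(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringDisruption bool) {
	state := resourceSetup(f)  // Setup: create the object under test
	resourceValidate(f, state) // Validate once before the upgrade starts
	sem.Ready()                // Tell chaosmonkey that setup is complete
	if testDuringDisruption {
		// Re-validate on every poll interval until the upgrade finishes.
		wait.Until(func() { resourceValidate(f, state) }, framework.Poll, sem.StopCh)
	} else {
		// Block until chaosmonkey signals that the upgrade is done.
		<-sem.StopCh
	}
	resourceValidate(f, state) // Validate again after the upgrade
	resourceTeardown(f, state) // Teardown
}
```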

@quinton-hoole
Kubernetes Submit Queue, 2016-11-17 04:07:26 -08:00 (committed by GitHub)
commit f7f1533a3b
6 changed files with 489 additions and 179 deletions

View File

@@ -25,20 +25,20 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/chaosmonkey"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// TODO(mikedanese): Add setup, validate, and teardown for:
// - secrets
// - volumes
// - persistent volumes
var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
f := framework.NewDefaultFramework("cluster-upgrade")
framework.KubeDescribe("master upgrade", func() {
It("should maintain responsive services [Feature:MasterUpgrade]", func() {
It("should maintain functioning cluster during upgrade [Feature:MasterUpgrade]", func() {
cm := chaosmonkey.New(func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
@@ -48,6 +48,11 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceRemainsUp(f, sem)
testSecretsDuringUpgrade(f, sem)
testConfigMapsDuringUpgrade(f, sem)
testGuestbookApplicationDuringUpgrade(f, sem)
testDaemonSetDuringUpgrade(f, sem)
testJobsDuringUpgrade(f, sem)
})
cm.Do()
})
@@ -64,11 +69,16 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceUpBeforeAndAfter(f, sem)
testSecretsBeforeAndAfterUpgrade(f, sem)
testConfigMapsBeforeAndAfterUpgrade(f, sem)
testGuestbookApplicationBeforeAndAfterUpgrade(f, sem)
testDaemonSetBeforeAndAfterUpgrade(f, sem)
testJobsBeforeAndAfterUpgrade(f, sem)
})
cm.Do()
})
It("should maintain responsive services [Feature:ExperimentalNodeUpgrade]", func() {
It("should maintain functioning cluster during upgrade [Feature:ExperimentalNodeUpgrade]", func() {
cm := chaosmonkey.New(func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
@@ -78,6 +88,11 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceRemainsUp(f, sem)
testSecretsDuringUpgrade(f, sem)
testConfigMapsDuringUpgrade(f, sem)
testGuestbookApplicationDuringUpgrade(f, sem)
testDaemonSetDuringUpgrade(f, sem)
testJobsDuringUpgrade(f, sem)
})
cm.Do()
})
@@ -96,11 +111,16 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceUpBeforeAndAfter(f, sem)
testSecretsBeforeAndAfterUpgrade(f, sem)
testConfigMapsBeforeAndAfterUpgrade(f, sem)
testGuestbookApplicationBeforeAndAfterUpgrade(f, sem)
testDaemonSetBeforeAndAfterUpgrade(f, sem)
testJobsBeforeAndAfterUpgrade(f, sem)
})
cm.Do()
})
It("should maintain responsive services [Feature:ExperimentalClusterUpgrade]", func() {
It("should maintain functioning cluster during upgrade [Feature:ExperimentalClusterUpgrade]", func() {
cm := chaosmonkey.New(func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
@@ -112,6 +132,11 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceRemainsUp(f, sem)
testSecretsDuringUpgrade(f, sem)
testConfigMapsDuringUpgrade(f, sem)
testGuestbookApplicationDuringUpgrade(f, sem)
testDaemonSetDuringUpgrade(f, sem)
testJobsDuringUpgrade(f, sem)
})
cm.Do()
})
@@ -228,3 +253,200 @@ func checkNodesVersions(cs clientset.Interface, want string) error {
}
return nil
}
func testSecretsBeforeAndAfterUpgrade(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testSecrets(f, sem, false)
}
func testSecretsDuringUpgrade(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testSecrets(f, sem, true)
}
func testSecrets(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringDisruption bool) {
// Setup
pod, expectedOutput := common.DoSecretE2EMultipleVolumesSetup(f)
// Validate
By("consume secret before upgrade")
common.DoSecretE2EMultipleVolumesValidate(f, pod, expectedOutput)
sem.Ready()
if testDuringDisruption {
// Continuously validate
wait.Until(func() {
By("consume secret during upgrade")
common.DoSecretE2EMultipleVolumesValidate(f, pod, expectedOutput)
}, framework.Poll, sem.StopCh)
} else {
// Block until chaosmonkey is done
By("waiting for upgrade to finish without consuming secrets")
<-sem.StopCh
}
// Validate after upgrade
By("consume secret after upgrade")
common.DoSecretE2EMultipleVolumesValidate(f, pod, expectedOutput)
// Teardown
}
func testConfigMapsBeforeAndAfterUpgrade(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testConfigMaps(f, sem, false)
}
func testConfigMapsDuringUpgrade(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testConfigMaps(f, sem, true)
}
func testConfigMaps(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringDisruption bool) {
// Setup
pod, expectedOutput := common.DoConfigMapE2EWithoutMappingsSetup(f, 0, 0, nil)
// Validate
By("consume config-maps before upgrade")
common.DoConfigMapE2EWithoutMappingsValidate(f, pod, expectedOutput)
sem.Ready()
if testDuringDisruption {
// Continuously validate
wait.Until(func() {
By("consume config-maps during upgrade")
common.DoConfigMapE2EWithoutMappingsValidate(f, pod, expectedOutput)
}, framework.Poll, sem.StopCh)
} else {
// Block until chaosmonkey is done
By("waiting for upgrade to finish without consuming config-maps")
<-sem.StopCh
}
// Validate after upgrade
By("consume config-maps after upgrade")
common.DoConfigMapE2EWithoutMappingsValidate(f, pod, expectedOutput)
// Teardown
}
func testGuestbookApplicationBeforeAndAfterUpgrade(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testGuestbookApplication(f, sem, false)
}
func testGuestbookApplicationDuringUpgrade(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testGuestbookApplication(f, sem, true)
}
func testGuestbookApplication(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringDisruption bool) {
// Setup
By("setup guestbook app")
GuestbookApplicationSetup(f.ClientSet, f.Namespace.Name)
// Validate
By("validate guestbook app before upgrade")
GuestbookApplicationValidate(f.ClientSet, f.Namespace.Name)
sem.Ready()
if testDuringDisruption {
// Continuously validate
wait.Until(func() {
By("validate guestbook app during upgrade")
GuestbookApplicationValidate(f.ClientSet, f.Namespace.Name)
}, framework.Poll, sem.StopCh)
} else {
// Block until chaosmonkey is done
By("waiting for upgrade to finish without validating guestbook app")
<-sem.StopCh
}
// Validate after upgrade
By("validate guestbook app after upgrade")
GuestbookApplicationValidate(f.ClientSet, f.Namespace.Name)
// Teardown
By("teardown guestbook app")
GuestbookApplicationTeardown(f.ClientSet, f.Namespace.Name)
}
func testDaemonSetBeforeAndAfterUpgrade(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testDaemonSet(f, sem, false)
}
func testDaemonSetDuringUpgrade(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testDaemonSet(f, sem, true)
}
func testDaemonSet(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringDisruption bool) {
image := "gcr.io/google_containers/serve_hostname:v1.4"
dsName := "daemon-set"
// Setup
By("setup daemonset")
complexLabel, nodeSelector := TestDaemonSetWithNodeAffinitySetup(f, dsName, image)
// Validate
By("validate daemonset before upgrade")
TestDaemonSetWithNodeAffinityValidate(f, dsName, complexLabel, nodeSelector)
sem.Ready()
if testDuringDisruption {
// Continuously validate
wait.Until(func() {
By("validate daemonset during upgrade")
TestDaemonSetWithNodeAffinityValidate(f, dsName, complexLabel, nodeSelector)
}, framework.Poll, sem.StopCh)
} else {
// Block until chaosmonkey is done
By("waiting for upgrade to finish without validating daemonset")
<-sem.StopCh
}
// Validate after upgrade
By("validate daemonset after upgrade")
TestDaemonSetWithNodeAffinityValidate(f, dsName, complexLabel, nodeSelector)
// Teardown
By("teardown daemonset")
TestDaemonSetWithNodeAffinityTeardown(f, dsName)
}
func testJobsBeforeAndAfterUpgrade(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testJobs(f, sem, false)
}
func testJobsDuringUpgrade(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testJobs(f, sem, true)
}
func testJobs(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringDisruption bool) {
parallelism := int32(2)
completions := int32(4)
// Setup
By("setup job")
job := TestJobsSetup(f, "randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
// Validate
By("validate job before upgrade")
TestJobsValidate(f, job, completions)
sem.Ready()
if testDuringDisruption {
// Continuously validate
wait.Until(func() {
By("validate job during upgrade")
TestJobsValidate(f, job, completions)
}, framework.Poll, sem.StopCh)
} else {
// Block until chaosmonkey is done
By("waiting for upgrade to finish without validating job")
<-sem.StopCh
}
// Validate after upgrade
By("validate job after upgrade")
TestJobsValidate(f, job, completions)
// Teardown
TestJobsTeardown(f, job)
}

View File

@@ -278,7 +278,7 @@ func newConfigMap(f *framework.Framework, name string) *api.ConfigMap {
}
}
func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, defaultMode *int32) {
func DoConfigMapE2EWithoutMappingsSetup(f *framework.Framework, uid, fsGroup int64, defaultMode *int32) (*api.Pod, []string) {
var (
name = "configmap-test-volume-" + string(uuid.NewUUID())
volumeName = "configmap-volume"
@@ -352,8 +352,17 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
output = append(output, "mode of file \"/etc/configmap-volume/data-1\": "+modeString)
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
return pod, output
}
func DoConfigMapE2EWithoutMappingsValidate(f *framework.Framework, pod *api.Pod, output []string) {
f.TestContainerOutput("consume configMaps", pod, 0, output)
}
func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, defaultMode *int32) {
pod, output := DoConfigMapE2EWithoutMappingsSetup(f, uid, fsGroup, defaultMode)
DoConfigMapE2EWithoutMappingsValidate(f, pod, output)
}
func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) {

View File

@@ -73,73 +73,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
// This test ensures that the same secret can be mounted in multiple
// volumes in the same pod. This test case exists to prevent
// regressions that break this use-case.
var (
name = "secret-test-" + string(uuid.NewUUID())
volumeName = "secret-volume"
volumeMountPath = "/etc/secret-volume"
volumeName2 = "secret-volume-2"
volumeMountPath2 = "/etc/secret-volume-2"
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{
Name: volumeName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: name,
},
},
},
{
Name: volumeName2,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: name,
},
},
},
},
Containers: []api.Container{
{
Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
VolumeMounts: []api.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: volumeName2,
MountPath: volumeMountPath2,
ReadOnly: true,
},
},
},
},
RestartPolicy: api.RestartPolicyNever,
},
}
f.TestContainerOutput("consume secrets", pod, 0, []string{
"content of file \"/etc/secret-volume/data-1\": value-1",
"mode of file \"/etc/secret-volume/data-1\": -rw-r--r--",
})
doSecretE2EMultipleVolumes(f)
})
It("should be consumable from pods in env vars [Conformance]", func() {
@@ -334,3 +268,82 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
}
func DoSecretE2EMultipleVolumesSetup(f *framework.Framework) (*api.Pod, []string) {
var (
name = "secret-test-" + string(uuid.NewUUID())
volumeName = "secret-volume"
volumeMountPath = "/etc/secret-volume"
volumeName2 = "secret-volume-2"
volumeMountPath2 = "/etc/secret-volume-2"
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{
Name: volumeName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: name,
},
},
},
{
Name: volumeName2,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: name,
},
},
},
},
Containers: []api.Container{
{
Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
VolumeMounts: []api.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: volumeName2,
MountPath: volumeMountPath2,
ReadOnly: true,
},
},
},
},
RestartPolicy: api.RestartPolicyNever,
},
}
expectedOutput := []string{
"content of file \"/etc/secret-volume/data-1\": value-1",
"mode of file \"/etc/secret-volume/data-1\": -rw-r--r--",
}
return pod, expectedOutput
}
func DoSecretE2EMultipleVolumesValidate(f *framework.Framework, pod *api.Pod, expectedOutput []string) {
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
}
func doSecretE2EMultipleVolumes(f *framework.Framework) {
pod, expectedOutput := DoSecretE2EMultipleVolumesSetup(f)
DoSecretE2EMultipleVolumesValidate(f, pod, expectedOutput)
}

View File

@@ -204,71 +204,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
})
It("should run and stop complex daemon with node affinity", func() {
complexLabel := map[string]string{daemonsetNameLabel: dsName}
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon with a node affinity %s", dsName)
affinity := map[string]string{
api.AffinityAnnotationKey: fmt.Sprintf(`
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{
"matchExpressions": [{
"key": "%s",
"operator": "In",
"values": ["%s"]
}]
}]
}}}`, daemonsetColorLabel, nodeSelector[daemonsetColorLabel]),
}
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
ObjectMeta: api.ObjectMeta{
Name: dsName,
},
Spec: extensions.DaemonSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: complexLabel},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: complexLabel,
Annotations: affinity,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: dsName,
Image: image,
Ports: []api.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
By("Initially, daemon pods should not be running on any nodes.")
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
By("Change label of node, check that daemon pod is launched.")
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
Expect(len(daemonSetLabels)).To(Equal(1))
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
Expect(err).NotTo(HaveOccurred())
By("remove the node selector and wait for daemons to be unscheduled")
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
Expect(wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))).
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
By("We should now be able to delete the daemon set.")
Expect(c.Extensions().DaemonSets(ns).Delete(dsName, nil)).NotTo(HaveOccurred())
testDaemonSetWithNodeAffinity(f, dsName, image)
})
})
@@ -393,3 +329,90 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error {
}
return nil
}
func TestDaemonSetWithNodeAffinitySetup(f *framework.Framework, dsName, image string) (map[string]string, map[string]string) {
ns := f.Namespace.Name
c := f.ClientSet
err := clearDaemonSetNodeLabels(c)
Expect(err).NotTo(HaveOccurred())
complexLabel := map[string]string{daemonsetNameLabel: dsName}
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon with a node affinity %s", dsName)
affinity := map[string]string{
api.AffinityAnnotationKey: fmt.Sprintf(`
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{
"matchExpressions": [{
"key": "%s",
"operator": "In",
"values": ["%s"]
}]
}]
}}}`, daemonsetColorLabel, nodeSelector[daemonsetColorLabel]),
}
_, err = c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
ObjectMeta: api.ObjectMeta{
Name: dsName,
},
Spec: extensions.DaemonSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: complexLabel},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: complexLabel,
Annotations: affinity,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: dsName,
Image: image,
Ports: []api.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
return complexLabel, nodeSelector
}
func TestDaemonSetWithNodeAffinityValidate(f *framework.Framework, dsName string, complexLabel, nodeSelector map[string]string) {
c := f.ClientSet
By("Initially, daemon pods should not be running on any nodes.")
err := wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
By("Change label of node, check that daemon pod is launched.")
nodeList := framework.GetReadySchedulableNodesOrDie(c)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
Expect(len(daemonSetLabels)).To(Equal(1))
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
Expect(err).NotTo(HaveOccurred())
By("remove the node selector and wait for daemons to be unscheduled")
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
Expect(wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))).
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
}
func TestDaemonSetWithNodeAffinityTeardown(f *framework.Framework, dsName string) {
ns := f.Namespace.Name
c := f.ClientSet
By("We should now be able to delete the daemon set.")
Expect(c.Extensions().DaemonSets(ns).Delete(dsName, nil)).NotTo(HaveOccurred())
}
func testDaemonSetWithNodeAffinity(f *framework.Framework, dsName, image string) {
complexLabel, nodeSelector := TestDaemonSetWithNodeAffinitySetup(f, dsName, image)
TestDaemonSetWithNodeAffinityValidate(f, dsName, complexLabel, nodeSelector)
TestDaemonSetWithNodeAffinityTeardown(f, dsName)
}

View File

@@ -79,20 +79,13 @@ var _ = framework.KubeDescribe("Job", func() {
// Pods sometimes fail, but eventually succeed, after pod restarts
It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
By("Creating a job")
// 50% chance of container success, local restarts.
// Can't use the failOnce approach because that relies
// on an emptyDir, which is not preserved across new pods.
// Worst case analysis: 15 failures, each taking 1 minute to
// run due to some slowness, 1 in 2^15 chance of happening,
// causing test flake. Should be very rare.
job := newTestJob("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
TestJobs(f, "randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
})
It("should keep restarting failed pods", func() {
@@ -339,3 +332,32 @@ func newBool(val bool) *bool {
*p = val
return p
}
func TestJobsSetup(f *framework.Framework, behavior, name string, rPol api.RestartPolicy, parallelism, completions int32) *batch.Job {
job := newTestJob(behavior, name, rPol, parallelism, completions)
By("Creating a job")
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
return job
}
func TestJobsValidate(f *framework.Framework, job *batch.Job, completions int32) {
By("Ensuring job reaches completions")
err := waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
}
func TestJobsTeardown(f *framework.Framework, job *batch.Job) {
By("Delete the job")
err := deleteJob(f.ClientSet, f.Namespace.Name, job.ObjectMeta.Name)
Expect(err).NotTo(HaveOccurred())
}
func TestJobs(f *framework.Framework, behavior, name string, rPol api.RestartPolicy, parallelism, completions int32) {
job := TestJobsSetup(f, behavior, name, rPol, parallelism, completions)
TestJobsValidate(f, job, completions)
TestJobsTeardown(f, job)
}
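
A quick check of the worst-case analysis in the removed comment above: with a 50% per-attempt success rate, the chance of 15 consecutive failures is (1/2)^15 = 1/32768, roughly 3.1e-5, so the flake risk from this approach is indeed negligible.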

View File

@@ -334,34 +334,8 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
})
framework.KubeDescribe("Guestbook application", func() {
forEachGBFile := func(run func(s string)) {
for _, gbAppFile := range []string{
"examples/guestbook/frontend-deployment.yaml",
"examples/guestbook/frontend-service.yaml",
"examples/guestbook/redis-master-deployment.yaml",
"examples/guestbook/redis-master-service.yaml",
"examples/guestbook/redis-slave-deployment.yaml",
"examples/guestbook/redis-slave-service.yaml",
} {
contents := framework.ReadOrDie(gbAppFile)
run(string(contents))
}
}
It("should create and stop a working application [Conformance]", func() {
framework.SkipUnlessServerVersionGTE(deploymentsVersion, c.Discovery())
defer forEachGBFile(func(contents string) {
cleanupKubectlInputs(contents, ns)
})
By("creating all guestbook components")
forEachGBFile(func(contents string) {
framework.Logf(contents)
framework.RunKubectlOrDieInput(contents, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
})
By("validating guestbook app")
validateGuestbookApp(c, ns)
guestbookApplication(c, ns)
})
})
@@ -1575,27 +1549,32 @@ func validateGuestbookApp(c clientset.Interface, ns string) {
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Waiting for frontend to serve content.")
if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) {
// The response could be {"data": ""} or {"data": "TestEntry"}, depending on how many times validateGuestbookApp has been called.
if !waitForGuestbookResponse(c, "get", "", []string{`{"data": ""}`, `{"data": "TestEntry"}`}, guestbookStartupTimeout, ns) {
framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
}
framework.Logf("Trying to add a new entry to the guestbook.")
if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message": "Updated"}`, guestbookResponseTimeout, ns) {
if !waitForGuestbookResponse(c, "set", "TestEntry", []string{`{"message": "Updated"}`}, guestbookResponseTimeout, ns) {
framework.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds())
}
framework.Logf("Verifying that added entry can be retrieved.")
if !waitForGuestbookResponse(c, "get", "", `{"data": "TestEntry"}`, guestbookResponseTimeout, ns) {
if !waitForGuestbookResponse(c, "get", "", []string{`{"data": "TestEntry"}`}, guestbookResponseTimeout, ns) {
framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
}
}
// Returns whether the guestbook returned one of the expected responses within the timeout.
func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
func waitForGuestbookResponse(c clientset.Interface, cmd, arg string, expectedResponse []string, timeout time.Duration, ns string) bool {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
res, err := makeRequestToGuestbook(c, cmd, arg, ns)
if err == nil && res == expectedResponse {
return true
if err == nil {
for _, expResp := range expectedResponse {
if res == expResp {
return true
}
}
}
framework.Logf("Failed to get response from guestbook. err: %v, response: %s", err, res)
}
@@ -1810,3 +1789,45 @@ func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
p.Logger = log.New(logs, "", 0)
return httptest.NewServer(p), logs
}
func forEachGuestbookFile(run func(s string)) {
for _, gbAppFile := range []string{
"examples/guestbook/frontend-deployment.yaml",
"examples/guestbook/frontend-service.yaml",
"examples/guestbook/redis-master-deployment.yaml",
"examples/guestbook/redis-master-service.yaml",
"examples/guestbook/redis-slave-deployment.yaml",
"examples/guestbook/redis-slave-service.yaml",
} {
contents := framework.ReadOrDie(gbAppFile)
run(string(contents))
}
}
func GuestbookApplicationSetup(c clientset.Interface, ns string) {
framework.SkipUnlessServerVersionGTE(deploymentsVersion, c.Discovery())
By("creating all guestbook components")
forEachGuestbookFile(func(contents string) {
framework.Logf(contents)
framework.RunKubectlOrDieInput(contents, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
})
}
func GuestbookApplicationValidate(c clientset.Interface, ns string) {
By("validating guestbook app")
validateGuestbookApp(c, ns)
}
func GuestbookApplicationTeardown(c clientset.Interface, ns string) {
By("teardown guestbook app")
forEachGuestbookFile(func(contents string) {
cleanupKubectlInputs(contents, ns)
})
}
func guestbookApplication(c clientset.Interface, ns string) {
GuestbookApplicationSetup(c, ns)
GuestbookApplicationValidate(c, ns)
GuestbookApplicationTeardown(c, ns)
}
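
The TODO at the top of the upgrade test file still lists volumes and persistent volumes. Under the convention this PR establishes, a future check would be wired into each upgrade spec as sketched below; this is hypothetical, `registerPVUpgradeCheck` and `testPVDuringUpgrade` are invented names, and the upgrade trigger itself is elided here just as it is in the hunks above:

```go
// Hypothetical wiring for a future persistent-volume check, mirroring the
// registrations this PR adds to each upgrade spec.
func registerPVUpgradeCheck(f *framework.Framework) {
	cm := chaosmonkey.New(func() {
		v, err := realVersion(framework.TestContext.UpgradeTarget)
		framework.ExpectNoError(err)
		// The actual upgrade call goes here, as in the specs above.
		framework.Logf("would upgrade the cluster to %q at this point", v)
	})
	cm.Register(func(sem *chaosmonkey.Semaphore) {
		// Close over f.
		testServiceRemainsUp(f, sem)
		testPVDuringUpgrade(f, sem) // the new check slots in beside the existing ones
	})
	cm.Do()
}

// Invented stand-in for the future check, shaped like the five added here.
func testPVDuringUpgrade(f *framework.Framework, sem *chaosmonkey.Semaphore) {
	sem.Ready()  // setup would go before this
	<-sem.StopCh // validation during/after the upgrade would go here
}
```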