Merge pull request #42925 from janetkuo/ds-adopt-e2e

Automatic merge from submit-queue

Allow DaemonSet controller to PATCH pods, and add more steps and logs in DaemonSet pods adoption e2e test

DaemonSet pod adoption failed because the DaemonSet controller isn't allowed to patch pods when claiming them.

[Edit] This PR fixes #42908 by modifying RBAC to allow the DaemonSet controller to patch pods, and by adding more steps and logs to the original e2e test to make debugging easier.
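
For background on why the `patch` verb matters here: a controller adopts an orphaned pod by patching the pod's `ownerReferences` so that the pod points back at the adopting DaemonSet (its ControllerRef). The rough Go sketch below only illustrates that mechanism; the helper name, import paths, and the exact patch payload are assumptions for illustration, not the controller manager's actual code.

// Illustrative sketch only: adoption boils down to a strategic-merge PATCH on the
// pod that sets its ownerReferences to the adopting DaemonSet, which is why the
// controller's RBAC role needs the "patch" verb on pods.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/apis/extensions"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// adoptPod is a hypothetical helper, not part of this PR.
func adoptPod(c clientset.Interface, ds *extensions.DaemonSet, podName string) error {
	// The payload mirrors the general shape of an adoption patch; the field values
	// and apiVersion are assumptions for the extensions/v1beta1 DaemonSet API.
	patch := fmt.Sprintf(
		`{"metadata":{"ownerReferences":[{"apiVersion":"extensions/v1beta1","kind":"DaemonSet","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`,
		ds.Name, ds.UID)
	// Before this PR, the daemon-set-controller role lacked "patch" on pods,
	// so a call like this from the controller was rejected by RBAC.
	_, err := c.Core().Pods(ds.Namespace).Patch(podName, types.StrategicMergePatchType, []byte(patch))
	return err
}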

Tested against a local cluster and a GCE cluster.
@kargakis @lukaszo @kubernetes/sig-apps-pr-reviews
Merged by Kubernetes Submit Queue on 2017-03-13 14:06:03 -07:00 (committed via GitHub)
commit 5913c5a453
3 changed files with 86 additions and 68 deletions

View File

@@ -84,7 +84,7 @@ func init() {
 			rbac.NewRule("get", "list", "watch").Groups(extensionsGroup).Resources("daemonsets").RuleOrDie(),
 			rbac.NewRule("update").Groups(extensionsGroup).Resources("daemonsets/status").RuleOrDie(),
 			rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
-			rbac.NewRule("list", "watch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+			rbac.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
 			rbac.NewRule("create").Groups(legacyGroup).Resources("pods/binding").RuleOrDie(),
 			eventsRule(),
 		},

View File

@@ -171,6 +171,7 @@ items:
     - create
     - delete
     - list
+    - patch
     - watch
   - apiGroups:
     - ""

View File

@@ -19,7 +19,6 @@ package e2e
 import (
 	"fmt"
 	"reflect"
-	"sort"
 	"strings"
 	"time"
@@ -27,6 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
@@ -116,7 +116,6 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Check that daemon pods launch on every node of the cluster.")
-		Expect(err).NotTo(HaveOccurred())
 		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label, ds))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
 		err = checkDaemonStatus(f, dsName)
@@ -219,7 +218,6 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Check that daemon pods launch on every node of the cluster.")
-		Expect(err).NotTo(HaveOccurred())
 		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label, ds))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
 		err = checkDaemonStatus(f, dsName)
@@ -245,7 +243,6 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(ds.Spec.TemplateGeneration).To(Equal(int64(1)))
 
 		By("Check that daemon pods launch on every node of the cluster.")
-		Expect(err).NotTo(HaveOccurred())
 		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label, ds))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
@@ -269,7 +266,6 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Check that daemon pods are still running on every node of the cluster.")
-		Expect(err).NotTo(HaveOccurred())
 		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label, ds))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
 	})
@@ -286,7 +282,6 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
 
 		By("Check that daemon pods launch on every node of the cluster.")
-		Expect(err).NotTo(HaveOccurred())
 		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label, ds))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
@@ -296,6 +291,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 
 		By("Update daemon pods image.")
 		ds, err = c.Extensions().DaemonSets(ns).Get(dsName, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
 		ds.Spec.Template.Spec.Containers[0].Image = redisImage
 		ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
 		ds, err = c.Extensions().DaemonSets(ns).Update(ds)
@@ -311,7 +307,6 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Check that daemon pods are still running on every node of the cluster.")
-		Expect(err).NotTo(HaveOccurred())
 		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label, ds))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
 	})
@@ -319,8 +314,9 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 	It("Should adopt or recreate existing pods when creating a RollingUpdate DaemonSet with matching or mismatching templateGeneration", func() {
 		label := map[string]string{daemonsetNameLabel: dsName}
 
+		// 1. Create a RollingUpdate DaemonSet
 		templateGeneration := int64(999)
-		framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration)
+		framework.Logf("Creating simple RollingUpdate DaemonSet %s with templateGeneration %d", dsName, templateGeneration)
 		ds := newDaemonSet(dsName, image, label)
 		ds.Spec.TemplateGeneration = templateGeneration
 		ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
@@ -329,7 +325,6 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
 
 		By("Check that daemon pods launch on every node of the cluster.")
-		Expect(err).NotTo(HaveOccurred())
 		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label, ds))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
@@ -337,18 +332,17 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
 		Expect(err).NotTo(HaveOccurred())
 
-		dsPodsLastCreationTime := getDaemonPodsLastCreationTime(c, ns, label)
+		// 2. Orphan DaemonSet pods
 		By(fmt.Sprintf("Deleting DaemonSet %s and orphaning its pods", dsName))
-		trueVar := true
-		deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
-		deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
-		err = c.Extensions().DaemonSets(ns).Delete(ds.Name, deleteOptions)
+		err = orphanDaemonSetPods(c, ds)
 		Expect(err).NotTo(HaveOccurred())
+		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ns, label))
+		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
 		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(f, ns, ds.Name))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
 
-		newDSName := dsName + "-new-adopt"
+		// 3. Adopt DaemonSet pods (no restart)
+		newDSName := "adopt"
 		By(fmt.Sprintf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newDSName))
 		newDS := newDaemonSet(newDSName, image, label)
 		newDS.Spec.TemplateGeneration = templateGeneration
@@ -357,23 +351,29 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred())
 		Expect(newDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
 
+		By(fmt.Sprintf("Wait for all pods to be adopted by DaemonSet %s", newDSName))
+		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsAdopted(c, ns, newDS.UID, label))
+		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
+
 		By(fmt.Sprintf("Make sure no daemon pod updated its template generation %d", templateGeneration))
 		err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
 		Expect(err).NotTo(HaveOccurred())
 
-		By("Make sure no pods are recreated")
-		newDSPodsFirstCreationTime := getDaemonPodsFirstCreationTime(c, ns, label)
-		Expect(newDSPodsFirstCreationTime.Before(dsPodsLastCreationTime) ||
-			newDSPodsFirstCreationTime.Equal(dsPodsLastCreationTime)).To(BeTrue())
-
-		By(fmt.Sprintf("Deleting DaemonSet %s and orphaning its pods", newDSName))
-		orphanDependents := true
-		err = c.Extensions().DaemonSets(ns).Delete(newDSName, &metav1.DeleteOptions{OrphanDependents: &orphanDependents})
+		By("Make sure no pods are recreated by looking at their names")
+		err = checkDaemonSetPodsName(c, ns, dsName, label)
 		Expect(err).NotTo(HaveOccurred())
+
+		// 4. Orphan DaemonSet pods again
+		By(fmt.Sprintf("Deleting DaemonSet %s and orphaning its pods", newDSName))
+		err = orphanDaemonSetPods(c, newDS)
+		Expect(err).NotTo(HaveOccurred())
+		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ns, label))
+		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
 		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(f, ns, newDSName))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
 
-		newRestartDSName := dsName + "-new-restart"
+		// 4. Adopt DaemonSet pods (should kill and restart those pods)
+		newRestartDSName := "restart"
 		By(fmt.Sprintf("Creating a new RollingUpdate DaemonSet %s to restart adopted pods", newRestartDSName))
 		newRestartDS := newDaemonSet(newRestartDSName, image, label)
 		newRestartDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
@@ -381,16 +381,23 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred())
 		Expect(newRestartDS.Spec.TemplateGeneration).To(Equal(int64(1)))
 
-		By("Wait for all DaemonSet pods template Generation to be updated to 1")
-		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, templateGenerationMatch(c, ns, label, "1"))
-		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod template generation to be 1")
+		By("Wait for restarted DaemonSet pods launch on every node of the cluster.")
+		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsNameMatch(c, ns, newRestartDSName, label))
+		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to restart")
 
-		By("Make sure pods are recreated")
-		newRestartDSPodsFirstCreationTime := getDaemonPodsFirstCreationTime(c, ns, label)
-		Expect(dsPodsLastCreationTime.Before(newRestartDSPodsFirstCreationTime)).To(BeTrue())
+		By("Make sure restarted DaemonSet pods have correct template generation 1")
+		err = checkDaemonPodsTemplateGeneration(c, ns, label, "1")
+		Expect(err).NotTo(HaveOccurred())
 	})
 })
 
+func orphanDaemonSetPods(c clientset.Interface, ds *extensions.DaemonSet) error {
+	trueVar := true
+	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
+	deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
+	return c.Extensions().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
+}
+
 func newDaemonSet(dsName, image string, label map[string]string) *extensions.DaemonSet {
 	return &extensions.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
@@ -527,7 +534,7 @@ func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string,
 	nodeNames := make([]string, 0)
 	for _, node := range nodeList.Items {
 		if !canScheduleOnNode(node, ds) {
-			framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node, node.Spec.Taints)
+			framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
 			continue
 		}
 		nodeNames = append(nodeNames, node.Name)
@@ -586,20 +593,16 @@ func checkDaemonPodsImage(c clientset.Interface, ns string, selector map[string]
 	}
 }
 
-func templateGenerationMatch(c clientset.Interface, ns string, selector map[string]string, templateGeneration string) func() (bool, error) {
-	return func() (bool, error) {
-		err := checkDaemonPodsTemplateGeneration(c, ns, selector, templateGeneration)
-		match := err == nil
-		return match, nil
-	}
-}
-
 func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label map[string]string, templateGeneration string) error {
 	pods := listDaemonPods(c, ns, label)
 	for _, pod := range pods.Items {
+		// We don't care about inactive pods
+		if !controller.IsPodActive(&pod) {
+			continue
+		}
 		podTemplateGeneration := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
 		if podTemplateGeneration != templateGeneration {
-			return fmt.Errorf("Expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
+			return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
 		}
 	}
 	return nil
@@ -615,34 +618,48 @@ func checkDaemonSetDeleted(f *framework.Framework, ns, name string) func() (bool
 	}
 }
 
-func getDaemonPodsLastCreationTime(c clientset.Interface, ns string, label map[string]string) metav1.Time {
-	sortedPods := getDaemonPodsSortedByCreationTime(c, ns, label)
-	return sortedPods[len(sortedPods)-1].ObjectMeta.CreationTimestamp
+func checkDaemonSetPodsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
+	return func() (bool, error) {
+		pods := listDaemonPods(c, ns, label)
+		for _, pod := range pods.Items {
+			// This pod is orphaned only when controller ref is cleared
+			if controllerRef := controller.GetControllerOf(&pod); controllerRef != nil {
+				return false, nil
+			}
+		}
+		return true, nil
+	}
 }
 
-func getDaemonPodsFirstCreationTime(c clientset.Interface, ns string, label map[string]string) metav1.Time {
-	sortedPods := getDaemonPodsSortedByCreationTime(c, ns, label)
-	return sortedPods[0].ObjectMeta.CreationTimestamp
+func checkDaemonSetPodsAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
+	return func() (bool, error) {
+		pods := listDaemonPods(c, ns, label)
+		for _, pod := range pods.Items {
+			// This pod is adopted only when its controller ref is update
+			if controllerRef := controller.GetControllerOf(&pod); controllerRef == nil || controllerRef.UID != dsUID {
+				return false, nil
+			}
+		}
+		return true, nil
+	}
 }
 
-func getDaemonPodsSortedByCreationTime(c clientset.Interface, ns string, label map[string]string) []v1.Pod {
-	podList := listDaemonPods(c, ns, label)
-	pods := podList.Items
-	if len(pods) > 1 {
-		sort.Sort(podByCreationTimestamp(pods))
+func checkDaemonSetPodsNameMatch(c clientset.Interface, ns, prefix string, label map[string]string) func() (bool, error) {
+	return func() (bool, error) {
+		if err := checkDaemonSetPodsName(c, ns, prefix, label); err != nil {
+			framework.Logf("%v", err)
+			return false, nil
+		}
+		return true, nil
 	}
-	return pods
 }
 
-// podByCreationTimestamp sorts a list of DaemonSet pods by creation timestamp, using their names as a tie breaker.
-type podByCreationTimestamp []v1.Pod
-
-func (o podByCreationTimestamp) Len() int      { return len(o) }
-func (o podByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
-
-func (o podByCreationTimestamp) Less(i, j int) bool {
-	if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
-		return o[i].Name < o[j].Name
+func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[string]string) error {
+	pods := listDaemonPods(c, ns, label)
+	for _, pod := range pods.Items {
+		if !strings.HasPrefix(pod.Name, prefix) {
+			return fmt.Errorf("expected pod %s name to be prefixed %q", pod.Name, prefix)
		}
 	}
-	return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
+	return nil
 }