Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #60747 from dims/fix-daemonset-conformance-test-failure-and-remove-unused-code
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix test failure and delete unused code

**What this PR does / why we need it**:
`Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate` is broken by recent updates, as template generation isn't supported by apps/v1.DaemonSet anymore.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #60745

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
Commit: 48a7048d98
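Background note (added for illustration, not part of the original PR): apps/v1 dropped the `TemplateGeneration` field that the extensions/v1beta1 `DaemonSetSpec` still carries, so an e2e assertion built around a template-generation value has nothing reliable to check once the DaemonSet is created through the apps/v1 client. A minimal Go sketch of that API difference, assuming the `k8s.io/api` staging packages of this Kubernetes release:

```go
// Sketch only: shows the API difference behind the test failure, not code from this PR.
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	// extensions/v1beta1 DaemonSets carry an explicit template generation,
	// which the daemon controller historically surfaced on pods via the
	// "pod-template-generation" label.
	legacy := extensionsv1beta1.DaemonSetSpec{TemplateGeneration: 999}
	fmt.Println("v1beta1 template generation:", legacy.TemplateGeneration)

	// apps/v1 DaemonSetSpec has no TemplateGeneration field; rollouts are
	// tracked through ControllerRevisions and the controller-revision-hash
	// pod label instead, so the old assertion has nothing to compare against.
	modern := appsv1.DaemonSetSpec{}
	_ = modern
}
```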
@@ -29,7 +29,6 @@ go_library(
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/extensions:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
-        "//pkg/controller:go_default_library",
         "//pkg/controller/daemon:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
         "//pkg/controller/job:go_default_library",
@@ -34,7 +34,6 @@ import (
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
-	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/daemon"
 	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
@@ -317,8 +316,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 	framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func() {
 		label := map[string]string{daemonsetNameLabel: dsName}
 
-		templateGeneration := int64(999)
-		framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration)
+		framework.Logf("Creating simple daemon set %s", dsName)
 		ds := newDaemonSet(dsName, image, label)
 		ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
 		ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
@@ -328,10 +326,6 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
 
-		By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
-		err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
-		Expect(err).NotTo(HaveOccurred())
-
 		// Check history and labels
 		ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
@@ -345,16 +339,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
 		ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
 		Expect(err).NotTo(HaveOccurred())
-		templateGeneration++
 
 		By("Check that daemon pods images are updated.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
 		Expect(err).NotTo(HaveOccurred())
 
-		By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
-		err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
-		Expect(err).NotTo(HaveOccurred())
-
 		By("Check that daemon pods are still running on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
@@ -445,23 +434,6 @@ func getDaemonSetImagePatch(containerName, containerImage string) string {
 	return fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, containerName, containerImage)
 }
 
-// deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents.
-// It also checks that all dependents are orphaned, and the DaemonSet is deleted.
-func deleteDaemonSetAndOrphan(c clientset.Interface, ds *apps.DaemonSet) {
-	trueVar := true
-	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
-	deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
-	err := c.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
-	Expect(err).NotTo(HaveOccurred())
-
-	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
-	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
-	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
-	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be orphaned")
-	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(c, ds.Namespace, ds.Name))
-	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
-}
-
 func newDaemonSet(dsName, image string, label map[string]string) *apps.DaemonSet {
 	return &apps.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
@@ -702,111 +674,6 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS
 	}
 }
 
-func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label map[string]string, templateGeneration string) error {
-	pods := listDaemonPods(c, ns, label)
-	for _, pod := range pods.Items {
-		// We don't care about inactive pods
-		if !controller.IsPodActive(&pod) {
-			continue
-		}
-		podTemplateGeneration := pod.Labels[apps.DeprecatedTemplateGeneration]
-		if podTemplateGeneration != templateGeneration {
-			return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
-		}
-	}
-	return nil
-}
-
-func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) {
-	return func() (bool, error) {
-		_, err := c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
-		if !apierrs.IsNotFound(err) {
-			return false, err
-		}
-		return true, nil
-	}
-}
-
-func checkDaemonSetPodsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
-	return func() (bool, error) {
-		pods := listDaemonPods(c, ns, label)
-		for _, pod := range pods.Items {
-			// This pod is orphaned only when controller ref is cleared
-			if controllerRef := metav1.GetControllerOf(&pod); controllerRef != nil {
-				return false, nil
-			}
-		}
-		return true, nil
-	}
-}
-
-func checkDaemonSetHistoryOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
-	return func() (bool, error) {
-		histories := listDaemonHistories(c, ns, label)
-		for _, history := range histories.Items {
-			// This history is orphaned only when controller ref is cleared
-			if controllerRef := metav1.GetControllerOf(&history); controllerRef != nil {
-				return false, nil
-			}
-		}
-		return true, nil
-	}
-}
-
-func checkDaemonSetPodsAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
-	return func() (bool, error) {
-		pods := listDaemonPods(c, ns, label)
-		for _, pod := range pods.Items {
-			// This pod is adopted only when its controller ref is update
-			if controllerRef := metav1.GetControllerOf(&pod); controllerRef == nil || controllerRef.UID != dsUID {
-				return false, nil
-			}
-		}
-		return true, nil
-	}
-}
-
-func checkDaemonSetHistoryAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
-	return func() (bool, error) {
-		histories := listDaemonHistories(c, ns, label)
-		for _, history := range histories.Items {
-			// This history is adopted only when its controller ref is update
-			if controllerRef := metav1.GetControllerOf(&history); controllerRef == nil || controllerRef.UID != dsUID {
-				return false, nil
-			}
-		}
-		return true, nil
-	}
-}
-
-func waitDaemonSetAdoption(c clientset.Interface, ds *apps.DaemonSet, podPrefix string, podTemplateGeneration int64) {
-	ns := ds.Namespace
-	label := ds.Spec.Template.Labels
-
-	err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsAdopted(c, ns, ds.UID, label))
-	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be adopted")
-	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryAdopted(c, ns, ds.UID, label))
-	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be adopted")
-
-	framework.Logf("Make sure no daemon pod updated its template generation %d", podTemplateGeneration)
-	err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(podTemplateGeneration))
-	Expect(err).NotTo(HaveOccurred())
-
-	framework.Logf("Make sure no pods are recreated by looking at their names")
-	err = checkDaemonSetPodsName(c, ns, podPrefix, label)
-	Expect(err).NotTo(HaveOccurred())
-}
-
-func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[string]string) error {
-	pods := listDaemonPods(c, ns, label)
-	for _, pod := range pods.Items {
-		if !strings.HasPrefix(pod.Name, prefix) {
-			return fmt.Errorf("expected pod %s name to be prefixed %q", pod.Name, prefix)
-		}
-	}
-	return nil
-}
-
 func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
 	for _, pod := range podList.Items {
 		podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]
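The helpers removed above were the only remaining consumers of the per-pod template-generation label. Under apps/v1, the equivalent "did every pod pick up the new template?" question goes through the `controller-revision-hash` label that the surviving `checkDaemonSetPodsLabels` helper already reads. A rough, self-contained sketch of such a check follows; the helper name and wiring are illustrative rather than taken from this file, and it uses the same pre-context `client-go` call style as the surrounding test code:

```go
// Illustrative sketch, not part of this PR: verify that every pod selected by
// an apps/v1 DaemonSet carries the expected controller-revision-hash label.
package daemonsetcheck

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
)

// checkDaemonPodsRevisionHash lists the DaemonSet's pods by its selector and
// fails if any pod's controller-revision-hash label differs from wantHash
// (wantHash would come from the DaemonSet's current ControllerRevision).
func checkDaemonPodsRevisionHash(c kubernetes.Interface, ds *appsv1.DaemonSet, wantHash string) error {
	selector := labels.Set(ds.Spec.Selector.MatchLabels).AsSelector().String()
	pods, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return err
	}
	for _, pod := range pods.Items {
		got := pod.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] // "controller-revision-hash"
		if got != wantHash {
			return fmt.Errorf("pod %s/%s has revision hash %q, want %q", pod.Namespace, pod.Name, got, wantHash)
		}
	}
	return nil
}
```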