Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-08 03:33:56 +00:00
Merge pull request #47387 from janetkuo/daemonset-adopt-e2e
Automatic merge from submit-queue

Add e2e tests for DaemonSet adopt and rollback shouldn't cause pod restart

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: xref #46981

**Special notes for your reviewer**: ~~depends on #47084 (ignore commits prefixed `PICK` while reviewing)~~ (merged)

1. Update DaemonSet e2e test: pods with a different templateGeneration but the same template hash (meaning they came from the current DaemonSet template) should not be restarted
2. Add DaemonSet e2e test: roll back a DaemonSet before a rollout is done; existing pods should not be restarted

**Release note**:

```release-note
NONE
```
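The behavior under test, restated: when a controller finds orphaned pods whose spec came from its current template, it adopts them in place rather than recreating them; templateGeneration alone is not the deciding signal. A minimal sketch of that rule, under stated assumptions (the label keys below are illustrative, not taken from this diff):

```go
package main

import "fmt"

// Illustrative label keys -- assumptions for this sketch, not taken from the diff.
const (
	templateGenerationKey = "pod-template-generation"
	templateHashKey       = "controller-revision-hash"
)

// podUpToDate approximates the rule the PR description states: a pod whose
// template hash matches the DaemonSet's current template is treated as
// up to date even if its generation label differs.
func podUpToDate(podLabels map[string]string, currentGeneration, currentHash string) bool {
	if hash, ok := podLabels[templateHashKey]; ok && hash == currentHash {
		return true // same template -> adopt in place, no restart
	}
	return podLabels[templateGenerationKey] == currentGeneration
}

func main() {
	pod := map[string]string{templateGenerationKey: "2", templateHashKey: "abc123"}
	// Different generation (2 vs 3) but same hash: should NOT be restarted.
	fmt.Println(podUpToDate(pod, "3", "abc123")) // true
}
```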
This commit is contained in: commit aeb098916b
```diff
@@ -188,6 +188,7 @@ go_library(
         "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
         "//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
```
```diff
@@ -22,6 +22,7 @@ import (
 	"strings"
 	"time"
 
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
```
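One import added above deserves a note: `apiequality.Semantic.DeepEqual` is what the new assertions use to compare pod templates. Unlike `reflect.DeepEqual`, it applies Kubernetes-aware equality rules, e.g. `resource.Quantity` values compare by semantic value. A small standalone illustration (not from this diff):

```go
package main

import (
	"fmt"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	a := resource.MustParse("1000m")
	b := resource.MustParse("1")
	// Semantic equality knows 1000m and 1 are the same quantity;
	// a field-by-field reflect.DeepEqual would report them as different.
	fmt.Println(apiequality.Semantic.DeepEqual(a, b)) // true
}
```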
```diff
@@ -360,7 +361,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration))
 	})
 
-	It("Should adopt or recreate existing pods when creating a RollingUpdate DaemonSet with matching or mismatching templateGeneration", func() {
+	It("Should adopt existing pods when creating a RollingUpdate DaemonSet regardless of templateGeneration", func() {
 		label := map[string]string{daemonsetNameLabel: dsName}
 
 		// 1. Create a RollingUpdate DaemonSet
```
```diff
@@ -373,76 +374,133 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred())
 		Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
 
-		By("Check that daemon pods launch on every node of the cluster.")
+		framework.Logf("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
 
-		By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
+		framework.Logf("Make sure all daemon pods have correct template generation %d", templateGeneration)
 		err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
 		Expect(err).NotTo(HaveOccurred())
 
 		// 2. Orphan DaemonSet pods
-		By(fmt.Sprintf("Deleting DaemonSet %s and orphaning its pods", dsName))
-		err = orphanDaemonSetPods(c, ds)
-		Expect(err).NotTo(HaveOccurred())
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ns, label))
-		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryOrphaned(c, ns, label))
-		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be orphaned")
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(f, ns, ds.Name))
-		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
+		framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", dsName)
+		deleteDaemonSetAndOrphan(c, ds)
 
 		// 3. Adopt DaemonSet pods (no restart)
 		newDSName := "adopt"
-		By(fmt.Sprintf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newDSName))
+		framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newDSName)
 		newDS := newDaemonSet(newDSName, image, label)
 		newDS.Spec.TemplateGeneration = templateGeneration
 		newDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
 		newDS, err = c.Extensions().DaemonSets(ns).Create(newDS)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(newDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
+		Expect(apiequality.Semantic.DeepEqual(newDS.Spec.Template, ds.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")
 
-		By(fmt.Sprintf("Wait for all pods to be adopted by DaemonSet %s", newDSName))
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsAdopted(c, ns, newDS.UID, label))
-		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be adopted")
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryAdopted(c, ns, newDS.UID, label))
-		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be adopted")
-
-		By(fmt.Sprintf("Make sure no daemon pod updated its template generation %d", templateGeneration))
-		err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
-		Expect(err).NotTo(HaveOccurred())
-
-		By("Make sure no pods are recreated by looking at their names")
-		err = checkDaemonSetPodsName(c, ns, dsName, label)
-		Expect(err).NotTo(HaveOccurred())
+		framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newDS.Name)
+		waitDaemonSetAdoption(c, newDS, ds.Name, templateGeneration)
 
 		// 4. Orphan DaemonSet pods again
-		By(fmt.Sprintf("Deleting DaemonSet %s and orphaning its pods", newDSName))
-		err = orphanDaemonSetPods(c, newDS)
-		Expect(err).NotTo(HaveOccurred())
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ns, label))
-		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryOrphaned(c, ns, label))
-		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be orphaned")
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(f, ns, newDSName))
-		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
+		framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newDSName)
+		deleteDaemonSetAndOrphan(c, newDS)
 
-		// 4. Adopt DaemonSet pods (should kill and restart those pods)
-		newRestartDSName := "restart"
-		By(fmt.Sprintf("Creating a new RollingUpdate DaemonSet %s to restart adopted pods", newRestartDSName))
-		newRestartDS := newDaemonSet(newRestartDSName, image, label)
-		newRestartDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
-		newRestartDS, err = c.Extensions().DaemonSets(ns).Create(newRestartDS)
+		// 5. Adopt DaemonSet pods (no restart) as long as template matches, even when templateGeneration doesn't match
+		newAdoptDSName := "adopt-template-matches"
+		framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
+		newAdoptDS := newDaemonSet(newAdoptDSName, image, label)
+		newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
+		newAdoptDS, err = c.Extensions().DaemonSets(ns).Create(newAdoptDS)
 		Expect(err).NotTo(HaveOccurred())
-		Expect(newRestartDS.Spec.TemplateGeneration).To(Equal(int64(1)))
+		Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(int64(1)))
+		Expect(newAdoptDS.Spec.TemplateGeneration).NotTo(Equal(templateGeneration))
+		Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")
 
-		By("Wait for restarted DaemonSet pods launch on every node of the cluster.")
-		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsNameMatch(c, ns, newRestartDSName, label))
-		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to restart")
+		framework.Logf(fmt.Sprintf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name))
+		waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
 
-		By("Make sure restarted DaemonSet pods have correct template generation 1")
-		err = checkDaemonPodsTemplateGeneration(c, ns, label, "1")
+		// 6. Orphan DaemonSet pods again
+		framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newAdoptDSName)
+		deleteDaemonSetAndOrphan(c, newAdoptDS)
+
+		// 7. Adopt DaemonSet pods (no restart) as long as templateGeneration matches, even when template doesn't match
+		newAdoptDSName = "adopt-template-generation-matches"
+		framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
+		newAdoptDS = newDaemonSet(newAdoptDSName, image, label)
+		newAdoptDS.Spec.Template.Spec.Containers[0].Name = "not-match"
+		newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
+		newAdoptDS.Spec.TemplateGeneration = templateGeneration
+		newAdoptDS, err = c.Extensions().DaemonSets(ns).Create(newAdoptDS)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
+		Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).NotTo(BeTrue(), "DaemonSet template should not match")
+
+		framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name)
+		waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
 	})
+
+	It("Should rollback without unnecessary restarts", func() {
+		// Skip clusters with only one node, where we cannot have half-done DaemonSet rollout for this test
+		framework.SkipUnlessNodeCountIsAtLeast(2)
+
+		framework.Logf("Create a RollingUpdate DaemonSet")
+		label := map[string]string{daemonsetNameLabel: dsName}
+		ds := newDaemonSet(dsName, image, label)
+		ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
+		ds, err := c.Extensions().DaemonSets(ns).Create(ds)
+		Expect(err).NotTo(HaveOccurred())
+
+		framework.Logf("Check that daemon pods launch on every node of the cluster")
+		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
+
+		framework.Logf("Update the DaemonSet to trigger a rollout")
+		// We use a nonexistent image here, so that we make sure it won't finish
+		newImage := "foo:non-existent"
+		newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
+			update.Spec.Template.Spec.Containers[0].Image = newImage
+		})
+		Expect(err).NotTo(HaveOccurred())
+
+		// Make sure we're in the middle of a rollout
+		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
+		Expect(err).NotTo(HaveOccurred())
+
+		pods := listDaemonPods(c, ns, label)
+		var existingPods, newPods []*v1.Pod
+		for i := range pods.Items {
+			pod := pods.Items[i]
+			image := pod.Spec.Containers[0].Image
+			switch image {
+			case ds.Spec.Template.Spec.Containers[0].Image:
+				existingPods = append(existingPods, &pod)
+			case newDS.Spec.Template.Spec.Containers[0].Image:
+				newPods = append(newPods, &pod)
+			default:
+				framework.Failf("unexpected pod found, image = %s", image)
+			}
+		}
+		Expect(len(existingPods)).NotTo(Equal(0))
+		Expect(len(newPods)).NotTo(Equal(0))
+
+		framework.Logf("Roll back the DaemonSet before rollout is complete")
+		rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
+			update.Spec.Template.Spec.Containers[0].Image = image
+		})
+		Expect(err).NotTo(HaveOccurred())
+
+		framework.Logf("Make sure DaemonSet rollback is complete")
+		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
+		Expect(err).NotTo(HaveOccurred())
+
+		// After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted
+		pods = listDaemonPods(c, ns, label)
+		rollbackPods := map[string]bool{}
+		for _, pod := range pods.Items {
+			rollbackPods[pod.Name] = true
+		}
+		for _, pod := range existingPods {
+			Expect(rollbackPods[pod.Name]).To(BeTrue(), fmt.Sprintf("unexpected pod %s be restarted", pod.Name))
+		}
+	})
 })
```
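The final assertion of the rollback test boils down to a set comparison: every pod name recorded mid-rollout must still be present after the rollback. The same check, isolated as a standalone sketch (plain Go, no test framework):

```go
package main

import "fmt"

// missingPods returns the names from `before` that no longer exist in `after`.
// This mirrors the rollbackPods map check at the end of the rollback test:
// any pod recorded mid-rollout that is missing after rollback was restarted.
func missingPods(before, after []string) []string {
	alive := make(map[string]bool, len(after))
	for _, name := range after {
		alive[name] = true
	}
	var missing []string
	for _, name := range before {
		if !alive[name] {
			missing = append(missing, name)
		}
	}
	return missing
}

func main() {
	before := []string{"ds-abc12", "ds-def34"}
	after := []string{"ds-abc12", "ds-xyz99"}
	fmt.Println(missingPods(before, after)) // [ds-def34] -> that pod was recreated
}
```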
```diff
@@ -451,11 +509,21 @@ func getDaemonSetImagePatch(containerName, containerImage string) string {
 	return fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, containerName, containerImage)
 }
 
-func orphanDaemonSetPods(c clientset.Interface, ds *extensions.DaemonSet) error {
+// deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents.
+// It also checks that all dependents are orphaned, and the DaemonSet is deleted.
+func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) {
 	trueVar := true
 	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
 	deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
-	return c.Extensions().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
+	err := c.Extensions().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
+	Expect(err).NotTo(HaveOccurred())
+
+	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
+	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
+	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
+	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be orphaned")
+	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(c, ds.Namespace, ds.Name))
+	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
 }
 
 func newDaemonSet(dsName, image string, label map[string]string) *extensions.DaemonSet {
```
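The helper leans on `checkDaemonSetPodsOrphaned` and friends, whose bodies are not part of this diff. A plausible shape for the pod-side check, sketched with today's `metav1.GetControllerOf` (the 1.7-era tree may have located this helper elsewhere):

```go
package e2e // sketch; the real helper lives alongside the test and is not shown in this diff

import (
	v1 "k8s.io/api/core/v1" // the 1.7-era tree imported this as k8s.io/kubernetes/pkg/api/v1
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podsOrphaned reports whether every pod has lost its controller owner
// reference, which is what the OrphanDependents delete above should produce.
func podsOrphaned(pods []v1.Pod) bool {
	for i := range pods {
		if metav1.GetControllerOf(&pods[i]) != nil {
			return false
		}
	}
	return true
}
```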
```diff
@@ -471,7 +539,7 @@ func newDaemonSet(dsName, image string, label map[string]string) *extensions.Dae
 			Spec: v1.PodSpec{
 				Containers: []v1.Container{
 					{
-						Name:  dsName,
+						Name:  "app",
 						Image: image,
 						Ports: []v1.ContainerPort{{ContainerPort: 9376}},
 					},
```
```diff
@@ -610,6 +678,18 @@ func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) fu
 	}
 }
 
+func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func() (bool, error) {
+	return func() (bool, error) {
+		pods := listDaemonPods(c, ns, label)
+		for _, pod := range pods.Items {
+			if pod.Spec.Containers[0].Image == newImage {
+				return true, nil
+			}
+		}
+		return false, nil
+	}
+}
+
 // canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
 func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool {
 	newPod := daemon.NewPod(ds, node.Name)
```
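`checkAtLeastOneNewPod` follows the `wait.ConditionFunc` contract used throughout these tests: return `(true, nil)` to stop successfully, `(false, nil)` to poll again, or a non-nil error to abort immediately. A self-contained illustration of `wait.PollImmediate` driving such a condition:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// wait.PollImmediate runs the condition once right away, then once per
	// interval, until it returns true, returns an error, or the timeout
	// elapses (in which case err is wait.ErrWaitTimeout).
	err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) {
		return time.Since(start) > time.Second, nil // satisfied after ~1s
	})
	fmt.Println("condition met:", err == nil)
}
```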
```diff
@@ -688,9 +768,9 @@ func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label m
 	return nil
 }
 
-func checkDaemonSetDeleted(f *framework.Framework, ns, name string) func() (bool, error) {
+func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) {
 	return func() (bool, error) {
-		_, err := f.ClientSet.Extensions().DaemonSets(ns).Get(name, metav1.GetOptions{})
+		_, err := c.Extensions().DaemonSets(ns).Get(name, metav1.GetOptions{})
 		if !apierrs.IsNotFound(err) {
 			return false, err
 		}
```
```diff
@@ -750,14 +830,22 @@ func checkDaemonSetHistoryAdopted(c clientset.Interface, ns string, dsUID types.
 	}
 }
 
-func checkDaemonSetPodsNameMatch(c clientset.Interface, ns, prefix string, label map[string]string) func() (bool, error) {
-	return func() (bool, error) {
-		if err := checkDaemonSetPodsName(c, ns, prefix, label); err != nil {
-			framework.Logf("%v", err)
-			return false, nil
-		}
-		return true, nil
-	}
+func waitDaemonSetAdoption(c clientset.Interface, ds *extensions.DaemonSet, podPrefix string, podTemplateGeneration int64) {
+	ns := ds.Namespace
+	label := ds.Spec.Template.Labels
+
+	err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsAdopted(c, ns, ds.UID, label))
+	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be adopted")
+	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryAdopted(c, ns, ds.UID, label))
+	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be adopted")
+
+	framework.Logf("Make sure no daemon pod updated its template generation %d", podTemplateGeneration)
+	err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(podTemplateGeneration))
+	Expect(err).NotTo(HaveOccurred())
+
+	framework.Logf("Make sure no pods are recreated by looking at their names")
+	err = checkDaemonSetPodsName(c, ns, podPrefix, label)
+	Expect(err).NotTo(HaveOccurred())
 }
 
 func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[string]string) error {
```
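`checkDaemonSetPodsAdopted` is likewise outside this diff; its natural core is the mirror image of the orphan check sketched earlier, under the same assumptions: adoption is complete once every pod's controller owner reference carries the adopting DaemonSet's UID.

```go
package e2e // sketch; the real checkDaemonSetPodsAdopted is not shown in this diff

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// podsAdoptedBy reports whether every pod's controller owner reference
// carries the adopting DaemonSet's UID.
func podsAdoptedBy(pods []v1.Pod, dsUID types.UID) bool {
	for i := range pods {
		ref := metav1.GetControllerOf(&pods[i])
		if ref == nil || ref.UID != dsUID {
			return false // still orphaned, or owned by something else
		}
	}
	return true
}
```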
```diff
@@ -3587,6 +3587,30 @@ func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUp
 	return job, pollErr
 }
 
+type updateDSFunc func(*extensions.DaemonSet)
+
+func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *extensions.DaemonSet, err error) {
+	daemonsets := c.ExtensionsV1beta1().DaemonSets(namespace)
+	var updateErr error
+	pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
+		if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
+			return false, err
+		}
+		// Apply the update, then attempt to push it to the apiserver.
+		applyUpdate(ds)
+		if ds, err = daemonsets.Update(ds); err == nil {
+			Logf("Updating DaemonSet %s", name)
+			return true, nil
+		}
+		updateErr = err
+		return false, nil
+	})
+	if pollErr == wait.ErrWaitTimeout {
+		pollErr = fmt.Errorf("couldn't apply the provided updated to DaemonSet %q: %v", name, updateErr)
+	}
+	return ds, pollErr
+}
+
 // NodeAddresses returns the first address of the given type of each node.
 func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
 	hosts := []string{}
```
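Callers pass a mutation closure, and the helper absorbs optimistic-concurrency conflicts by re-reading the object and reapplying the closure every 10ms for up to a minute; only a failed Get aborts early. Usage as it appears in the rollback test above:

```go
// Excerpted usage from the rollback test: mutate inside the closure;
// the helper re-reads and retries on update conflicts.
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
	update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
```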