Add e2e test for rollback a DaemonSet should not cause pod restart
parent 7e606f211c
commit 29620479d5

@@ -437,6 +437,71 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
		framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name)
		waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
	})

	It("Should rollback without unnecessary restarts", func() {
		// Skip clusters with only one node, where we cannot have a half-done DaemonSet rollout for this test
		framework.SkipUnlessNodeCountIsAtLeast(2)

		framework.Logf("Create a RollingUpdate DaemonSet")
		label := map[string]string{daemonsetNameLabel: dsName}
		ds := newDaemonSet(dsName, image, label)
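		// Use the RollingUpdate strategy so that a change to the pod template triggers an automatic rollout of the daemon pods.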
		ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
		ds, err := c.Extensions().DaemonSets(ns).Create(ds)
		Expect(err).NotTo(HaveOccurred())

		framework.Logf("Check that daemon pods launch on every node of the cluster")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

		framework.Logf("Update the DaemonSet to trigger a rollout")
		// We use a nonexistent image here, so that the rollout never finishes.
		newImage := "foo:non-existent"
		newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
			update.Spec.Template.Spec.Containers[0].Image = newImage
		})
		Expect(err).NotTo(HaveOccurred())

		// Make sure we're in the middle of a rollout
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
		Expect(err).NotTo(HaveOccurred())

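		// Snapshot the pods mid-rollout and split them by image: pods still running the original image vs. pods already updated.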
		pods := listDaemonPods(c, ns, label)
		var existingPods, newPods []*v1.Pod
		for i := range pods.Items {
			pod := pods.Items[i]
			image := pod.Spec.Containers[0].Image
			switch image {
			case ds.Spec.Template.Spec.Containers[0].Image:
				existingPods = append(existingPods, &pod)
			case newDS.Spec.Template.Spec.Containers[0].Image:
				newPods = append(newPods, &pod)
			default:
				framework.Failf("unexpected pod found, image = %s", image)
			}
		}
		Expect(len(existingPods)).NotTo(Equal(0))
		Expect(len(newPods)).NotTo(Equal(0))

		framework.Logf("Roll back the DaemonSet before rollout is complete")
		rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
			update.Spec.Template.Spec.Containers[0].Image = image
		})
		Expect(err).NotTo(HaveOccurred())

		framework.Logf("Make sure DaemonSet rollback is complete")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
		Expect(err).NotTo(HaveOccurred())

		// After the rollback is done, compare the current pods with the old pods captured mid-rollout, to make sure they were not restarted
		pods = listDaemonPods(c, ns, label)
		rollbackPods := map[string]bool{}
		for _, pod := range pods.Items {
			rollbackPods[pod.Name] = true
		}
		for _, pod := range existingPods {
			Expect(rollbackPods[pod.Name]).To(BeTrue(), fmt.Sprintf("pod %s was unexpectedly restarted", pod.Name))
		}
	})
})

// getDaemonSetImagePatch generates a patch for updating a DaemonSet's container image

@@ -613,6 +678,18 @@ func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) fu
	}
}

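// checkAtLeastOneNewPod returns a poll condition that reports true once at least one
// daemon pod matching the given label is running the updated image, i.e. the rollout has started.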
func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func() (bool, error) {
	return func() (bool, error) {
		pods := listDaemonPods(c, ns, label)
		for _, pod := range pods.Items {
			if pod.Spec.Containers[0].Image == newImage {
				return true, nil
			}
		}
		return false, nil
	}
}

// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool {
	newPod := daemon.NewPod(ds, node.Name)

@@ -3587,6 +3587,30 @@ func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUp
	return job, pollErr
}

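// updateDSFunc mutates a DaemonSet in place before it is sent back to the apiserver.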
type updateDSFunc func(*extensions.DaemonSet)

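// UpdateDaemonSetWithRetries fetches the named DaemonSet, applies applyUpdate to it, and retries
// on update failures (e.g. conflicts) until the update succeeds or the one-minute poll times out.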
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *extensions.DaemonSet, err error) {
	daemonsets := c.ExtensionsV1beta1().DaemonSets(namespace)
	var updateErr error
	pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
			return false, err
		}
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(ds)
		if ds, err = daemonsets.Update(ds); err == nil {
			Logf("Updating DaemonSet %s", name)
			return true, nil
		}
		updateErr = err
		return false, nil
	})
	if pollErr == wait.ErrWaitTimeout {
		pollErr = fmt.Errorf("couldn't apply the provided update to DaemonSet %q: %v", name, updateErr)
	}
	return ds, pollErr
}

// NodeAddresses returns the first address of the given type of each node.
func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
	hosts := []string{}