Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #61615 from janetkuo/rm-adopt-hash
Automatic merge from submit-queue (batch tested with PRs 61790, 61808, 60339, 61615, 61757). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Deployment to stop adding pod-template-hash labels/selector on adoption

**What this PR does / why we need it**: This is a blocker for #55714, because the ReplicaSet selector becomes immutable in `apps/v1`. With controller refs, a Deployment's ReplicaSets and Pods can avoid fighting with each other without a unique label/selector (pod-template-hash), so it is safe to stop adding the hash label/selector on adoption.

**Which issue(s) this PR fixes**: Fixes #61433

**Special notes for your reviewer**: This is a behavioral change to the Deployment controller that affects all versions of the Deployment API (`apps/v1`, `extensions/v1beta1`, `apps/v1beta1`, `apps/v1beta2`).

**Release note**:

```release-note
Deployment will stop adding pod-template-hash labels/selector to ReplicaSets and Pods it adopts. Resources created by Deployments are not affected (will still have pod-template-hash labels/selector).
```
Commit 2ba2cc1321
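The PR description above argues that, with controller refs, a Deployment no longer needs the pod-template-hash label/selector to claim adopted ReplicaSets and Pods. The following is a minimal illustrative sketch (not the controller's actual code path; the helper name `isOwnedByDeployment` is hypothetical) of how ownership can be established purely from the ControllerRef:

```go
// Illustrative sketch only: ownership is determined by the controller owner
// reference, so a unique pod-template-hash label is not required for adoption.
package main

import (
	"fmt"

	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isOwnedByDeployment reports whether rs is controlled by d, based solely on
// the ControllerRef. (Hypothetical helper, for illustration.)
func isOwnedByDeployment(rs *extensions.ReplicaSet, d *extensions.Deployment) bool {
	ref := metav1.GetControllerOf(rs)
	return ref != nil && ref.UID == d.UID
}

func main() {
	d := &extensions.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "nginx", UID: "uid-1"}}
	rs := &extensions.ReplicaSet{ObjectMeta: metav1.ObjectMeta{
		Name: "nginx-12345",
		OwnerReferences: []metav1.OwnerReference{
			*metav1.NewControllerRef(d, extensions.SchemeGroupVersion.WithKind("Deployment")),
		},
	}}
	fmt.Println(isOwnedByDeployment(rs, d)) // true
}
```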
```diff
@@ -29,7 +29,6 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
```
```diff
@@ -28,7 +28,6 @@ import (
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/kubernetes/pkg/controller"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
```
```diff
@@ -117,11 +116,6 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)
// Note that currently the deployment controller is using caches to avoid querying the server for reads.
// This may lead to stale reads of replica sets, thus incorrect deployment status.
func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
	// List the deployment's RSes & Pods and apply pod-template-hash info to deployment's adopted RSes/Pods
	rsList, err := dc.rsAndPodsWithHashKeySynced(d, rsList, podMap)
	if err != nil {
		return nil, nil, fmt.Errorf("error labeling replica sets and pods with pod-template-hash: %v", err)
	}
	_, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList)

	// Get new replica set with the updated revision number
```
```diff
@@ -133,99 +127,6 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.D
	return newRS, allOldRSs, nil
}

// rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment
// targets, with pod-template-hash information synced.
//
// rsList should come from getReplicaSetsForDeployment(d).
// podMap should come from getPodMapForDeployment(d, rsList).
func (dc *DeploymentController) rsAndPodsWithHashKeySynced(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) ([]*extensions.ReplicaSet, error) {
	var syncedRSList []*extensions.ReplicaSet
	for _, rs := range rsList {
		// Add pod-template-hash information if it's not in the RS.
		// Otherwise, new RS produced by Deployment will overlap with pre-existing ones
		// that aren't constrained by the pod-template-hash.
		syncedRS, err := dc.addHashKeyToRSAndPods(rs, podMap[rs.UID], d.Status.CollisionCount)
		if err != nil {
			return nil, err
		}
		syncedRSList = append(syncedRSList, syncedRS)
	}
	return syncedRSList, nil
}

// addHashKeyToRSAndPods adds pod-template-hash information to the given rs, if it's not already there, with the following steps:
// 1. Add hash label to the rs's pod template, and make sure the controller sees this update so that no orphaned pods will be created
// 2. Add hash label to all pods this rs owns, wait until replicaset controller reports rs.Status.FullyLabeledReplicas equal to the desired number of replicas
// 3. Add hash label to the rs's label and selector
func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet, podList *v1.PodList, collisionCount *int32) (*extensions.ReplicaSet, error) {
	// If the rs already has the new hash label in its selector, it's done syncing
	if labelsutil.SelectorHasLabel(rs.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
		return rs, nil
	}
	hash, err := deploymentutil.GetReplicaSetHash(rs, collisionCount)
	if err != nil {
		return nil, err
	}
	// 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label.
	updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name,
		func(updated *extensions.ReplicaSet) error {
			// Precondition: the RS doesn't contain the new hash in its pod template label.
			if updated.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
				return utilerrors.ErrPreconditionViolated
			}
			updated.Spec.Template.Labels = labelsutil.AddLabel(updated.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
			return nil
		})
	if err != nil {
		return nil, fmt.Errorf("error updating replica set %s/%s pod template label with template hash: %v", rs.Namespace, rs.Name, err)
	}
	// Make sure rs pod template is updated so that it won't create pods without the new label (orphaned pods).
	if updatedRS.Generation > updatedRS.Status.ObservedGeneration {
		// TODO: Revisit if we really need to wait here as opposed to returning and
		// potentially unblocking this worker (can wait up to 1min before timing out).
		if err = deploymentutil.WaitForReplicaSetUpdated(dc.rsLister, updatedRS.Generation, updatedRS.Namespace, updatedRS.Name); err != nil {
			return nil, fmt.Errorf("error waiting for replica set %s/%s to be observed by controller: %v", updatedRS.Namespace, updatedRS.Name, err)
		}
		glog.V(4).Infof("Observed the update of replica set %s/%s's pod template with hash %s.", rs.Namespace, rs.Name, hash)
	}

	// 2. Update all pods managed by the rs to have the new hash label, so they will be correctly adopted.
	if err := deploymentutil.LabelPodsWithHash(podList, dc.client, dc.podLister, rs.Namespace, rs.Name, hash); err != nil {
		return nil, fmt.Errorf("error in adding template hash label %s to pods %+v: %s", hash, podList, err)
	}

	// We need to wait for the replicaset controller to observe the pods being
	// labeled with pod template hash. Because previously we've called
	// WaitForReplicaSetUpdated, the replicaset controller should have dropped
	// FullyLabeledReplicas to 0 already, we only need to wait it to increase
	// back to the number of replicas in the spec.
	// TODO: Revisit if we really need to wait here as opposed to returning and
	// potentially unblocking this worker (can wait up to 1min before timing out).
	if err := deploymentutil.WaitForPodsHashPopulated(dc.rsLister, updatedRS.Generation, updatedRS.Namespace, updatedRS.Name); err != nil {
		return nil, fmt.Errorf("Replica set %s/%s: error waiting for replicaset controller to observe pods being labeled with template hash: %v", updatedRS.Namespace, updatedRS.Name, err)
	}

	// 3. Update rs label and selector to include the new hash label
	// Copy the old selector, so that we can scrub out any orphaned pods
	updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error {
		// Precondition: the RS doesn't contain the new hash in its label and selector.
		if updated.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash && updated.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
			return utilerrors.ErrPreconditionViolated
		}
		updated.Labels = labelsutil.AddLabel(updated.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
		updated.Spec.Selector = labelsutil.AddLabelToSelector(updated.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, hash)
		return nil
	})
	// If the RS isn't actually updated, that's okay, we'll retry in the
	// next sync loop since its selector isn't updated yet.
	if err != nil {
		return nil, fmt.Errorf("error updating ReplicaSet %s/%s label and selector with template hash: %v", updatedRS.Namespace, updatedRS.Name, err)
	}

	// TODO: look for orphaned pods and label them in the background somewhere else periodically
	return updatedRS, nil
}

// Returns a replica set that matches the intent of the given deployment. Returns nil if the new replica set doesn't exist yet.
// 1. Get existing new RS (the RS that the given deployment targets, whose pod template is the same as deployment's).
// 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes.
```
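The removed `addHashKeyToRSAndPods` above leans on an update-with-retries helper whose mutation callback returns a "precondition violated" sentinel when the hash is already in place, letting the caller skip a no-op update. Below is a self-contained sketch of that pattern; the names `errPreconditionViolated` and `updateWithRetries` are illustrative stand-ins, not the real `utilerrors`/`deploymentutil` implementations:

```go
// Sketch of the precondition-sentinel retry pattern: the mutation callback
// bails out with a sentinel error when there is nothing to change, and the
// caller treats that sentinel as success rather than an update failure.
package main

import (
	"errors"
	"fmt"
)

var errPreconditionViolated = errors.New("precondition violated")

// updateWithRetries applies mutate until it succeeds, the retry budget runs
// out, or the sentinel reports that the desired state is already present.
func updateWithRetries(labels map[string]string, maxRetries int, mutate func(map[string]string) error) error {
	var lastErr error
	for i := 0; i < maxRetries; i++ {
		err := mutate(labels)
		if err == nil {
			return nil // mutation applied (a real helper would now write the object back)
		}
		if err == errPreconditionViolated {
			return nil // nothing to do: the label is already there
		}
		lastErr = err
	}
	return lastErr
}

func main() {
	rsLabels := map[string]string{"app": "nginx"}
	hash := "5d59d67564"
	err := updateWithRetries(rsLabels, 3, func(l map[string]string) error {
		if l["pod-template-hash"] == hash {
			return errPreconditionViolated // already labeled, skip the update
		}
		l["pod-template-hash"] = hash
		return nil
	})
	fmt.Println(rsLabels, err) // map[app:nginx pod-template-hash:5d59d67564] <nil>
}
```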
```diff
@@ -29,7 +29,6 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
```
```diff
@@ -32,13 +32,9 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/errors"
	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
	corelisters "k8s.io/client-go/listers/core/v1"
	extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
	"k8s.io/client-go/util/integer"
	internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/controller"
```
```diff
@@ -677,56 +673,6 @@ func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.
	return requiredRSs, allRSs
}

// WaitForReplicaSetUpdated polls the replica set until it is updated.
func WaitForReplicaSetUpdated(c extensionslisters.ReplicaSetLister, desiredGeneration int64, namespace, name string) error {
	return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		rs, err := c.ReplicaSets(namespace).Get(name)
		if err != nil {
			return false, err
		}
		return rs.Status.ObservedGeneration >= desiredGeneration, nil
	})
}

// WaitForPodsHashPopulated polls the replica set until updated and fully labeled.
func WaitForPodsHashPopulated(c extensionslisters.ReplicaSetLister, desiredGeneration int64, namespace, name string) error {
	return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		rs, err := c.ReplicaSets(namespace).Get(name)
		if err != nil {
			return false, err
		}
		return rs.Status.ObservedGeneration >= desiredGeneration &&
			rs.Status.FullyLabeledReplicas == *(rs.Spec.Replicas), nil
	})
}

// LabelPodsWithHash labels all pods in the given podList with the new hash label.
func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister corelisters.PodLister, namespace, name, hash string) error {
	for _, pod := range podList.Items {
		// Ignore inactive Pods.
		if !controller.IsPodActive(&pod) {
			continue
		}
		// Only label the pod that doesn't already have the new hash
		if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
			_, err := UpdatePodWithRetries(c.CoreV1().Pods(namespace), podLister, pod.Namespace, pod.Name,
				func(podToUpdate *v1.Pod) error {
					// Precondition: the pod doesn't contain the new hash in its label.
					if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
						return errors.ErrPreconditionViolated
					}
					podToUpdate.Labels = labelsutil.AddLabel(podToUpdate.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
					return nil
				})
			if err != nil {
				return fmt.Errorf("error in adding template hash label %s to pod %q: %v", hash, pod.Name, err)
			}
			glog.V(4).Infof("Labeled pod %s/%s of ReplicaSet %s/%s with hash %s.", pod.Namespace, pod.Name, namespace, name, hash)
		}
	}
	return nil
}

// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template v1.PodTemplateSpec) *extensions.Deployment {
	deployment.Spec.Template.ObjectMeta = template.ObjectMeta
```
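The two removed wait helpers above are thin wrappers around `wait.PollImmediate`, gating on the ReplicaSet's `Status.ObservedGeneration` (and, for the second one, `FullyLabeledReplicas`). The sketch below shows the same polling shape against a generic fetch function; the `waitForObservedGeneration` helper and its fetch callback are hypothetical stand-ins for a lister `Get`, not the deploymentutil API:

```go
// Minimal sketch, assuming only k8s.io/apimachinery: poll a condition every
// second for up to a minute, returning once the observed generation catches
// up with the desired one, mirroring the removed WaitForReplicaSetUpdated.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForObservedGeneration(fetch func() (int64, error), desired int64) error {
	return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		observed, err := fetch()
		if err != nil {
			return false, err // a real caller may prefer to tolerate transient errors
		}
		return observed >= desired, nil
	})
}

func main() {
	observed := int64(0)
	err := waitForObservedGeneration(func() (int64, error) {
		observed++ // simulate the replicaset controller gradually catching up
		return observed, nil
	}, 3)
	fmt.Println(err) // <nil> after a few polls
}
```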
```diff
@@ -17,8 +17,6 @@ limitations under the License.
package util

import (
	"fmt"

	"github.com/golang/glog"

	extensions "k8s.io/api/extensions/v1beta1"
```
```diff
@@ -26,8 +24,6 @@ import (
	unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
	extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
	"k8s.io/client-go/util/retry"
	"k8s.io/kubernetes/pkg/controller"
	labelsutil "k8s.io/kubernetes/pkg/util/labels"
)

// TODO: use client library instead when it starts to support update retries
```
```diff
@@ -62,10 +58,3 @@ func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsL

	return rs, retryErr
}

// GetReplicaSetHash returns the pod template hash of a ReplicaSet's pod template space
func GetReplicaSetHash(rs *extensions.ReplicaSet, uniquifier *int32) (string, error) {
	rsTemplate := rs.Spec.Template.DeepCopy()
	rsTemplate.Labels = labelsutil.CloneAndRemoveLabel(rsTemplate.Labels, extensions.DefaultDeploymentUniqueLabelKey)
	return fmt.Sprintf("%d", controller.ComputeHash(rsTemplate, uniquifier)), nil
}
```
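The removed `GetReplicaSetHash` strips the existing pod-template-hash label from a deep copy of the pod template, then hashes the result together with the collision-count uniquifier via `controller.ComputeHash`. The sketch below illustrates that idea with JSON serialization and FNV-32a; it is an assumption-laden stand-in (the real `ComputeHash` uses Kubernetes' own deep-hash helpers, so the values produced here will not match real pod-template-hash values):

```go
// Rough sketch of the hashing idea: drop the hash label itself so it never
// feeds back into its own value, then hash the template plus the uniquifier.
package main

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"hash/fnv"
)

type podTemplate struct {
	Labels map[string]string `json:"labels"`
	Image  string            `json:"image"`
}

func templateHash(tpl podTemplate, uniquifier *int32) string {
	// Work on a copied label map so the caller's labels stay untouched.
	labels := map[string]string{}
	for k, v := range tpl.Labels {
		if k != "pod-template-hash" {
			labels[k] = v
		}
	}
	tpl.Labels = labels

	h := fnv.New32a()
	data, _ := json.Marshal(tpl) // map keys are sorted, so this is deterministic
	h.Write(data)
	if uniquifier != nil {
		// The collision counter perturbs the hash when two distinct
		// templates would otherwise collide.
		var buf [4]byte
		binary.BigEndian.PutUint32(buf[:], uint32(*uniquifier))
		h.Write(buf[:])
	}
	return fmt.Sprintf("%d", h.Sum32())
}

func main() {
	count := int32(1)
	tpl := podTemplate{Labels: map[string]string{"app": "nginx", "pod-template-hash": "old"}, Image: "nginx:1.13"}
	fmt.Println(templateHash(tpl, nil), templateHash(tpl, &count)) // two different values
}
```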
```diff
@@ -275,10 +275,6 @@ func testRollingUpdateDeployment(f *framework.Framework) {
	_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(allOldRSs)).Should(Equal(1))
	// The old RS should contain pod-template-hash in its selector, label, and template label
	Expect(len(allOldRSs[0].Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
}

func testRecreateDeployment(f *framework.Framework) {
```
```diff
@@ -81,6 +81,32 @@ func TestNewDeployment(t *testing.T) {
	if newRS.Annotations[v1.LastAppliedConfigAnnotation] != "" {
		t.Errorf("expected new ReplicaSet last-applied annotation not copied from Deployment %s", tester.deployment.Name)
	}

	// New RS should contain pod-template-hash in its selector, label, and template label
	rsHash, err := checkRSHashLabels(newRS)
	if err != nil {
		t.Error(err)
	}

	// All pods targeted by the deployment should contain pod-template-hash in their labels
	selector, err := metav1.LabelSelectorAsSelector(tester.deployment.Spec.Selector)
	if err != nil {
		t.Fatalf("failed to parse deployment %s selector: %v", name, err)
	}
	pods, err := c.CoreV1().Pods(ns.Name).List(metav1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		t.Fatalf("failed to list pods of deployment %s: %v", name, err)
	}
	if len(pods.Items) != int(replicas) {
		t.Errorf("expected %d pods, got %d pods", replicas, len(pods.Items))
	}
	podHash, err := checkPodsHashLabel(pods)
	if err != nil {
		t.Error(err)
	}
	if rsHash != podHash {
		t.Errorf("found mismatching pod-template-hash value: rs hash = %s whereas pod hash = %s", rsHash, podHash)
	}
}

// Deployments should support roll out, roll back, and roll over
```
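The added assertions above call `checkRSHashLabels` and `checkPodsHashLabel`, which are defined elsewhere in the test file and not shown in this hunk. A plausible, purely hypothetical shape of the ReplicaSet-side check is sketched below (it assumes the hash must be present and identical in the RS labels, selector, and pod template labels; the name `checkRSHashLabelsSketch` and the name-suffix check are assumptions, not the actual test helper):

```go
package deploymenttest

import (
	"fmt"
	"strings"

	extensions "k8s.io/api/extensions/v1beta1"
)

// checkRSHashLabelsSketch is a hypothetical stand-in for the checkRSHashLabels
// helper the test calls: the pod-template-hash must be non-empty and identical
// across the RS labels, selector, and pod template labels, and should also
// appear in the generated RS name.
func checkRSHashLabelsSketch(rs *extensions.ReplicaSet) (string, error) {
	hash := rs.Labels[extensions.DefaultDeploymentUniqueLabelKey]
	templateHash := rs.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey]
	selectorHash := ""
	if rs.Spec.Selector != nil {
		selectorHash = rs.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey]
	}
	if hash == "" || hash != selectorHash || hash != templateHash {
		return "", fmt.Errorf("mismatching pod-template-hash in replicaset %s: label %q, selector %q, template %q", rs.Name, hash, selectorHash, templateHash)
	}
	if !strings.Contains(rs.Name, hash) {
		return "", fmt.Errorf("replicaset name %s does not contain the pod-template-hash %q", rs.Name, hash)
	}
	return hash, nil
}
```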
```diff
@@ -654,104 +680,6 @@ func checkPodsHashLabel(pods *v1.PodList) (string, error) {
	return hash, nil
}

// Deployment should label adopted ReplicaSets and Pods.
func TestDeploymentLabelAdopted(t *testing.T) {
	s, closeFn, rm, dc, informers, c := dcSetup(t)
	defer closeFn()
	name := "test-adopted-deployment"
	ns := framework.CreateTestingNamespace(name, s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	// Start informer and controllers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	go rm.Run(5, stopCh)
	go dc.Run(5, stopCh)

	// Create a RS to be adopted by the deployment.
	rsName := "test-adopted-controller"
	replicas := int32(1)
	rs := newReplicaSet(rsName, ns.Name, replicas)
	_, err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Create(rs)
	if err != nil {
		t.Fatalf("failed to create replicaset %s: %v", rsName, err)
	}
	// Mark RS pods as ready.
	selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
	if err != nil {
		t.Fatalf("failed to parse replicaset %s selector: %v", rsName, err)
	}
	if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		pods, err := c.CoreV1().Pods(ns.Name).List(metav1.ListOptions{LabelSelector: selector.String()})
		if err != nil {
			return false, err
		}
		if len(pods.Items) != int(replicas) {
			return false, nil
		}
		for _, pod := range pods.Items {
			if err = markPodReady(c, ns.Name, &pod); err != nil {
				return false, nil
			}
		}
		return true, nil
	}); err != nil {
		t.Fatalf("failed to mark pods replicaset %s as ready: %v", rsName, err)
	}

	// Create a Deployment to adopt the old rs.
	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
	if tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment); err != nil {
		t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
	}

	// Wait for the Deployment to be updated to revision 1
	if err = tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
		t.Fatal(err)
	}

	// The RS and pods should be relabeled after the Deployment finishes adopting it and completes.
	if err := tester.waitForDeploymentComplete(); err != nil {
		t.Fatal(err)
	}

	// There should be no old RSes (overlapping RS)
	oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(tester.deployment, c.ExtensionsV1beta1())
	if err != nil {
		t.Fatalf("failed to get all replicasets owned by deployment %s: %v", name, err)
	}
	if len(oldRSs) != 0 || len(allOldRSs) != 0 {
		t.Errorf("expected deployment to have no old replicasets, got %d old replicasets", len(allOldRSs))
	}

	// New RS should be relabeled, i.e. contain pod-template-hash in its selector, label, and template label
	rsHash, err := checkRSHashLabels(newRS)
	if err != nil {
		t.Error(err)
	}

	// All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods
	selector, err = metav1.LabelSelectorAsSelector(tester.deployment.Spec.Selector)
	if err != nil {
		t.Fatalf("failed to parse deployment %s selector: %v", name, err)
	}
	pods, err := c.CoreV1().Pods(ns.Name).List(metav1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		t.Fatalf("failed to list pods of deployment %s: %v", name, err)
	}
	if len(pods.Items) != int(replicas) {
		t.Errorf("expected %d pods, got %d pods", replicas, len(pods.Items))
	}
	podHash, err := checkPodsHashLabel(pods)
	if err != nil {
		t.Error(err)
	}
	if rsHash != podHash {
		t.Errorf("found mismatching pod-template-hash value: rs hash = %s whereas pod hash = %s", rsHash, podHash)
	}
}

// Deployment should have a timeout condition when it fails to progress after given deadline.
func TestFailedDeployment(t *testing.T) {
	s, closeFn, rm, dc, informers, c := dcSetup(t)
```