Merge pull request #41145 from kargakis/cleanup-test-fix

Automatic merge from submit-queue

Do not cleanup already deleted replica sets and add more logging around it

For https://k8s-gubernator.appspot.com/build/kubernetes-jenkins/logs/ci-kubernetes-e2e-kops-aws/3569

@ncdc this will make the output of the test cleaner.
Kubernetes Submit Queue 2017-02-09 13:34:24 -08:00 committed by GitHub
commit 641315f859
3 changed files with 27 additions and 3 deletions
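
The fix itself is a single extra condition: once the API server has accepted a delete, the object's ObjectMeta.DeletionTimestamp is set, but the object can still show up in lists and informer caches until its finalizers run, so issuing another Delete is redundant and clutters the test output. A minimal sketch of the guard, assuming only the apimachinery metav1 types rather than this repository's helpers:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// beingDeleted reports whether the API server has already accepted a
// delete for this object; such objects linger in caches until their
// finalizers complete, so cleanup loops should skip them.
func beingDeleted(meta metav1.ObjectMeta) bool {
	return meta.DeletionTimestamp != nil
}

func main() {
	now := metav1.Now()
	rs := metav1.ObjectMeta{Name: "foo-1", DeletionTimestamp: &now}
	fmt.Println(beingDeleted(rs)) // true: skip instead of deleting again
}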

@@ -547,15 +547,17 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSet
 	}
 	sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
+	glog.V(2).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)
 	var errList []error
 	// TODO: This should be parallelized.
 	for i := int32(0); i < diff; i++ {
 		rs := oldRSs[i]
 		// Avoid delete replica set with non-zero replica counts
-		if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration {
+		if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration || rs.DeletionTimestamp != nil {
 			continue
 		}
+		glog.V(2).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
 		if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
 			glog.V(2).Infof("Failed deleting old replica set %v for deployment %v: %v", rs.Name, deployment.Name, err)
 			errList = append(errList, err)
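
Note how the new guard pairs with the existing race handling on the Delete call: even when a replica set passes the check, another actor can remove it between the listing and the Delete, so a NotFound error is logged but not aggregated as a failure. A hedged sketch of that pattern (the helper name is mine, not the controller's):

import apierrors "k8s.io/apimachinery/pkg/api/errors"

// deleteIgnoringNotFound treats a concurrent deletion as success: the
// desired end state, the object being gone, holds either way.
func deleteIgnoringNotFound(del func() error) error {
	if err := del(); err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}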

@@ -323,6 +323,9 @@ func TestScale(t *testing.T) {
 func TestDeploymentController_cleanupDeployment(t *testing.T) {
 	selector := map[string]string{"foo": "bar"}
+	alreadyDeleted := newRSWithStatus("foo-1", 0, 0, selector)
+	now := metav1.Now()
+	alreadyDeleted.DeletionTimestamp = &now
 	tests := []struct {
 		oldRSs []*extensions.ReplicaSet
@@ -366,10 +369,19 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
 			revisionHistoryLimit: 0,
 			expectedDeletions:    0,
 		},
+		{
+			oldRSs: []*extensions.ReplicaSet{
+				alreadyDeleted,
+			},
+			revisionHistoryLimit: 0,
+			expectedDeletions:    0,
+		},
 	}
 	for i := range tests {
 		test := tests[i]
+		t.Logf("scenario %d", i)
 		fake := &fake.Clientset{}
 		informers := informers.NewSharedInformerFactory(nil, fake, controller.NoResyncPeriodFunc())
 		controller := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), fake)
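
The assertion half of this test is cut off above; with a bare fake.Clientset the usual pattern is to replay the actions the fake recorded and count the deletes. A sketch of what that check plausibly looks like, reusing the fake and test variables from the loop above (the loop body here is an assumption, not the committed code):

// Count the ReplicaSet deletions the controller actually issued.
gotDeletions := 0
for _, action := range fake.Actions() {
	if action.GetVerb() != "delete" || action.GetResource().Resource != "replicasets" {
		continue // ignore gets, lists, watches, and other resources
	}
	gotDeletions++
}
if gotDeletions != test.expectedDeletions {
	t.Errorf("expected %d deletions, got %d", test.expectedDeletions, gotDeletions)
}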

@@ -3422,17 +3422,27 @@ func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds in
 // Waits for the deployment to clean up old rcs.
 func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
-	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
+	var oldRSs []*extensions.ReplicaSet
+	var d *extensions.Deployment
+	pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
 		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
-		_, oldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c)
+		d = deployment
+		_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c)
 		if err != nil {
 			return false, err
 		}
 		return len(oldRSs) == desiredRSNum, nil
 	})
+	if pollErr == wait.ErrWaitTimeout {
+		pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
+		logReplicaSetsOfDeployment(d, oldRSs, nil)
+	}
+	return pollErr
 }
 func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
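
The shape of this change is a reusable pattern: hoist the variables observed inside the poll closure into the enclosing scope, then translate a bare wait.ErrWaitTimeout into an error that reports the last state seen. A generic sketch of the idea, assuming only apimachinery's wait package (names are illustrative):

package waitutil

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// pollWithDiagnostics polls check until it reports done, remembering the
// last observed count so a timeout can become a useful message.
func pollWithDiagnostics(interval, timeout time.Duration, check func() (count int, done bool, err error)) error {
	var lastSeen int
	pollErr := wait.PollImmediate(interval, timeout, func() (bool, error) {
		n, done, err := check()
		lastSeen = n // captured for the failure path
		return done, err
	})
	if pollErr == wait.ErrWaitTimeout {
		pollErr = fmt.Errorf("timed out; last observed %d old replica sets", lastSeen)
	}
	return pollErr
}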