Merge pull request #33235 from caesarxuchao/fix-TestCreateWithNonExistentOwner

Automatic merge from submit-queue

Fix TestCreateWithNonExistentOwner

Fix #30228
As https://github.com/kubernetes/kubernetes/issues/30228#issuecomment-248779567 describes, the GC did delete the garbage; it was the test logic that failed.
The test used to rely on `gc.QueuesDrained()`, which could return before the GC finished processing. That appears to be the only plausible cause of the test failure. Hence, this PR changes the test to poll for the deletion of the garbage instead.
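To make the race concrete, here are the two waiting strategies side by side, excerpted from the diff below. The old check passes as soon as the queues are momentarily empty, which can happen while a worker is still processing the item it already popped; the new check polls the API server for the observable end state:

```go
// Before: passes once the queues are empty, which does not imply the
// worker has finished issuing the DELETE for the pod.
if err := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
	return gc.QueuesDrained(), nil
}); err != nil {
	t.Fatal(err)
}

// After: poll until the pod is actually gone from the API server.
if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 5*time.Second, 30*time.Second); err != nil {
	t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
}
```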
Commit 33d29b5d6b, authored by Kubernetes Submit Queue on 2016-09-28 07:33:45 -07:00, committed by GitHub.
3 changed files with 42 additions and 58 deletions.


@@ -786,12 +786,6 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
 	gc.propagator.eventQueue.ShutDown()
 }
 
-// QueuesDrained returns if the dirtyQueue and eventQueue are drained. It's
-// useful for debugging. Note that it doesn't guarantee the workers are idle.
-func (gc *GarbageCollector) QueuesDrained() bool {
-	return gc.dirtyQueue.Len() == 0 && gc.propagator.eventQueue.Len() == 0 && gc.orphanQueue.Len() == 0
-}
-
 // *FOR TEST USE ONLY* It's not safe to call this function when the GC is still
 // busy.
 // GraphHasUID returns if the Propagator has a particular UID store in its
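The deleted doc comment already warned that QueuesDrained "doesn't guarantee the workers are idle", and that caveat is exactly the race the tests tripped over. A minimal, self-contained sketch of the failure mode follows; the toy queue and all names in it are illustrative, not Kubernetes code:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// queue is a toy work queue; Len() mirrors the check QueuesDrained performed.
type queue struct {
	mu    sync.Mutex
	items []string
}

func (q *queue) add(item string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, item)
}

func (q *queue) pop() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) == 0 {
		return "", false
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, true
}

func (q *queue) Len() int {
	q.mu.Lock()
	defer q.mu.Unlock()
	return len(q.items)
}

func main() {
	q := &queue{}
	q.add("pod-to-delete")

	done := make(chan struct{})
	go func() {
		item, _ := q.pop()          // the queue is empty from this point on...
		time.Sleep(2 * time.Second) // ...but the actual DELETE hasn't been sent yet
		fmt.Println("worker finished deleting", item)
		close(done)
	}()

	time.Sleep(100 * time.Millisecond)
	// A drained check here sees Len() == 0 and declares success, even though
	// the worker is still mid-flight. This is the race the PR removes.
	fmt.Println("queues drained?", q.Len() == 0)
	<-done
}
```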


@@ -30,11 +30,10 @@ import (
 	"github.com/golang/glog"
 	dto "github.com/prometheus/client_model/go"
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	"k8s.io/kubernetes/pkg/controller/garbagecollector"
@@ -42,6 +41,7 @@ import (
 	"k8s.io/kubernetes/pkg/runtime/serializer"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/wait"
+	"k8s.io/kubernetes/test/integration"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -213,13 +213,6 @@ func TestCascadingDeletion(t *testing.T) {
 	if err := rcClient.Delete(toBeDeletedRCName, getNonOrphanOptions()); err != nil {
 		t.Fatalf("failed to delete replication controller: %v", err)
 	}
-	// wait for the garbage collector to drain its queue
-	if err := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
-		return gc.QueuesDrained(), nil
-	}); err != nil {
-		t.Fatal(err)
-	}
 	// sometimes the deletion of the RC takes long time to be observed by
 	// the gc, so wait for the garbage collector to observe the deletion of
 	// the toBeDeletedRC
@@ -228,32 +221,21 @@ func TestCascadingDeletion(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	// wait for the garbage collector to drain its queue again because it's
-	// possible it just processed the delete of the toBeDeletedRC.
-	if err := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
-		return gc.QueuesDrained(), nil
-	}); err != nil {
-		t.Fatal(err)
+	if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 5*time.Second, 30*time.Second); err != nil {
+		t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
 	}
-	t.Logf("garbage collector queues drained")
-	// checks the garbage collect doesn't delete pods it shouldn't do.
+	// checks the garbage collect doesn't delete pods it shouldn't delete.
 	if _, err := podClient.Get(independentPodName); err != nil {
 		t.Fatal(err)
 	}
 	if _, err := podClient.Get(oneValidOwnerPodName); err != nil {
 		t.Fatal(err)
 	}
-	if _, err := podClient.Get(garbageCollectedPodName); err == nil || !errors.IsNotFound(err) {
-		t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
-	}
 }
 
 // This test simulates the case where an object is created with an owner that
 // doesn't exist. It verifies the GC will delete such an object.
 func TestCreateWithNonExistentOwner(t *testing.T) {
-	glog.V(6).Infof("TestCreateWithNonExistentOwner starts")
-	defer glog.V(6).Infof("TestCreateWithNonExistentOwner ends")
 	s, gc, clientSet := setup(t)
 	defer s.Close()
@@ -279,15 +261,9 @@ func TestCreateWithNonExistentOwner(t *testing.T) {
 	stopCh := make(chan struct{})
 	go gc.Run(5, stopCh)
 	defer close(stopCh)
-	// wait for the garbage collector to drain its queue
-	if err := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
-		return gc.QueuesDrained(), nil
-	}); err != nil {
-		t.Fatal(err)
-	}
-	t.Logf("garbage collector queues drained")
-	if _, err := podClient.Get(garbageCollectedPodName); err == nil || !errors.IsNotFound(err) {
-		t.Fatalf("expect pod %s to be garbage collected", garbageCollectedPodName)
+	// wait for the garbage collector to delete the pod
+	if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 5*time.Second, 30*time.Second); err != nil {
+		t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
 	}
 }
@ -382,16 +358,8 @@ func TestStressingCascadingDeletion(t *testing.T) {
} }
wg.Wait() wg.Wait()
t.Logf("all pods are created, all replications controllers are created then deleted") t.Logf("all pods are created, all replications controllers are created then deleted")
// wait for the garbage collector to drain its queue // wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) { if err := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) {
return gc.QueuesDrained(), nil
}); err != nil {
t.Fatal(err)
}
t.Logf("garbage collector queues drained")
// wait for the RCs and Pods to reach the expected numbers. This shouldn't
// take long, because the queues are already drained.
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
podsInEachCollection := 3 podsInEachCollection := 3
// see the comments on the calls to setupRCsPods for details // see the comments on the calls to setupRCsPods for details
remainingGroups := 3 remainingGroups := 3
@@ -478,12 +446,19 @@ func TestOrphaning(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Failed to gracefully delete the rc: %v", err)
 	}
-	// wait for the garbage collector to drain its queue
-	if err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) {
-		return gc.QueuesDrained(), nil
+	// verify the toBeDeleteRC is deleted
+	if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
+		rcs, err := rcClient.List(api.ListOptions{})
+		if err != nil {
+			return false, err
+		}
+		if len(rcs.Items) == 0 {
+			t.Logf("Still has %d RCs", len(rcs.Items))
+			return true, nil
+		}
+		return false, nil
 	}); err != nil {
-		t.Fatal(err)
+		t.Errorf("unexpected error: %v", err)
 	}
 
 	// verify pods don't have the ownerPod as an owner anymore
@@ -499,9 +474,4 @@ func TestOrphaning(t *testing.T) {
 			t.Errorf("pod %s still has non-empty OwnerRefereces: %v", pod.ObjectMeta.Name, pod.ObjectMeta.OwnerReferences)
 		}
 	}
-	// verify the toBeDeleteRC is deleted
-	rcs, err := rcClient.List(api.ListOptions{})
-	if len(rcs.Items) != 0 {
-		t.Errorf("Expect RCs to be deleted, but got %#v", rcs.Items)
-	}
 }
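One detail worth calling out in the TestOrphaning hunk: the new wait uses `wait.PollImmediate` rather than `wait.Poll`. Poll sleeps one interval before the first check, while PollImmediate evaluates the condition once up front, so a condition that is already true succeeds without waiting a full interval. A sketch of the difference, reusing the rcClient and condition from the test above:

```go
cond := func() (bool, error) {
	rcs, err := rcClient.List(api.ListOptions{})
	if err != nil {
		return false, err
	}
	return len(rcs.Items) == 0, nil
}
err := wait.Poll(5*time.Second, 30*time.Second, cond)         // first check after ~5s
err = wait.PollImmediate(5*time.Second, 30*time.Second, cond) // first check immediately
```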


@@ -20,11 +20,15 @@ import (
 	"fmt"
 	"math/rand"
 	"testing"
+	"time"
 
 	etcd "github.com/coreos/etcd/client"
 	"github.com/golang/glog"
 	"golang.org/x/net/context"
+	"k8s.io/kubernetes/pkg/api/errors"
+	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -69,3 +73,19 @@ var Code409 = map[int]bool{409: true}
 var Code422 = map[int]bool{422: true}
 var Code500 = map[int]bool{500: true}
 var Code503 = map[int]bool{503: true}
+
+// WaitForPodToDisappear polls the API server if the pod has been deleted.
+func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, interval, timeout time.Duration) error {
+	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+		_, err := podClient.Get(podName)
+		if err == nil {
+			return false, nil
+		} else {
+			if errors.IsNotFound(err) {
+				return true, nil
+			} else {
+				return false, err
+			}
+		}
+	})
+}
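Since the helper is built on `wait.PollImmediate`, its behavior follows directly from the condition function: a still-present pod keeps the poll going, an IsNotFound error ends it successfully, any other API error aborts it, and exhausting the timeout surfaces as `wait.ErrWaitTimeout`. A hedged usage sketch inside a test (the pod name is made up; podClient comes from the caller, as in the garbage collector tests above):

```go
// Outcomes, per the condition in WaitForPodToDisappear:
//   pod still present     -> (false, nil): keep polling
//   pod gone (IsNotFound) -> (true, nil):  WaitForPodToDisappear returns nil
//   other API error       -> (false, err): fail fast with that error
//   timeout elapses       -> wait.ErrWaitTimeout
if err := integration.WaitForPodToDisappear(podClient, "hypothetical-pod", 5*time.Second, 30*time.Second); err != nil {
	t.Fatalf("pod was not garbage collected: %v", err)
}
```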