commit 31cb266340
parent c3baf402f5

    tests
@@ -117,9 +117,9 @@ var InterestingApiServerMetrics = []string{
 }
 
 var InterestingControllerManagerMetrics = []string{
-	"garbage_collector_event_processing_latency_microseconds",
-	"garbage_collector_dirty_processing_latency_microseconds",
-	"garbage_collector_orphan_processing_latency_microseconds",
+	"garbage_collector_event_queue_latency",
+	"garbage_collector_dirty_queue_latency",
+	"garbage_collector_orhan_queue_latency",
 }
 
 var InterestingKubeletMetrics = []string{
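Note: the list above is an allowlist consumed by the e2e metrics gathering ("orhan" matches the typo in the actual queue name, so it is left as-is). A minimal sketch (hypothetical helper, not part of this commit) of filtering a scraped name-to-value map down to it:

// filterInteresting keeps only the controller-manager metrics named in
// the InterestingControllerManagerMetrics allowlist above.
func filterInteresting(all map[string]float64) map[string]float64 {
	out := make(map[string]float64)
	for _, name := range InterestingControllerManagerMetrics {
		if v, ok := all[name]; ok {
			out[name] = v
		}
	}
	return out
}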
@@ -20,7 +20,9 @@ import (
 	"fmt"
 	"time"
 
+	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/api/v1"
 	v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
@@ -30,8 +32,14 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
 )
 
+func getForegroundOptions() *metav1.DeleteOptions {
+	policy := metav1.DeletePropagationForeground
+	return &metav1.DeleteOptions{PropagationPolicy: &policy}
+}
+
 func getOrphanOptions() *metav1.DeleteOptions {
 	var trueVar = true
 	return &metav1.DeleteOptions{OrphanDependents: &trueVar}
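Note: these helpers cover two of the three deletion behaviors the tests exercise; the integration file later also calls getNonOrphanOptions(). A sketch of that third helper, assumed to mirror the pattern above:

// getNonOrphanOptions asks the server to delete dependents rather than
// orphan them (assumed shape, following getOrphanOptions above).
func getNonOrphanOptions() *metav1.DeleteOptions {
	falseVar := false
	return &metav1.DeleteOptions{OrphanDependents: &falseVar}
}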
@@ -76,9 +84,11 @@ func newOwnerDeployment(f *framework.Framework, deploymentName string) *v1beta1.
 	}
 }
 
-func newOwnerRC(f *framework.Framework, name string) *v1.ReplicationController {
-	var replicas int32
-	replicas = 2
+func getSelector() map[string]string {
+	return map[string]string{"app": "gc-test"}
+}
+
+func newOwnerRC(f *framework.Framework, name string, replicas int32) *v1.ReplicationController {
 	return &v1.ReplicationController{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "ReplicationController",
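Note: with the replica count now a parameter (and the selector shared via getSelector), call sites choose their own sizes. Usage as seen later in this diff, wrapped in a hypothetical helper for illustration:

// makeOwnerRCs shows the two sizes this commit uses: 2 replicas for the
// basic cascading-deletion cases, 100 for the load-oriented ones.
func makeOwnerRCs(f *framework.Framework) (small, large *v1.ReplicationController) {
	small = newOwnerRC(f, "simpletest.rc", 2)
	large = newOwnerRC(f, "simpletest.rc", 100)
	return small, large
}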
@@ -125,6 +135,27 @@ func verifyRemainingDeploymentsAndReplicaSets(
 	return ret, nil
 }
 
+func newGCPod(name string) *v1.Pod {
+	return &v1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Pod",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Spec: v1.PodSpec{
+			TerminationGracePeriodSeconds: new(int64),
+			Containers: []v1.Container{
+				{
+					Name:  "nginx",
+					Image: "gcr.io/google_containers/nginx:1.7.9",
+				},
+			},
+		},
+	}
+}
+
 // verifyRemainingObjects verifies if the number of the remaining replication
 // controllers and pods are rcNum and podNum. It returns error if the
 // communication with the API server fails.
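Note: TerminationGracePeriodSeconds is new(int64), i.e. a pointer to zero, so these pods vanish immediately once deleted, which keeps the GC assertions fast. Attaching an owner to one looks like this (hypothetical helper; field names from metav1.OwnerReference):

// newOwnedGCPod returns a GC test pod that the garbage collector will
// treat as a dependent of the given replication controller.
func newOwnedGCPod(name string, owner *v1.ReplicationController) *v1.Pod {
	pod := newGCPod(name)
	pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
		{UID: owner.UID, Name: owner.Name, APIVersion: "v1", Kind: "ReplicationController"},
	}
	return pod
}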
@@ -174,7 +205,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
 		podClient := clientSet.Core().Pods(f.Namespace.Name)
 		rcName := "simpletest.rc"
-		rc := newOwnerRC(f, rcName)
+		rc := newOwnerRC(f, rcName, 2)
 		By("create the rc")
 		rc, err := rcClient.Create(rc)
 		if err != nil {
@@ -225,9 +256,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
 		podClient := clientSet.Core().Pods(f.Namespace.Name)
 		rcName := "simpletest.rc"
-		rc := newOwnerRC(f, rcName)
-		replicas := int32(100)
-		rc.Spec.Replicas = &replicas
+		rc := newOwnerRC(f, rcName, 100)
 		By("create the rc")
 		rc, err := rcClient.Create(rc)
 		if err != nil {
@@ -287,7 +316,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
 		podClient := clientSet.Core().Pods(f.Namespace.Name)
 		rcName := "simpletest.rc"
-		rc := newOwnerRC(f, rcName)
+		rc := newOwnerRC(f, rcName, 2)
 		By("create the rc")
 		rc, err := rcClient.Create(rc)
 		if err != nil {
@@ -441,4 +470,237 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 		gatherMetrics(f)
 	})
+
+	It("[Feature:GarbageCollector] should keep the rc around until all its pods are deleted if the deleteOptions says so", func() {
+		clientSet := f.ClientSet
+		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
+		podClient := clientSet.Core().Pods(f.Namespace.Name)
+		rcName := "simpletest.rc"
+		rc := newOwnerRC(f, rcName, 100)
+		By("create the rc")
+		rc, err := rcClient.Create(rc)
+		if err != nil {
+			framework.Failf("Failed to create replication controller: %v", err)
+		}
+		// wait for rc to create pods
+		if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
+			rc, err := rcClient.Get(rc.Name, metav1.GetOptions{})
+			if err != nil {
+				return false, fmt.Errorf("Failed to get rc: %v", err)
+			}
+			if rc.Status.Replicas == *rc.Spec.Replicas {
+				return true, nil
+			} else {
+				return false, nil
+			}
+		}); err != nil {
+			framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
+		}
+		By("delete the rc")
+		deleteOptions := getForegroundOptions()
+		deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
+		if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
+			framework.Failf("failed to delete the rc: %v", err)
+		}
+		By("wait for the rc to be deleted")
+		// default client QPS is 20, deleting each pod requires 2 requests, so 30s should be enough
+		if err := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
+			_, err := rcClient.Get(rc.Name, metav1.GetOptions{})
+			if err == nil {
+				pods, _ := podClient.List(metav1.ListOptions{})
+				framework.Logf("%d pods remaining", len(pods.Items))
+				count := 0
+				for _, pod := range pods.Items {
+					if pod.ObjectMeta.DeletionTimestamp == nil {
+						count++
+					}
+				}
+				framework.Logf("%d pods have nil DeletionTimestamp", count)
+				framework.Logf("")
+				return false, nil
+			} else {
+				if errors.IsNotFound(err) {
+					return true, nil
+				} else {
+					return false, err
+				}
+			}
+		}); err != nil {
+			pods, err2 := podClient.List(metav1.ListOptions{})
+			if err2 != nil {
+				framework.Failf("%v", err2)
+			}
+			framework.Logf("The ObjectMeta of the %d remaining pods are:", len(pods.Items))
+			for _, pod := range pods.Items {
+				framework.Logf("%#v", pod.ObjectMeta)
+			}
+			framework.Failf("failed to delete the rc: %v", err)
+		}
+		// There shouldn't be any pods
+		pods, err := podClient.List(metav1.ListOptions{})
+		if err != nil {
+			framework.Failf("%v", err)
+		}
+		if len(pods.Items) != 0 {
+			framework.Failf("expected no pods, got %#v", pods)
+		}
+		gatherMetrics(f)
+	})
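Note: the 30s budget in the comment above checks out: 100 pods × 2 requests each = 200 requests, which at the default client QPS of 20 is about 10 seconds of API traffic, leaving headroom. A condensed sketch of the same wait loop (hypothetical helper; assumes the wait, errors, and time packages imported in this file):

// waitForDeletion polls until get returns NotFound (success), keeps
// polling while the object still exists, and aborts on any other error.
func waitForDeletion(get func() error) error {
	return wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
		err := get()
		if err == nil {
			return false, nil // object still present; keep polling
		}
		if errors.IsNotFound(err) {
			return true, nil // deleted
		}
		return false, err // unexpected error; stop waiting
	})
}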
+
+	// TODO: this should be an integration test
+	It("[Feature:GarbageCollector] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() {
+		clientSet := f.ClientSet
+		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
+		podClient := clientSet.Core().Pods(f.Namespace.Name)
+		rc1Name := "simpletest-rc-to-be-deleted"
+		replicas := int32(100)
+		halfReplicas := int(replicas / 2)
+		rc1 := newOwnerRC(f, rc1Name, replicas)
+		By("create the rc1")
+		rc1, err := rcClient.Create(rc1)
+		if err != nil {
+			framework.Failf("Failed to create replication controller: %v", err)
+		}
+		rc2Name := "simpletest-rc-to-stay"
+		rc2 := newOwnerRC(f, rc2Name, 0)
+		rc2.Spec.Selector = nil
+		rc2.Spec.Template.ObjectMeta.Labels = map[string]string{"another.key": "another.value"}
+		By("create the rc2")
+		rc2, err = rcClient.Create(rc2)
+		if err != nil {
+			framework.Failf("Failed to create replication controller: %v", err)
+		}
+		// wait for rc1 to be stable
+		if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
+			rc1, err := rcClient.Get(rc1.Name, metav1.GetOptions{})
+			if err != nil {
+				return false, fmt.Errorf("Failed to get rc: %v", err)
+			}
+			if rc1.Status.Replicas == *rc1.Spec.Replicas {
+				return true, nil
+			} else {
+				return false, nil
+			}
+		}); err != nil {
+			framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
+		}
+		By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
+		pods, err := podClient.List(metav1.ListOptions{})
+		Expect(err).NotTo(HaveOccurred())
+		patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID)
+		for i := 0; i < halfReplicas; i++ {
+			pod := pods.Items[i]
+			_, err := podClient.Patch(pod.Name, types.StrategicMergePatchType, []byte(patch))
+			Expect(err).NotTo(HaveOccurred())
+		}
+
+		By(fmt.Sprintf("delete the rc %s", rc1Name))
+		deleteOptions := getForegroundOptions()
+		deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc1.UID))
+		if err := rcClient.Delete(rc1.ObjectMeta.Name, deleteOptions); err != nil {
+			framework.Failf("failed to delete the rc: %v", err)
+		}
+		By("wait for the rc to be deleted")
+		// default client QPS is 20, deleting each pod requires 2 requests, so 30s should be enough
+		if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
+			_, err := rcClient.Get(rc1.Name, metav1.GetOptions{})
+			if err == nil {
+				pods, _ := podClient.List(metav1.ListOptions{})
+				framework.Logf("%d pods remaining", len(pods.Items))
+				count := 0
+				for _, pod := range pods.Items {
+					if pod.ObjectMeta.DeletionTimestamp == nil {
+						count++
+					}
+				}
+				framework.Logf("%d pods have nil DeletionTimestamp", count)
+				framework.Logf("")
+				return false, nil
+			} else {
+				if errors.IsNotFound(err) {
+					return true, nil
+				} else {
+					return false, err
+				}
+			}
+		}); err != nil {
+			pods, err2 := podClient.List(metav1.ListOptions{})
+			if err2 != nil {
+				framework.Failf("%v", err2)
+			}
+			framework.Logf("ObjectMeta of the %d remaining pods are:", len(pods.Items))
+			for _, pod := range pods.Items {
+				framework.Logf("%#v", pod.ObjectMeta)
+			}
+			framework.Failf("failed to delete rc %s, err: %v", rc1Name, err)
+		}
+		// half of the pods should still exist
+		pods, err = podClient.List(metav1.ListOptions{})
+		if err != nil {
+			framework.Failf("%v", err)
+		}
+		if len(pods.Items) != halfReplicas {
+			framework.Failf("expected %d pods, got %d", halfReplicas, len(pods.Items))
+		}
+		for _, pod := range pods.Items {
+			if pod.ObjectMeta.DeletionTimestamp != nil {
+				framework.Failf("expected pod DeletionTimestamp to be nil, got %#v", pod.ObjectMeta)
+			}
+			// they should only have 1 ownerReference left
+			if len(pod.ObjectMeta.OwnerReferences) != 1 {
+				framework.Failf("expected pod to only have 1 owner, got %#v", pod.ObjectMeta.OwnerReferences)
+			}
+		}
+		gatherMetrics(f)
+	})
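Note: the patch works because ObjectMeta.ownerReferences is a strategic-merge list keyed by uid, so it appends rc2 as a second owner rather than replacing rc1; that is why exactly one reference (rc2's) remains on each surviving pod after rc1's foreground deletion. A sketch of the patch construction as a reusable helper (hypothetical; mirrors the inline fmt.Sprintf above):

// addOwnerRefPatch returns a strategic-merge patch that adds one owner
// reference; the uid merge key means existing owners are preserved.
func addOwnerRefPatch(apiVersion, kind, name string, uid types.UID) []byte {
	return []byte(fmt.Sprintf(
		`{"metadata":{"ownerReferences":[{"apiVersion":%q,"kind":%q,"name":%q,"uid":%q}]}}`,
		apiVersion, kind, name, uid))
}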
+
+	// TODO: should be an integration test
+	It("[Feature:GarbageCollector] should not be blocked by dependency circle", func() {
+		clientSet := f.ClientSet
+		podClient := clientSet.Core().Pods(f.Namespace.Name)
+		pod1 := newGCPod("pod1")
+		pod1, err := podClient.Create(pod1)
+		Expect(err).NotTo(HaveOccurred())
+		pod2 := newGCPod("pod2")
+		pod2, err = podClient.Create(pod2)
+		Expect(err).NotTo(HaveOccurred())
+		pod3 := newGCPod("pod3")
+		pod3, err = podClient.Create(pod3)
+		Expect(err).NotTo(HaveOccurred())
+		// create circular dependency
+		addRefPatch := func(name string, uid types.UID) []byte {
+			return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid))
+		}
+		pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, addRefPatch(pod3.Name, pod3.UID))
+		Expect(err).NotTo(HaveOccurred())
+		framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
+		pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, addRefPatch(pod1.Name, pod1.UID))
+		Expect(err).NotTo(HaveOccurred())
+		framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
+		pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, addRefPatch(pod2.Name, pod2.UID))
+		Expect(err).NotTo(HaveOccurred())
+		framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
+		// delete one pod, should result in the deletion of all pods
+		deleteOptions := getForegroundOptions()
+		deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
+		err = podClient.Delete(pod1.ObjectMeta.Name, deleteOptions)
+		Expect(err).NotTo(HaveOccurred())
+		var pods *v1.PodList
+		var err2 error
+		if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
+			pods, err2 = podClient.List(metav1.ListOptions{})
+			if err2 != nil {
+				return false, fmt.Errorf("Failed to list pods: %v", err2)
+			}
+			if len(pods.Items) == 0 {
+				return true, nil
+			} else {
+				return false, nil
+			}
+		}); err != nil {
+			framework.Logf("pods are %#v", pods.Items)
+			framework.Failf("failed to wait for all pods to be deleted: %v", err)
+		}
+	})
 })
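Note: the ring pod1→pod3→pod2→pod1 does not deadlock the collector: once pod1 is deleted with a foreground policy, deletion propagates around the cycle and all three pods are removed. Each patched reference has this shape (sketch; field names from metav1.OwnerReference):

// cycleRef is the owner reference each addRefPatch above installs:
// controller marks the managing owner, blockOwnerDeletion ties the
// owner's foreground deletion to this dependent.
func cycleRef(owner *v1.Pod) metav1.OwnerReference {
	t := true
	return metav1.OwnerReference{
		APIVersion:         "v1",
		Kind:               "Pod",
		Name:               owner.Name,
		UID:                owner.UID,
		Controller:         &t,
		BlockOwnerDeletion: &t,
	}
}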
@@ -28,8 +28,8 @@ import (
 	"time"
 
 	"github.com/golang/glog"
-	dto "github.com/prometheus/client_model/go"
 
+	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/types"
@@ -46,6 +46,11 @@ import (
 	"k8s.io/kubernetes/test/integration/framework"
 )
 
+func getForegroundOptions() *metav1.DeleteOptions {
+	policy := metav1.DeletePropagationForeground
+	return &metav1.DeleteOptions{PropagationPolicy: &policy}
+}
+
 func getOrphanOptions() *metav1.DeleteOptions {
 	var trueVar = true
 	return &metav1.DeleteOptions{OrphanDependents: &trueVar}
@@ -296,7 +301,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
 		}
 		podUIDs = append(podUIDs, pod.ObjectMeta.UID)
 	}
-	orphan := (options != nil && options.OrphanDependents != nil && *options.OrphanDependents) || (options == nil && len(initialFinalizers) != 0 && initialFinalizers[0] == metav1.FinalizerOrphan)
+	orphan := (options != nil && options.OrphanDependents != nil && *options.OrphanDependents) || (options == nil && len(initialFinalizers) != 0 && initialFinalizers[0] == metav1.FinalizerOrphanDependents)
 	// if we intend to orphan the pods, we need to wait for the gc to observe the
 	// creation of the pods, otherwise if the deletion of RC is observed before
 	// the creation of the pods, the pods will not be orphaned.
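Note: metav1.FinalizerOrphanDependents appears to be the newer name for the "orphan" finalizer previously referenced as metav1.FinalizerOrphan. The decision above, written out as a predicate (sketch; same logic as the one-liner):

// expectOrphans reports whether the RC's pods should survive deletion:
// either the delete call asked to orphan explicitly, or no options were
// given and the RC was created carrying the orphan finalizer.
func expectOrphans(options *metav1.DeleteOptions, initialFinalizers []string) bool {
	if options != nil {
		return options.OrphanDependents != nil && *options.OrphanDependents
	}
	return len(initialFinalizers) != 0 && initialFinalizers[0] == metav1.FinalizerOrphanDependents
}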
@@ -355,9 +360,9 @@ func TestStressingCascadingDeletion(t *testing.T) {
 		// rc is created with empty finalizers, deleted with nil delete options, pods will remain.
 		go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, nil, &wg, rcUIDs)
 		// rc is created with the orphan finalizer, deleted with nil options, pods will remain.
-		go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphan}, nil, &wg, rcUIDs)
+		go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, nil, &wg, rcUIDs)
 		// rc is created with the orphan finalizer, deleted with DeleteOptions.OrphanDependents=false, pods will be deleted.
-		go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphan}, getNonOrphanOptions(), &wg, rcUIDs)
+		go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, getNonOrphanOptions(), &wg, rcUIDs)
 		// rc is created with empty finalizers, deleted with DeleteOptions.OrphanDependents=true, pods will remain.
 		go setupRCsPods(t, gc, clientSet, "collection4-"+strconv.Itoa(i), ns.Name, []string{}, getOrphanOptions(), &wg, rcUIDs)
 	}
@@ -395,19 +400,6 @@ func TestStressingCascadingDeletion(t *testing.T) {
 	if gc.GraphHasUID(uids) {
 		t.Errorf("Expect all nodes representing replication controllers are removed from the Propagator's graph")
 	}
-	metric := &dto.Metric{}
-	garbagecollector.EventProcessingLatency.Write(metric)
-	count := float64(metric.Summary.GetSampleCount())
-	sum := metric.Summary.GetSampleSum()
-	t.Logf("Average time spent in GC's eventQueue is %.1f microseconds", sum/count)
-	garbagecollector.DirtyProcessingLatency.Write(metric)
-	count = float64(metric.Summary.GetSampleCount())
-	sum = metric.Summary.GetSampleSum()
-	t.Logf("Average time spent in GC's dirtyQueue is %.1f microseconds", sum/count)
-	garbagecollector.OrphanProcessingLatency.Write(metric)
-	count = float64(metric.Summary.GetSampleCount())
-	sum = metric.Summary.GetSampleSum()
-	t.Logf("Average time spent in GC's orphanQueue is %.1f microseconds", sum/count)
 }
 
 func TestOrphaning(t *testing.T) {
@@ -480,3 +472,183 @@ func TestOrphaning(t *testing.T) {
 		}
 	}
 }
+
+func TestSolidOwnerDoesNotBlockWaitingOwner(t *testing.T) {
+	s, gc, clientSet := setup(t)
+	defer s.Close()
+
+	ns := framework.CreateTestingNamespace("gc-foreground1", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	podClient := clientSet.Core().Pods(ns.Name)
+	rcClient := clientSet.Core().ReplicationControllers(ns.Name)
+	// create the RC to be deleted and the RC that will remain
+	toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name))
+	if err != nil {
+		t.Fatalf("Failed to create replication controller: %v", err)
+	}
+	remainingRC, err := rcClient.Create(newOwnerRC(remainingRCName, ns.Name))
+	if err != nil {
+		t.Fatalf("Failed to create replication controller: %v", err)
+	}
+	trueVar := true
+	pod := newPod("pod", ns.Name, []metav1.OwnerReference{
+		{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRC.Name, BlockOwnerDeletion: &trueVar},
+		{UID: remainingRC.ObjectMeta.UID, Name: remainingRC.Name},
+	})
+	_, err = podClient.Create(pod)
+	if err != nil {
+		t.Fatalf("Failed to create Pod: %v", err)
+	}
+
+	stopCh := make(chan struct{})
+	go gc.Run(5, stopCh)
+	defer close(stopCh)
+
+	err = rcClient.Delete(toBeDeletedRCName, getForegroundOptions())
+	if err != nil {
+		t.Fatalf("Failed to delete the rc: %v", err)
+	}
+	// verify toBeDeletedRC is deleted
+	if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
+		_, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{})
+		if err != nil {
+			if errors.IsNotFound(err) {
+				return true, nil
+			}
+			return false, err
+		}
+		return false, nil
+	}); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+
+	// verify the pod doesn't have toBeDeletedRC as an owner anymore
+	pod, err = podClient.Get("pod", metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Failed to get pod: %v", err)
+	}
+	if len(pod.ObjectMeta.OwnerReferences) != 1 {
+		t.Errorf("expect pod to have only one ownerReference: got %#v", pod.ObjectMeta.OwnerReferences)
+	} else if pod.ObjectMeta.OwnerReferences[0].Name != remainingRC.Name {
+		t.Errorf("expect pod to have an ownerReference pointing to %s, got %#v", remainingRC.Name, pod.ObjectMeta.OwnerReferences)
+	}
+}
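Note: foreground deletion only waits on dependents whose reference sets blockOwnerDeletion; because the pod here has a second, solid owner, the collector strips the dangling reference to toBeDeletedRC instead of deleting the pod, which is what unblocks the RC. The pod's owner list as a sketch (hypothetical helper mirroring the literal above):

// ownersFor builds the two references used in this test: a blocking one
// to the RC being deleted, and a plain one to the RC that stays.
func ownersFor(toBeDeleted, remaining *v1.ReplicationController) []metav1.OwnerReference {
	block := true
	return []metav1.OwnerReference{
		{UID: toBeDeleted.UID, Name: toBeDeleted.Name, BlockOwnerDeletion: &block},
		{UID: remaining.UID, Name: remaining.Name},
	}
}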
+
+func TestNonBlockingOwnerRefDoesNotBlock(t *testing.T) {
+	s, gc, clientSet := setup(t)
+	defer s.Close()
+
+	ns := framework.CreateTestingNamespace("gc-foreground2", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	podClient := clientSet.Core().Pods(ns.Name)
+	rcClient := clientSet.Core().ReplicationControllers(ns.Name)
+	// create the RC to be deleted
+	toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name))
+	if err != nil {
+		t.Fatalf("Failed to create replication controller: %v", err)
+	}
+	// BlockOwnerDeletion is not set
+	pod1 := newPod("pod1", ns.Name, []metav1.OwnerReference{
+		{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRC.Name},
+	})
+	// add a finalizer that no controller handles, so that the pod won't be deleted
+	pod1.ObjectMeta.Finalizers = []string{"x/y"}
+	// BlockOwnerDeletion is false
+	falseVar := false
+	pod2 := newPod("pod2", ns.Name, []metav1.OwnerReference{
+		{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRC.Name, BlockOwnerDeletion: &falseVar},
+	})
+	// add a finalizer that no controller handles, so that the pod won't be deleted
+	pod2.ObjectMeta.Finalizers = []string{"x/y"}
+	_, err = podClient.Create(pod1)
+	if err != nil {
+		t.Fatalf("Failed to create Pod: %v", err)
+	}
+	_, err = podClient.Create(pod2)
+	if err != nil {
+		t.Fatalf("Failed to create Pod: %v", err)
+	}
+
+	stopCh := make(chan struct{})
+	go gc.Run(5, stopCh)
+	defer close(stopCh)
+
+	err = rcClient.Delete(toBeDeletedRCName, getForegroundOptions())
+	if err != nil {
+		t.Fatalf("Failed to delete the rc: %v", err)
+	}
+	// verify toBeDeletedRC is deleted
+	if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
+		_, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{})
+		if err != nil {
+			if errors.IsNotFound(err) {
+				return true, nil
+			}
+			return false, err
+		}
+		return false, nil
+	}); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+
+	// verify pods are still there
+	pods, err := podClient.List(metav1.ListOptions{})
+	if err != nil {
+		t.Fatalf("Failed to list pods: %v", err)
+	}
+	if len(pods.Items) != 2 {
+		t.Errorf("expect there to be 2 pods, got %#v", pods.Items)
+	}
+}
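Note: the "x/y" finalizer is handled by no controller, so the pods stay pending deletion indefinitely; since neither owner reference blocks, the RC's foreground deletion still completes. A sketch of the cleanup such a test would eventually need (hypothetical helper):

// removeTestFinalizer filters the test-only "x/y" finalizer out of a
// pod's finalizer list; updating the pod afterwards lets it terminate.
func removeTestFinalizer(pod *v1.Pod) *v1.Pod {
	kept := pod.ObjectMeta.Finalizers[:0]
	for _, f := range pod.ObjectMeta.Finalizers {
		if f != "x/y" {
			kept = append(kept, f)
		}
	}
	pod.ObjectMeta.Finalizers = kept
	return pod
}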
+
+func TestBlockingOwnerRefDoesBlock(t *testing.T) {
+	s, gc, clientSet := setup(t)
+	defer s.Close()
+
+	ns := framework.CreateTestingNamespace("gc-foreground2", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	podClient := clientSet.Core().Pods(ns.Name)
+	rcClient := clientSet.Core().ReplicationControllers(ns.Name)
+	// create the RC to be deleted
+	toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name))
+	if err != nil {
+		t.Fatalf("Failed to create replication controller: %v", err)
+	}
+	trueVar := true
+	pod := newPod("pod", ns.Name, []metav1.OwnerReference{
+		{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRC.Name, BlockOwnerDeletion: &trueVar},
+	})
+	// add a finalizer that no controller handles, so that the pod won't be deleted
+	pod.ObjectMeta.Finalizers = []string{"x/y"}
+	_, err = podClient.Create(pod)
+	if err != nil {
+		t.Fatalf("Failed to create Pod: %v", err)
+	}
+
+	stopCh := make(chan struct{})
+	go gc.Run(5, stopCh)
+	defer close(stopCh)
+
+	err = rcClient.Delete(toBeDeletedRCName, getForegroundOptions())
+	if err != nil {
+		t.Fatalf("Failed to delete the rc: %v", err)
+	}
+	time.Sleep(30 * time.Second)
+	// verify toBeDeletedRC is NOT deleted
+	_, err = rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{})
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+
+	// verify the pod is still there
+	pods, err := podClient.List(metav1.ListOptions{})
+	if err != nil {
+		t.Fatalf("Failed to list pods: %v", err)
+	}
+	if len(pods.Items) != 1 {
+		t.Errorf("expect there to be 1 pod, got %#v", pods.Items)
+	}
+}
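Note: the fixed time.Sleep(30 * time.Second) is a blunt way to assert "still not deleted". A slightly stronger variant would re-check presence several times (hypothetical helper):

// stillPresent asserts an object keeps existing: any error from get
// (including NotFound) fails the assertion early.
func stillPresent(get func() error, checks int, interval time.Duration) error {
	for i := 0; i < checks; i++ {
		if err := get(); err != nil {
			return err
		}
		time.Sleep(interval)
	}
	return nil
}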
@@ -169,7 +169,7 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN
 }
 
 func TestAdoption(t *testing.T) {
-	var trueVar = true
+	boolPtr := func(b bool) *bool { return &b }
 	testCases := []struct {
 		name string
 		existingOwnerReferences func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference
@@ -181,7 +181,7 @@ func TestAdoption(t *testing.T) {
 				return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"}}
 			},
 			func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
-				return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
+				return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
 			},
 		},
 		{
@@ -190,29 +190,29 @@ func TestAdoption(t *testing.T) {
 				return []metav1.OwnerReference{}
 			},
 			func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
-				return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
+				return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
 			},
 		},
 		{
 			"pod refers rs as a controller",
 			func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
-				return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
+				return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true)}}
 			},
 			func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
-				return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
+				return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true)}}
 			},
 		},
 		{
 			"pod refers other rs as the controller, refers the rs as an owner",
 			func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
 				return []metav1.OwnerReference{
-					{UID: "1", Name: "anotherRS", APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar},
+					{UID: "1", Name: "anotherRS", APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true)},
 					{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"},
 				}
 			},
 			func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
 				return []metav1.OwnerReference{
-					{UID: "1", Name: "anotherRS", APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar},
+					{UID: "1", Name: "anotherRS", APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: boolPtr(true)},
 					{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"},
 				}
 			},
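Note: Go cannot take the address of an untyped literal (&true is illegal), so the boolPtr closure replaces the package-level trueVar; the updated expectations also assert that adoption now sets BlockOwnerDeletion alongside Controller. The resulting reference, as a sketch (field names from metav1.OwnerReference; the helper is illustrative):

// controllerRef is the owner reference adoption is now expected to
// write: the RS both controls the pod and blocks its own foreground
// deletion on it.
func controllerRef(rs *v1beta1.ReplicaSet) metav1.OwnerReference {
	boolPtr := func(b bool) *bool { return &b }
	return metav1.OwnerReference{
		UID:                rs.UID,
		Name:               rs.Name,
		APIVersion:         "extensions/v1beta1",
		Kind:               "ReplicaSet",
		Controller:         boolPtr(true),
		BlockOwnerDeletion: boolPtr(true),
	}
}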
@@ -255,7 +255,7 @@ func TestAdoption(t *testing.T) {
 					return false, nil
 				}
 			}); err != nil {
-				t.Fatal(err)
+				t.Fatalf("test %q failed: %v", tc.name, err)
 			}
 			close(stopCh)
 		}
@@ -158,7 +158,7 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN
 }
 
 func TestAdoption(t *testing.T) {
-	var trueVar = true
+	boolPtr := func(b bool) *bool { return &b }
 	testCases := []struct {
 		name string
 		existingOwnerReferences func(rc *v1.ReplicationController) []metav1.OwnerReference
@@ -170,7 +170,7 @@ func TestAdoption(t *testing.T) {
 				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"}}
 			},
 			func(rc *v1.ReplicationController) []metav1.OwnerReference {
-				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
+				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
 			},
 		},
 		{
@@ -179,29 +179,29 @@ func TestAdoption(t *testing.T) {
 				return []metav1.OwnerReference{}
 			},
 			func(rc *v1.ReplicationController) []metav1.OwnerReference {
-				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
+				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
 			},
 		},
 		{
 			"pod refers rc as a controller",
 			func(rc *v1.ReplicationController) []metav1.OwnerReference {
-				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
+				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)}}
 			},
 			func(rc *v1.ReplicationController) []metav1.OwnerReference {
-				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
+				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)}}
 			},
 		},
 		{
 			"pod refers other rc as the controller, refers the rc as an owner",
 			func(rc *v1.ReplicationController) []metav1.OwnerReference {
 				return []metav1.OwnerReference{
-					{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar},
+					{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)},
 					{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
 				}
 			},
 			func(rc *v1.ReplicationController) []metav1.OwnerReference {
 				return []metav1.OwnerReference{
-					{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar},
+					{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)},
 					{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
 				}
 			},
@@ -243,7 +243,7 @@ func TestAdoption(t *testing.T) {
 					return false, nil
 				}
 			}); err != nil {
-				t.Fatal(err)
+				t.Fatalf("test %q failed: %v", tc.name, err)
 			}
 			close(stopCh)
 		}