diff --git a/pkg/controller/garbagecollector/BUILD b/pkg/controller/garbagecollector/BUILD index 792ed2e1a9a..bdf8dffde8f 100644 --- a/pkg/controller/garbagecollector/BUILD +++ b/pkg/controller/garbagecollector/BUILD @@ -56,15 +56,18 @@ go_test( srcs = [ "dump_test.go", "garbagecollector_test.go", + "graph_builder_test.go", ], embed = [":go_default_library"], deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/install:go_default_library", + "//pkg/controller/garbagecollector/metaonly:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", @@ -75,12 +78,18 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/metadata:go_default_library", + "//staging/src/k8s.io/client-go/metadata/fake:go_default_library", "//staging/src/k8s.io/client-go/metadata/metadatainformer:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/client-go/testing:go_default_library", + "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/controller-manager/pkg/informerfactory:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/github.com/golang/groupcache/lru:go_default_library", + "//vendor/github.com/google/go-cmp/cmp:go_default_library", 
"//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/golang.org/x/time/rate:go_default_library", "//vendor/gonum.org/v1/gonum/graph:go_default_library", "//vendor/gonum.org/v1/gonum/graph/simple:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", diff --git a/pkg/controller/garbagecollector/garbagecollector_test.go b/pkg/controller/garbagecollector/garbagecollector_test.go index c5972088a63..1ad9101aaee 100644 --- a/pkg/controller/garbagecollector/garbagecollector_test.go +++ b/pkg/controller/garbagecollector/garbagecollector_test.go @@ -17,6 +17,7 @@ limitations under the License. package garbagecollector import ( + "context" "fmt" "net/http" "net/http/httptest" @@ -26,15 +27,21 @@ import ( "testing" "time" + "golang.org/x/time/rate" + + "github.com/golang/groupcache/lru" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" _ "k8s.io/kubernetes/pkg/apis/core/install" + "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly" "k8s.io/utils/pointer" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/meta/testrestmapper" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" @@ -45,8 +52,11 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/metadata" + fakemetadata "k8s.io/client-go/metadata/fake" "k8s.io/client-go/metadata/metadatainformer" restclient "k8s.io/client-go/rest" + clientgotesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/controller-manager/pkg/informerfactory" "k8s.io/kubernetes/pkg/api/legacyscheme" @@ -972,3 +982,1551 @@ func (f *fakeServerResources) getInterfaceUsedCount() int { func (*fakeServerResources) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { return nil, nil } + +func 
TestConflictingData(t *testing.T) { + pod1ns1 := makeID("v1", "Pod", "ns1", "podname1", "poduid1") + pod2ns1 := makeID("v1", "Pod", "ns1", "podname2", "poduid2") + pod2ns2 := makeID("v1", "Pod", "ns2", "podname2", "poduid2") + node1 := makeID("v1", "Node", "", "nodename", "nodeuid1") + + role1v1beta1 := makeID("rbac.authorization.k8s.io/v1beta1", "Role", "ns1", "role1", "roleuid1") + role1v1 := makeID("rbac.authorization.k8s.io/v1", "Role", "ns1", "role1", "roleuid1") + + deployment1apps := makeID("apps/v1", "Deployment", "ns1", "deployment1", "deploymentuid1") + deployment1extensions := makeID("extensions/v1beta1", "Deployment", "ns1", "deployment1", "deploymentuid1") // not served, still referenced + + // when a reference is made to node1 from a namespaced resource, the virtual node inserted has namespace coordinates + node1WithNamespace := makeID("v1", "Node", "ns1", "nodename", "nodeuid1") + + // when a reference is made to pod1 from a cluster-scoped resource, the virtual node inserted has no namespace + pod1nonamespace := makeID("v1", "Pod", "", "podname1", "poduid1") + + badSecretReferenceWithDeploymentUID := makeID("v1", "Secret", "ns1", "secretname", string(deployment1apps.UID)) + badChildPod := makeID("v1", "Pod", "ns1", "badpod", "badpoduid") + goodChildPod := makeID("v1", "Pod", "ns1", "goodpod", "goodpoduid") + + var testScenarios = []struct { + name string + initialObjects []runtime.Object + steps []step + }{ + { + name: "good child in ns1 -> cluster-scoped owner", + steps: []step{ + // setup + createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, node1)), + // observe namespaced child with not-yet-observed cluster-scoped parent + processEvent(makeAddEvent(pod1ns1, node1)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1WithNamespace, virtual)}, // virtual node1 (matching child namespace) + pendingAttemptToDelete: 
[]*node{makeNode(node1WithNamespace, virtual)}, // virtual node1 queued for attempted delete + }), + // handle queued delete of virtual node + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{"get /v1, Resource=nodes name=nodename"}, + graphNodes: []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1WithNamespace, virtual)}, // virtual node1 (matching child namespace) + pendingAttemptToDelete: []*node{makeNode(node1WithNamespace, virtual)}, // virtual node1 still not observed, got requeued + }), + // observe cluster-scoped parent + processEvent(makeAddEvent(node1)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1)}, // node1 switched to observed, fixed namespace coordinate + pendingAttemptToDelete: []*node{makeNode(node1WithNamespace, virtual)}, // virtual node1 queued for attempted delete + }), + // handle queued delete of virtual node + // final state: child and parent present in graph, no queued actions + processAttemptToDelete(1), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1)}, + }), + }, + }, + // child in namespace A with owner reference to namespaced type in namespace B + // * should be deleted immediately + // * event should be logged in namespace A with involvedObject of bad-child indicating the error + { + name: "bad child in ns1 -> owner in ns2 (child first)", + steps: []step{ + // 0,1: setup + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, pod2ns1)), + createObjectInClient("", "v1", "pods", "ns2", makeMetadataObj(pod2ns2)), + // 2,3: observe namespaced child with not-yet-observed namespace-scoped parent + processEvent(makeAddEvent(pod1ns1, pod2ns2)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns1, virtual)}, // virtual pod2 (matching child namespace) + pendingAttemptToDelete: []*node{makeNode(pod2ns1, virtual)}, // virtual pod2 queued for attempted 
delete + }), + // 4,5: observe parent + processEvent(makeAddEvent(pod2ns2)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns2)}, // pod2 is no longer virtual, namespace coordinate is corrected + pendingAttemptToDelete: []*node{makeNode(pod2ns1, virtual), makeNode(pod1ns1)}, // virtual pod2 still queued for attempted delete, bad child pod1 queued because it disagreed with observed parent + events: []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: ns1, name: podname2, uid: poduid2] does not exist in namespace "ns1" involvedObject{kind=Pod,apiVersion=v1}`}, + }), + // 6,7: handle queued delete of virtual parent + processAttemptToDelete(1), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns2)}, + pendingAttemptToDelete: []*node{makeNode(pod1ns1)}, // bad child pod1 queued because it disagreed with observed parent + }), + // 8,9: handle queued delete of bad child + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname1", // lookup of pod1 pre-delete + "get /v1, Resource=pods ns=ns1 name=podname2", // verification bad parent reference is absent + "delete /v1, Resource=pods ns=ns1 name=podname1", // pod1 delete + }, + graphNodes: []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns2)}, + absentOwnerCache: []objectReference{pod2ns1}, // cached absence of bad parent + }), + // 10,11: observe delete issued in step 8 + // final state: parent present in graph, no queued actions + processEvent(makeDeleteEvent(pod1ns1)), + assertState(state{ + graphNodes: []*node{makeNode(pod2ns2)}, // only good parent remains + absentOwnerCache: []objectReference{pod2ns1}, // cached absence of bad parent + }), + }, + }, + { + name: "bad child in ns1 -> owner in ns2 (owner first)", + steps: []step{ + // 0,1: setup + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, pod2ns1)), + 
createObjectInClient("", "v1", "pods", "ns2", makeMetadataObj(pod2ns2)), + // 2,3: observe parent + processEvent(makeAddEvent(pod2ns2)), + assertState(state{ + graphNodes: []*node{makeNode(pod2ns2)}, + }), + // 4,5: observe namespaced child with invalid cross-namespace reference to parent + processEvent(makeAddEvent(pod1ns1, pod2ns1)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(pod2ns1)), makeNode(pod2ns2)}, + pendingAttemptToDelete: []*node{makeNode(pod1ns1)}, // bad child queued for attempted delete + events: []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: ns1, name: podname2, uid: poduid2] does not exist in namespace "ns1" involvedObject{kind=Pod,apiVersion=v1}`}, + }), + // 6,7: handle queued delete of bad child + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname1", // lookup of pod1 pre-delete + "get /v1, Resource=pods ns=ns1 name=podname2", // verification bad parent reference is absent + "delete /v1, Resource=pods ns=ns1 name=podname1", // pod1 delete + }, + graphNodes: []*node{makeNode(pod1ns1, withOwners(pod2ns1)), makeNode(pod2ns2)}, + pendingAttemptToDelete: []*node{}, + absentOwnerCache: []objectReference{pod2ns1}, // cached absence of bad parent + }), + // 8,9: observe delete issued in step 6 + // final state: parent present in graph, no queued actions + processEvent(makeDeleteEvent(pod1ns1)), + assertState(state{ + graphNodes: []*node{makeNode(pod2ns2)}, // only good parent remains + absentOwnerCache: []objectReference{pod2ns1}, // cached absence of bad parent + }), + }, + }, + // child that is cluster-scoped with owner reference to namespaced type in namespace B + // * should not be deleted + // * event should be logged in namespace kube-system with involvedObject of bad-child indicating the error + { + name: "bad cluster-scoped child -> owner in ns1 (child first)", + steps: []step{ + // setup + createObjectInClient("", "v1", 
"nodes", "", makeMetadataObj(node1, pod1ns1)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1)), + // 2,3: observe cluster-scoped child with not-yet-observed namespaced parent + processEvent(makeAddEvent(node1, pod1ns1)), + assertState(state{ + graphNodes: []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1nonamespace, virtual)}, // virtual pod1 (with no namespace) + pendingAttemptToDelete: []*node{makeNode(pod1nonamespace, virtual)}, // virtual pod1 queued for attempted delete + }), + // 4,5: handle queued delete of virtual pod1 + processAttemptToDelete(1), + assertState(state{ + graphNodes: []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1nonamespace, virtual)}, // virtual pod1 (with no namespace) + pendingAttemptToDelete: []*node{}, // namespace-scoped virtual object without a namespace coordinate not re-queued + }), + // 6,7: observe namespace-scoped parent + processEvent(makeAddEvent(pod1ns1)), + assertState(state{ + graphNodes: []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)}, // pod1 namespace coordinate corrected, made non-virtual + events: []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: , name: podname1, uid: poduid1] does not exist in namespace "" involvedObject{kind=Node,apiVersion=v1}`}, + pendingAttemptToDelete: []*node{makeNode(node1, withOwners(pod1ns1))}, // bad cluster-scoped child added to attemptToDelete queue + }), + // 8,9: handle queued attempted delete of bad cluster-scoped child + // final state: parent and child present in graph, no queued actions + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=nodes name=nodename", // lookup of node pre-delete + }, + graphNodes: []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)}, + }), + }, + }, + { + name: "bad cluster-scoped child -> owner in ns1 (owner first)", + steps: []step{ + // setup + createObjectInClient("", "v1", 
"nodes", "", makeMetadataObj(node1, pod1ns1)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1)), + // 2,3: observe namespace-scoped parent + processEvent(makeAddEvent(pod1ns1)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1)}, + }), + // 4,5: observe cluster-scoped child + processEvent(makeAddEvent(node1, pod1ns1)), + assertState(state{ + graphNodes: []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)}, + events: []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: , name: podname1, uid: poduid1] does not exist in namespace "" involvedObject{kind=Node,apiVersion=v1}`}, + pendingAttemptToDelete: []*node{makeNode(node1, withOwners(pod1ns1))}, // bad cluster-scoped child added to attemptToDelete queue + }), + // 6,7: handle queued attempted delete of bad cluster-scoped child + // final state: parent and child present in graph, no queued actions + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=nodes name=nodename", // lookup of node pre-delete + }, + graphNodes: []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)}, + }), + }, + }, + // child pointing at non-preferred still-served apiVersion of parent object (e.g. 
rbac/v1beta1) + // * should not be deleted prematurely + // * should not repeatedly poll attemptToDelete while waiting + // * should be deleted when the actual parent is deleted + { + name: "good child -> existing owner with non-preferred accessible API version", + steps: []step{ + // setup + createObjectInClient("rbac.authorization.k8s.io", "v1", "roles", "ns1", makeMetadataObj(role1v1)), + createObjectInClient("rbac.authorization.k8s.io", "v1beta1", "roles", "ns1", makeMetadataObj(role1v1beta1)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, role1v1beta1)), + // 3,4: observe child + processEvent(makeAddEvent(pod1ns1, role1v1beta1)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1beta1, virtual)}, + pendingAttemptToDelete: []*node{makeNode(role1v1beta1, virtual)}, // virtual parent enqueued for delete attempt + }), + // 5,6: handle queued attempted delete of virtual parent + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get rbac.authorization.k8s.io/v1beta1, Resource=roles ns=ns1 name=role1", // lookup of node pre-delete + }, + graphNodes: []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1beta1, virtual)}, + pendingAttemptToDelete: []*node{makeNode(role1v1beta1, virtual)}, // not yet observed, still in the attemptToDelete queue + }), + // 7,8: observe parent via v1 + processEvent(makeAddEvent(role1v1)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1)}, // parent version/virtual state gets corrected + pendingAttemptToDelete: []*node{makeNode(role1v1beta1, virtual), makeNode(pod1ns1, withOwners(role1v1beta1))}, // virtual parent and mismatched child enqueued for delete attempt + }), + // 9,10: process attemptToDelete + // virtual node dropped from attemptToDelete with no further action because the real node has been observed now + processAttemptToDelete(1), + 
assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1)}, + pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(role1v1beta1))}, // mismatched child enqueued for delete attempt + }), + // 11,12: process attemptToDelete for mismatched parent + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete + "get rbac.authorization.k8s.io/v1beta1, Resource=roles ns=ns1 name=role1", // verifying parent is solid + }, + graphNodes: []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1)}, + }), + // 13,14: teardown + deleteObjectFromClient("rbac.authorization.k8s.io", "v1", "roles", "ns1", "role1"), + deleteObjectFromClient("rbac.authorization.k8s.io", "v1beta1", "roles", "ns1", "role1"), + // 15,16: observe delete via v1 + processEvent(makeDeleteEvent(role1v1)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(role1v1beta1))}, // only child remains + absentOwnerCache: []objectReference{role1v1}, // cached absence of parent via v1 + pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(role1v1beta1))}, + }), + // 17,18: process attemptToDelete for child + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete + "get rbac.authorization.k8s.io/v1beta1, Resource=roles ns=ns1 name=role1", // verifying parent is solid + "delete /v1, Resource=pods ns=ns1 name=podname1", + }, + absentOwnerCache: []objectReference{role1v1, role1v1beta1}, // cached absence of v1beta1 role + graphNodes: []*node{makeNode(pod1ns1, withOwners(role1v1beta1))}, + }), + // 19,20: observe delete issued in step 17 + // final state: empty graph, no queued actions + processEvent(makeDeleteEvent(pod1ns1)), + assertState(state{ + absentOwnerCache: []objectReference{role1v1, role1v1beta1}, + }), + }, + }, + // child 
pointing at no-longer-served apiVersion of still-existing parent object (e.g. extensions/v1beta1 deployment) + // * should not be deleted (this is indistinguishable from referencing an unknown kind/version) + // * virtual parent should not repeatedly poll attemptToDelete once real parent is observed + { + name: "child -> existing owner with inaccessible API version (child first)", + steps: []step{ + // setup + createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)), + // 2,3: observe child + processEvent(makeAddEvent(pod1ns1, deployment1extensions)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)}, + pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // virtual parent enqueued for delete attempt + }), + // 4,5: handle queued attempted delete of virtual parent + processAttemptToDelete(1), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)}, + pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // requeued on restmapper error + }), + // 6,7: observe parent via v1 + processEvent(makeAddEvent(deployment1apps)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)}, // parent version/virtual state gets corrected + pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual), makeNode(pod1ns1, withOwners(deployment1extensions))}, // virtual parent and mismatched child enqueued for delete attempt + }), + // 8,9: process attemptToDelete + // virtual node dropped from attemptToDelete with no further action because the real node has been observed now + processAttemptToDelete(1), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, 
withOwners(deployment1extensions)), makeNode(deployment1apps)}, + pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child enqueued for delete attempt + }), + // 10,11: process attemptToDelete for mismatched child + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete + }, + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)}, + pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child still enqueued - restmapper error + }), + // 12: teardown + deleteObjectFromClient("apps", "v1", "deployments", "ns1", "deployment1"), + // 13,14: observe delete via v1 + processEvent(makeDeleteEvent(deployment1apps)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains + absentOwnerCache: []objectReference{deployment1apps}, // cached absence of parent via v1 + pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, + }), + // 17,18: process attemptToDelete for child + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete + }, + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains + absentOwnerCache: []objectReference{deployment1apps}, + pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child still enqueued - restmapper error + }), + }, + }, + { + name: "child -> existing owner with inaccessible API version (owner first)", + steps: []step{ + // setup + createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)), + // 2,3: 
observe parent via v1 + processEvent(makeAddEvent(deployment1apps)), + assertState(state{ + graphNodes: []*node{makeNode(deployment1apps)}, + }), + // 4,5: observe child + processEvent(makeAddEvent(pod1ns1, deployment1extensions)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)}, + pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child enqueued for delete attempt + }), + // 6,7: process attemptToDelete for mismatched child + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete + }, + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)}, + pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child still enqueued - restmapper error + }), + // 8: teardown + deleteObjectFromClient("apps", "v1", "deployments", "ns1", "deployment1"), + // 9,10: observe delete via v1 + processEvent(makeDeleteEvent(deployment1apps)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains + absentOwnerCache: []objectReference{deployment1apps}, // cached absence of parent via v1 + pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, + }), + // 11,12: process attemptToDelete for child + // final state: child with unresolveable ownerRef remains, queued in pendingAttemptToDelete + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete + }, + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains + absentOwnerCache: []objectReference{deployment1apps}, + pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // 
mismatched child still enqueued - restmapper error + }), + }, + }, + // child pointing at no-longer-served apiVersion of no-longer-existing parent object (e.g. extensions/v1beta1 deployment) + // * should not be deleted (this is indistinguishable from referencing an unknown kind/version) + // * should repeatedly poll attemptToDelete + // * should not block deletion of legitimate children of missing deployment + { + name: "child -> non-existent owner with inaccessible API version (inaccessible parent apiVersion first)", + steps: []step{ + // setup + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, deployment1apps)), + // 2,3: observe child pointing at no-longer-served apiVersion + processEvent(makeAddEvent(pod1ns1, deployment1extensions)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)}, + pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // virtual parent enqueued for delete attempt + }), + // 4,5: observe child pointing at served apiVersion where owner does not exist + processEvent(makeAddEvent(pod2ns1, deployment1apps)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))}, + pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))}, // mismatched child enqueued for delete attempt + }), + // 6,7: handle attempt to delete virtual parent for inaccessible apiVersion + processAttemptToDelete(1), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))}, + pendingAttemptToDelete: []*node{makeNode(pod2ns1, 
withOwners(deployment1apps)), makeNode(deployment1extensions, virtual)}, // inaccessible parent requeued to end + }), + // 8,9: handle attempt to delete mismatched child + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname2", // lookup of child pre-delete + "get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of parent + "delete /v1, Resource=pods ns=ns1 name=podname2", // delete child + }, + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))}, + absentOwnerCache: []objectReference{deployment1apps}, // verifiably absent parent remembered + pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // mismatched child with verifiably absent parent deleted + }), + // 10,11: observe delete issued in step 8 + processEvent(makeDeleteEvent(pod2ns1)), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)}, + absentOwnerCache: []objectReference{deployment1apps}, + pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, + }), + // 12,13: final state: inaccessible parent requeued in attemptToDelete + processAttemptToDelete(1), + assertState(state{ + graphNodes: []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)}, + absentOwnerCache: []objectReference{deployment1apps}, + pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, + }), + }, + }, + + { + name: "child -> non-existent owner with inaccessible API version (accessible parent apiVersion first)", + steps: []step{ + // setup + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, deployment1apps)), + // 2,3: observe child 
pointing at served apiVersion where owner does not exist + processEvent(makeAddEvent(pod2ns1, deployment1apps)), + assertState(state{ + graphNodes: []*node{ + makeNode(pod2ns1, withOwners(deployment1apps)), + makeNode(deployment1apps, virtual)}, + pendingAttemptToDelete: []*node{ + makeNode(deployment1apps, virtual)}, // virtual parent enqueued for delete attempt + }), + // 4,5: observe child pointing at no-longer-served apiVersion + processEvent(makeAddEvent(pod1ns1, deployment1extensions)), + assertState(state{ + graphNodes: []*node{ + makeNode(pod2ns1, withOwners(deployment1apps)), + makeNode(deployment1apps, virtual), + makeNode(pod1ns1, withOwners(deployment1extensions))}, + pendingAttemptToDelete: []*node{ + makeNode(deployment1apps, virtual), + makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child enqueued for delete attempt + }), + // 6,7: handle attempt to delete virtual parent for accessible apiVersion + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of parent, gets 404 + }, + pendingGraphChanges: []*event{makeVirtualDeleteEvent(deployment1apps)}, // virtual parent not found, queued virtual delete event + graphNodes: []*node{ + makeNode(pod2ns1, withOwners(deployment1apps)), + makeNode(deployment1apps, virtual), + makeNode(pod1ns1, withOwners(deployment1extensions))}, + pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, + }), + // 8,9: handle attempt to delete mismatched child + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete + }, + pendingGraphChanges: []*event{makeVirtualDeleteEvent(deployment1apps)}, + graphNodes: []*node{ + makeNode(pod2ns1, withOwners(deployment1apps)), + makeNode(deployment1apps, virtual), + makeNode(pod1ns1, withOwners(deployment1extensions))}, + pendingAttemptToDelete: 
[]*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // restmapper on inaccessible parent, requeued + }), + // 10,11: handle queued virtual delete event + processPendingGraphChanges(1), + assertState(state{ + graphNodes: []*node{ + makeNode(pod2ns1, withOwners(deployment1apps)), + makeNode(deployment1extensions, virtual), // deployment node changed identity to alternative virtual identity + makeNode(pod1ns1, withOwners(deployment1extensions)), + }, + absentOwnerCache: []objectReference{deployment1apps}, // absent apps/v1 parent remembered + pendingAttemptToDelete: []*node{ + makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion + makeNode(pod2ns1, withOwners(deployment1apps)), // children of absent apps/v1 parent queued for delete attempt + makeNode(deployment1extensions, virtual), // new virtual parent queued for delete attempt + }, + }), + + // 12,13: handle attempt to delete child referencing inaccessible apiVersion + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete + }, + graphNodes: []*node{ + makeNode(pod2ns1, withOwners(deployment1apps)), + makeNode(deployment1extensions, virtual), + makeNode(pod1ns1, withOwners(deployment1extensions))}, + absentOwnerCache: []objectReference{deployment1apps}, + pendingAttemptToDelete: []*node{ + makeNode(pod2ns1, withOwners(deployment1apps)), // children of absent apps/v1 parent queued for delete attempt + makeNode(deployment1extensions, virtual), // new virtual parent queued for delete attempt + makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion - requeued to end + }, + }), + + // 14,15: handle attempt to delete child referencing accessible apiVersion + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=podname2", // lookup of child pre-delete + "delete /v1, 
Resource=pods ns=ns1 name=podname2", // parent absent, delete + }, + graphNodes: []*node{ + makeNode(pod2ns1, withOwners(deployment1apps)), + makeNode(deployment1extensions, virtual), + makeNode(pod1ns1, withOwners(deployment1extensions))}, + absentOwnerCache: []objectReference{deployment1apps}, + pendingAttemptToDelete: []*node{ + makeNode(deployment1extensions, virtual), // new virtual parent queued for delete attempt + makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion + }, + }), + + // 16,17: handle attempt to delete virtual parent in inaccessible apiVersion + processAttemptToDelete(1), + assertState(state{ + graphNodes: []*node{ + makeNode(pod2ns1, withOwners(deployment1apps)), + makeNode(deployment1extensions, virtual), + makeNode(pod1ns1, withOwners(deployment1extensions))}, + absentOwnerCache: []objectReference{deployment1apps}, + pendingAttemptToDelete: []*node{ + makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion + makeNode(deployment1extensions, virtual), // virtual parent with inaccessible apiVersion - requeued to end + }, + }), + + // 18,19: observe delete of pod2 from step 14 + // final state: virtual parent for inaccessible apiVersion and child of that parent remain in graph, queued for delete attempts with backoff + processEvent(makeDeleteEvent(pod2ns1)), + assertState(state{ + graphNodes: []*node{ + makeNode(deployment1extensions, virtual), + makeNode(pod1ns1, withOwners(deployment1extensions))}, + absentOwnerCache: []objectReference{deployment1apps}, + pendingAttemptToDelete: []*node{ + makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion + makeNode(deployment1extensions, virtual), // virtual parent with inaccessible apiVersion + }, + }), + }, + }, + // child pointing at incorrect apiVersion/kind of still-existing parent object (e.g. 
core/v1 Secret with uid=123, where an apps/v1 Deployment with uid=123 exists) + // * should be deleted immediately + // * should not trigger deletion of legitimate children of parent + { + name: "bad child -> existing owner with incorrect API version (bad child, good child, bad parent delete, good parent)", + steps: []step{ + // setup + createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(badChildPod, badSecretReferenceWithDeploymentUID)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(goodChildPod, deployment1apps)), + // 3,4: observe bad child + processEvent(makeAddEvent(badChildPod, badSecretReferenceWithDeploymentUID)), + assertState(state{ + graphNodes: []*node{ + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(badSecretReferenceWithDeploymentUID, virtual)}, + pendingAttemptToDelete: []*node{ + makeNode(badSecretReferenceWithDeploymentUID, virtual)}, // virtual parent enqueued for delete attempt + }), + + // 5,6: observe good child + processEvent(makeAddEvent(goodChildPod, deployment1apps)), + assertState(state{ + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), // good child added + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(badSecretReferenceWithDeploymentUID, virtual)}, + pendingAttemptToDelete: []*node{ + makeNode(badSecretReferenceWithDeploymentUID, virtual), // virtual parent enqueued for delete attempt + makeNode(goodChildPod, withOwners(deployment1apps)), // good child enqueued for delete attempt + }, + }), + + // 7,8: process pending delete of virtual parent + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=secrets ns=ns1 name=secretname", // lookup of bad parent reference + }, + pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual 
parent not found, queued virtual delete event + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(badSecretReferenceWithDeploymentUID, virtual)}, + pendingAttemptToDelete: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), // good child enqueued for delete attempt + }, + }), + + // 9,10: process pending delete of good child, gets 200, remains + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=goodpod", // lookup of child pre-delete + "get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent reference, returns 200 + }, + pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(badSecretReferenceWithDeploymentUID, virtual)}, + }), + + // 11,12: process virtual delete event of bad parent reference + processPendingGraphChanges(1), + assertState(state{ + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(deployment1apps, virtual)}, // parent node switched to alternate identity, still virtual + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, // remember absence of bad parent coordinates + pendingAttemptToDelete: []*node{ + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // child of bad parent coordinates enqueued for delete attempt + makeNode(deployment1apps, virtual), // new alternate virtual parent identity queued for delete attempt + }, + }), + + // 13,14: process pending delete of bad child + processAttemptToDelete(1), + assertState(state{ + 
clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=badpod", // lookup of child pre-delete + "delete /v1, Resource=pods ns=ns1 name=badpod", // delete of bad child (absence of bad parent is cached) + }, + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(deployment1apps, virtual)}, // parent node switched to alternate identity, still virtual + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, + pendingAttemptToDelete: []*node{ + makeNode(deployment1apps, virtual), // new alternate virtual parent identity queued for delete attempt + }, + }), + + // 15,16: process pending delete of new virtual parent + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of virtual parent, returns 200 + }, + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(deployment1apps, virtual)}, // parent node switched to alternate identity, still virtual + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, + pendingAttemptToDelete: []*node{ + makeNode(deployment1apps, virtual), // requeued, not yet observed + }, + }), + + // 17,18: observe good parent + processEvent(makeAddEvent(deployment1apps)), + assertState(state{ + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(deployment1apps)}, // parent node made non-virtual + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, + pendingAttemptToDelete: []*node{ + makeNode(deployment1apps), // still queued, no longer virtual + }, + }), + + // 19,20: observe delete of bad child from step 13 + processEvent(makeDeleteEvent(badChildPod, 
badSecretReferenceWithDeploymentUID)), + assertState(state{ + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + // bad child node removed + makeNode(deployment1apps)}, + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, + pendingAttemptToDelete: []*node{ + makeNode(deployment1apps), // still queued, no longer virtual + }, + }), + + // 21,22: process pending delete of good parent + // final state: good parent in graph with correct coordinates, good children remain, no pending deletions + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent, returns 200 + }, + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(deployment1apps)}, + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, + }), + }, + }, + { + name: "bad child -> existing owner with incorrect API version (bad child, good child, good parent, bad parent delete)", + steps: []step{ + // setup + createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(badChildPod, badSecretReferenceWithDeploymentUID)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(goodChildPod, deployment1apps)), + // 3,4: observe bad child + processEvent(makeAddEvent(badChildPod, badSecretReferenceWithDeploymentUID)), + assertState(state{ + graphNodes: []*node{ + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(badSecretReferenceWithDeploymentUID, virtual)}, + pendingAttemptToDelete: []*node{ + makeNode(badSecretReferenceWithDeploymentUID, virtual)}, // virtual parent enqueued for delete attempt + }), + + // 5,6: observe good child + processEvent(makeAddEvent(goodChildPod, deployment1apps)), + assertState(state{ + graphNodes: []*node{ + makeNode(goodChildPod, 
withOwners(deployment1apps)), // good child added + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(badSecretReferenceWithDeploymentUID, virtual)}, + pendingAttemptToDelete: []*node{ + makeNode(badSecretReferenceWithDeploymentUID, virtual), // virtual parent enqueued for delete attempt + makeNode(goodChildPod, withOwners(deployment1apps)), // good child enqueued for delete attempt + }, + }), + + // 7,8: process pending delete of virtual parent + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=secrets ns=ns1 name=secretname", // lookup of bad parent reference + }, + pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(badSecretReferenceWithDeploymentUID, virtual)}, + pendingAttemptToDelete: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), // good child enqueued for delete attempt + }, + }), + + // 9,10: process pending delete of good child, gets 200, remains + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=goodpod", // lookup of child pre-delete + "get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent reference, returns 200 + }, + pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(badSecretReferenceWithDeploymentUID, virtual)}, + }), + + // 11,12: good parent add event + insertEvent(makeAddEvent(deployment1apps)), + assertState(state{ + pendingGraphChanges: 
[]*event{ + makeAddEvent(deployment1apps), // good parent observation sneaked in + makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(badSecretReferenceWithDeploymentUID, virtual)}, + }), + + // 13,14: process good parent add + processPendingGraphChanges(1), + assertState(state{ + pendingGraphChanges: []*event{ + makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent still queued virtual delete event + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(deployment1apps)}, // parent node gets fixed, no longer virtual + pendingAttemptToDelete: []*node{ + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))}, // child of bad parent coordinates enqueued for delete attempt + }), + + // 15,16: process virtual delete event of bad parent reference + processPendingGraphChanges(1), + assertState(state{ + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(deployment1apps)}, + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, // remember absence of bad parent coordinates + pendingAttemptToDelete: []*node{ + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // child of bad parent coordinates enqueued for delete attempt + }, + }), + + // 17,18: process pending delete of bad child + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=badpod", // lookup of child pre-delete + "delete /v1, Resource=pods ns=ns1 name=badpod", // delete of bad child (absence of bad parent is cached) + }, + 
graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), + makeNode(deployment1apps)}, + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, + }), + + // 19,20: observe delete of bad child from step 17 + // final state: good parent in graph with correct coordinates, good children remain, no pending deletions + processEvent(makeDeleteEvent(badChildPod, badSecretReferenceWithDeploymentUID)), + assertState(state{ + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + // bad child node removed + makeNode(deployment1apps)}, + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, + }), + }, + }, + { + name: "bad child -> existing owner with incorrect API version (good child, bad child, good parent)", + steps: []step{ + // setup + createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(badChildPod, badSecretReferenceWithDeploymentUID)), + createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(goodChildPod, deployment1apps)), + // 3,4: observe good child + processEvent(makeAddEvent(goodChildPod, deployment1apps)), + assertState(state{ + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), // good child added + makeNode(deployment1apps, virtual)}, // virtual parent added + pendingAttemptToDelete: []*node{ + makeNode(deployment1apps, virtual), // virtual parent enqueued for delete attempt + }, + }), + + // 5,6: observe bad child + processEvent(makeAddEvent(badChildPod, badSecretReferenceWithDeploymentUID)), + assertState(state{ + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(deployment1apps, virtual), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))}, // bad child added + pendingAttemptToDelete: []*node{ + 
makeNode(deployment1apps, virtual), // virtual parent enqueued for delete attempt + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // bad child enqueued for delete attempt + }, + }), + + // 7,8: process pending delete of virtual parent + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent reference, returns 200 + }, + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(deployment1apps, virtual), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))}, + pendingAttemptToDelete: []*node{ + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // bad child enqueued for delete attempt + makeNode(deployment1apps, virtual), // virtual parent requeued to end, still virtual + }, + }), + + // 9,10: process pending delete of bad child + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get /v1, Resource=pods ns=ns1 name=badpod", // lookup of child pre-delete + "get /v1, Resource=secrets ns=ns1 name=secretname", // lookup of bad parent reference, returns 404 + "delete /v1, Resource=pods ns=ns1 name=badpod", // delete of bad child + }, + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(deployment1apps, virtual), + makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))}, + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, // remember absence of bad parent + pendingAttemptToDelete: []*node{ + makeNode(deployment1apps, virtual), // virtual parent requeued to end, still virtual + }, + }), + + // 11,12: observe good parent + processEvent(makeAddEvent(deployment1apps)), + assertState(state{ + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(deployment1apps), // good parent no longer virtual + makeNode(badChildPod, 
withOwners(badSecretReferenceWithDeploymentUID))}, + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, + pendingAttemptToDelete: []*node{ + makeNode(deployment1apps), // parent requeued to end, no longer virtual + }, + }), + + // 13,14: observe delete of bad child from step 9 + processEvent(makeDeleteEvent(badChildPod, badSecretReferenceWithDeploymentUID)), + assertState(state{ + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + // bad child node removed + makeNode(deployment1apps)}, + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, + pendingAttemptToDelete: []*node{ + makeNode(deployment1apps), // parent requeued to end, no longer virtual + }, + }), + + // 15,16: process pending delete of good parent + // final state: good parent in graph with correct coordinates, good children remain, no pending deletions + processAttemptToDelete(1), + assertState(state{ + clientActions: []string{ + "get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent, returns 200 + }, + graphNodes: []*node{ + makeNode(goodChildPod, withOwners(deployment1apps)), + makeNode(deployment1apps)}, + absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, + }), + }, + }, + } + + alwaysStarted := make(chan struct{}) + close(alwaysStarted) + for _, scenario := range testScenarios { + t.Run(scenario.name, func(t *testing.T) { + + absentOwnerCache := NewReferenceCache(100) + + eventRecorder := record.NewFakeRecorder(100) + eventRecorder.IncludeObject = true + + metadataClient := fakemetadata.NewSimpleMetadataClient(legacyscheme.Scheme) + + tweakableRM := meta.NewDefaultRESTMapper(nil) + tweakableRM.AddSpecific( + schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"}, + schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"}, + schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", 
Resource: "role"}, + meta.RESTScopeNamespace, + ) + tweakableRM.AddSpecific( + schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "Role"}, + schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"}, + schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "role"}, + meta.RESTScopeNamespace, + ) + tweakableRM.AddSpecific( + schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, + schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, + schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"}, + meta.RESTScopeNamespace, + ) + restMapper := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}} + + // set up our workqueues + attemptToDelete := newTrackingWorkqueue() + attemptToOrphan := newTrackingWorkqueue() + graphChanges := newTrackingWorkqueue() + + gc := &GarbageCollector{ + metadataClient: metadataClient, + restMapper: restMapper, + attemptToDelete: attemptToDelete, + attemptToOrphan: attemptToOrphan, + absentOwnerCache: absentOwnerCache, + dependencyGraphBuilder: &GraphBuilder{ + eventRecorder: eventRecorder, + metadataClient: metadataClient, + informersStarted: alwaysStarted, + graphChanges: graphChanges, + uidToNode: &concurrentUIDToNode{ + uidToNodeLock: sync.RWMutex{}, + uidToNode: make(map[types.UID]*node), + }, + attemptToDelete: attemptToDelete, + absentOwnerCache: absentOwnerCache, + }, + } + + ctx := stepContext{ + t: t, + gc: gc, + eventRecorder: eventRecorder, + metadataClient: metadataClient, + attemptToDelete: attemptToDelete, + attemptToOrphan: attemptToOrphan, + graphChanges: graphChanges, + } + for i, s := range scenario.steps { + ctx.t.Logf("%d: %s", i, s.name) + s.check(ctx) + if ctx.t.Failed() { + return + } + verifyGraphInvariants(fmt.Sprintf("after step %d", i), 
gc.dependencyGraphBuilder.uidToNode.uidToNode, t) + if ctx.t.Failed() { + return + } + } + }) + } +} + +func makeID(groupVersion string, kind string, namespace, name, uid string) objectReference { + return objectReference{ + OwnerReference: metav1.OwnerReference{APIVersion: groupVersion, Kind: kind, Name: name, UID: types.UID(uid)}, + Namespace: namespace, + } +} + +type nodeTweak func(*node) *node + +func virtual(n *node) *node { + n.virtual = true + return n +} +func withOwners(ownerReferences ...objectReference) nodeTweak { + return func(n *node) *node { + var owners []metav1.OwnerReference + for _, o := range ownerReferences { + owners = append(owners, o.OwnerReference) + } + n.owners = owners + return n + } +} + +func makeNode(identity objectReference, tweaks ...nodeTweak) *node { + n := &node{identity: identity} + for _, tweak := range tweaks { + n = tweak(n) + } + return n +} + +func makeAddEvent(identity objectReference, owners ...objectReference) *event { + gv, err := schema.ParseGroupVersion(identity.APIVersion) + if err != nil { + panic(err) + } + return &event{ + eventType: addEvent, + gvk: gv.WithKind(identity.Kind), + obj: makeObj(identity, owners...), + } +} + +func makeVirtualDeleteEvent(identity objectReference, owners ...objectReference) *event { + e := makeDeleteEvent(identity, owners...) 
+ e.virtual = true + return e +} + +func makeDeleteEvent(identity objectReference, owners ...objectReference) *event { + gv, err := schema.ParseGroupVersion(identity.APIVersion) + if err != nil { + panic(err) + } + return &event{ + eventType: deleteEvent, + gvk: gv.WithKind(identity.Kind), + obj: makeObj(identity, owners...), + } +} + +func makeObj(identity objectReference, owners ...objectReference) *metaonly.MetadataOnlyObject { + obj := &metaonly.MetadataOnlyObject{ + TypeMeta: metav1.TypeMeta{APIVersion: identity.APIVersion, Kind: identity.Kind}, + ObjectMeta: metav1.ObjectMeta{Namespace: identity.Namespace, UID: identity.UID, Name: identity.Name}, + } + for _, owner := range owners { + obj.ObjectMeta.OwnerReferences = append(obj.ObjectMeta.OwnerReferences, owner.OwnerReference) + } + return obj +} + +func makeMetadataObj(identity objectReference, owners ...objectReference) *metav1.PartialObjectMetadata { + obj := &metav1.PartialObjectMetadata{ + TypeMeta: metav1.TypeMeta{APIVersion: identity.APIVersion, Kind: identity.Kind}, + ObjectMeta: metav1.ObjectMeta{Namespace: identity.Namespace, UID: identity.UID, Name: identity.Name}, + } + for _, owner := range owners { + obj.ObjectMeta.OwnerReferences = append(obj.ObjectMeta.OwnerReferences, owner.OwnerReference) + } + return obj +} + +type stepContext struct { + t *testing.T + gc *GarbageCollector + eventRecorder *record.FakeRecorder + metadataClient *fakemetadata.FakeMetadataClient + attemptToDelete *trackingWorkqueue + attemptToOrphan *trackingWorkqueue + graphChanges *trackingWorkqueue +} + +type step struct { + name string + check func(stepContext) +} + +func processPendingGraphChanges(count int) step { + return step{ + name: "processPendingGraphChanges", + check: func(ctx stepContext) { + ctx.t.Helper() + if count <= 0 { + // process all + for ctx.gc.dependencyGraphBuilder.graphChanges.Len() != 0 { + ctx.gc.dependencyGraphBuilder.processGraphChanges() + } + } else { + for i := 0; i < count; i++ { + if 
ctx.gc.dependencyGraphBuilder.graphChanges.Len() == 0 { + ctx.t.Errorf("expected at least %d pending changes, got %d", count, i+1) + return + } + ctx.gc.dependencyGraphBuilder.processGraphChanges() + } + } + }, + } +} + +func processAttemptToDelete(count int) step { + return step{ + name: "processAttemptToDelete", + check: func(ctx stepContext) { + ctx.t.Helper() + if count <= 0 { + // process all + for ctx.gc.dependencyGraphBuilder.attemptToDelete.Len() != 0 { + ctx.gc.attemptToDeleteWorker() + } + } else { + for i := 0; i < count; i++ { + if ctx.gc.dependencyGraphBuilder.attemptToDelete.Len() == 0 { + ctx.t.Errorf("expected at least %d pending changes, got %d", count, i+1) + return + } + ctx.gc.attemptToDeleteWorker() + } + } + }, + } +} + +func insertEvent(e *event) step { + return step{ + name: "insertEvent", + check: func(ctx stepContext) { + ctx.t.Helper() + // drain queue into items + var items []interface{} + for ctx.gc.dependencyGraphBuilder.graphChanges.Len() > 0 { + item, _ := ctx.gc.dependencyGraphBuilder.graphChanges.Get() + ctx.gc.dependencyGraphBuilder.graphChanges.Done(item) + items = append(items, item) + } + + // add the new event + ctx.gc.dependencyGraphBuilder.graphChanges.Add(e) + + // reappend the items + for _, item := range items { + ctx.gc.dependencyGraphBuilder.graphChanges.Add(item) + } + }, + } +} + +func processEvent(e *event) step { + return step{ + name: "processEvent", + check: func(ctx stepContext) { + ctx.t.Helper() + if ctx.gc.dependencyGraphBuilder.graphChanges.Len() != 0 { + ctx.t.Fatalf("events present in graphChanges, must process pending graphChanges before calling processEvent") + } + ctx.gc.dependencyGraphBuilder.graphChanges.Add(e) + ctx.gc.dependencyGraphBuilder.processGraphChanges() + }, + } +} + +func createObjectInClient(group, version, resource, namespace string, obj *metav1.PartialObjectMetadata) step { + return step{ + name: "createObjectInClient", + check: func(ctx stepContext) { + ctx.t.Helper() + if 
len(ctx.metadataClient.Actions()) > 0 { + ctx.t.Fatal("cannot call createObjectInClient with pending client actions, call assertClientActions to check and clear first") + } + gvr := schema.GroupVersionResource{Group: group, Version: version, Resource: resource} + var c fakemetadata.MetadataClient + if namespace == "" { + c = ctx.metadataClient.Resource(gvr).(fakemetadata.MetadataClient) + } else { + c = ctx.metadataClient.Resource(gvr).Namespace(namespace).(fakemetadata.MetadataClient) + } + if _, err := c.CreateFake(obj, metav1.CreateOptions{}); err != nil { + ctx.t.Fatal(err) + } + ctx.metadataClient.ClearActions() + }, + } +} + +func deleteObjectFromClient(group, version, resource, namespace, name string) step { + return step{ + name: "deleteObjectFromClient", + check: func(ctx stepContext) { + ctx.t.Helper() + if len(ctx.metadataClient.Actions()) > 0 { + ctx.t.Fatal("cannot call deleteObjectFromClient with pending client actions, call assertClientActions to check and clear first") + } + gvr := schema.GroupVersionResource{Group: group, Version: version, Resource: resource} + var c fakemetadata.MetadataClient + if namespace == "" { + c = ctx.metadataClient.Resource(gvr).(fakemetadata.MetadataClient) + } else { + c = ctx.metadataClient.Resource(gvr).Namespace(namespace).(fakemetadata.MetadataClient) + } + if err := c.Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { + ctx.t.Fatal(err) + } + ctx.metadataClient.ClearActions() + }, + } +} + +type state struct { + events []string + clientActions []string + graphNodes []*node + pendingGraphChanges []*event + pendingAttemptToDelete []*node + pendingAttemptToOrphan []*node + absentOwnerCache []objectReference +} + +func assertState(s state) step { + return step{ + name: "assertState", + check: func(ctx stepContext) { + ctx.t.Helper() + + { + for _, absent := range s.absentOwnerCache { + if !ctx.gc.absentOwnerCache.Has(absent) { + ctx.t.Errorf("expected absent owner %s was not in the absentOwnerCache", 
absent) + } + } + if len(s.absentOwnerCache) != ctx.gc.absentOwnerCache.cache.Len() { + // only way to inspect is to drain them all, but that's ok because we're failing the test anyway + ctx.gc.absentOwnerCache.cache.OnEvicted = func(key lru.Key, item interface{}) { + found := false + for _, absent := range s.absentOwnerCache { + if absent == key { + found = true + break + } + } + if !found { + ctx.t.Errorf("unexpected item in absent owner cache: %s", key) + } + } + ctx.gc.absentOwnerCache.cache.Clear() + ctx.t.Error("unexpected items in absent owner cache") + } + } + + { + var actualEvents []string + // drain sent events + loop: + for { + select { + case event := <-ctx.eventRecorder.Events: + actualEvents = append(actualEvents, event) + default: + break loop + } + } + if !reflect.DeepEqual(actualEvents, s.events) { + ctx.t.Logf("expected:\n%s", strings.Join(s.events, "\n")) + ctx.t.Logf("actual:\n%s", strings.Join(actualEvents, "\n")) + ctx.t.Fatalf("did not get expected events") + } + } + + { + var actualClientActions []string + for _, action := range ctx.metadataClient.Actions() { + s := fmt.Sprintf("%s %s", action.GetVerb(), action.GetResource()) + if action.GetNamespace() != "" { + s += " ns=" + action.GetNamespace() + } + if get, ok := action.(clientgotesting.GetAction); ok && get.GetName() != "" { + s += " name=" + get.GetName() + } + actualClientActions = append(actualClientActions, s) + } + if (len(s.clientActions) > 0 || len(actualClientActions) > 0) && !reflect.DeepEqual(s.clientActions, actualClientActions) { + ctx.t.Logf("expected:\n%s", strings.Join(s.clientActions, "\n")) + ctx.t.Logf("actual:\n%s", strings.Join(actualClientActions, "\n")) + ctx.t.Fatalf("did not get expected client actions") + } + ctx.metadataClient.ClearActions() + } + + { + if l := len(ctx.gc.dependencyGraphBuilder.uidToNode.uidToNode); l != len(s.graphNodes) { + ctx.t.Errorf("expected %d nodes, got %d", len(s.graphNodes), l) + } + for _, n := range s.graphNodes { + graphNode, ok 
:= ctx.gc.dependencyGraphBuilder.uidToNode.Read(n.identity.UID) + if !ok { + ctx.t.Errorf("%s: no node in graph with uid=%s", n.identity.UID, n.identity.UID) + continue + } + if graphNode.identity != n.identity { + ctx.t.Errorf("%s: expected identity %v, got %v", n.identity.UID, n.identity, graphNode.identity) + } + if graphNode.virtual != n.virtual { + ctx.t.Errorf("%s: expected virtual %v, got %v", n.identity.UID, n.virtual, graphNode.virtual) + } + if (len(graphNode.owners) > 0 || len(n.owners) > 0) && !reflect.DeepEqual(graphNode.owners, n.owners) { + expectedJSON, _ := json.Marshal(n.owners) + actualJSON, _ := json.Marshal(graphNode.owners) + ctx.t.Errorf("%s: expected owners %s, got %s", n.identity.UID, expectedJSON, actualJSON) + } + } + } + + { + for i := range s.pendingGraphChanges { + e := s.pendingGraphChanges[i] + if len(ctx.graphChanges.pendingList) < i+1 { + ctx.t.Errorf("graphChanges: expected %d events, got %d", len(s.pendingGraphChanges), ctx.graphChanges.Len()) + break + } + + a := ctx.graphChanges.pendingList[i].(*event) + if !reflect.DeepEqual(e, a) { + objectDiff := "" + if !reflect.DeepEqual(e.obj, a.obj) { + objectDiff = "\nobjectDiff:\n" + cmp.Diff(e.obj, a.obj) + } + oldObjectDiff := "" + if !reflect.DeepEqual(e.oldObj, a.oldObj) { + oldObjectDiff = "\noldObjectDiff:\n" + cmp.Diff(e.oldObj, a.oldObj) + } + ctx.t.Errorf("graphChanges[%d]: expected\n%#v\ngot\n%#v%s%s", i, e, a, objectDiff, oldObjectDiff) + } + } + if ctx.graphChanges.Len() > len(s.pendingGraphChanges) { + for i, a := range ctx.graphChanges.pendingList[len(s.pendingGraphChanges):] { + ctx.t.Errorf("graphChanges[%d]: unexpected event: %v", len(s.pendingGraphChanges)+i, a) + } + } + } + + { + for i := range s.pendingAttemptToDelete { + e := s.pendingAttemptToDelete[i].identity + e_virtual := s.pendingAttemptToDelete[i].virtual + if ctx.attemptToDelete.Len() < i+1 { + ctx.t.Errorf("attemptToDelete: expected %d events, got %d", len(s.pendingAttemptToDelete), 
ctx.attemptToDelete.Len()) + break + } + a := ctx.attemptToDelete.pendingList[i].(*node).identity + a_virtual := ctx.attemptToDelete.pendingList[i].(*node).virtual + if !reflect.DeepEqual(e, a) { + ctx.t.Errorf("attemptToDelete[%d]: expected %v, got %v", i, e, a) + } + if e_virtual != a_virtual { + ctx.t.Errorf("attemptToDelete[%d]: expected virtual node %v, got non-virtual node %v", i, e, a) + } + } + if ctx.attemptToDelete.Len() > len(s.pendingAttemptToDelete) { + for i, a := range ctx.attemptToDelete.pendingList[len(s.pendingAttemptToDelete):] { + ctx.t.Errorf("attemptToDelete[%d]: unexpected node: %v", len(s.pendingAttemptToDelete)+i, a.(*node).identity) + } + } + } + + { + for i := range s.pendingAttemptToOrphan { + e := s.pendingAttemptToOrphan[i].identity + if ctx.attemptToOrphan.Len() < i+1 { + ctx.t.Errorf("attemptToOrphan: expected %d events, got %d", len(s.pendingAttemptToOrphan), ctx.attemptToOrphan.Len()) + break + } + a := ctx.attemptToOrphan.pendingList[i].(*node).identity + if !reflect.DeepEqual(e, a) { + ctx.t.Errorf("attemptToOrphan[%d]: expected %v, got %v", i, e, a) + } + } + if ctx.attemptToOrphan.Len() > len(s.pendingAttemptToOrphan) { + for i, a := range ctx.attemptToOrphan.pendingList[len(s.pendingAttemptToOrphan):] { + ctx.t.Errorf("attemptToOrphan[%d]: unexpected node: %v", len(s.pendingAttemptToOrphan)+i, a.(*node).identity) + } + } + } + }, + } + +} + +// trackingWorkqueue implements RateLimitingInterface, +// allows introspection of the items in the queue, +// and treats AddAfter and AddRateLimited the same as Add +// so they are always synchronous. 
+type trackingWorkqueue struct { + limiter workqueue.RateLimitingInterface + pendingList []interface{} + pendingMap map[interface{}]struct{} +} + +var _ = workqueue.RateLimitingInterface(&trackingWorkqueue{}) + +func newTrackingWorkqueue() *trackingWorkqueue { + return &trackingWorkqueue{ + limiter: workqueue.NewRateLimitingQueue(&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Inf, 100)}), + pendingMap: map[interface{}]struct{}{}, + } +} + +func (t *trackingWorkqueue) Add(item interface{}) { + t.queue(item) + t.limiter.Add(item) +} +func (t *trackingWorkqueue) AddAfter(item interface{}, duration time.Duration) { + t.Add(item) +} +func (t *trackingWorkqueue) AddRateLimited(item interface{}) { + t.Add(item) +} +func (t *trackingWorkqueue) Get() (interface{}, bool) { + item, shutdown := t.limiter.Get() + t.dequeue(item) + return item, shutdown +} +func (t *trackingWorkqueue) Done(item interface{}) { + t.limiter.Done(item) +} +func (t *trackingWorkqueue) Forget(item interface{}) { + t.limiter.Forget(item) +} +func (t *trackingWorkqueue) NumRequeues(item interface{}) int { + return 0 +} +func (t *trackingWorkqueue) Len() int { + if e, a := len(t.pendingList), len(t.pendingMap); e != a { + panic(fmt.Errorf("pendingList != pendingMap: %d / %d", e, a)) + } + if e, a := len(t.pendingList), t.limiter.Len(); e != a { + panic(fmt.Errorf("pendingList != limiter.Len(): %d / %d", e, a)) + } + return len(t.pendingList) +} +func (t *trackingWorkqueue) ShutDown() { + t.limiter.ShutDown() +} +func (t *trackingWorkqueue) ShuttingDown() bool { + return t.limiter.ShuttingDown() +} + +func (t *trackingWorkqueue) queue(item interface{}) { + if _, queued := t.pendingMap[item]; queued { + // fmt.Printf("already queued: %#v\n", item) + return + } + t.pendingMap[item] = struct{}{} + t.pendingList = append(t.pendingList, item) +} +func (t *trackingWorkqueue) dequeue(item interface{}) { + if _, queued := t.pendingMap[item]; !queued { + // fmt.Printf("not queued: %#v\n", item) + 
return + } + delete(t.pendingMap, item) + newPendingList := []interface{}{} + for _, p := range t.pendingList { + if p == item { + continue + } + newPendingList = append(newPendingList, p) + } + t.pendingList = newPendingList +} diff --git a/pkg/controller/garbagecollector/graph_builder_test.go b/pkg/controller/garbagecollector/graph_builder_test.go new file mode 100644 index 00000000000..d52d223500b --- /dev/null +++ b/pkg/controller/garbagecollector/graph_builder_test.go @@ -0,0 +1,212 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/

package garbagecollector

import (
	"reflect"
	"testing"
)

// TestGetAlternateOwnerIdentity verifies how the graph builder picks an
// alternate owner identity to probe once one coordinate combination for an
// owner UID has been verified absent: alternates are tried in sorted order,
// starting with the entry after the verified-absent reference, and
// namespace handling differs for cluster-scoped vs namespaced parents.
func TestGetAlternateOwnerIdentity(t *testing.T) {
	// Dependent (child) identities in two namespaces and at cluster scope.
	ns1child1 := makeID("v1", "Child", "ns1", "child1", "childuid11")
	ns1child2 := makeID("v1", "Child", "ns1", "child2", "childuid12")

	ns2child1 := makeID("v1", "Child", "ns2", "child1", "childuid21")

	clusterchild1 := makeID("v1", "Child", "", "child1", "childuidc1")

	// Absent-parent references. All share the same UID ("parentuid") but
	// differ in exactly one coordinate (version, kind, namespace, or name)
	// so each case isolates one kind of coordinate mismatch.
	var (
		nsabsentparentns1 = makeID("v1", "NSParent", "ns1", "parentname", "parentuid")
		nsabsentparentns2 = makeID("v1", "NSParent", "ns2", "parentname", "parentuid")

		nsabsentparent_version = makeID("xx", "NSParent", "ns1", "parentname", "parentuid")
		nsabsentparent_kind    = makeID("v1", "xxxxxxxx", "ns1", "parentname", "parentuid")
		nsabsentparent_name    = makeID("v1", "NSParent", "ns1", "xxxxxxxxxx", "parentuid")

		clusterabsentparent         = makeID("v1", "ClusterParent", "", "parentname", "parentuid")
		clusterabsentparent_version = makeID("xx", "ClusterParent", "", "parentname", "parentuid")
		clusterabsentparent_kind    = makeID("v1", "xxxxxxxxxxxxx", "", "parentname", "parentuid")
		clusterabsentparent_name    = makeID("v1", "ClusterParent", "", "xxxxxxxxxx", "parentuid")

		// Cluster-parent references as seen namespaced to a dependent's
		// namespace (used when scope of the alternate coordinates is unknown).
		clusterabsentparent_ns1_version = makeID("xx", "ClusterParent", "ns1", "parentname", "parentuid")
		clusterabsentparent_ns1_kind    = makeID("v1", "xxxxxxxxxxxxx", "ns1", "parentname", "parentuid")
	)

	// References sharing a UID that differ only by version; their literal
	// order here matches the sorted order the implementation iterates in.
	orderedNamespacedReferences := []objectReference{
		makeID("v1", "kind", "ns1", "name", "uid"),
		makeID("v2", "kind", "ns1", "name", "uid"),
		makeID("v3", "kind", "ns1", "name", "uid"),
		makeID("v4", "kind", "ns1", "name", "uid"),
		makeID("v5", "kind", "ns1", "name", "uid"),
	}
	orderedClusterReferences := []objectReference{
		makeID("v1", "kind", "", "name", "uid"),
		makeID("v2", "kind", "", "name", "uid"),
		makeID("v3", "kind", "", "name", "uid"),
		makeID("v4", "kind", "", "name", "uid"),
		makeID("v5", "kind", "", "name", "uid"),
	}

	testcases := []struct {
		name string
		// deps are the dependent nodes whose ownerReferences supply
		// candidate alternate identities.
		deps []*node
		// verifiedAbsent is the owner identity already confirmed missing.
		verifiedAbsent objectReference
		// expectedAlternate is the next identity to try, or nil if none.
		expectedAlternate *objectReference
	}{
		{
			name: "namespaced alternate version",
			deps: []*node{
				makeNode(ns1child1, withOwners(nsabsentparentns1)),
				makeNode(ns1child2, withOwners(nsabsentparent_version)),
			},
			verifiedAbsent:    nsabsentparentns1,
			expectedAlternate: &nsabsentparent_version, // switch to alternate version
		},
		{
			name: "namespaced alternate kind",
			deps: []*node{
				makeNode(ns1child1, withOwners(nsabsentparentns1)),
				makeNode(ns1child2, withOwners(nsabsentparent_kind)),
			},
			verifiedAbsent:    nsabsentparentns1,
			expectedAlternate: &nsabsentparent_kind, // switch to alternate kind
		},
		{
			name: "namespaced alternate namespace",
			deps: []*node{
				makeNode(ns1child1, withOwners(nsabsentparentns1)),
				makeNode(ns2child1, withOwners(nsabsentparentns2)),
			},
			verifiedAbsent:    nsabsentparentns1,
			expectedAlternate: &nsabsentparentns2, // switch to alternate namespace
		},
		{
			name: "namespaced alternate name",
			deps: []*node{
				makeNode(ns1child1, withOwners(nsabsentparentns1)),
				makeNode(ns1child1, withOwners(nsabsentparent_name)),
			},
			verifiedAbsent:    nsabsentparentns1,
			expectedAlternate: &nsabsentparent_name, // switch to alternate name
		},

		{
			name: "cluster alternate version",
			deps: []*node{
				makeNode(ns1child1, withOwners(clusterabsentparent)),
				makeNode(ns1child2, withOwners(clusterabsentparent_version)),
			},
			verifiedAbsent:    clusterabsentparent,
			expectedAlternate: &clusterabsentparent_ns1_version, // switch to alternate version, namespaced to new dependent since we don't know the version is cluster-scoped
		},
		{
			name: "cluster alternate kind",
			deps: []*node{
				makeNode(ns1child1, withOwners(clusterabsentparent)),
				makeNode(ns1child2, withOwners(clusterabsentparent_kind)),
			},
			verifiedAbsent:    clusterabsentparent,
			expectedAlternate: &clusterabsentparent_ns1_kind, // switch to alternate kind, namespaced to new dependent since we don't know the new kind is cluster-scoped
		},
		{
			name: "cluster alternate namespace",
			deps: []*node{
				makeNode(ns1child1, withOwners(clusterabsentparent)),
				makeNode(ns2child1, withOwners(clusterabsentparent)),
			},
			verifiedAbsent:    clusterabsentparent,
			expectedAlternate: nil, // apiVersion/kind verified cluster-scoped, namespace delta ignored, no alternates found
		},
		{
			name: "cluster alternate name",
			deps: []*node{
				makeNode(ns1child1, withOwners(clusterabsentparent)),
				makeNode(ns1child1, withOwners(clusterabsentparent_name)),
			},
			verifiedAbsent:    clusterabsentparent,
			expectedAlternate: &clusterabsentparent_name, // switch to alternate name, apiVersion/kind verified cluster-scoped, namespace dropped
		},

		// The next four cases exercise the cursor behavior: iteration
		// resumes after the verified-absent entry and wraps to the start.
		{
			name: "namespaced ref from namespaced child returns first if absent is sorted last",
			deps: []*node{
				makeNode(ns1child1, withOwners(orderedNamespacedReferences...)),
			},
			verifiedAbsent:    orderedNamespacedReferences[len(orderedNamespacedReferences)-1],
			expectedAlternate: &orderedNamespacedReferences[0],
		},
		{
			name: "namespaced ref from namespaced child returns next after absent",
			deps: []*node{
				makeNode(ns1child1, withOwners(orderedNamespacedReferences...)),
			},
			verifiedAbsent:    orderedNamespacedReferences[len(orderedNamespacedReferences)-2],
			expectedAlternate: &orderedNamespacedReferences[len(orderedNamespacedReferences)-1],
		},

		{
			name: "cluster ref from cluster child returns first if absent is sorted last",
			deps: []*node{
				makeNode(clusterchild1, withOwners(orderedClusterReferences...)),
			},
			verifiedAbsent:    orderedClusterReferences[len(orderedClusterReferences)-1],
			expectedAlternate: &orderedClusterReferences[0],
		},
		{
			name: "cluster ref from cluster child returns next after absent",
			deps: []*node{
				makeNode(clusterchild1, withOwners(orderedClusterReferences...)),
			},
			verifiedAbsent:    orderedClusterReferences[len(orderedClusterReferences)-2],
			expectedAlternate: &orderedClusterReferences[len(orderedClusterReferences)-1],
		},

		{
			// An owner reference with a different UID is not a candidate.
			name: "ignore unrelated",
			deps: []*node{
				makeNode(ns1child1, withOwners(clusterabsentparent, makeID("v1", "Parent", "ns1", "name", "anotheruid"))),
			},
			verifiedAbsent:    clusterabsentparent,
			expectedAlternate: nil,
		},
		{
			// References identical to the verified-absent one are skipped.
			name: "ignore matches",
			deps: []*node{
				makeNode(ns1child1, withOwners(clusterabsentparent, clusterabsentparent)),
			},
			verifiedAbsent:    clusterabsentparent,
			expectedAlternate: nil,
		},
		{
			// Duplicate candidate references collapse to a single alternate.
			name: "collapse duplicates",
			deps: []*node{
				makeNode(clusterchild1, withOwners(clusterabsentparent, clusterabsentparent_kind, clusterabsentparent_kind)),
			},
			verifiedAbsent:    clusterabsentparent,
			expectedAlternate: &clusterabsentparent_kind,
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			alternate := getAlternateOwnerIdentity(tc.deps, tc.verifiedAbsent)
			if !reflect.DeepEqual(alternate, tc.expectedAlternate) {
				t.Errorf("expected\n%#v\ngot\n%#v", tc.expectedAlternate, alternate)
			}
		})
	}
}
type FakeRecorder struct { Events chan string + + IncludeObject bool +} + +func objectString(object runtime.Object, includeObject bool) string { + if !includeObject { + return "" + } + return fmt.Sprintf(" involvedObject{kind=%s,apiVersion=%s}", + object.GetObjectKind().GroupVersionKind().Kind, + object.GetObjectKind().GroupVersionKind().GroupVersion(), + ) } func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { if f.Events != nil { - f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message) + f.Events <- fmt.Sprintf("%s %s %s%s", eventtype, reason, message, objectString(object, f.IncludeObject)) } } func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { if f.Events != nil { - f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + objectString(object, f.IncludeObject) } }