Merge pull request #78742 from smarterclayton/client_gc

The garbage collector and quota counter should use the metadata client and protobuf to access resources
Kubernetes Prow Robot 2019-07-11 14:33:33 -07:00 committed by GitHub
commit 91b26341f4
19 changed files with 114 additions and 95 deletions
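In broad strokes, the PR swaps the garbage collector's and resource quota controller's dynamic (unstructured) clients for client-go's metadata client, which fetches only ObjectMeta and negotiates protobuf on the wire. A minimal sketch of the new access pattern, assuming a kubeconfig-derived rest.Config (the kubeconfig path, namespace, and deployment name are illustrative):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/metadata"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // illustrative path
	if err != nil {
		panic(err)
	}
	// NewForConfig routes through ConfigFor (see the metadata package
	// hunk below), which prefers protobuf-encoded responses over JSON.
	client, err := metadata.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	obj, err := client.Resource(gvr).Namespace("default").Get("my-deployment", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	// obj is a *metav1.PartialObjectMetadata: labels, ownerReferences,
	// finalizers and the like, with no spec or status payload.
	fmt.Println(obj.GetUID(), obj.GetFinalizers())
}

The payoff for these two controllers is that full objects never cross the wire or sit in informer caches; PartialObjectMetadata is enough for ownership and quota bookkeeping.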


@ -121,10 +121,10 @@ go_library(
"//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/dynamic/dynamicinformer:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/metadata:go_default_library",
"//staging/src/k8s.io/client-go/metadata/metadatainformer:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",


@ -31,7 +31,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
@ -43,10 +43,10 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/apiserver/pkg/util/term"
cacheddiscovery "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/metadata"
"k8s.io/client-go/metadata/metadatainformer"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/leaderelection"
@ -239,7 +239,7 @@ func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error {
}
controllerContext.InformerFactory.Start(controllerContext.Stop)
controllerContext.GenericInformerFactory.Start(controllerContext.Stop)
controllerContext.ObjectOrMetadataInformerFactory.Start(controllerContext.Stop)
close(controllerContext.InformersStarted)
select {}
@ -295,9 +295,11 @@ type ControllerContext struct {
// InformerFactory gives access to informers for the controller.
InformerFactory informers.SharedInformerFactory
// GenericInformerFactory gives access to informers for typed resources
// and dynamic resources.
GenericInformerFactory controller.InformerFactory
// ObjectOrMetadataInformerFactory gives access to informers for typed resources
// and dynamic resources by their metadata. All generic controllers currently use
// object metadata - if a future controller needs access to the full object this
// would become GenericInformerFactory and take a dynamic client.
ObjectOrMetadataInformerFactory controller.InformerFactory
// ComponentConfig provides access to init options for a given controller
ComponentConfig kubectrlmgrconfig.KubeControllerManagerConfiguration
@ -448,8 +450,8 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien
versionedClient := rootClientBuilder.ClientOrDie("shared-informers")
sharedInformers := informers.NewSharedInformerFactory(versionedClient, ResyncPeriod(s)())
dynamicClient := dynamic.NewForConfigOrDie(rootClientBuilder.ConfigOrDie("dynamic-informers"))
dynamicInformers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, ResyncPeriod(s)())
metadataClient := metadata.NewForConfigOrDie(rootClientBuilder.ConfigOrDie("metadata-informers"))
metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, ResyncPeriod(s)())
// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
@ -477,17 +479,17 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien
}
ctx := ControllerContext{
ClientBuilder: clientBuilder,
InformerFactory: sharedInformers,
GenericInformerFactory: controller.NewInformerFactory(sharedInformers, dynamicInformers),
ComponentConfig: s.ComponentConfig,
RESTMapper: restMapper,
AvailableResources: availableResources,
Cloud: cloud,
LoopMode: loopMode,
Stop: stop,
InformersStarted: make(chan struct{}),
ResyncPeriod: ResyncPeriod(s),
ClientBuilder: clientBuilder,
InformerFactory: sharedInformers,
ObjectOrMetadataInformerFactory: controller.NewInformerFactory(sharedInformers, metadataInformers),
ComponentConfig: s.ComponentConfig,
RESTMapper: restMapper,
AvailableResources: availableResources,
Cloud: cloud,
LoopMode: loopMode,
Stop: stop,
InformersStarted: make(chan struct{}),
ResyncPeriod: ResyncPeriod(s),
}
return ctx, nil
}
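A hedged sketch of how a consumer sees the renamed ObjectOrMetadataInformerFactory: ForResource serves typed informers where they exist and metadata-only informers otherwise, so handlers for non-core resources receive *metav1.PartialObjectMetadata. The function name and GVR below are illustrative:

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/controller"
)

// watchWidgets is a hypothetical consumer of the combined factory.
func watchWidgets(f controller.InformerFactory) error {
	gvr := schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "widgets"}
	informer, err := f.ForResource(gvr)
	if err != nil {
		return err
	}
	informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			// Non-core resources arrive as metadata-only objects.
			if m, ok := obj.(*metav1.PartialObjectMetadata); ok {
				fmt.Printf("observed %s/%s\n", m.GetNamespace(), m.GetName())
			}
		},
	})
	return nil
}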


@ -33,7 +33,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
utilfeature "k8s.io/apiserver/pkg/util/feature"
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/metadata"
restclient "k8s.io/client-go/rest"
@ -334,7 +333,7 @@ func startResourceQuotaController(ctx ControllerContext) (http.Handler, bool, er
QuotaClient: resourceQuotaControllerClient.CoreV1(),
ResourceQuotaInformer: ctx.InformerFactory.Core().V1().ResourceQuotas(),
ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.ComponentConfig.ResourceQuotaController.ResourceQuotaSyncPeriod.Duration),
InformerFactory: ctx.GenericInformerFactory,
InformerFactory: ctx.ObjectOrMetadataInformerFactory,
ReplenishmentResyncPeriod: ctx.ResyncPeriod,
DiscoveryFunc: discoveryFunc,
IgnoredResourcesFunc: quotaConfiguration.IgnoredResources,
@ -423,7 +422,7 @@ func startGarbageCollectorController(ctx ControllerContext) (http.Handler, bool,
discoveryClient := cacheddiscovery.NewMemCacheClient(gcClientset.Discovery())
config := ctx.ClientBuilder.ConfigOrDie("generic-garbage-collector")
dynamicClient, err := dynamic.NewForConfig(config)
metadataClient, err := metadata.NewForConfig(config)
if err != nil {
return nil, true, err
}
@ -435,11 +434,11 @@ func startGarbageCollectorController(ctx ControllerContext) (http.Handler, bool,
ignoredResources[schema.GroupResource{Group: r.Group, Resource: r.Resource}] = struct{}{}
}
garbageCollector, err := garbagecollector.NewGarbageCollector(
dynamicClient,
metadataClient,
ctx.RESTMapper,
deletableResources,
ignoredResources,
ctx.GenericInformerFactory,
ctx.ObjectOrMetadataInformerFactory,
ctx.InformersStarted,
)
if err != nil {
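The hunk above is truncated at the error check; condensed, the startup path now reads roughly as below. The worker count and the Run call come from the surrounding controller code and are reproduced only to complete the picture (the ConcurrentGCSyncs field name is taken from the controller-manager configuration as an assumption):

config := ctx.ClientBuilder.ConfigOrDie("generic-garbage-collector")
metadataClient, err := metadata.NewForConfig(config)
if err != nil {
	return nil, true, err
}
garbageCollector, err := garbagecollector.NewGarbageCollector(
	metadataClient,
	ctx.RESTMapper,
	deletableResources,
	ignoredResources,
	ctx.ObjectOrMetadataInformerFactory,
	ctx.InformersStarted,
)
if err != nil {
	return nil, true, err
}
workers := int(ctx.ComponentConfig.GarbageCollectorController.ConcurrentGCSyncs)
go garbageCollector.Run(workers, ctx.Stop)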


@ -123,10 +123,10 @@ func TestController_DiscoveryError(t *testing.T) {
testClientBuilder := TestClientBuilder{clientset: testClientset}
testInformerFactory := informers.NewSharedInformerFactoryWithOptions(testClientset, time.Duration(1))
ctx := ControllerContext{
ClientBuilder: testClientBuilder,
InformerFactory: testInformerFactory,
GenericInformerFactory: testInformerFactory,
InformersStarted: make(chan struct{}),
ClientBuilder: testClientBuilder,
InformerFactory: testInformerFactory,
ObjectOrMetadataInformerFactory: testInformerFactory,
InformersStarted: make(chan struct{}),
}
for funcName, controllerInit := range controllerInitFuncMap {
_, _, err := controllerInit(ctx)


@ -165,7 +165,7 @@
"k8s.io/client-go/util/retry",
"k8s.io/client-go/util/workqueue",
"k8s.io/client-go/util/testing",
"k8s.io/client-go/transport"
"k8s.io/client-go/transport"
]
},
{


@ -80,11 +80,11 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/client-go/dynamic/dynamicinformer:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/metadata/metadatainformer:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",


@ -25,7 +25,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@ -34,8 +33,8 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/metadata:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
@ -69,11 +68,11 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/dynamic/dynamicinformer:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/metadata:go_default_library",
"//staging/src/k8s.io/client-go/metadata/metadatainformer:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",


@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
@ -35,9 +34,10 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/metadata"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"
// import known versions
_ "k8s.io/client-go/kubernetes"
)
@ -56,8 +56,8 @@ const ResourceResyncTime time.Duration = 0
// ensures that the garbage collector operates with a graph that is at least as
// up to date as the notification is sent.
type GarbageCollector struct {
restMapper resettableRESTMapper
dynamicClient dynamic.Interface
restMapper resettableRESTMapper
metadataClient metadata.Interface
// garbage collector attempts to delete the items in attemptToDelete queue when the time is ripe.
attemptToDelete workqueue.RateLimitingInterface
// garbage collector attempts to orphan the dependents of the items in the attemptToOrphan queue, then deletes the items.
@ -71,7 +71,7 @@ type GarbageCollector struct {
}
func NewGarbageCollector(
dynamicClient dynamic.Interface,
metadataClient metadata.Interface,
mapper resettableRESTMapper,
deletableResources map[schema.GroupVersionResource]struct{},
ignoredResources map[schema.GroupResource]struct{},
@ -82,13 +82,14 @@ func NewGarbageCollector(
attemptToOrphan := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_orphan")
absentOwnerCache := NewUIDCache(500)
gc := &GarbageCollector{
dynamicClient: dynamicClient,
metadataClient: metadataClient,
restMapper: mapper,
attemptToDelete: attemptToDelete,
attemptToOrphan: attemptToOrphan,
absentOwnerCache: absentOwnerCache,
}
gb := &GraphBuilder{
metadataClient: metadataClient,
informersStarted: informersStarted,
restMapper: mapper,
graphChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_graph_changes"),
@ -323,7 +324,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
// If isDangling looks up the referenced object at the API server, it also
// returns its latest state.
func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *node) (
dangling bool, owner *unstructured.Unstructured, err error) {
dangling bool, owner *metav1.PartialObjectMetadata, err error) {
if gc.absentOwnerCache.Has(reference.UID) {
klog.V(5).Infof("according to the absentOwnerCache, object %s's owner %s/%s, %s does not exist", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
return true, nil, nil
@ -342,7 +343,7 @@ func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *no
// TODO: It's only necessary to talk to the API server if the owner node
// is a "virtual" node. The local graph could lag behind the real
// status, but in practice, the difference is small.
owner, err = gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.identity.Namespace)).Get(reference.Name, metav1.GetOptions{})
owner, err = gc.metadataClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.identity.Namespace)).Get(reference.Name, metav1.GetOptions{})
switch {
case errors.IsNotFound(err):
gc.absentOwnerCache.Add(reference.UID)
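Isolated from the controller, the contract of the new metadata lookup: a GET either confirms the owner, returns a same-name object with a different UID (deleted and recreated, so the reference still dangles), or returns NotFound. A standalone sketch; the helper name is illustrative and not part of this PR:

import (
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/metadata"
)

// ownerExists reports whether an owner reference still points at a live object.
func ownerExists(client metadata.Interface, gvr schema.GroupVersionResource, ns string, ref metav1.OwnerReference) (bool, error) {
	owner, err := client.Resource(gvr).Namespace(ns).Get(ref.Name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		return false, nil // the owner is gone; the reference dangles
	}
	if err != nil {
		return false, err
	}
	// A matching name with a different UID is still a dangling reference.
	return owner.GetUID() == ref.UID, nil
}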


@ -30,7 +30,7 @@ import (
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -40,11 +40,11 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/metadata"
"k8s.io/client-go/metadata/metadatainformer"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/api/legacyscheme"
@ -61,7 +61,7 @@ func TestGarbageCollectorConstruction(t *testing.T) {
config := &restclient.Config{}
tweakableRM := meta.NewDefaultRESTMapper(nil)
rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
dynamicClient, err := dynamic.NewForConfig(config)
metadataClient, err := metadata.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
@ -76,13 +76,13 @@ func TestGarbageCollectorConstruction(t *testing.T) {
client := fake.NewSimpleClientset()
sharedInformers := informers.NewSharedInformerFactory(client, 0)
dynamicInformers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 0)
metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
// No monitor will be constructed for the non-core resource, but the GC
// construction will not fail.
alwaysStarted := make(chan struct{})
close(alwaysStarted)
gc, err := NewGarbageCollector(dynamicClient, rm, twoResources, map[schema.GroupResource]struct{}{},
controller.NewInformerFactory(sharedInformers, dynamicInformers), alwaysStarted)
gc, err := NewGarbageCollector(metadataClient, rm, twoResources, map[schema.GroupResource]struct{}{},
controller.NewInformerFactory(sharedInformers, metadataInformers), alwaysStarted)
if err != nil {
t.Fatal(err)
}
@ -156,7 +156,7 @@ func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *htt
fakeResponse, ok := f.response[request.Method+request.URL.Path]
if !ok {
fakeResponse.statusCode = 200
fakeResponse.content = []byte("{\"kind\": \"List\"}")
fakeResponse.content = []byte(`{"apiVersion": "v1", "kind": "List"}`)
}
response.Header().Set("Content-Type", "application/json")
response.WriteHeader(fakeResponse.statusCode)
@ -193,7 +193,7 @@ type garbageCollector struct {
}
func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
dynamicClient, err := dynamic.NewForConfig(config)
metadataClient, err := metadata.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
@ -203,7 +203,7 @@ func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
sharedInformers := informers.NewSharedInformerFactory(client, 0)
alwaysStarted := make(chan struct{})
close(alwaysStarted)
gc, err := NewGarbageCollector(dynamicClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, podResource, ignoredResources, sharedInformers, alwaysStarted)
gc, err := NewGarbageCollector(metadataClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, podResource, ignoredResources, sharedInformers, alwaysStarted)
if err != nil {
t.Fatal(err)
}
@ -221,6 +221,7 @@ func getPod(podName string, ownerReferences []metav1.OwnerReference) *v1.Pod {
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: "ns1",
UID: "456",
OwnerReferences: ownerReferences,
},
}
@ -811,7 +812,7 @@ func TestGarbageCollectorSync(t *testing.T) {
}
rm := &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}
dynamicClient, err := dynamic.NewForConfig(clientConfig)
metadataClient, err := metadata.NewForConfig(clientConfig)
if err != nil {
t.Fatal(err)
}
@ -822,7 +823,7 @@ func TestGarbageCollectorSync(t *testing.T) {
sharedInformers := informers.NewSharedInformerFactory(client, 0)
alwaysStarted := make(chan struct{})
close(alwaysStarted)
gc, err := NewGarbageCollector(dynamicClient, rm, podResource, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
gc, err := NewGarbageCollector(metadataClient, rm, podResource, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
if err != nil {
t.Fatal(err)
}
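The fake-response tweak above (adding apiVersion to the default list body) reflects stricter decoding on the metadata path, which resolves types through a scheme rather than treating everything as unstructured. A self-contained sketch against a stub server, in the spirit of setupGC; the handler body and the exact meta.k8s.io version are illustrative and may vary by client-go release:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/metadata"
	restclient "k8s.io/client-go/rest"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		// Roughly what an apiserver returns when asked, via the Accept
		// header, to transform an object into PartialObjectMetadata.
		fmt.Fprint(w, `{"apiVersion": "meta.k8s.io/v1", "kind": "PartialObjectMetadata", "metadata": {"name": "pod1", "namespace": "ns1", "uid": "456"}}`)
	}))
	defer srv.Close()

	client, err := metadata.NewForConfig(&restclient.Config{Host: srv.URL})
	if err != nil {
		panic(err)
	}
	pods := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	m, err := client.Resource(pods).Namespace("ns1").Get("pod1", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(m.GetName(), m.GetUID())
}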


@ -31,6 +31,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/metadata"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"
@ -88,6 +89,7 @@ type GraphBuilder struct {
// it is protected by monitorLock.
running bool
metadataClient metadata.Interface
// monitors are the producer of the graphChanges queue, graphBuilder alters
// the in-memory graph according to the changes.
graphChanges workqueue.RateLimitingInterface
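For orientation: monitors are per-resource informers whose handlers enqueue changes for the graph builder. A generic sketch of that shape; the event struct and both function names are hypothetical, not this package's actual types:

import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

// graphEvent is a hypothetical stand-in for the package's internal event type.
type graphEvent struct {
	obj interface{}
}

// wireMonitor shows how an informer typically feeds a graphChanges-style queue.
func wireMonitor(inf cache.SharedIndexInformer, graphChanges workqueue.RateLimitingInterface) {
	inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { graphChanges.Add(&graphEvent{obj: obj}) },
		UpdateFunc: func(old, new interface{}) { graphChanges.Add(&graphEvent{obj: new}) },
		DeleteFunc: func(obj interface{}) { graphChanges.Add(&graphEvent{obj: obj}) },
	})
}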


@ -23,7 +23,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
@ -57,23 +56,23 @@ func (gc *GarbageCollector) deleteObject(item objectReference, policy *metav1.De
uid := item.UID
preconditions := metav1.Preconditions{UID: &uid}
deleteOptions := metav1.DeleteOptions{Preconditions: &preconditions, PropagationPolicy: policy}
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Delete(item.Name, &deleteOptions)
return gc.metadataClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Delete(item.Name, &deleteOptions)
}
func (gc *GarbageCollector) getObject(item objectReference) (*unstructured.Unstructured, error) {
func (gc *GarbageCollector) getObject(item objectReference) (*metav1.PartialObjectMetadata, error) {
resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
if err != nil {
return nil, err
}
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Get(item.Name, metav1.GetOptions{})
return gc.metadataClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Get(item.Name, metav1.GetOptions{})
}
func (gc *GarbageCollector) patchObject(item objectReference, patch []byte, pt types.PatchType) (*unstructured.Unstructured, error) {
func (gc *GarbageCollector) patchObject(item objectReference, patch []byte, pt types.PatchType) (*metav1.PartialObjectMetadata, error) {
resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
if err != nil {
return nil, err
}
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Patch(item.Name, pt, patch, metav1.PatchOptions{})
return gc.metadataClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Patch(item.Name, pt, patch, metav1.PatchOptions{})
}
func (gc *GarbageCollector) removeFinalizer(owner *node, targetFinalizer string) error {
@ -105,10 +104,10 @@ func (gc *GarbageCollector) removeFinalizer(owner *node, targetFinalizer string)
}
// remove the owner from dependent's OwnerReferences
patch, err := json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
"resourceVersion": accessor.GetResourceVersion(),
"finalizers": newFinalizers,
patch, err := json.Marshal(&objectForFinalizersPatch{
ObjectMetaForFinalizersPatch: ObjectMetaForFinalizersPatch{
ResourceVersion: accessor.GetResourceVersion(),
Finalizers: newFinalizers,
},
})
if err != nil {
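The UID precondition is what keeps metadata-only deletion safe: if the object was deleted and recreated under the same name, the apiserver rejects the stale delete rather than removing the new object. The same call as deleteObject above, extracted as a standalone sketch with an illustrative helper name:

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/metadata"
)

// deleteExact deletes name only while its UID still matches.
func deleteExact(client metadata.Interface, gvr schema.GroupVersionResource, ns, name string, uid types.UID, policy metav1.DeletionPropagation) error {
	opts := metav1.DeleteOptions{
		Preconditions:     &metav1.Preconditions{UID: &uid},
		PropagationPolicy: &policy,
	}
	return client.Resource(gvr).Namespace(ns).Delete(name, &opts)
}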


@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
@ -51,7 +50,7 @@ func (gc *GarbageCollector) getMetadata(apiVersion, kind, namespace, name string
m, ok := gc.dependencyGraphBuilder.monitors[apiResource]
if !ok || m == nil {
// If local cache doesn't exist for mapping.Resource, send a GET request to API server
return gc.dynamicClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
return gc.metadataClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
}
key := name
if len(namespace) != 0 {
@ -63,7 +62,7 @@ func (gc *GarbageCollector) getMetadata(apiVersion, kind, namespace, name string
}
if !exist {
// If local cache doesn't contain the object, send a GET request to API server
return gc.dynamicClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
return gc.metadataClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
}
obj, ok := raw.(runtime.Object)
if !ok {
@ -72,6 +71,15 @@ func (gc *GarbageCollector) getMetadata(apiVersion, kind, namespace, name string
return meta.Accessor(obj)
}
type objectForFinalizersPatch struct {
ObjectMetaForFinalizersPatch `json:"metadata"`
}
type ObjectMetaForFinalizersPatch struct {
ResourceVersion string `json:"resourceVersion"`
Finalizers []string `json:"finalizers"`
}
type objectForPatch struct {
ObjectMetaForPatch `json:"metadata"`
}
@ -87,7 +95,7 @@ type jsonMergePatchFunc func(*node) ([]byte, error)
// patch tries strategic merge patch on item first, and if SMP is not supported, it fallbacks to JSON merge
// patch.
func (gc *GarbageCollector) patch(item *node, smp []byte, jmp jsonMergePatchFunc) (*unstructured.Unstructured, error) {
func (gc *GarbageCollector) patch(item *node, smp []byte, jmp jsonMergePatchFunc) (*metav1.PartialObjectMetadata, error) {
smpResult, err := gc.patchObject(item.identity, smp, types.StrategicMergePatchType)
if err == nil {
return smpResult, nil
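The typed patch structs defined in this file replace an equivalent map[string]interface{} literal and marshal to the same wire format; carrying resourceVersion in the patch makes the server reject it if the object has changed in the meantime. A sketch with illustrative values (assumes encoding/json):

// Marshals to {"metadata":{"resourceVersion":"42","finalizers":["example.com/protect"]}}.
patch, err := json.Marshal(&objectForFinalizersPatch{
	ObjectMetaForFinalizersPatch: ObjectMetaForFinalizersPatch{
		ResourceVersion: "42",
		Finalizers:      []string{"example.com/protect"},
	},
})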


@ -18,8 +18,8 @@ package controller
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"
"k8s.io/client-go/metadata/metadatainformer"
)
// InformerFactory creates informers for each group version resource.
@ -29,28 +29,28 @@ type InformerFactory interface {
}
type informerFactory struct {
typedInformerFactory informers.SharedInformerFactory
dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory
typedInformerFactory informers.SharedInformerFactory
metadataInformerFactory metadatainformer.SharedInformerFactory
}
func (i *informerFactory) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) {
informer, err := i.typedInformerFactory.ForResource(resource)
if err != nil {
return i.dynamicInformerFactory.ForResource(resource), nil
return i.metadataInformerFactory.ForResource(resource), nil
}
return informer, nil
}
func (i *informerFactory) Start(stopCh <-chan struct{}) {
i.typedInformerFactory.Start(stopCh)
i.dynamicInformerFactory.Start(stopCh)
i.metadataInformerFactory.Start(stopCh)
}
// NewInformerFactory creates a new InformerFactory which works with both typed
// resources and dynamic resources
func NewInformerFactory(typedInformerFactory informers.SharedInformerFactory, dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory) InformerFactory {
// resources and metadata-only resources
func NewInformerFactory(typedInformerFactory informers.SharedInformerFactory, metadataInformerFactory metadatainformer.SharedInformerFactory) InformerFactory {
return &informerFactory{
typedInformerFactory: typedInformerFactory,
dynamicInformerFactory: dynamicInformerFactory,
typedInformerFactory: typedInformerFactory,
metadataInformerFactory: metadataInformerFactory,
}
}
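In use, the fallback means callers never branch on resource family. A fragment assuming the typed and metadata factories from the earlier hunks; both GVRs are illustrative:

factory := controller.NewInformerFactory(sharedInformers, metadataInformers)

// Core resources resolve through the typed factory.
podInformer, err := factory.ForResource(schema.GroupVersionResource{Version: "v1", Resource: "pods"})
if err != nil {
	return err
}
// A resource the typed factory does not know, e.g. a CRD, silently
// falls back to a metadata-only informer.
crdInformer, err := factory.ForResource(schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "widgets"})
if err != nil {
	return err
}
_, _ = podInformer, crdInformer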


@ -70,9 +70,9 @@ func ConfigFor(inConfig *rest.Config) *rest.Config {
return config
}
// NewConfigOrDie creates a new metadata client for the given config and
// NewForConfigOrDie creates a new metadata client for the given config and
// panics if there is an error in the config.
func NewConfigOrDie(c *rest.Config) Interface {
func NewForConfigOrDie(c *rest.Config) Interface {
ret, err := NewForConfig(c)
if err != nil {
panic(err)
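A pure rename: the metadata package adopts the NewForConfigOrDie spelling used by the other client-go constructors (kubernetes.NewForConfigOrDie, dynamic.NewForConfigOrDie). Sketched usage; ConfigFor copies the config and applies metadata-friendly defaults, including protobuf content negotiation:

cfg := metadata.ConfigFor(restConfig)     // restConfig: any valid *rest.Config
client := metadata.NewForConfigOrDie(cfg) // panics on a bad config; startup paths only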


@ -236,7 +236,7 @@ func TestClient(t *testing.T) {
defer s.Close()
cfg := ConfigFor(&rest.Config{Host: s.URL})
client := NewConfigOrDie(cfg).(*Client)
client := NewForConfigOrDie(cfg).(*Client)
tt.want(t, client)
})
}


@ -545,7 +545,7 @@ func TestMetadataClient(t *testing.T) {
return wrapper
})
client := metadata.NewConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services"))
client := metadata.NewForConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services"))
items, err := client.Namespace(ns).List(metav1.ListOptions{})
if err != nil {
t.Fatal(err)
@ -622,7 +622,7 @@ func TestMetadataClient(t *testing.T) {
return wrapper
})
client := metadata.NewConfigOrDie(cfg).Resource(crdGVR)
client := metadata.NewForConfigOrDie(cfg).Resource(crdGVR)
items, err := client.Namespace(ns).List(metav1.ListOptions{})
if err != nil {
t.Fatal(err)
@ -688,7 +688,7 @@ func TestMetadataClient(t *testing.T) {
return wrapper
})
client := metadata.NewConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services"))
client := metadata.NewForConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services"))
w, err := client.Namespace(ns).Watch(metav1.ListOptions{ResourceVersion: svc.ResourceVersion, Watch: true})
if err != nil {
t.Fatal(err)
@ -744,7 +744,7 @@ func TestMetadataClient(t *testing.T) {
}
cfg := metadata.ConfigFor(config)
client := metadata.NewConfigOrDie(cfg).Resource(crdGVR)
client := metadata.NewForConfigOrDie(cfg).Resource(crdGVR)
patched, err := client.Namespace(ns).Patch("test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{})
if err != nil {
@ -759,7 +759,7 @@ func TestMetadataClient(t *testing.T) {
wrapper.nested = rt
return wrapper
})
client = metadata.NewConfigOrDie(cfg).Resource(crdGVR)
client = metadata.NewForConfigOrDie(cfg).Resource(crdGVR)
w, err := client.Namespace(ns).Watch(metav1.ListOptions{ResourceVersion: cr.GetResourceVersion(), Watch: true})
if err != nil {
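Events on a metadata watch carry *metav1.PartialObjectMetadata, as the assertions in these tests rely on. A sketch of draining one inside such a test; client, ns, rv, and t are assumed from the surrounding context:

w, err := client.Namespace(ns).Watch(metav1.ListOptions{ResourceVersion: rv, Watch: true})
if err != nil {
	t.Fatal(err)
}
defer w.Stop()
for evt := range w.ResultChan() {
	m, ok := evt.Object.(*metav1.PartialObjectMetadata)
	if !ok {
		t.Fatalf("unexpected object type %T", evt.Object)
	}
	t.Logf("%s %s (uid=%s)", evt.Type, m.GetName(), m.GetUID())
}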


@ -29,9 +29,10 @@ go_test(
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/dynamic/dynamicinformer:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/metadata:go_default_library",
"//staging/src/k8s.io/client-go/metadata/metadatainformer:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//test/integration:go_default_library",


@ -24,7 +24,7 @@ import (
"testing"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures"
@ -38,9 +38,10 @@ import (
"k8s.io/apiserver/pkg/storage/names"
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/metadata"
"k8s.io/client-go/metadata/metadatainformer"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/cache"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
@ -201,6 +202,7 @@ type testContext struct {
clientSet clientset.Interface
apiExtensionClient apiextensionsclientset.Interface
dynamicClient dynamic.Interface
metadataClient metadata.Interface
startGC func(workers int)
// syncPeriod is how often the GC started with startGC will be resynced.
syncPeriod time.Duration
@ -231,20 +233,24 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
restMapper.Reset()
deletableResources := garbagecollector.GetDeletableResources(discoveryClient)
config := *result.ClientConfig
metadataClient, err := metadata.NewForConfig(&config)
if err != nil {
t.Fatalf("failed to create metadataClient: %v", err)
}
dynamicClient, err := dynamic.NewForConfig(&config)
if err != nil {
t.Fatalf("failed to create dynamicClient: %v", err)
}
sharedInformers := informers.NewSharedInformerFactory(clientSet, 0)
dynamicInformers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 0)
metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
alwaysStarted := make(chan struct{})
close(alwaysStarted)
gc, err := garbagecollector.NewGarbageCollector(
dynamicClient,
metadataClient,
restMapper,
deletableResources,
garbagecollector.DefaultIgnoredResources(),
controller.NewInformerFactory(sharedInformers, dynamicInformers),
controller.NewInformerFactory(sharedInformers, metadataInformers),
alwaysStarted,
)
if err != nil {
@ -278,6 +284,7 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
clientSet: clientSet,
apiExtensionClient: apiExtensionClient,
dynamicClient: dynamicClient,
metadataClient: metadataClient,
startGC: startGC,
syncPeriod: syncPeriod,
}

vendor/modules.txt

@ -1315,8 +1315,6 @@ k8s.io/client-go/discovery/cached/disk
k8s.io/client-go/discovery/cached/memory
k8s.io/client-go/discovery/fake
k8s.io/client-go/dynamic
k8s.io/client-go/dynamic/dynamicinformer
k8s.io/client-go/dynamic/dynamiclister
k8s.io/client-go/dynamic/fake
k8s.io/client-go/informers
k8s.io/client-go/informers/admissionregistration
@ -1477,6 +1475,8 @@ k8s.io/client-go/listers/storage/v1
k8s.io/client-go/listers/storage/v1alpha1
k8s.io/client-go/listers/storage/v1beta1
k8s.io/client-go/metadata
k8s.io/client-go/metadata/metadatainformer
k8s.io/client-go/metadata/metadatalister
k8s.io/client-go/pkg/apis/clientauthentication
k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
k8s.io/client-go/pkg/apis/clientauthentication/v1beta1