Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #78742 from smarterclayton/client_gc
The garbage collector and quota counter should use the metadata client and protobuf to access resources
This commit is contained in: commit 91b26341f4
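The diff below swaps the dynamic (unstructured) client used by the garbage collector and the resource quota counter for client-go's metadata client, which reads and writes objects as metav1.PartialObjectMetadata and can negotiate protobuf on the wire. As a rough sketch of what that client looks like to a caller (not part of the patch; the in-cluster config and the pod name are placeholders, and the call signatures match the context-free client-go of this change):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/metadata"
	"k8s.io/client-go/rest"
)

func main() {
	// Hypothetical in-cluster config; any *rest.Config works the same way.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}

	// metadata.NewForConfig builds a client that asks the server for
	// metadata-only responses (PartialObjectMetadata).
	client, err := metadata.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	podsGVR := schema.GroupVersionResource{Version: "v1", Resource: "pods"}

	// The result carries only ObjectMeta (name, UID, ownerReferences,
	// finalizers, ...), which is all the GC and quota counter need.
	pod, err := client.Resource(podsGVR).Namespace("default").Get("example-pod", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(pod.GetUID(), pod.GetOwnerReferences())
}
```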
@@ -121,10 +121,10 @@ go_library(
         "//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
         "//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
         "//staging/src/k8s.io/client-go/dynamic:go_default_library",
-        "//staging/src/k8s.io/client-go/dynamic/dynamicinformer:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/metadata:go_default_library",
+        "//staging/src/k8s.io/client-go/metadata/metadatainformer:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/restmapper:go_default_library",
         "//staging/src/k8s.io/client-go/scale:go_default_library",
@@ -31,7 +31,7 @@ import (

 	"github.com/spf13/cobra"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -43,10 +43,10 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/apiserver/pkg/util/term"
 	cacheddiscovery "k8s.io/client-go/discovery/cached"
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/dynamic/dynamicinformer"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/metadata"
+	"k8s.io/client-go/metadata/metadatainformer"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/restmapper"
 	"k8s.io/client-go/tools/leaderelection"
@@ -239,7 +239,7 @@ func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error {
 		}

 		controllerContext.InformerFactory.Start(controllerContext.Stop)
-		controllerContext.GenericInformerFactory.Start(controllerContext.Stop)
+		controllerContext.ObjectOrMetadataInformerFactory.Start(controllerContext.Stop)
 		close(controllerContext.InformersStarted)

 		select {}
@@ -295,9 +295,11 @@ type ControllerContext struct {
 	// InformerFactory gives access to informers for the controller.
 	InformerFactory informers.SharedInformerFactory

-	// GenericInformerFactory gives access to informers for typed resources
-	// and dynamic resources.
-	GenericInformerFactory controller.InformerFactory
+	// ObjectOrMetadataInformerFactory gives access to informers for typed resources
+	// and dynamic resources by their metadata. All generic controllers currently use
+	// object metadata - if a future controller needs access to the full object this
+	// would become GenericInformerFactory and take a dynamic client.
+	ObjectOrMetadataInformerFactory controller.InformerFactory

 	// ComponentConfig provides access to init options for a given controller
 	ComponentConfig kubectrlmgrconfig.KubeControllerManagerConfiguration
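A sketch of how a controller init function might consume the renamed field (not part of the patch; the GVR and the function are illustrative). ForResource first tries the typed shared informers and falls back to metadata-only informers for everything else:

```go
// Sketch only: startExampleController is hypothetical, shaped like the other
// controller init funcs in this file.
func startExampleController(ctx ControllerContext) (http.Handler, bool, error) {
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}

	// Typed resources are served by the shared informer factory; everything
	// else falls back to metadata-only informers (PartialObjectMetadata).
	informer, err := ctx.ObjectOrMetadataInformerFactory.ForResource(gvr)
	if err != nil {
		return nil, false, err
	}
	_ = informer.Lister() // a cache.GenericLister over whichever informer was chosen
	return nil, true, nil
}
```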
@@ -448,8 +450,8 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien
 	versionedClient := rootClientBuilder.ClientOrDie("shared-informers")
 	sharedInformers := informers.NewSharedInformerFactory(versionedClient, ResyncPeriod(s)())

-	dynamicClient := dynamic.NewForConfigOrDie(rootClientBuilder.ConfigOrDie("dynamic-informers"))
-	dynamicInformers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, ResyncPeriod(s)())
+	metadataClient := metadata.NewForConfigOrDie(rootClientBuilder.ConfigOrDie("metadata-informers"))
+	metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, ResyncPeriod(s)())

 	// If apiserver is not running we should wait for some time and fail only then. This is particularly
 	// important when we start apiserver and controller manager at the same time.
@@ -479,7 +481,7 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien
 	ctx := ControllerContext{
 		ClientBuilder:                   clientBuilder,
 		InformerFactory:                 sharedInformers,
-		GenericInformerFactory:          controller.NewInformerFactory(sharedInformers, dynamicInformers),
+		ObjectOrMetadataInformerFactory: controller.NewInformerFactory(sharedInformers, metadataInformers),
 		ComponentConfig:                 s.ComponentConfig,
 		RESTMapper:                      restMapper,
 		AvailableResources:              availableResources,
@@ -33,7 +33,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
-	"k8s.io/client-go/dynamic"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/metadata"
 	restclient "k8s.io/client-go/rest"
@@ -334,7 +333,7 @@ func startResourceQuotaController(ctx ControllerContext) (http.Handler, bool, er
 		QuotaClient:               resourceQuotaControllerClient.CoreV1(),
 		ResourceQuotaInformer:     ctx.InformerFactory.Core().V1().ResourceQuotas(),
 		ResyncPeriod:              controller.StaticResyncPeriodFunc(ctx.ComponentConfig.ResourceQuotaController.ResourceQuotaSyncPeriod.Duration),
-		InformerFactory:           ctx.GenericInformerFactory,
+		InformerFactory:           ctx.ObjectOrMetadataInformerFactory,
 		ReplenishmentResyncPeriod: ctx.ResyncPeriod,
 		DiscoveryFunc:             discoveryFunc,
 		IgnoredResourcesFunc:      quotaConfiguration.IgnoredResources,
@@ -423,7 +422,7 @@ func startGarbageCollectorController(ctx ControllerContext) (http.Handler, bool,
 	discoveryClient := cacheddiscovery.NewMemCacheClient(gcClientset.Discovery())

 	config := ctx.ClientBuilder.ConfigOrDie("generic-garbage-collector")
-	dynamicClient, err := dynamic.NewForConfig(config)
+	metadataClient, err := metadata.NewForConfig(config)
 	if err != nil {
 		return nil, true, err
 	}
@@ -435,11 +434,11 @@ func startGarbageCollectorController(ctx ControllerContext) (http.Handler, bool,
 		ignoredResources[schema.GroupResource{Group: r.Group, Resource: r.Resource}] = struct{}{}
 	}
 	garbageCollector, err := garbagecollector.NewGarbageCollector(
-		dynamicClient,
+		metadataClient,
 		ctx.RESTMapper,
 		deletableResources,
 		ignoredResources,
-		ctx.GenericInformerFactory,
+		ctx.ObjectOrMetadataInformerFactory,
 		ctx.InformersStarted,
 	)
 	if err != nil {
@@ -125,7 +125,7 @@ func TestController_DiscoveryError(t *testing.T) {
 		ctx := ControllerContext{
 			ClientBuilder:                   testClientBuilder,
 			InformerFactory:                 testInformerFactory,
-			GenericInformerFactory:          testInformerFactory,
+			ObjectOrMetadataInformerFactory: testInformerFactory,
 			InformersStarted:                make(chan struct{}),
 		}
 		for funcName, controllerInit := range controllerInitFuncMap {
@@ -80,11 +80,11 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
-        "//staging/src/k8s.io/client-go/dynamic/dynamicinformer:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/metadata/metadatainformer:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
@@ -25,7 +25,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -34,8 +33,8 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/discovery:go_default_library",
-        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/metadata:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/util/retry:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
@@ -69,11 +68,11 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
         "//staging/src/k8s.io/client-go/discovery:go_default_library",
-        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
-        "//staging/src/k8s.io/client-go/dynamic/dynamicinformer:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/metadata:go_default_library",
+        "//staging/src/k8s.io/client-go/metadata/metadatainformer:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
         "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
@@ -27,7 +27,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -35,9 +34,10 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/discovery"
-	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/metadata"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/controller"

 	// import known versions
 	_ "k8s.io/client-go/kubernetes"
 )
@@ -57,7 +57,7 @@ const ResourceResyncTime time.Duration = 0
 // up to date as the notification is sent.
 type GarbageCollector struct {
 	restMapper     resettableRESTMapper
-	dynamicClient  dynamic.Interface
+	metadataClient metadata.Interface
 	// garbage collector attempts to delete the items in attemptToDelete queue when the time is ripe.
 	attemptToDelete workqueue.RateLimitingInterface
 	// garbage collector attempts to orphan the dependents of the items in the attemptToOrphan queue, then deletes the items.
@@ -71,7 +71,7 @@ type GarbageCollector struct {
 }

 func NewGarbageCollector(
-	dynamicClient dynamic.Interface,
+	metadataClient metadata.Interface,
 	mapper resettableRESTMapper,
 	deletableResources map[schema.GroupVersionResource]struct{},
 	ignoredResources map[schema.GroupResource]struct{},
@@ -82,13 +82,14 @@ func NewGarbageCollector(
 	attemptToOrphan := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_orphan")
 	absentOwnerCache := NewUIDCache(500)
 	gc := &GarbageCollector{
-		dynamicClient:    dynamicClient,
+		metadataClient:   metadataClient,
 		restMapper:       mapper,
 		attemptToDelete:  attemptToDelete,
 		attemptToOrphan:  attemptToOrphan,
 		absentOwnerCache: absentOwnerCache,
 	}
 	gb := &GraphBuilder{
+		metadataClient:   metadataClient,
 		informersStarted: informersStarted,
 		restMapper:       mapper,
 		graphChanges:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_graph_changes"),
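Pulled out of the hunks above, the new wiring looks roughly like this (a sketch under stated assumptions, not part of the patch: restMapper, sharedInformers, deletableResources and stopCh are assumed to be built elsewhere, as in CreateControllerContext, and the resync period and worker count are illustrative):

```go
// Sketch only: how the metadata client, metadata informers, combined informer
// factory and garbage collector introduced above fit together.
func runGarbageCollector(
	cfg *rest.Config,
	restMapper *restmapper.DeferredDiscoveryRESTMapper,
	sharedInformers informers.SharedInformerFactory,
	deletableResources map[schema.GroupVersionResource]struct{},
	stopCh <-chan struct{},
) error {
	metadataClient, err := metadata.NewForConfig(cfg)
	if err != nil {
		return err
	}
	metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 30*time.Second)

	informersStarted := make(chan struct{})
	gc, err := garbagecollector.NewGarbageCollector(
		metadataClient,
		restMapper,
		deletableResources,
		garbagecollector.DefaultIgnoredResources(),
		controller.NewInformerFactory(sharedInformers, metadataInformers),
		informersStarted,
	)
	if err != nil {
		return err
	}

	sharedInformers.Start(stopCh)
	metadataInformers.Start(stopCh)
	close(informersStarted)

	go gc.Run(5, stopCh) // 5 workers, illustrative
	return nil
}
```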
@@ -323,7 +324,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
 // If isDangling looks up the referenced object at the API server, it also
 // returns its latest state.
 func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *node) (
-	dangling bool, owner *unstructured.Unstructured, err error) {
+	dangling bool, owner *metav1.PartialObjectMetadata, err error) {
 	if gc.absentOwnerCache.Has(reference.UID) {
 		klog.V(5).Infof("according to the absentOwnerCache, object %s's owner %s/%s, %s does not exist", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
 		return true, nil, nil
@@ -342,7 +343,7 @@ func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *no
 	// TODO: It's only necessary to talk to the API server if the owner node
 	// is a "virtual" node. The local graph could lag behind the real
 	// status, but in practice, the difference is small.
-	owner, err = gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.identity.Namespace)).Get(reference.Name, metav1.GetOptions{})
+	owner, err = gc.metadataClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.identity.Namespace)).Get(reference.Name, metav1.GetOptions{})
 	switch {
 	case errors.IsNotFound(err):
 		gc.absentOwnerCache.Add(reference.UID)
@@ -30,7 +30,7 @@ import (

 	_ "k8s.io/kubernetes/pkg/apis/core/install"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -40,11 +40,11 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/client-go/discovery"
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/dynamic/dynamicinformer"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/metadata"
+	"k8s.io/client-go/metadata/metadatainformer"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
@@ -61,7 +61,7 @@ func TestGarbageCollectorConstruction(t *testing.T) {
 	config := &restclient.Config{}
 	tweakableRM := meta.NewDefaultRESTMapper(nil)
 	rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
-	dynamicClient, err := dynamic.NewForConfig(config)
+	metadataClient, err := metadata.NewForConfig(config)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -76,13 +76,13 @@ func TestGarbageCollectorConstruction(t *testing.T) {
 	client := fake.NewSimpleClientset()

 	sharedInformers := informers.NewSharedInformerFactory(client, 0)
-	dynamicInformers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 0)
+	metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
 	// No monitor will be constructed for the non-core resource, but the GC
 	// construction will not fail.
 	alwaysStarted := make(chan struct{})
 	close(alwaysStarted)
-	gc, err := NewGarbageCollector(dynamicClient, rm, twoResources, map[schema.GroupResource]struct{}{},
-		controller.NewInformerFactory(sharedInformers, dynamicInformers), alwaysStarted)
+	gc, err := NewGarbageCollector(metadataClient, rm, twoResources, map[schema.GroupResource]struct{}{},
+		controller.NewInformerFactory(sharedInformers, metadataInformers), alwaysStarted)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -156,7 +156,7 @@ func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *htt
 	fakeResponse, ok := f.response[request.Method+request.URL.Path]
 	if !ok {
 		fakeResponse.statusCode = 200
-		fakeResponse.content = []byte("{\"kind\": \"List\"}")
+		fakeResponse.content = []byte(`{"apiVersion": "v1", "kind": "List"}`)
 	}
 	response.Header().Set("Content-Type", "application/json")
 	response.WriteHeader(fakeResponse.statusCode)
@@ -193,7 +193,7 @@ type garbageCollector struct {
 }

 func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
-	dynamicClient, err := dynamic.NewForConfig(config)
+	metadataClient, err := metadata.NewForConfig(config)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -203,7 +203,7 @@ func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
 	sharedInformers := informers.NewSharedInformerFactory(client, 0)
 	alwaysStarted := make(chan struct{})
 	close(alwaysStarted)
-	gc, err := NewGarbageCollector(dynamicClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, podResource, ignoredResources, sharedInformers, alwaysStarted)
+	gc, err := NewGarbageCollector(metadataClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, podResource, ignoredResources, sharedInformers, alwaysStarted)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -221,6 +221,7 @@ func getPod(podName string, ownerReferences []metav1.OwnerReference) *v1.Pod {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            podName,
 			Namespace:       "ns1",
+			UID:             "456",
 			OwnerReferences: ownerReferences,
 		},
 	}
@@ -811,7 +812,7 @@ func TestGarbageCollectorSync(t *testing.T) {
 	}

 	rm := &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}
-	dynamicClient, err := dynamic.NewForConfig(clientConfig)
+	metadataClient, err := metadata.NewForConfig(clientConfig)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -822,7 +823,7 @@ func TestGarbageCollectorSync(t *testing.T) {
 	sharedInformers := informers.NewSharedInformerFactory(client, 0)
 	alwaysStarted := make(chan struct{})
 	close(alwaysStarted)
-	gc, err := NewGarbageCollector(dynamicClient, rm, podResource, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
+	gc, err := NewGarbageCollector(metadataClient, rm, podResource, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -31,6 +31,7 @@ import (
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/metadata"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/controller"
@@ -88,6 +89,7 @@ type GraphBuilder struct {
 	// it is protected by monitorLock.
 	running bool

+	metadataClient metadata.Interface
 	// monitors are the producer of the graphChanges queue, graphBuilder alters
 	// the in-memory graph according to the changes.
 	graphChanges workqueue.RateLimitingInterface
@@ -23,7 +23,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/retry"
@@ -57,23 +56,23 @@ func (gc *GarbageCollector) deleteObject(item objectReference, policy *metav1.De
 	uid := item.UID
 	preconditions := metav1.Preconditions{UID: &uid}
 	deleteOptions := metav1.DeleteOptions{Preconditions: &preconditions, PropagationPolicy: policy}
-	return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Delete(item.Name, &deleteOptions)
+	return gc.metadataClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Delete(item.Name, &deleteOptions)
 }

-func (gc *GarbageCollector) getObject(item objectReference) (*unstructured.Unstructured, error) {
+func (gc *GarbageCollector) getObject(item objectReference) (*metav1.PartialObjectMetadata, error) {
 	resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
 	if err != nil {
 		return nil, err
 	}
-	return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Get(item.Name, metav1.GetOptions{})
+	return gc.metadataClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Get(item.Name, metav1.GetOptions{})
 }

-func (gc *GarbageCollector) patchObject(item objectReference, patch []byte, pt types.PatchType) (*unstructured.Unstructured, error) {
+func (gc *GarbageCollector) patchObject(item objectReference, patch []byte, pt types.PatchType) (*metav1.PartialObjectMetadata, error) {
 	resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
 	if err != nil {
 		return nil, err
 	}
-	return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Patch(item.Name, pt, patch, metav1.PatchOptions{})
+	return gc.metadataClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Patch(item.Name, pt, patch, metav1.PatchOptions{})
 }

 func (gc *GarbageCollector) removeFinalizer(owner *node, targetFinalizer string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// remove the owner from dependent's OwnerReferences
|
// remove the owner from dependent's OwnerReferences
|
||||||
patch, err := json.Marshal(map[string]interface{}{
|
patch, err := json.Marshal(&objectForFinalizersPatch{
|
||||||
"metadata": map[string]interface{}{
|
ObjectMetaForFinalizersPatch: ObjectMetaForFinalizersPatch{
|
||||||
"resourceVersion": accessor.GetResourceVersion(),
|
ResourceVersion: accessor.GetResourceVersion(),
|
||||||
"finalizers": newFinalizers,
|
Finalizers: newFinalizers,
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@@ -24,7 +24,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
@@ -51,7 +50,7 @@ func (gc *GarbageCollector) getMetadata(apiVersion, kind, namespace, name string
 	m, ok := gc.dependencyGraphBuilder.monitors[apiResource]
 	if !ok || m == nil {
 		// If local cache doesn't exist for mapping.Resource, send a GET request to API server
-		return gc.dynamicClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
+		return gc.metadataClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
 	}
 	key := name
 	if len(namespace) != 0 {
@@ -63,7 +62,7 @@ func (gc *GarbageCollector) getMetadata(apiVersion, kind, namespace, name string
 	}
 	if !exist {
 		// If local cache doesn't contain the object, send a GET request to API server
-		return gc.dynamicClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
+		return gc.metadataClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
 	}
 	obj, ok := raw.(runtime.Object)
 	if !ok {
@@ -72,6 +71,15 @@ func (gc *GarbageCollector) getMetadata(apiVersion, kind, namespace, name string
 	return meta.Accessor(obj)
 }

+type objectForFinalizersPatch struct {
+	ObjectMetaForFinalizersPatch `json:"metadata"`
+}
+
+type ObjectMetaForFinalizersPatch struct {
+	ResourceVersion string   `json:"resourceVersion"`
+	Finalizers      []string `json:"finalizers"`
+}
+
 type objectForPatch struct {
 	ObjectMetaForPatch `json:"metadata"`
 }
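The new typed structs exist so the finalizers patch in removeFinalizer marshals to a fixed JSON shape instead of going through map[string]interface{}. Roughly, the payload looks like this (a sketch, not part of the patch; the resourceVersion and finalizer values are examples):

```go
// Sketch only: the JSON produced by the typed finalizers patch.
func exampleFinalizersPatch() ([]byte, error) {
	return json.Marshal(&objectForFinalizersPatch{
		ObjectMetaForFinalizersPatch: ObjectMetaForFinalizersPatch{
			ResourceVersion: "12345",
			Finalizers:      []string{"example.io/other-finalizer"},
		},
	})
	// => {"metadata":{"resourceVersion":"12345","finalizers":["example.io/other-finalizer"]}}
}
```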
@@ -87,7 +95,7 @@ type jsonMergePatchFunc func(*node) ([]byte, error)

 // patch tries strategic merge patch on item first, and if SMP is not supported, it fallbacks to JSON merge
 // patch.
-func (gc *GarbageCollector) patch(item *node, smp []byte, jmp jsonMergePatchFunc) (*unstructured.Unstructured, error) {
+func (gc *GarbageCollector) patch(item *node, smp []byte, jmp jsonMergePatchFunc) (*metav1.PartialObjectMetadata, error) {
 	smpResult, err := gc.patchObject(item.identity, smp, types.StrategicMergePatchType)
 	if err == nil {
 		return smpResult, nil
@@ -18,8 +18,8 @@ package controller

 import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/client-go/dynamic/dynamicinformer"
 	"k8s.io/client-go/informers"
+	"k8s.io/client-go/metadata/metadatainformer"
 )

 // InformerFactory creates informers for each group version resource.
@@ -30,27 +30,27 @@ type InformerFactory interface {

 type informerFactory struct {
 	typedInformerFactory    informers.SharedInformerFactory
-	dynamicInformerFactory  dynamicinformer.DynamicSharedInformerFactory
+	metadataInformerFactory metadatainformer.SharedInformerFactory
 }

 func (i *informerFactory) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) {
 	informer, err := i.typedInformerFactory.ForResource(resource)
 	if err != nil {
-		return i.dynamicInformerFactory.ForResource(resource), nil
+		return i.metadataInformerFactory.ForResource(resource), nil
 	}
 	return informer, nil
 }

 func (i *informerFactory) Start(stopCh <-chan struct{}) {
 	i.typedInformerFactory.Start(stopCh)
-	i.dynamicInformerFactory.Start(stopCh)
+	i.metadataInformerFactory.Start(stopCh)
 }

 // NewInformerFactory creates a new InformerFactory which works with both typed
-// resources and dynamic resources
-func NewInformerFactory(typedInformerFactory informers.SharedInformerFactory, dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory) InformerFactory {
+// resources and metadata-only resources
+func NewInformerFactory(typedInformerFactory informers.SharedInformerFactory, metadataInformerFactory metadatainformer.SharedInformerFactory) InformerFactory {
 	return &informerFactory{
 		typedInformerFactory:    typedInformerFactory,
-		dynamicInformerFactory:  dynamicInformerFactory,
+		metadataInformerFactory: metadataInformerFactory,
 	}
 }
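Usage is unchanged apart from the second argument's type: callers hand the factory a typed informer factory plus a metadata informer factory and ask for informers by GroupVersionResource, and lookups that the typed factory cannot serve fall back to metadata-only informers. A sketch (not part of the patch; kubeClient and metadataClient are assumed to exist, and a resync of 0 disables periodic resync):

```go
// Sketch only: constructing and exercising the combined factory.
func newObjectOrMetadataInformers(kubeClient kubernetes.Interface, metadataClient metadata.Interface) (controller.InformerFactory, error) {
	sharedInformers := informers.NewSharedInformerFactory(kubeClient, 0)
	metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
	factory := controller.NewInformerFactory(sharedInformers, metadataInformers)

	// Typed resources (e.g. pods) resolve through sharedInformers; unknown or
	// custom resources come back as metadata-only informers.
	if _, err := factory.ForResource(schema.GroupVersionResource{Version: "v1", Resource: "pods"}); err != nil {
		return nil, err
	}
	return factory, nil
}
```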
@@ -70,9 +70,9 @@ func ConfigFor(inConfig *rest.Config) *rest.Config {
 	return config
 }

-// NewConfigOrDie creates a new metadata client for the given config and
+// NewForConfigOrDie creates a new metadata client for the given config and
 // panics if there is an error in the config.
-func NewConfigOrDie(c *rest.Config) Interface {
+func NewForConfigOrDie(c *rest.Config) Interface {
 	ret, err := NewForConfig(c)
 	if err != nil {
 		panic(err)
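The rename brings the helper in line with the NewForConfig/NewForConfigOrDie convention used by the other client-go clients; only the name changes for callers. A sketch of calling it (not part of the patch; the namespace and GVR are examples):

```go
// Sketch only: the renamed constructor panics on a bad config instead of
// returning an error, mirroring kubernetes.NewForConfigOrDie.
func listServiceMetadata(cfg *rest.Config) error {
	client := metadata.NewForConfigOrDie(cfg)
	items, err := client.
		Resource(schema.GroupVersionResource{Version: "v1", Resource: "services"}).
		Namespace("default").
		List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, item := range items.Items {
		fmt.Println(item.Name, item.UID) // metadata-only view of each service
	}
	return nil
}
```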
@@ -236,7 +236,7 @@ func TestClient(t *testing.T) {
 			defer s.Close()

 			cfg := ConfigFor(&rest.Config{Host: s.URL})
-			client := NewConfigOrDie(cfg).(*Client)
+			client := NewForConfigOrDie(cfg).(*Client)
 			tt.want(t, client)
 		})
 	}
@@ -545,7 +545,7 @@ func TestMetadataClient(t *testing.T) {
 			return wrapper
 		})

-		client := metadata.NewConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services"))
+		client := metadata.NewForConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services"))
 		items, err := client.Namespace(ns).List(metav1.ListOptions{})
 		if err != nil {
 			t.Fatal(err)
@@ -622,7 +622,7 @@ func TestMetadataClient(t *testing.T) {
 			return wrapper
 		})

-		client := metadata.NewConfigOrDie(cfg).Resource(crdGVR)
+		client := metadata.NewForConfigOrDie(cfg).Resource(crdGVR)
 		items, err := client.Namespace(ns).List(metav1.ListOptions{})
 		if err != nil {
 			t.Fatal(err)
@@ -688,7 +688,7 @@ func TestMetadataClient(t *testing.T) {
 			return wrapper
 		})

-		client := metadata.NewConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services"))
+		client := metadata.NewForConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services"))
 		w, err := client.Namespace(ns).Watch(metav1.ListOptions{ResourceVersion: svc.ResourceVersion, Watch: true})
 		if err != nil {
 			t.Fatal(err)
@@ -744,7 +744,7 @@ func TestMetadataClient(t *testing.T) {
 		}

 		cfg := metadata.ConfigFor(config)
-		client := metadata.NewConfigOrDie(cfg).Resource(crdGVR)
+		client := metadata.NewForConfigOrDie(cfg).Resource(crdGVR)

 		patched, err := client.Namespace(ns).Patch("test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{})
 		if err != nil {
@@ -759,7 +759,7 @@ func TestMetadataClient(t *testing.T) {
 			wrapper.nested = rt
 			return wrapper
 		})
-		client = metadata.NewConfigOrDie(cfg).Resource(crdGVR)
+		client = metadata.NewForConfigOrDie(cfg).Resource(crdGVR)

 		w, err := client.Namespace(ns).Watch(metav1.ListOptions{ResourceVersion: cr.GetResourceVersion(), Watch: true})
 		if err != nil {
@@ -29,9 +29,10 @@ go_test(
         "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
         "//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
         "//staging/src/k8s.io/client-go/dynamic:go_default_library",
-        "//staging/src/k8s.io/client-go/dynamic/dynamicinformer:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/metadata:go_default_library",
+        "//staging/src/k8s.io/client-go/metadata/metadatainformer:go_default_library",
         "//staging/src/k8s.io/client-go/restmapper:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//test/integration:go_default_library",
@@ -24,7 +24,7 @@ import (
 	"testing"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 	apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures"
@@ -38,9 +38,10 @@ import (
 	"k8s.io/apiserver/pkg/storage/names"
 	cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
 	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/dynamic/dynamicinformer"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/metadata"
+	"k8s.io/client-go/metadata/metadatainformer"
 	"k8s.io/client-go/restmapper"
 	"k8s.io/client-go/tools/cache"
 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
@@ -201,6 +202,7 @@ type testContext struct {
 	clientSet          clientset.Interface
 	apiExtensionClient apiextensionsclientset.Interface
 	dynamicClient      dynamic.Interface
+	metadataClient     metadata.Interface
 	startGC            func(workers int)
 	// syncPeriod is how often the GC started with startGC will be resynced.
 	syncPeriod time.Duration
@@ -231,20 +233,24 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
 	restMapper.Reset()
 	deletableResources := garbagecollector.GetDeletableResources(discoveryClient)
 	config := *result.ClientConfig
+	metadataClient, err := metadata.NewForConfig(&config)
+	if err != nil {
+		t.Fatalf("failed to create metadataClient: %v", err)
+	}
 	dynamicClient, err := dynamic.NewForConfig(&config)
 	if err != nil {
 		t.Fatalf("failed to create dynamicClient: %v", err)
 	}
 	sharedInformers := informers.NewSharedInformerFactory(clientSet, 0)
-	dynamicInformers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 0)
+	metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
 	alwaysStarted := make(chan struct{})
 	close(alwaysStarted)
 	gc, err := garbagecollector.NewGarbageCollector(
-		dynamicClient,
+		metadataClient,
 		restMapper,
 		deletableResources,
 		garbagecollector.DefaultIgnoredResources(),
-		controller.NewInformerFactory(sharedInformers, dynamicInformers),
+		controller.NewInformerFactory(sharedInformers, metadataInformers),
 		alwaysStarted,
 	)
 	if err != nil {
@@ -278,6 +284,7 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
 		clientSet:          clientSet,
 		apiExtensionClient: apiExtensionClient,
 		dynamicClient:      dynamicClient,
+		metadataClient:     metadataClient,
 		startGC:            startGC,
 		syncPeriod:         syncPeriod,
 	}
vendor/modules.txt (vendored): 4 changes

@@ -1315,8 +1315,6 @@ k8s.io/client-go/discovery/cached/disk
 k8s.io/client-go/discovery/cached/memory
 k8s.io/client-go/discovery/fake
 k8s.io/client-go/dynamic
-k8s.io/client-go/dynamic/dynamicinformer
-k8s.io/client-go/dynamic/dynamiclister
 k8s.io/client-go/dynamic/fake
 k8s.io/client-go/informers
 k8s.io/client-go/informers/admissionregistration
@@ -1477,6 +1475,8 @@ k8s.io/client-go/listers/storage/v1
 k8s.io/client-go/listers/storage/v1alpha1
 k8s.io/client-go/listers/storage/v1beta1
 k8s.io/client-go/metadata
+k8s.io/client-go/metadata/metadatainformer
+k8s.io/client-go/metadata/metadatalister
 k8s.io/client-go/pkg/apis/clientauthentication
 k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1