Merge pull request #93946 from alexzimmer96/68026-pkg-controller-resourcequota

Refactor pkg/controller/resourcequota to fix golint errors
commit dd6c53d035 by Kubernetes Prow Robot, 2020-09-01 19:41:06 -07:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
7 changed files with 47 additions and 48 deletions
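In short: the bulk of the change renames exported identifiers so they no longer stutter with the package name (golint's name-stuttering check). For callers, the rename looks like this (a sketch distilled from the hunks below; the elided option fields are the ones shown in the diff):

	// Before: the package name is repeated in every exported identifier.
	opts := &resourcequotacontroller.ResourceQuotaControllerOptions{ /* ... */ }
	rq, err := resourcequotacontroller.NewResourceQuotaController(opts)

	// After: the import path already says "resourcequota", so the short
	// names read naturally at the call site.
	opts := &resourcequotacontroller.ControllerOptions{ /* ... */ }
	rq, err := resourcequotacontroller.NewController(opts)

QuotaMonitor's constructor is renamed the same way (NewQuotaMonitor becomes NewMonitor), and the remaining hunks fix capitalized error strings, a redundant import alias, a redundant type conversion, underscore-named receivers, and two locals that shadowed package names.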

cmd/kube-controller-manager/app/core.go

@@ -427,7 +427,7 @@ func startResourceQuotaController(ctx ControllerContext) (http.Handler, bool, er
 	listerFuncForResource := generic.ListerFuncForResourceFunc(ctx.InformerFactory.ForResource)
 	quotaConfiguration := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource)
-	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
+	resourceQuotaControllerOptions := &resourcequotacontroller.ControllerOptions{
 		QuotaClient: resourceQuotaControllerClient.CoreV1(),
 		ResourceQuotaInformer: ctx.InformerFactory.Core().V1().ResourceQuotas(),
 		ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.ComponentConfig.ResourceQuotaController.ResourceQuotaSyncPeriod.Duration),
@@ -444,7 +444,7 @@ func startResourceQuotaController(ctx ControllerContext) (http.Handler, bool, er
 		}
 	}
-	resourceQuotaController, err := resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions)
+	resourceQuotaController, err := resourcequotacontroller.NewController(resourceQuotaControllerOptions)
 	if err != nil {
 		return nil, false, err
 	}

hack/.golint_failures

@@ -76,7 +76,6 @@ pkg/controller/replicaset
 pkg/controller/replicaset/config/v1alpha1
 pkg/controller/replication
 pkg/controller/replication/config/v1alpha1
-pkg/controller/resourcequota
 pkg/controller/resourcequota/config/v1alpha1
 pkg/controller/service/config/v1alpha1
 pkg/controller/serviceaccount/config/v1alpha1

pkg/controller/resourcequota/doc.go

@@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// resourcequota contains a controller that makes resource quota usage observations
+// Package resourcequota contains a controller that makes resource quota usage observations
 package resourcequota // import "k8s.io/kubernetes/pkg/controller/resourcequota"
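The doc.go change is purely the comment form: golint requires a package comment to begin with "Package <name>". The same convention applies to every exported identifier, e.g. (an illustrative sketch, not code from this PR):

	// Package resourcequota contains a controller that makes resource
	// quota usage observations.
	package resourcequota

	// Controller is responsible for tracking quota usage status in the
	// system. golint flags any exported name whose doc comment does not
	// start with the name itself.
	type Controller struct{ /* ... */ }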

pkg/controller/resourcequota/resource_quota_controller.go

@@ -42,7 +42,7 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/controller"
-	quota "k8s.io/kubernetes/pkg/quota/v1"
+	"k8s.io/kubernetes/pkg/quota/v1"
 )
 
 // NamespacedResourcesFunc knows how to discover namespaced resources.
@@ -52,8 +52,8 @@ type NamespacedResourcesFunc func() ([]*metav1.APIResourceList, error)
 // that may require quota to be recalculated.
 type ReplenishmentFunc func(groupResource schema.GroupResource, namespace string)
 
-// ResourceQuotaControllerOptions holds options for creating a quota controller
-type ResourceQuotaControllerOptions struct {
+// ControllerOptions holds options for creating a quota controller
+type ControllerOptions struct {
 	// Must have authority to list all quotas, and update quota status
 	QuotaClient corev1client.ResourceQuotasGetter
 	// Shared informer for resource quotas
@@ -74,8 +74,8 @@ type ResourceQuotaControllerOptions struct {
 	ReplenishmentResyncPeriod controller.ResyncPeriodFunc
 }
 
-// ResourceQuotaController is responsible for tracking quota usage status in the system
-type ResourceQuotaController struct {
+// Controller is responsible for tracking quota usage status in the system
+type Controller struct {
 	// Must have authority to list all resources in the system, and update quota status
 	rqClient corev1client.ResourceQuotasGetter
 	// A lister/getter of resource quota objects
@@ -100,10 +100,10 @@ type ResourceQuotaController struct {
 	workerLock sync.RWMutex
 }
 
-// NewResourceQuotaController creates a quota controller with specified options
-func NewResourceQuotaController(options *ResourceQuotaControllerOptions) (*ResourceQuotaController, error) {
+// NewController creates a quota controller with specified options
+func NewController(options *ControllerOptions) (*Controller, error) {
 	// build the resource quota controller
-	rq := &ResourceQuotaController{
+	rq := &Controller{
 		rqClient: options.QuotaClient,
 		rqLister: options.ResourceQuotaInformer.Lister(),
 		informerSyncedFuncs: []cache.InformerSynced{options.ResourceQuotaInformer.Informer().HasSynced},
@@ -175,7 +175,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) (*Resou
 }
 
 // enqueueAll is called at the fullResyncPeriod interval to force a full recalculation of quota usage statistics
-func (rq *ResourceQuotaController) enqueueAll() {
+func (rq *Controller) enqueueAll() {
 	defer klog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage")
 	rqs, err := rq.rqLister.List(labels.Everything())
 	if err != nil {
@@ -185,7 +185,7 @@
 	for i := range rqs {
 		key, err := controller.KeyFunc(rqs[i])
 		if err != nil {
-			utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", rqs[i], err))
+			utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", rqs[i], err))
 			continue
 		}
 		rq.queue.Add(key)
@@ -193,7 +193,7 @@
 }
 
 // obj could be an *v1.ResourceQuota, or a DeletionFinalStateUnknown marker item.
-func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
+func (rq *Controller) enqueueResourceQuota(obj interface{}) {
 	key, err := controller.KeyFunc(obj)
 	if err != nil {
 		klog.Errorf("Couldn't get key for object %+v: %v", obj, err)
@@ -202,7 +202,7 @@ func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
 	rq.queue.Add(key)
 }
 
-func (rq *ResourceQuotaController) addQuota(obj interface{}) {
+func (rq *Controller) addQuota(obj interface{}) {
 	key, err := controller.KeyFunc(obj)
 	if err != nil {
 		klog.Errorf("Couldn't get key for object %+v: %v", obj, err)
@@ -220,7 +220,7 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
 	// if we declared a constraint that has no usage (which this controller can calculate, prioritize it)
 	for constraint := range resourceQuota.Status.Hard {
 		if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound {
-			matchedResources := []v1.ResourceName{v1.ResourceName(constraint)}
+			matchedResources := []v1.ResourceName{constraint}
 			for _, evaluator := range rq.registry.List() {
 				if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 {
 					rq.missingUsageQueue.Add(key)
@@ -235,7 +235,7 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
 }
 
 // worker runs a worker thread that just dequeues items, processes them, and marks them done.
-func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface) func() {
+func (rq *Controller) worker(queue workqueue.RateLimitingInterface) func() {
 	workFunc := func() bool {
 		key, quit := queue.Get()
 		if quit {
@@ -265,7 +265,7 @@ func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface)
 }
 
 // Run begins quota controller using the specified number of workers
-func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
+func (rq *Controller) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer rq.queue.ShutDown()
@@ -291,7 +291,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
 }
 
 // syncResourceQuotaFromKey syncs a quota key
-func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
+func (rq *Controller) syncResourceQuotaFromKey(key string) (err error) {
 	startTime := time.Now()
 	defer func() {
 		klog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
@@ -301,7 +301,7 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
 	if err != nil {
 		return err
 	}
-	quota, err := rq.rqLister.ResourceQuotas(namespace).Get(name)
+	resourceQuota, err := rq.rqLister.ResourceQuotas(namespace).Get(name)
 	if errors.IsNotFound(err) {
 		klog.Infof("Resource quota has been deleted %v", key)
 		return nil
@@ -310,11 +310,11 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
 		klog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
 		return err
 	}
-	return rq.syncResourceQuota(quota)
+	return rq.syncResourceQuota(resourceQuota)
 }
 
 // syncResourceQuota runs a complete sync of resource quota status across all known kinds
-func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err error) {
+func (rq *Controller) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err error) {
 	// quota is dirty if any part of spec hard limits differs from the status hard limits
 	statusLimitsDirty := !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)
 
@@ -329,12 +329,12 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ
 	}
 
 	hardLimits := quota.Add(v1.ResourceList{}, resourceQuota.Spec.Hard)
 
-	errors := []error{}
+	var errs []error
 	newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry, resourceQuota.Spec.ScopeSelector)
 	if err != nil {
 		// if err is non-nil, remember it to return, but continue updating status with any resources in newUsage
-		errors = append(errors, err)
+		errs = append(errs, err)
 	}
 	for key, value := range newUsage {
 		used[key] = value
@@ -358,14 +358,14 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ
 	if dirty {
 		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(context.TODO(), usage, metav1.UpdateOptions{})
 		if err != nil {
-			errors = append(errors, err)
+			errs = append(errs, err)
 		}
 	}
-	return utilerrors.NewAggregate(errors)
+	return utilerrors.NewAggregate(errs)
 }
 
 // replenishQuota is a replenishment function invoked by a controller to notify that a quota should be recalculated
-func (rq *ResourceQuotaController) replenishQuota(groupResource schema.GroupResource, namespace string) {
+func (rq *Controller) replenishQuota(groupResource schema.GroupResource, namespace string) {
 	// check if the quota controller can evaluate this groupResource, if not, ignore it altogether...
 	evaluator := rq.registry.Get(groupResource)
 	if evaluator == nil {
@@ -398,7 +398,7 @@ func (rq *ResourceQuotaController) replenishQuota(groupResource schema.GroupReso
 }
 
 // Sync periodically resyncs the controller when new resources are observed from discovery.
-func (rq *ResourceQuotaController) Sync(discoveryFunc NamespacedResourcesFunc, period time.Duration, stopCh <-chan struct{}) {
+func (rq *Controller) Sync(discoveryFunc NamespacedResourcesFunc, period time.Duration, stopCh <-chan struct{}) {
 	// Something has changed, so track the new state and perform a sync.
 	oldResources := make(map[schema.GroupVersionResource]struct{})
 	wait.Until(func() {
@@ -486,7 +486,7 @@ func waitForStopOrTimeout(stopCh <-chan struct{}, timeout time.Duration) <-chan
 // resyncMonitors starts or stops quota monitors as needed to ensure that all
 // (and only) those resources present in the map are monitored.
-func (rq *ResourceQuotaController) resyncMonitors(resources map[schema.GroupVersionResource]struct{}) error {
+func (rq *Controller) resyncMonitors(resources map[schema.GroupVersionResource]struct{}) error {
 	if rq.quotaMonitor == nil {
 		return nil
 	}
@@ -510,7 +510,7 @@ func GetQuotableResources(discoveryFunc NamespacedResourcesFunc) (map[schema.Gro
 	quotableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"create", "list", "watch", "delete"}}, possibleResources)
 	quotableGroupVersionResources, err := discovery.GroupVersionResources(quotableResources)
 	if err != nil {
-		return nil, fmt.Errorf("Failed to parse resources: %v", err)
+		return nil, fmt.Errorf("failed to parse resources: %v", err)
 	}
 	// return the original discovery error (if any) in addition to the list
 	return quotableGroupVersionResources, discoveryErr
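Besides the receiver renames, this file collects three recurring lint fixes: error strings start lowercase with no trailing punctuation (they are usually wrapped by callers), the quota import alias is dropped because it matches the package's own name, and the local errors slice becomes errs so it no longer shadows the standard errors package. A minimal self-contained sketch of those conventions (names here are invented for illustration):

	package lintdemo

	import (
		"fmt"

		utilerrors "k8s.io/apimachinery/pkg/util/errors"
	)

	// keyFor follows the golint error-string rule: lowercase, no period.
	func keyFor(obj interface{}) (string, error) {
		return "", fmt.Errorf("couldn't get key for object %+v", obj)
	}

	// runAll names its slice errs: "errors" would shadow the stdlib package,
	// and `var errs []error` is preferred over `errs := []error{}` when the
	// slice starts out empty.
	func runAll(steps []func() error) error {
		var errs []error
		for _, step := range steps {
			if err := step(); err != nil {
				errs = append(errs, err)
			}
		}
		// NewAggregate returns nil for an empty slice, as in syncResourceQuota.
		return utilerrors.NewAggregate(errs)
	}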

pkg/controller/resourcequota/resource_quota_controller_test.go

@@ -39,7 +39,7 @@ import (
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/pkg/controller"
-	quota "k8s.io/kubernetes/pkg/quota/v1"
+	"k8s.io/kubernetes/pkg/quota/v1"
 	"k8s.io/kubernetes/pkg/quota/v1/generic"
 	"k8s.io/kubernetes/pkg/quota/v1/install"
 )
@@ -102,7 +102,7 @@ func (errorLister) ByNamespace(namespace string) cache.GenericNamespaceLister {
 }
 
 type quotaController struct {
-	*ResourceQuotaController
+	*Controller
 	stop chan struct{}
 }
@@ -111,7 +111,7 @@ func setupQuotaController(t *testing.T, kubeClient kubernetes.Interface, lister
 	quotaConfiguration := install.NewQuotaConfigurationForControllers(lister)
 	alwaysStarted := make(chan struct{})
 	close(alwaysStarted)
-	resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
+	resourceQuotaControllerOptions := &ControllerOptions{
 		QuotaClient: kubeClient.CoreV1(),
 		ResourceQuotaInformer: informerFactory.Core().V1().ResourceQuotas(),
 		ResyncPeriod: controller.NoResyncPeriodFunc,
@@ -122,7 +122,7 @@ func setupQuotaController(t *testing.T, kubeClient kubernetes.Interface, lister
 		InformersStarted: alwaysStarted,
 		InformerFactory: informerFactory,
 	}
-	qc, err := NewResourceQuotaController(resourceQuotaControllerOptions)
+	qc, err := NewController(resourceQuotaControllerOptions)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1156,15 +1156,15 @@ type fakeServerResources struct {
 	InterfaceUsedCount int
 }
 
-func (_ *fakeServerResources) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
+func (*fakeServerResources) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
 	return nil, nil
 }
 
-func (_ *fakeServerResources) ServerResources() ([]*metav1.APIResourceList, error) {
+func (*fakeServerResources) ServerResources() ([]*metav1.APIResourceList, error) {
 	return nil, nil
 }
 
-func (_ *fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
+func (*fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
 	return nil, nil
 }
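The test-file hunks also apply one more golint rule: "receiver name should not be an underscore, omit the name if it is unused". Dropping the name entirely already signals that the method body never touches the receiver:

	// Redundant:   func (_ *fakeServerResources) ServerResources() ...
	// Idiomatic:
	func (*fakeServerResources) ServerResources() ([]*metav1.APIResourceList, error) {
		return nil, nil
	}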

pkg/controller/resourcequota/resource_quota_monitor.go

@@ -33,7 +33,7 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/controller"
-	quota "k8s.io/kubernetes/pkg/quota/v1"
+	"k8s.io/kubernetes/pkg/quota/v1"
 	"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
 	"k8s.io/kubernetes/pkg/quota/v1/generic"
 )
@@ -66,6 +66,7 @@ type event struct {
 	gvr schema.GroupVersionResource
 }
 
+// QuotaMonitor contains all necessary information to track quotas and trigger replenishments
 type QuotaMonitor struct {
 	// each monitor list/watches a resource and determines if we should replenish quota
 	monitors monitors
@@ -101,7 +102,8 @@ type QuotaMonitor struct {
 	registry quota.Registry
 }
 
-func NewQuotaMonitor(informersStarted <-chan struct{}, informerFactory controller.InformerFactory, ignoredResources map[schema.GroupResource]struct{}, resyncPeriod controller.ResyncPeriodFunc, replenishmentFunc ReplenishmentFunc, registry quota.Registry) *QuotaMonitor {
+// NewMonitor creates a new instance of a QuotaMonitor
+func NewMonitor(informersStarted <-chan struct{}, informerFactory controller.InformerFactory, ignoredResources map[schema.GroupResource]struct{}, resyncPeriod controller.ResyncPeriodFunc, replenishmentFunc ReplenishmentFunc, registry quota.Registry) *QuotaMonitor {
 	return &QuotaMonitor{
 		informersStarted: informersStarted,
 		informerFactory: informerFactory,
@@ -131,8 +133,6 @@ func (m *monitor) Run() {
 type monitors map[schema.GroupVersionResource]*monitor
 
 func (qm *QuotaMonitor) controllerFor(resource schema.GroupVersionResource) (cache.Controller, error) {
-	// TODO: pass this down
-	clock := clock.RealClock{}
 	handlers := cache.ResourceEventHandlerFuncs{
 		UpdateFunc: func(oldObj, newObj interface{}) {
 			// TODO: leaky abstraction! live w/ it for now, but should pass down an update filter func.
@@ -142,7 +142,7 @@ func (qm *QuotaMonitor) controllerFor(resource schema.GroupVersionResource) (cac
 			case schema.GroupResource{Resource: "pods"}:
 				oldPod := oldObj.(*v1.Pod)
 				newPod := newObj.(*v1.Pod)
-				notifyUpdate = core.QuotaV1Pod(oldPod, clock) && !core.QuotaV1Pod(newPod, clock)
+				notifyUpdate = core.QuotaV1Pod(oldPod, clock.RealClock{}) && !core.QuotaV1Pod(newPod, clock.RealClock{})
 			case schema.GroupResource{Resource: "services"}:
 				oldService := oldObj.(*v1.Service)
 				newService := newObj.(*v1.Service)
@@ -199,7 +199,7 @@ func (qm *QuotaMonitor) SyncMonitors(resources map[schema.GroupVersionResource]s
 		toRemove = monitors{}
 	}
 	current := monitors{}
-	errs := []error{}
+	var errs []error
 	kept := 0
 	added := 0
 	for resource := range resources {
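Two patterns recur in the monitor file: the constructor rename (NewQuotaMonitor becomes NewMonitor, with golint's required doc comment), and removal of a local variable named clock that shadowed the imported clock package, making clock.RealClock unreachable for the rest of the function; the value is now constructed at the point of use. A small sketch of the shadowing problem (assuming the apimachinery util/clock package used at the time):

	package clockdemo

	import (
		"time"

		"k8s.io/apimachinery/pkg/util/clock"
	)

	func now() time.Time {
		// Fine: the package identifier "clock" is still visible.
		return clock.RealClock{}.Now()
	}

	func shadowed() time.Time {
		c := clock.RealClock{} // use a distinct variable name...
		// clock := clock.RealClock{} // ...because this would hide the
		// package, so clock.RealClock could not be referenced below it.
		return c.Now()
	}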

test/integration/quota/quota_test.go

@@ -102,7 +102,7 @@ func TestQuota(t *testing.T) {
 	listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
 	qc := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource)
 	informersStarted := make(chan struct{})
-	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
+	resourceQuotaControllerOptions := &resourcequotacontroller.ControllerOptions{
 		QuotaClient: clientset.CoreV1(),
 		ResourceQuotaInformer: informers.Core().V1().ResourceQuotas(),
 		ResyncPeriod: controller.NoResyncPeriodFunc,
@@ -113,7 +113,7 @@ func TestQuota(t *testing.T) {
 		InformersStarted: informersStarted,
 		Registry: generic.NewRegistry(qc.Evaluators()),
 	}
-	resourceQuotaController, err := resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions)
+	resourceQuotaController, err := resourcequotacontroller.NewController(resourceQuotaControllerOptions)
 	if err != nil {
 		t.Fatalf("unexpected err: %v", err)
 	}
@@ -300,7 +300,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
 	listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
 	qc := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource)
 	informersStarted := make(chan struct{})
-	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
+	resourceQuotaControllerOptions := &resourcequotacontroller.ControllerOptions{
 		QuotaClient: clientset.CoreV1(),
 		ResourceQuotaInformer: informers.Core().V1().ResourceQuotas(),
 		ResyncPeriod: controller.NoResyncPeriodFunc,
@@ -311,7 +311,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
 		InformersStarted: informersStarted,
 		Registry: generic.NewRegistry(qc.Evaluators()),
 	}
-	resourceQuotaController, err := resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions)
+	resourceQuotaController, err := resourcequotacontroller.NewController(resourceQuotaControllerOptions)
 	if err != nil {
 		t.Fatalf("unexpected err: %v", err)
 	}