Merge pull request #95113 from Git-Jiro/lint_ttlcontroller

Lint ttl_controller
Kubernetes Prow Robot 2020-10-13 22:51:53 -07:00 committed by GitHub
commit 8647eece9c
3 changed files with 19 additions and 17 deletions
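
The functional change is a rename of the exported type from TTLController to Controller; the constructor NewTTLController keeps its name. Because the type lives in package ttl, other packages referred to it as ttl.TTLController, which golint reports as a stuttering name, and that is presumably why pkg/controller/ttl could be dropped from the lint-failure allowlist in the first file below. A minimal toy illustration of the rule (not code from this PR):

    // Package ttl here is a toy stand-in for pkg/controller/ttl, used only to
    // illustrate golint's stutter check.
    package ttl

    // A type named TTLController is seen by callers as ttl.TTLController, and
    // golint reports roughly: "type name will be used as ttl.TTLController by
    // other packages, and that stutters; consider calling this Controller".
    // Renaming it keeps the qualified name readable:
    type Controller struct{}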

View File

@@ -78,7 +78,6 @@ pkg/controller/resourcequota/config/v1alpha1
 pkg/controller/serviceaccount/config/v1alpha1
 pkg/controller/statefulset
 pkg/controller/statefulset/config/v1alpha1
-pkg/controller/ttl
 pkg/controller/ttlafterfinished/config/v1alpha1
 pkg/controller/volume/attachdetach
 pkg/controller/volume/attachdetach/config/v1alpha1

View File

@@ -52,7 +52,8 @@ import (
 	"k8s.io/klog/v2"
 )
 
-type TTLController struct {
+// Controller sets ttl annotations on nodes, based on cluster size.
+type Controller struct {
 	kubeClient clientset.Interface
 
 	// nodeStore is a local cache of nodes.
@@ -76,8 +77,9 @@ type TTLController struct {
 	boundaryStep int
 }
 
-func NewTTLController(nodeInformer informers.NodeInformer, kubeClient clientset.Interface) *TTLController {
-	ttlc := &TTLController{
+// NewTTLController creates a new TTLController
+func NewTTLController(nodeInformer informers.NodeInformer, kubeClient clientset.Interface) *Controller {
+	ttlc := &Controller{
 		kubeClient: kubeClient,
 		queue:      workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ttlcontroller"),
 	}
@@ -111,7 +113,8 @@ var (
 	}
 )
 
-func (ttlc *TTLController) Run(workers int, stopCh <-chan struct{}) {
+// Run begins watching and syncing.
+func (ttlc *Controller) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer ttlc.queue.ShutDown()
@@ -129,7 +132,7 @@ func (ttlc *TTLController) Run(workers int, stopCh <-chan struct{}) {
 	<-stopCh
 }
 
-func (ttlc *TTLController) addNode(obj interface{}) {
+func (ttlc *Controller) addNode(obj interface{}) {
 	node, ok := obj.(*v1.Node)
 	if !ok {
 		utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", obj))
@@ -148,7 +151,7 @@ func (ttlc *TTLController) addNode(obj interface{}) {
 	ttlc.enqueueNode(node)
 }
 
-func (ttlc *TTLController) updateNode(_, newObj interface{}) {
+func (ttlc *Controller) updateNode(_, newObj interface{}) {
 	node, ok := newObj.(*v1.Node)
 	if !ok {
 		utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj))
@@ -162,7 +165,7 @@ func (ttlc *TTLController) updateNode(_, newObj interface{}) {
 	ttlc.enqueueNode(node)
 }
 
-func (ttlc *TTLController) deleteNode(obj interface{}) {
+func (ttlc *Controller) deleteNode(obj interface{}) {
 	_, ok := obj.(*v1.Node)
 	if !ok {
 		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
@@ -189,7 +192,7 @@ func (ttlc *TTLController) deleteNode(obj interface{}) {
 	// We are not processing the node, as it no longer exists.
 }
 
-func (ttlc *TTLController) enqueueNode(node *v1.Node) {
+func (ttlc *Controller) enqueueNode(node *v1.Node) {
 	key, err := controller.KeyFunc(node)
 	if err != nil {
 		klog.Errorf("Couldn't get key for object %+v", node)
@@ -198,12 +201,12 @@ func (ttlc *TTLController) enqueueNode(node *v1.Node) {
 	ttlc.queue.Add(key)
 }
 
-func (ttlc *TTLController) worker() {
+func (ttlc *Controller) worker() {
 	for ttlc.processItem() {
 	}
 }
 
-func (ttlc *TTLController) processItem() bool {
+func (ttlc *Controller) processItem() bool {
 	key, quit := ttlc.queue.Get()
 	if quit {
 		return false
@@ -221,7 +224,7 @@ func (ttlc *TTLController) processItem() bool {
 	return true
 }
 
-func (ttlc *TTLController) getDesiredTTLSeconds() int {
+func (ttlc *Controller) getDesiredTTLSeconds() int {
 	ttlc.lock.RLock()
 	defer ttlc.lock.RUnlock()
 	return ttlc.desiredTTLSeconds
@@ -251,7 +254,7 @@ func setIntAnnotation(node *v1.Node, annotationKey string, value int) {
 	node.Annotations[annotationKey] = strconv.Itoa(value)
 }
 
-func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey string, value int) error {
+func (ttlc *Controller) patchNodeWithAnnotation(node *v1.Node, annotationKey string, value int) error {
 	oldData, err := json.Marshal(node)
 	if err != nil {
 		return err
@@ -274,7 +277,7 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey
 	return nil
 }
 
-func (ttlc *TTLController) updateNodeIfNeeded(key string) error {
+func (ttlc *Controller) updateNodeIfNeeded(key string) error {
 	node, err := ttlc.nodeStore.Get(key)
 	if err != nil {
 		if apierrors.IsNotFound(err) {
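
The exported surface of the controller is otherwise unchanged, so callers only see the new type name on the constructor's return value. A rough sketch of how the renamed controller could be wired up and started, using a fake clientset and a shared informer factory purely for illustration (this wiring is not part of the PR):

    package main

    import (
    	"k8s.io/client-go/informers"
    	"k8s.io/client-go/kubernetes/fake"

    	"k8s.io/kubernetes/pkg/controller/ttl"
    )

    func main() {
    	client := fake.NewSimpleClientset()
    	factory := informers.NewSharedInformerFactory(client, 0)

    	// The constructor keeps its old name; only its return type changed,
    	// from *ttl.TTLController to *ttl.Controller.
    	var ttlController *ttl.Controller = ttl.NewTTLController(factory.Core().V1().Nodes(), client)

    	stop := make(chan struct{})
    	defer close(stop)
    	factory.Start(stop)

    	// Run blocks until the stop channel is closed.
    	ttlController.Run(5, stop)
    }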

View File

@@ -75,7 +75,7 @@ func TestPatchNode(t *testing.T) {
 	for i, testCase := range testCases {
 		fakeClient := &fake.Clientset{}
-		ttlController := &TTLController{
+		ttlController := &Controller{
 			kubeClient: fakeClient,
 		}
 		err := ttlController.patchNodeWithAnnotation(testCase.node, v1.ObjectTTLAnnotationKey, testCase.ttlSeconds)
@@ -132,7 +132,7 @@ func TestUpdateNodeIfNeeded(t *testing.T) {
 		fakeClient := &fake.Clientset{}
 		nodeStore := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
 		nodeStore.Add(testCase.node)
-		ttlController := &TTLController{
+		ttlController := &Controller{
 			kubeClient:        fakeClient,
 			nodeStore:         listers.NewNodeLister(nodeStore),
 			desiredTTLSeconds: testCase.desiredTTL,
@@ -213,7 +213,7 @@ func TestDesiredTTL(t *testing.T) {
 	}
 
 	for i, testCase := range testCases {
-		ttlController := &TTLController{
+		ttlController := &Controller{
 			queue:             workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
 			nodeCount:         testCase.nodeCount,
 			desiredTTLSeconds: testCase.desiredTTL,