Merge pull request #115100 from HirazawaUi/delte-pkg-controller-unused-functions

delete unused functions in pkg/controller directory
Kubernetes Prow Robot 2023-04-11 15:35:12 -07:00 committed by GitHub
commit 32cb4a6dc5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 0 additions and 130 deletions

@@ -1,92 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"hash/fnv"
"sync"
"github.com/golang/groupcache/lru"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
hashutil "k8s.io/kubernetes/pkg/util/hash"
)
type objectWithMeta interface {
metav1.Object
}
// keyFunc returns the key of an object, which is used to look up its matching object in the cache.
// Objects are matched by namespace and labels, so two objects with the same namespace and labels
// have the same key.
func keyFunc(obj objectWithMeta) uint64 {
hash := fnv.New32a()
hashutil.DeepHashObject(hash, &equivalenceLabelObj{
namespace: obj.GetNamespace(),
labels: obj.GetLabels(),
})
return uint64(hash.Sum32())
}
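
As an aside, not part of the deleted file: because only the namespace and labels feed the hash, any two objects sharing both collide onto the same key. A minimal sketch, assuming hypothetical *v1.Pod values (k8s.io/api/core/v1) and an fmt import:

pod1 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
	Namespace: "default",
	Labels:    map[string]string{"app": "web"},
}}
pod2 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
	Namespace: "default",
	Labels:    map[string]string{"app": "web"},
}}
// Identical namespace and labels, so both pods map to the same cache entry.
fmt.Println(keyFunc(pod1) == keyFunc(pod2)) // true
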
type equivalenceLabelObj struct {
namespace string
labels map[string]string
}
// MatchingCache saves label and selector matching relationships.
type MatchingCache struct {
mutex sync.RWMutex
cache *lru.Cache
}
// NewMatchingCache returns a new MatchingCache, which saves label and selector matching relationships.
func NewMatchingCache(maxCacheEntries int) *MatchingCache {
return &MatchingCache{
cache: lru.New(maxCacheEntries),
}
}
// Add will add matching information to the cache.
func (c *MatchingCache) Add(labelObj objectWithMeta, selectorObj objectWithMeta) {
key := keyFunc(labelObj)
c.mutex.Lock()
defer c.mutex.Unlock()
c.cache.Add(key, selectorObj)
}
// GetMatchingObject looks up the matching object for a given object.
// Note: the cached information may be invalid since the controller may have been deleted or updated;
// callers must re-check in the external request to ensure the cached data is not stale.
func (c *MatchingCache) GetMatchingObject(labelObj objectWithMeta) (controller interface{}, exists bool) {
key := keyFunc(labelObj)
// NOTE: we use Lock() instead of RLock() here because lru's Get() method also modifies state
// (it updates the least-recently-used bookkeeping), so it cannot be called concurrently.
c.mutex.Lock()
defer c.mutex.Unlock()
return c.cache.Get(key)
}
// Update updates the cached matching information.
func (c *MatchingCache) Update(labelObj objectWithMeta, selectorObj objectWithMeta) {
c.Add(labelObj, selectorObj)
}
// InvalidateAll invalidates the whole cache.
func (c *MatchingCache) InvalidateAll() {
c.mutex.Lock()
defer c.mutex.Unlock()
c.cache = lru.New(c.cache.MaxEntries)
}
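
For orientation, a minimal usage sketch of the cache deleted above, written as if pkg/controller still exported it; the pod and ReplicaSet values are hypothetical and appear nowhere in this PR:

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/controller"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "web-abc12", Namespace: "default",
		Labels: map[string]string{"app": "web"},
	}}
	rs := &apps.ReplicaSet{ObjectMeta: metav1.ObjectMeta{
		Name: "web", Namespace: "default",
	}}

	cache := controller.NewMatchingCache(4096)

	// Record that rs is the controller whose selector matched pod.
	cache.Add(pod, rs)

	// A later lookup for any object with pod's namespace and labels hits the entry.
	if obj, ok := cache.GetMatchingObject(pod); ok {
		// The entry may be stale (rs could have been deleted or relabeled),
		// so a real caller re-verifies the selector before trusting it.
		fmt.Println("cached match:", obj.(*apps.ReplicaSet).Name)
	}
}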

@@ -20,20 +20,8 @@ package replication
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewReplicationControllerCondition creates a new replication controller condition.
func NewReplicationControllerCondition(condType v1.ReplicationControllerConditionType, status v1.ConditionStatus, reason, msg string) v1.ReplicationControllerCondition {
return v1.ReplicationControllerCondition{
Type: condType,
Status: status,
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: msg,
}
}
// GetCondition returns a replication controller condition with the provided type if it exists.
func GetCondition(status v1.ReplicationControllerStatus, condType v1.ReplicationControllerConditionType) *v1.ReplicationControllerCondition {
for i := range status.Conditions {

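For orientation, a short sketch of how the two deleted helpers compose. The reason and message strings are made up; v1.ReplicationControllerReplicaFailure and v1.ConditionTrue are real constants from k8s.io/api/core/v1:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/controller/replication"
)

func main() {
	cond := replication.NewReplicationControllerCondition(
		v1.ReplicationControllerReplicaFailure,
		v1.ConditionTrue,
		"FailedCreate",                       // hypothetical reason
		"pod creation was rejected by quota", // hypothetical message
	)

	status := v1.ReplicationControllerStatus{}
	status.Conditions = append(status.Conditions, cond)

	// GetCondition returns the first condition in status with the given type.
	if got := replication.GetCondition(status, v1.ReplicationControllerReplicaFailure); got != nil {
		fmt.Printf("%s=%s: %s\n", got.Type, got.Status, got.Reason)
	}
}
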
@@ -103,19 +103,6 @@ type QuotaMonitor struct {
updateFilter UpdateFilter
}
// NewMonitor creates a new instance of a QuotaMonitor
func NewMonitor(informersStarted <-chan struct{}, informerFactory informerfactory.InformerFactory, ignoredResources map[schema.GroupResource]struct{}, resyncPeriod controller.ResyncPeriodFunc, replenishmentFunc ReplenishmentFunc, registry quota.Registry) *QuotaMonitor {
return &QuotaMonitor{
informersStarted: informersStarted,
informerFactory: informerFactory,
ignoredResources: ignoredResources,
resourceChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resource_quota_controller_resource_changes"),
resyncPeriod: resyncPeriod,
replenishmentFunc: replenishmentFunc,
registry: registry,
}
}
// monitor runs a Controller with a local stop channel.
type monitor struct {
controller cache.Controller

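Most of the deleted constructor is plain field assignment; the one piece with behavior is the named rate-limiting workqueue. A standalone sketch of that pattern with the same queue name (the queued "pods" item is hypothetical):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// The same construction NewMonitor used; the name labels the queue's metrics.
	queue := workqueue.NewNamedRateLimitingQueue(
		workqueue.DefaultControllerRateLimiter(),
		"resource_quota_controller_resource_changes",
	)
	defer queue.ShutDown()

	queue.Add("pods") // a resource-change event (hypothetical)

	item, shutdown := queue.Get()
	if shutdown {
		return
	}
	fmt.Println("replenishing quota for:", item)
	queue.Forget(item) // clear any rate-limit backoff for this item
	queue.Done(item)   // mark processing finished
}
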
@@ -17,7 +17,6 @@ limitations under the License.
package statefulset
import (
"bytes"
"encoding/json"
"fmt"
"regexp"
@@ -481,18 +480,6 @@ func newVersionedStatefulSetPod(currentSet, updateSet *apps.StatefulSet, current
return pod
}
// Match checks whether the given StatefulSet's template matches the template stored in the given history.
func Match(ss *apps.StatefulSet, history *apps.ControllerRevision) (bool, error) {
// Encoding the set for the patch may update its GVK metadata, which causes data races if this
// set is in an informer cache.
clone := ss.DeepCopy()
patch, err := getPatch(clone)
if err != nil {
return false, err
}
return bytes.Equal(patch, history.Data.Raw), nil
}
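
As an aside, not part of the file: the comparison above reduces to byte equality of two serialized forms. A toy analogue with plain JSON, where the maps stand in for the saved spec.template patch:

saved, _ := json.Marshal(map[string]string{"image": "nginx:1.25"})
current, _ := json.Marshal(map[string]string{"image": "nginx:1.25"})
if bytes.Equal(saved, current) {
	// Template unchanged: reuse the existing ControllerRevision
	// instead of minting a new one.
}
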
// getPatch returns a strategic merge patch that can be applied to restore a StatefulSet to a
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously