Mirror of https://github.com/kubernetes/client-go.git, synced 2025-09-13 05:41:35 +00:00
Replace deprecated sets.String with sets.Set for Index type
Update initialization in the store's Update func to use sets.Set, and update tests to use sets.New instead of sets.NewString. Updated files: store_test, index_test, controller_test, delta_fifo, expiration_cache_fakes, thread_safe_store, events_cache, thread_safe_store_test, expiration_cache_test. Also includes a small refactor of a for loop and unexports the Index type (Index -> index).

Kubernetes-commit: c08b9ab3b5c78023e46ce03fde894b24533c68ef
Committed by: Kubernetes Publisher. Parent: f8bdeaf0f3. Commit: c28edcd52c.
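For readers unfamiliar with the two APIs in k8s.io/apimachinery/pkg/util/sets, here is a minimal sketch of the migration pattern this commit applies. The variable names are illustrative only; the deprecated string-specific calls are shown in comments next to their generic replacements.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Construction: sets.NewString("a", "b") becomes the generic sets.New,
	// which infers sets.Set[string] from its arguments.
	keys := sets.New("a", "b")

	// Zero-value literals: sets.String{} becomes sets.Set[string]{}.
	found := sets.Set[string]{}
	found.Insert("c") // Insert/Has/Delete/Equal keep the same method names.

	// Sorted listing moves from a method to a package function:
	// keys.List() becomes sets.List(keys).
	fmt.Println(sets.List(keys), sets.List(found))

	// Key set of a map: sets.StringKeySet(m) becomes sets.KeySet(m).
	m := map[string]int{"x": 1, "y": 2}
	fmt.Println(sets.List(sets.KeySet(m)))
}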
tools/cache/controller_test.go (vendored, 14 lines changed)
@@ -110,12 +110,12 @@ func Example() {
 	}
 
 	// Let's wait for the controller to process the things we just added.
-	outputSet := sets.String{}
+	outputSet := sets.Set[string]{}
 	for i := 0; i < len(testIDs); i++ {
 		outputSet.Insert(<-deletionCounter)
 	}
 
-	for _, key := range outputSet.List() {
+	for _, key := range sets.List(outputSet) {
 		fmt.Println(key)
 	}
 	// Output:
@@ -168,12 +168,12 @@ func ExampleNewInformer() {
 	}
 
 	// Let's wait for the controller to process the things we just added.
-	outputSet := sets.String{}
-	for i := 0; i < len(testIDs); i++ {
+	outputSet := sets.Set[string]{}
+	for range testIDs {
 		outputSet.Insert(<-deletionCounter)
 	}
 
-	for _, key := range outputSet.List() {
+	for _, key := range sets.List(outputSet) {
 		fmt.Println(key)
 	}
 	// Output:
@@ -250,7 +250,7 @@ func TestHammerController(t *testing.T) {
 	go func() {
 		defer wg.Done()
 		// Let's add a few objects to the source.
-		currentNames := sets.String{}
+		currentNames := sets.Set[string]{}
 		rs := rand.NewSource(rand.Int63())
 		f := randfill.New().NilChance(.5).NumElements(0, 2).RandSource(rs)
 		for i := 0; i < 100; i++ {
@@ -260,7 +260,7 @@ func TestHammerController(t *testing.T) {
 				f.Fill(&name)
 				isNew = true
 			} else {
-				l := currentNames.List()
+				l := sets.List(currentNames)
 				name = l[rand.Intn(len(l))]
 			}
tools/cache/delta_fifo.go (vendored, 2 lines changed)
@@ -554,7 +554,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
 	f.lock.Lock()
 	defer f.lock.Unlock()
-	keys := make(sets.String, len(list))
+	keys := make(sets.Set[string], len(list))
 
 	// keep backwards compat for old clients
 	action := Sync
tools/cache/expiration_cache_fakes.go (vendored, 2 lines changed)
@@ -35,7 +35,7 @@ func (c *fakeThreadSafeMap) Delete(key string) {
 
 // FakeExpirationPolicy keeps the list for keys which never expires.
 type FakeExpirationPolicy struct {
-	NeverExpire     sets.String
+	NeverExpire     sets.Set[string]
 	RetrieveKeyFunc KeyFunc
 }
tools/cache/expiration_cache_test.go (vendored, 8 lines changed)
@@ -33,7 +33,7 @@ func TestTTLExpirationBasic(t *testing.T) {
 	ttlStore := NewFakeExpirationStore(
 		testStoreKeyFunc, deleteChan,
 		&FakeExpirationPolicy{
-			NeverExpire: sets.NewString(),
+			NeverExpire: sets.New[string](),
 			RetrieveKeyFunc: func(obj interface{}) (string, error) {
 				return obj.(*TimestampedEntry).Obj.(testStoreObject).id, nil
 			},
@@ -66,7 +66,7 @@ func TestTTLExpirationBasic(t *testing.T) {
 func TestReAddExpiredItem(t *testing.T) {
 	deleteChan := make(chan string, 1)
 	exp := &FakeExpirationPolicy{
-		NeverExpire: sets.NewString(),
+		NeverExpire: sets.New[string](),
 		RetrieveKeyFunc: func(obj interface{}) (string, error) {
 			return obj.(*TimestampedEntry).Obj.(testStoreObject).id, nil
 		},
@@ -105,7 +105,7 @@ func TestReAddExpiredItem(t *testing.T) {
 	case <-time.After(wait.ForeverTestTimeout):
 		t.Errorf("Unexpected timeout waiting on delete")
 	}
-	exp.NeverExpire = sets.NewString(testKey)
+	exp.NeverExpire = sets.New[string](testKey)
 	item, exists, err = ttlStore.GetByKey(testKey)
 	if err != nil {
 		t.Errorf("Failed to get from store, %v", err)
@@ -129,7 +129,7 @@ func TestTTLList(t *testing.T) {
 	ttlStore := NewFakeExpirationStore(
 		testStoreKeyFunc, deleteChan,
 		&FakeExpirationPolicy{
-			NeverExpire: sets.NewString(testObjs[1].id),
+			NeverExpire: sets.New[string](testObjs[1].id),
 			RetrieveKeyFunc: func(obj interface{}) (string, error) {
 				return obj.(*TimestampedEntry).Obj.(testStoreObject).id, nil
 			},
tools/cache/index.go (vendored, 4 lines changed)
@@ -91,10 +91,10 @@ func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
 }
 
 // Index maps the indexed value to a set of keys in the store that match on that value
-type Index map[string]sets.String
+type index map[string]sets.Set[string]
 
 // Indexers maps a name to an IndexFunc
 type Indexers map[string]IndexFunc
 
 // Indices maps a name to an Index
-type Indices map[string]Index
+type Indices map[string]index
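The index type above is the data structure at the center of this change: it maps each indexed value to the set of store keys that carry that value. Below is a simplified, hypothetical sketch of populating and querying such a map of generic sets; it is not the actual threadSafeMap code, and names like byNamespace and add are made up for illustration.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// index mirrors the unexported type from index.go: indexed value -> store keys.
type index map[string]sets.Set[string]

// add records that storeKey carries the given indexed value.
func add(idx index, indexedValue, storeKey string) {
	set := idx[indexedValue]
	if set == nil {
		set = sets.Set[string]{}
		idx[indexedValue] = set
	}
	set.Insert(storeKey)
}

func main() {
	byNamespace := index{}
	add(byNamespace, "default", "default/pod-a")
	add(byNamespace, "default", "default/pod-b")
	add(byNamespace, "kube-system", "kube-system/coredns")

	// Sorted store keys for one indexed value, analogous to IndexKeys.
	fmt.Println(sets.List(byNamespace["default"]))
}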
tools/cache/index_test.go (vendored, 16 lines changed)
@@ -71,15 +71,15 @@ func TestMultiIndexKeys(t *testing.T) {
 	index.Add(pod2)
 	index.Add(pod3)
 
-	expected := map[string]sets.String{}
-	expected["ernie"] = sets.NewString("one", "tre")
-	expected["bert"] = sets.NewString("one", "two")
-	expected["elmo"] = sets.NewString("tre")
-	expected["oscar"] = sets.NewString("two")
-	expected["elmo1"] = sets.NewString()
+	expected := map[string]sets.Set[string]{}
+	expected["ernie"] = sets.New("one", "tre")
+	expected["bert"] = sets.New("one", "two")
+	expected["elmo"] = sets.New("tre")
+	expected["oscar"] = sets.New("two")
+	expected["elmo1"] = sets.Set[string]{}
 	{
 		for k, v := range expected {
-			found := sets.String{}
+			found := sets.Set[string]{}
 			indexResults, err := index.ByIndex("byUser", k)
 			if err != nil {
 				t.Errorf("Unexpected error %v", err)
@@ -88,7 +88,7 @@ func TestMultiIndexKeys(t *testing.T) {
 				found.Insert(item.(*v1.Pod).Name)
 			}
 			if !found.Equal(v) {
-				t.Errorf("missing items, index %s, expected %v but found %v", k, v.List(), found.List())
+				t.Errorf("missing items, index %s, expected %v but found %v", k, sets.List(v), sets.List(found))
 			}
 		}
 	}
tools/cache/shared_informer_test.go (vendored, 10 lines changed)
@@ -48,7 +48,7 @@ import (
 type testListener struct {
 	lock              sync.RWMutex
 	resyncPeriod      time.Duration
-	expectedItemNames sets.String
+	expectedItemNames sets.Set[string]
 	receivedItemNames []string
 	name              string
 }
@@ -56,7 +56,7 @@ type testListener struct {
 func newTestListener(name string, resyncPeriod time.Duration, expected ...string) *testListener {
 	l := &testListener{
 		resyncPeriod:      resyncPeriod,
-		expectedItemNames: sets.NewString(expected...),
+		expectedItemNames: sets.New(expected...),
 		name:              name,
 	}
 	return l
@@ -105,7 +105,7 @@ func (l *testListener) satisfiedExpectations() bool {
 	l.lock.RLock()
 	defer l.lock.RUnlock()
 
-	return sets.NewString(l.receivedItemNames...).Equal(l.expectedItemNames)
+	return sets.New(l.receivedItemNames...).Equal(l.expectedItemNames)
 }
 
 func eventHandlerCount(i SharedInformer) int {
@@ -439,8 +439,8 @@ func TestSharedInformerWatchDisruption(t *testing.T) {
 		listener.receivedItemNames = []string{}
 	}
 
-	listenerNoResync.expectedItemNames = sets.NewString("pod2", "pod3")
-	listenerResync.expectedItemNames = sets.NewString("pod1", "pod2", "pod3")
+	listenerNoResync.expectedItemNames = sets.New("pod2", "pod3")
+	listenerResync.expectedItemNames = sets.New("pod1", "pod2", "pod3")
 
 	// This calls shouldSync, which deletes noResync from the list of syncingListeners
 	clock.Step(1 * time.Second)
tools/cache/store_test.go (vendored, 18 lines changed)
@@ -56,7 +56,7 @@ func doTestStore(t *testing.T, store Store) {
 	store.Add(mkObj("c", "d"))
 	store.Add(mkObj("e", "e"))
 	{
-		found := sets.String{}
+		found := sets.Set[string]{}
 		for _, item := range store.List() {
 			found.Insert(item.(testStoreObject).val)
 		}
@@ -75,7 +75,7 @@ func doTestStore(t *testing.T, store Store) {
 	}, "0")
 
 	{
-		found := sets.String{}
+		found := sets.Set[string]{}
 		for _, item := range store.List() {
 			found.Insert(item.(testStoreObject).val)
 		}
@@ -95,17 +95,17 @@ func doTestIndex(t *testing.T, indexer Indexer) {
 	}
 
 	// Test Index
-	expected := map[string]sets.String{}
-	expected["b"] = sets.NewString("a", "c")
-	expected["f"] = sets.NewString("e")
-	expected["h"] = sets.NewString("g")
+	expected := map[string]sets.Set[string]{}
+	expected["b"] = sets.New("a", "c")
+	expected["f"] = sets.New("e")
+	expected["h"] = sets.New("g")
 	indexer.Add(mkObj("a", "b"))
 	indexer.Add(mkObj("c", "b"))
 	indexer.Add(mkObj("e", "f"))
 	indexer.Add(mkObj("g", "h"))
 	{
 		for k, v := range expected {
-			found := sets.String{}
+			found := sets.Set[string]{}
 			indexResults, err := indexer.Index("by_val", mkObj("", k))
 			if err != nil {
 				t.Errorf("Unexpected error %v", err)
@@ -113,9 +113,9 @@ func doTestIndex(t *testing.T, indexer Indexer) {
 			for _, item := range indexResults {
 				found.Insert(item.(testStoreObject).id)
 			}
-			items := v.List()
+			items := sets.List(v)
 			if !found.HasAll(items...) {
-				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
+				t.Errorf("missing items, index %s, expected %v but found %v", k, items, sets.List(found))
 			}
 		}
 	}
tools/cache/thread_safe_store.go (vendored, 34 lines changed)
@@ -70,7 +70,7 @@ func (i *storeIndex) reset() {
 	i.indices = Indices{}
 }
 
-func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.String, error) {
+func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.Set[string], error) {
 	indexFunc := i.indexers[indexName]
 	if indexFunc == nil {
 		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
@@ -82,7 +82,7 @@ func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.S
 	}
 	index := i.indices[indexName]
 
-	var storeKeySet sets.String
+	var storeKeySet sets.Set[string]
 	if len(indexedValues) == 1 {
 		// In majority of cases, there is exactly one value matching.
 		// Optimize the most common path - deduping is not needed here.
@@ -90,7 +90,7 @@ func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.S
 	} else {
 		// Need to de-dupe the return list.
 		// Since multiple keys are allowed, this can happen.
-		storeKeySet = sets.String{}
+		storeKeySet = sets.Set[string]{}
 		for _, indexedValue := range indexedValues {
 			for key := range index[indexedValue] {
 				storeKeySet.Insert(key)
@@ -101,7 +101,7 @@ func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.S
 	return storeKeySet, nil
 }
 
-func (i *storeIndex) getKeysByIndex(indexName, indexedValue string) (sets.String, error) {
+func (i *storeIndex) getKeysByIndex(indexName, indexedValue string) (sets.Set[string], error) {
 	indexFunc := i.indexers[indexName]
 	if indexFunc == nil {
 		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
@@ -121,10 +121,10 @@ func (i *storeIndex) getIndexValues(indexName string) []string {
 }
 
 func (i *storeIndex) addIndexers(newIndexers Indexers) error {
-	oldKeys := sets.StringKeySet(i.indexers)
-	newKeys := sets.StringKeySet(newIndexers)
+	oldKeys := sets.KeySet(i.indexers)
+	newKeys := sets.KeySet(newIndexers)
 
-	if oldKeys.HasAny(newKeys.List()...) {
+	if oldKeys.HasAny(sets.List(newKeys)...) {
 		return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
 	}
 
@@ -167,10 +167,10 @@ func (i *storeIndex) updateSingleIndex(name string, oldObj interface{}, newObj i
 		indexValues = indexValues[:0]
 	}
 
-	index := i.indices[name]
-	if index == nil {
-		index = Index{}
-		i.indices[name] = index
+	idx := i.indices[name]
+	if idx == nil {
+		idx = index{}
+		i.indices[name] = idx
 	}
 
 	if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
@@ -179,10 +179,10 @@ func (i *storeIndex) updateSingleIndex(name string, oldObj interface{}, newObj i
 	}
 
 	for _, value := range oldIndexValues {
-		i.deleteKeyFromIndex(key, value, index)
+		i.deleteKeyFromIndex(key, value, idx)
 	}
 	for _, value := range indexValues {
-		i.addKeyToIndex(key, value, index)
+		i.addKeyToIndex(key, value, idx)
 	}
 }
@@ -197,16 +197,16 @@ func (i *storeIndex) updateIndices(oldObj interface{}, newObj interface{}, key s
 	}
 }
 
-func (i *storeIndex) addKeyToIndex(key, indexValue string, index Index) {
+func (i *storeIndex) addKeyToIndex(key, indexValue string, index index) {
 	set := index[indexValue]
 	if set == nil {
-		set = sets.String{}
+		set = sets.Set[string]{}
 		index[indexValue] = set
 	}
 	set.Insert(key)
 }
 
-func (i *storeIndex) deleteKeyFromIndex(key, indexValue string, index Index) {
+func (i *storeIndex) deleteKeyFromIndex(key, indexValue string, index index) {
 	set := index[indexValue]
 	if set == nil {
 		return
@@ -336,7 +336,7 @@ func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, err
 	if err != nil {
 		return nil, err
 	}
-	return set.List(), nil
+	return sets.List(set), nil
 }
 
 func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string {
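In addIndexers above, the conflict check switches from sets.StringKeySet to the generic sets.KeySet. Here is a small stand-alone sketch of that check under assumed inputs; the indexer names and the bool-valued maps are made up for illustration, while the real method operates on the storeIndex receiver's Indexers maps.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Stand-ins for the existing and newly requested Indexers maps.
	existing := map[string]bool{"namespace": true}
	incoming := map[string]bool{"namespace": true, "nodeName": true}

	// sets.KeySet builds a sets.Set[string] from the keys of any map.
	oldKeys := sets.KeySet(existing)
	newKeys := sets.KeySet(incoming)

	// HasAny plus sets.List replaces the old newKeys.List() method call.
	if oldKeys.HasAny(sets.List(newKeys)...) {
		fmt.Printf("indexer conflict: %v\n", sets.List(oldKeys.Intersection(newKeys)))
	}
}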
tools/cache/thread_safe_store_test.go (vendored, 3 lines changed)
@@ -23,6 +23,7 @@ import (
 
 	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/util/sets"
 )
 
 func TestThreadSafeStoreDeleteRemovesEmptySetsFromIndex(t *testing.T) {
@@ -114,7 +115,7 @@ func TestThreadSafeStoreIndexingFunctionsWithMultipleValues(t *testing.T) {
 	assert := assert.New(t)
 
 	compare := func(key string, expected []string) error {
-		values := store.index.indices[testIndexer][key].List()
+		values := sets.List(store.index.indices[testIndexer][key])
 		if cmp.Equal(values, expected) {
 			return nil
 		}
tools/record/events_cache.go (vendored)

@@ -223,7 +223,7 @@ func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messag
 type aggregateRecord struct {
 	// we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate
 	// if the size of this set exceeds the max, we know we need to aggregate
-	localKeys sets.String
+	localKeys sets.Set[string]
 	// The last time at which the aggregate was recorded
 	lastTimestamp metav1.Time
 }
@@ -257,7 +257,7 @@ func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string)
 	maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second
 	interval := now.Time.Sub(record.lastTimestamp.Time)
 	if interval > maxInterval {
-		record = aggregateRecord{localKeys: sets.NewString()}
+		record = aggregateRecord{localKeys: sets.New[string]()}
 	}
 
 	// Write the new event into the aggregation record and put it on the cache