Substitute boolean maps with String sets

Deepak Nair 2021-02-18 14:53:00 -08:00
parent 20b03f6365
commit 49a9e28dd8
6 changed files with 36 additions and 42 deletions
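For context: sets.String, from k8s.io/apimachinery/pkg/util/sets, is a string set backed by map[string]Empty, so it can be built with make() or sets.NewString() and queried without the two-value lookup a boolean map needs. A minimal standalone sketch of the operations this commit leans on (not part of the diff):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	expected := sets.NewString("node1", "node4") // literal construction, as in the tests below
	s := make(sets.String)                       // sets.String is a map type, so make works
	s.Insert("node1")                            // replaces s["node1"] = true
	fmt.Println(s.Has("node1"), s.Has("node2"))  // true false; replaces the two-value lookup
	fmt.Println(expected.Union(nil).List())      // Union(nil) clones; List() returns a sorted slice
}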

View File

@@ -1331,11 +1331,10 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
func TestNodesWherePreemptionMightHelp(t *testing.T) {
// Prepare 4 nodes names.
nodeNames := []string{"node1", "node2", "node3", "node4"}
tests := []struct {
name string
nodesStatuses framework.NodeToStatusMap
-expected map[string]bool // set of expected node names. Value is ignored.
+expected sets.String // set of expected node names.
}{
{
name: "No node should be attempted",
@@ -1345,7 +1344,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
"node4": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodelabel.ErrReasonPresenceViolated),
},
-expected: map[string]bool{},
+expected: sets.NewString(),
},
{
name: "ErrReasonAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
@@ -1354,7 +1353,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
"node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnschedulable),
},
-expected: map[string]bool{"node1": true, "node4": true},
+expected: sets.NewString("node1", "node4"),
},
{
name: "pod with both pod affinity and anti-affinity should be tried",
@@ -1362,7 +1361,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node1": framework.NewStatus(framework.Unschedulable, interpodaffinity.ErrReasonAffinityNotMatch),
"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
},
-expected: map[string]bool{"node1": true, "node3": true, "node4": true},
+expected: sets.NewString("node1", "node3", "node4"),
},
{
name: "ErrReasonAffinityRulesNotMatch should not be tried as it indicates that the pod is unschedulable due to inter-pod affinity, but ErrReasonAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
@@ -1370,7 +1369,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, interpodaffinity.ErrReasonAffinityRulesNotMatch),
"node2": framework.NewStatus(framework.Unschedulable, interpodaffinity.ErrReasonAffinityNotMatch),
},
-expected: map[string]bool{"node2": true, "node3": true, "node4": true},
+expected: sets.NewString("node2", "node3", "node4"),
},
{
name: "Mix of failed predicates works fine",
@@ -1378,14 +1377,14 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumerestrictions.ErrReasonDiskConflict),
"node2": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Insufficient %v", v1.ResourceMemory)),
},
-expected: map[string]bool{"node2": true, "node3": true, "node4": true},
+expected: sets.NewString("node2", "node3", "node4"),
},
{
name: "Node condition errors should be considered unresolvable",
nodesStatuses: framework.NodeToStatusMap{
"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnknownCondition),
},
-expected: map[string]bool{"node2": true, "node3": true, "node4": true},
+expected: sets.NewString("node2", "node3", "node4"),
},
{
name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
@@ -1394,7 +1393,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonNodeConflict)),
"node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonBindConflict)),
},
-expected: map[string]bool{"node4": true},
+expected: sets.NewString("node4"),
},
{
name: "ErrReasonConstraintsNotMatch should be tried as it indicates that the pod is unschedulable due to topology spread constraints",
@@ -1403,7 +1402,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
"node3": framework.NewStatus(framework.Unschedulable, podtopologyspread.ErrReasonConstraintsNotMatch),
},
-expected: map[string]bool{"node1": true, "node3": true, "node4": true},
+expected: sets.NewString("node1", "node3", "node4"),
},
{
name: "UnschedulableAndUnresolvable status should be skipped but Unschedulable should be tried",
@@ -1412,7 +1411,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node3": framework.NewStatus(framework.Unschedulable, ""),
"node4": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
},
-expected: map[string]bool{"node1": true, "node3": true},
+expected: sets.NewString("node1", "node3"),
},
{
name: "ErrReasonNodeLabelNotMatch should not be tried as it indicates that the pod is unschedulable due to node doesn't have the required label",
@@ -1421,7 +1420,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node3": framework.NewStatus(framework.Unschedulable, ""),
"node4": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
},
-expected: map[string]bool{"node1": true, "node3": true},
+expected: sets.NewString("node1", "node3"),
},
}
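Since expected is now a sets.String, the assertion in the (elided) body of this test can compare whole sets instead of walking a boolean map. A hedged sketch of that shape, assuming a got set collected from the candidate nodes returned by the function under test:

got := sets.NewString()
for _, n := range nodes { // hypothetical: the returned candidate nodes
	got.Insert(n.Node().Name)
}
if !got.Equal(tt.expected) {
	t.Errorf("expected nodes %v, got %v", tt.expected.List(), got.List())
}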

View File

@@ -27,6 +27,7 @@ import (
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
corelisters "k8s.io/client-go/listers/core/v1"
@@ -202,7 +203,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod
return nil
}
-newVolumes := make(map[string]bool)
+newVolumes := make(sets.String)
if err := pl.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
return framework.AsStatus(err)
}
@@ -234,7 +235,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod
}
// count unique volumes
-existingVolumes := make(map[string]bool)
+existingVolumes := make(sets.String)
for _, existingPod := range nodeInfo.Pods {
if err := pl.filterVolumes(existingPod.Pod.Spec.Volumes, existingPod.Pod.Namespace, existingVolumes); err != nil {
return framework.AsStatus(err)
@@ -266,11 +267,11 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod
return nil
}
-func (pl *nonCSILimits) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error {
+func (pl *nonCSILimits) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes sets.String) error {
for i := range volumes {
vol := &volumes[i]
if id, ok := pl.filter.FilterVolume(vol); ok {
-filteredVolumes[id] = true
+filteredVolumes.Insert(id)
} else if vol.PersistentVolumeClaim != nil {
pvcName := vol.PersistentVolumeClaim.ClaimName
if pvcName == "" {
@@ -298,7 +299,7 @@ func (pl *nonCSILimits) filterVolumes(volumes []v1.Volume, namespace string, fil
// it belongs to the running predicate.
if pl.matchProvisioner(pvc) {
klog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
-filteredVolumes[pvID] = true
+filteredVolumes.Insert(pvID)
}
continue
}
@@ -309,13 +310,13 @@ func (pl *nonCSILimits) filterVolumes(volumes []v1.Volume, namespace string, fil
// log the error and count the PV towards the PV limit.
if pl.matchProvisioner(pvc) {
klog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
-filteredVolumes[pvID] = true
+filteredVolumes.Insert(pvID)
}
continue
}
if id, ok := pl.filter.FilterPersistentVolume(pv); ok {
-filteredVolumes[id] = true
+filteredVolumes.Insert(id)
}
}
}
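A note on the "count unique volumes" comment above: Insert is idempotent, so duplicates collapse and the unique count (computed in a portion of the file not shown here) is simply the set's length. A standalone illustration:

vols := make(sets.String)
for _, id := range []string{"vol-a", "vol-b", "vol-a"} {
	vols.Insert(id) // the second "vol-a" is a no-op
}
fmt.Println(vols.Len()) // 2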

View File

@@ -9,6 +9,7 @@ go_library(
"//pkg/scheduler/framework:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)

View File

@@ -21,6 +21,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
@@ -89,10 +90,10 @@ func haveOverlap(a1, a2 []string) bool {
if len(a1) > len(a2) {
a1, a2 = a2, a1
}
-m := map[string]bool{}
+m := make(sets.String)
for _, val := range a1 {
-m[val] = true
+m.Insert(val)
}
for _, val := range a2 {
if _, ok := m[val]; ok {
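The context line above (if _, ok := m[val]; ok) still compiles unchanged because sets.String is itself a map type, though m.Has(val) would be the idiomatic spelling. The whole helper could even collapse to a one-liner via HasAny; an alternative sketch, not what this commit does:

func haveOverlap(a1, a2 []string) bool {
	if len(a1) > len(a2) {
		a1, a2 = a2, a1 // build the set from the shorter slice
	}
	return sets.NewString(a1...).HasAny(a2...)
}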

View File

@@ -63,7 +63,7 @@ type schedulerCache struct {
mu sync.RWMutex
// a set of assumed pod keys.
// The key could further be used to get an entry in podStates.
-assumedPods map[string]bool
+assumedPods sets.String
// a map from pod key to podState.
podStates map[string]*podState
nodes map[string]*nodeInfoListItem
@@ -106,7 +106,7 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul
nodes: make(map[string]*nodeInfoListItem),
nodeTree: newNodeTree(nil),
-assumedPods: make(map[string]bool),
+assumedPods: make(sets.String),
podStates: make(map[string]*podState),
imageStates: make(map[string]*imageState),
}
@@ -183,14 +183,9 @@ func (cache *schedulerCache) Dump() *Dump {
nodes[k] = v.info.Clone()
}
-assumedPods := make(map[string]bool, len(cache.assumedPods))
-for k, v := range cache.assumedPods {
-assumedPods[k] = v
-}
return &Dump{
Nodes: nodes,
-AssumedPods: assumedPods,
+AssumedPods: cache.assumedPods.Union(nil),
}
}
@@ -375,7 +370,7 @@ func (cache *schedulerCache) AssumePod(pod *v1.Pod) error {
pod: pod,
}
cache.podStates[key] = ps
-cache.assumedPods[key] = true
+cache.assumedPods.Insert(key)
return nil
}
@@ -395,7 +390,7 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
klog.V(5).Infof("Finished binding for pod %v. Can be expired.", key)
currState, ok := cache.podStates[key]
-if ok && cache.assumedPods[key] {
+if ok && cache.assumedPods.Has(key) {
dl := now.Add(cache.ttl)
currState.bindingFinished = true
currState.deadline = &dl
@@ -419,7 +414,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
switch {
// Only assumed pod can be forgotten.
-case ok && cache.assumedPods[key]:
+case ok && cache.assumedPods.Has(key):
err := cache.removePod(pod)
if err != nil {
return err
@@ -484,7 +479,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
currState, ok := cache.podStates[key]
switch {
-case ok && cache.assumedPods[key]:
+case ok && cache.assumedPods.Has(key):
if currState.pod.Spec.NodeName != pod.Spec.NodeName {
// The pod was added to a different node than it was assumed to.
klog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
@@ -523,7 +518,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
switch {
// An assumed pod won't have Update/Remove event. It needs to have Add event
// before Update event, in which case the state would change from Assumed to Added.
-case ok && !cache.assumedPods[key]:
+case ok && !cache.assumedPods.Has(key):
if currState.pod.Spec.NodeName != newPod.Spec.NodeName {
klog.Errorf("Pod %v updated on a different node than previously added to.", key)
klog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
@@ -551,7 +546,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
switch {
// An assumed pod won't have Delete/Remove event. It needs to have Add event
// before Remove event, in which case the state would change from Assumed to Added.
-case ok && !cache.assumedPods[key]:
+case ok && !cache.assumedPods.Has(key):
if currState.pod.Spec.NodeName != pod.Spec.NodeName {
klog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
klog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
@@ -576,11 +571,7 @@ func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
cache.mu.RLock()
defer cache.mu.RUnlock()
-b, found := cache.assumedPods[key]
-if !found {
-return false, nil
-}
-return b, nil
+return cache.assumedPods.Has(key), nil
}
// GetPod might return a pod for which its node has already been deleted from
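Two idioms above deserve a note. First, cache.assumedPods.Union(nil) in Dump is a clone: union with the empty (nil) set copies every key into a fresh set, so the dump stays independent of later cache mutations. Second, Has in IsAssumedPod absorbs the old found/value dance, since a set, unlike a boolean map, cannot hold a false entry. A standalone illustration of the clone semantics:

original := sets.NewString("pod-a")
snapshot := original.Union(nil) // independent copy
original.Insert("pod-b")
fmt.Println(snapshot.Has("pod-b")) // false: the snapshot is unaffected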

View File

@@ -17,7 +17,8 @@ limitations under the License.
package cache
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
@@ -114,6 +115,6 @@ type Cache interface {
// Dump is a dump of the cache state.
type Dump struct {
-AssumedPods map[string]bool
+AssumedPods sets.String
Nodes map[string]*framework.NodeInfo
}