Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-07 19:23:40 +00:00)

Merge pull request #123481 from sanposhiho/mindomain-stable

graduate MinDomainsInPodTopologySpread to stable

Commit: 6929a11f69

api/openapi-spec/swagger.json (generated)
@@ -10925,7 +10925,7 @@
           "type": "integer"
         },
         "minDomains": {
-          "description": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.\n\nThis is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).",
+          "description": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.",
           "format": "int32",
           "type": "integer"
         },

The identical one-line change — dropping the trailing "This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." sentence from the minDomains description — is repeated verbatim for the other copies of this schema in swagger.json:

@@ -7703,7 +7703,7 @@
@@ -4748,7 +4748,7 @@
@@ -3903,7 +3903,7 @@
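For reference, a minimal sketch (not part of this commit) of a constraint that uses minDomains, built against the core/v1 Go API; the topology key and label selector values are illustrative assumptions:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// With fewer than 5 matching zones, the scheduler treats the "global minimum"
	// as 0 when computing skew for this constraint.
	c := v1.TopologySpreadConstraint{
		MaxSkew:           2,
		TopologyKey:       "topology.kubernetes.io/zone", // assumed zone label
		WhenUnsatisfiable: v1.DoNotSchedule,              // required whenever MinDomains is set
		MinDomains:        ptr.To[int32](5),
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"}, // hypothetical selector
		},
	}
	fmt.Printf("minDomains=%d maxSkew=%d\n", *c.MinDomains, c.MaxSkew)
}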
@@ -559,7 +559,6 @@ func dropDisabledFields(
 
 	dropDisabledProcMountField(podSpec, oldPodSpec)
 
-	dropDisabledTopologySpreadConstraintsFields(podSpec, oldPodSpec)
 	dropDisabledNodeInclusionPolicyFields(podSpec, oldPodSpec)
 	dropDisabledMatchLabelKeysFieldInTopologySpread(podSpec, oldPodSpec)
 	dropDisabledMatchLabelKeysFieldInPodAffinity(podSpec, oldPodSpec)
@@ -737,33 +736,6 @@ func dropEphemeralResourceClaimRequests(containers []api.EphemeralContainer) {
 		}
 	}
 
-// dropDisabledTopologySpreadConstraintsFields removes disabled fields from PodSpec related
-// to TopologySpreadConstraints only if it is not already used by the old spec.
-func dropDisabledTopologySpreadConstraintsFields(podSpec, oldPodSpec *api.PodSpec) {
-	if !utilfeature.DefaultFeatureGate.Enabled(features.MinDomainsInPodTopologySpread) &&
-		!minDomainsInUse(oldPodSpec) &&
-		podSpec != nil {
-		for i := range podSpec.TopologySpreadConstraints {
-			podSpec.TopologySpreadConstraints[i].MinDomains = nil
-		}
-	}
-}
-
-// minDomainsInUse returns true if the pod spec is non-nil
-// and has non-nil MinDomains field in TopologySpreadConstraints.
-func minDomainsInUse(podSpec *api.PodSpec) bool {
-	if podSpec == nil {
-		return false
-	}
-
-	for _, c := range podSpec.TopologySpreadConstraints {
-		if c.MinDomains != nil {
-			return true
-		}
-	}
-	return false
-}
-
 // dropDisabledProcMountField removes disabled fields from PodSpec related
 // to ProcMount only if it is not already used by the old spec
 func dropDisabledProcMountField(podSpec, oldPodSpec *api.PodSpec) {
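The deleted helper implemented the standard pattern for gating a new API field: on writes, clear the field unless the gate is on or the old object already used it. A simplified, self-contained stand-in (names and signatures here are illustrative, not the real ones) shows why graduation turns it into a no-op:

package main

import "fmt"

// Simplified stand-in for the deleted dropDisabledTopologySpreadConstraintsFields:
// clear minDomains on writes only when the gate is off and the old object never used it.
type constraint struct{ minDomains *int32 }

func dropDisabledMinDomains(gateEnabled bool, newC, oldC []*constraint) {
	if gateEnabled {
		return // gate is GA and locked on, so this branch is always taken
	}
	for _, c := range oldC {
		if c.minDomains != nil {
			return // field already in use; keep it to avoid data loss on update
		}
	}
	for _, c := range newC {
		c.minDomains = nil
	}
}

func main() {
	five := int32(5)
	newObj := []*constraint{{minDomains: &five}}
	// With the gate graduated and locked to true, the helper never clears anything,
	// which is why the real function (and minDomainsInUse) could simply be deleted.
	dropDisabledMinDomains(true, newObj, nil)
	fmt.Println(newObj[0].minDomains != nil) // true: the field survives the write
}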
@ -998,173 +998,6 @@ func TestValidatePodDeletionCostOption(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestDropDisabledTopologySpreadConstraintsFields(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
enabled bool
|
||||
podSpec *api.PodSpec
|
||||
oldPodSpec *api.PodSpec
|
||||
wantPodSpec *api.PodSpec
|
||||
}{
|
||||
{
|
||||
name: "TopologySpreadConstraints is nil",
|
||||
podSpec: &api.PodSpec{},
|
||||
oldPodSpec: &api.PodSpec{},
|
||||
wantPodSpec: &api.PodSpec{},
|
||||
},
|
||||
{
|
||||
name: "TopologySpreadConstraints is empty",
|
||||
podSpec: &api.PodSpec{TopologySpreadConstraints: []api.TopologySpreadConstraint{}},
|
||||
oldPodSpec: &api.PodSpec{TopologySpreadConstraints: []api.TopologySpreadConstraint{}},
|
||||
wantPodSpec: &api.PodSpec{TopologySpreadConstraints: []api.TopologySpreadConstraint{}},
|
||||
},
|
||||
{
|
||||
name: "TopologySpreadConstraints is not empty, but all constraints don't have minDomains",
|
||||
podSpec: &api.PodSpec{TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
}},
|
||||
oldPodSpec: &api.PodSpec{TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
}},
|
||||
wantPodSpec: &api.PodSpec{TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "one constraint in podSpec has non-empty minDomains, feature gate is disabled " +
|
||||
"and all constraint in oldPodSpec doesn't have minDomains",
|
||||
enabled: false,
|
||||
podSpec: &api.PodSpec{
|
||||
TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
MinDomains: pointer.Int32(2),
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
oldPodSpec: &api.PodSpec{
|
||||
TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantPodSpec: &api.PodSpec{
|
||||
TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
// cleared.
|
||||
MinDomains: nil,
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "one constraint in podSpec has non-empty minDomains, feature gate is disabled " +
|
||||
"and one constraint in oldPodSpec has minDomains",
|
||||
enabled: false,
|
||||
podSpec: &api.PodSpec{
|
||||
TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
MinDomains: pointer.Int32(2),
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
oldPodSpec: &api.PodSpec{
|
||||
TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
MinDomains: pointer.Int32(2),
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantPodSpec: &api.PodSpec{
|
||||
TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
// not cleared.
|
||||
MinDomains: pointer.Int32(2),
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "one constraint in podSpec has non-empty minDomains, feature gate is enabled" +
|
||||
"and all constraint in oldPodSpec doesn't have minDomains",
|
||||
enabled: true,
|
||||
podSpec: &api.PodSpec{
|
||||
TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
MinDomains: pointer.Int32(2),
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
oldPodSpec: &api.PodSpec{
|
||||
TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantPodSpec: &api.PodSpec{
|
||||
TopologySpreadConstraints: []api.TopologySpreadConstraint{
|
||||
{
|
||||
// not cleared.
|
||||
MinDomains: pointer.Int32(2),
|
||||
},
|
||||
{
|
||||
MinDomains: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MinDomainsInPodTopologySpread, tc.enabled)()
|
||||
dropDisabledFields(tc.podSpec, nil, tc.oldPodSpec, nil)
|
||||
if diff := cmp.Diff(tc.wantPodSpec, tc.podSpec); diff != "" {
|
||||
t.Errorf("unexpected pod spec (-want, +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDropDisabledPodStatusFields(t *testing.T) {
|
||||
podWithHostIPs := func() *api.PodStatus {
|
||||
return &api.PodStatus{
|
||||
|
@@ -6208,8 +6208,6 @@ type TopologySpreadConstraint struct {
 	// In this situation, new pod with the same labelSelector cannot be scheduled,
 	// because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
 	// it will violate MaxSkew.
-	//
-	// This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).
 	// +optional
 	MinDomains *int32
 	// NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
@@ -488,6 +488,7 @@ const (
 	// kep: https://kep.k8s.io/3022
 	// alpha: v1.24
 	// beta: v1.25
+	// GA: v1.30
 	//
 	// Enable MinDomains in Pod Topology Spread.
 	MinDomainsInPodTopologySpread featuregate.Feature = "MinDomainsInPodTopologySpread"
@@ -1076,7 +1077,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 
 	MemoryQoS: {Default: false, PreRelease: featuregate.Alpha},
 
-	MinDomainsInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta},
+	MinDomainsInPodTopologySpread: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
 
 	MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Alpha},
 
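A small sketch (assumption: only the gate name and FeatureSpec are taken from this diff) of what GA plus LockToDefault means for operators — the gate reports enabled and can no longer be turned off via --feature-gates:

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	const minDomains featuregate.Feature = "MinDomainsInPodTopologySpread"

	gate := featuregate.NewFeatureGate()
	_ = gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		minDomains: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
	})

	fmt.Println(gate.Enabled(minDomains)) // true: on by default

	// Attempting to override a locked gate is rejected.
	err := gate.Set("MinDomainsInPodTopologySpread=false")
	fmt.Println(err != nil) // true: the override is refused
}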
pkg/generated/openapi/zz_generated.openapi.go (generated)
@@ -29983,7 +29983,7 @@ func schema_k8sio_api_core_v1_TopologySpreadConstraint(ref common.ReferenceCallb
 				},
 				"minDomains": {
 					SchemaProps: spec.SchemaProps{
-						Description: "MinDomains indicates a minimum number of eligible domains. [same text as the swagger.json description above, ending with the \"This is a beta field…\" sentence]",
+						Description: "MinDomains indicates a minimum number of eligible domains. [same text as the swagger.json description above, without the trailing beta-field sentence]",
						Type:   []string{"integer"},
						Format: "int32",
					},
@@ -22,7 +22,6 @@ package feature
 type Features struct {
 	EnableDynamicResourceAllocation              bool
 	EnableVolumeCapacityPriority                 bool
-	EnableMinDomainsInPodTopologySpread          bool
 	EnableNodeInclusionPolicyInPodTopologySpread bool
 	EnableMatchLabelKeysInPodTopologySpread      bool
 	EnablePodDisruptionConditions                bool
@@ -24,6 +24,7 @@ import (
 	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
+	"k8s.io/utils/ptr"
 )
 
 type topologyPair struct {
@@ -112,13 +113,10 @@ func (pl *PodTopologySpread) filterTopologySpreadConstraints(constraints []v1.To
 			MaxSkew:            c.MaxSkew,
 			TopologyKey:        c.TopologyKey,
 			Selector:           selector,
-			MinDomains:         1,                            // If MinDomains is nil, we treat MinDomains as 1.
+			MinDomains:         ptr.Deref(c.MinDomains, 1),   // If MinDomains is nil, we treat MinDomains as 1.
 			NodeAffinityPolicy: v1.NodeInclusionPolicyHonor,  // If NodeAffinityPolicy is nil, we treat NodeAffinityPolicy as "Honor".
 			NodeTaintsPolicy:   v1.NodeInclusionPolicyIgnore, // If NodeTaintsPolicy is nil, we treat NodeTaintsPolicy as "Ignore".
 		}
-		if pl.enableMinDomainsInPodTopologySpread && c.MinDomains != nil {
-			tsc.MinDomains = *c.MinDomains
-		}
 		if pl.enableNodeInclusionPolicyInPodTopologySpread {
 			if c.NodeAffinityPolicy != nil {
 				tsc.NodeAffinityPolicy = *c.NodeAffinityPolicy
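ptr.Deref (from k8s.io/utils/ptr) collapses the old two-step defaulting into a single expression; a short sketch of its behaviour with hypothetical values:

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// ptr.Deref(p, def) returns *p when p is non-nil and def otherwise,
	// which is exactly the "nil means 1" rule for minDomains.
	var unset *int32
	set := ptr.To[int32](5)

	fmt.Println(ptr.Deref(unset, 1)) // 1: nil falls back to the default
	fmt.Println(ptr.Deref(set, 1))   // 5: an explicit value wins
}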
@@ -51,17 +51,13 @@ type preFilterState struct {
 }
 
 // minMatchNum returns the global minimum for the calculation of skew while taking MinDomains into account.
-func (s *preFilterState) minMatchNum(tpKey string, minDomains int32, enableMinDomainsInPodTopologySpread bool) (int, error) {
+func (s *preFilterState) minMatchNum(tpKey string, minDomains int32) (int, error) {
 	paths, ok := s.TpKeyToCriticalPaths[tpKey]
 	if !ok {
 		return 0, fmt.Errorf("failed to retrieve path by topology key")
 	}
 
 	minMatchNum := paths[0].MatchNum
-	if !enableMinDomainsInPodTopologySpread {
-		return minMatchNum, nil
-	}
-
 	domainsNum, ok := s.TpKeyToDomainsNum[tpKey]
 	if !ok {
 		return 0, fmt.Errorf("failed to retrieve the number of domains by topology key")
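A simplified, self-contained sketch of the rule minMatchNum now applies unconditionally (the real method also walks TpKeyToCriticalPaths and returns errors); the numbers are the 3-zone / minDomains=5 example from the field documentation:

package main

import "fmt"

// globalMinimum mirrors the minDomains rule: if fewer domains exist than
// minDomains, the "global minimum" used for skew is forced to 0.
func globalMinimum(smallestMatchCount, domainsNum int, minDomains int32) int {
	if domainsNum < int(minDomains) {
		return 0
	}
	return smallestMatchCount
}

func main() {
	// 3 zones, each already running 2 matching pods, minDomains=5, maxSkew=2.
	gm := globalMinimum(2, 3, 5)
	skewIfScheduled := (2 + 1) - gm                       // 3: adding one pod to any zone
	fmt.Println(gm, skewIfScheduled, skewIfScheduled > 2) // 0 3 true => unschedulable
}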
@@ -296,11 +292,9 @@ func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod)
 			s.TpPairToMatchNum[tp] += count
 		}
 	}
-	if pl.enableMinDomainsInPodTopologySpread {
-		s.TpKeyToDomainsNum = make(map[string]int, len(constraints))
-		for tp := range s.TpPairToMatchNum {
-			s.TpKeyToDomainsNum[tp.key]++
-		}
-	}
+	s.TpKeyToDomainsNum = make(map[string]int, len(constraints))
+	for tp := range s.TpPairToMatchNum {
+		s.TpKeyToDomainsNum[tp.key]++
+	}
 
 	// calculate min match for each topology pair
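The now-unconditional domain counting above is just "number of distinct values per topology key"; a tiny self-contained sketch with made-up counts:

package main

import "fmt"

type topologyPair struct{ key, value string }

func main() {
	// Each distinct (key, value) pair is one domain; counting pairs per key
	// yields the per-key domain count stored in TpKeyToDomainsNum.
	matchNum := map[topologyPair]int{
		{"zone", "zone1"}:  3,
		{"zone", "zone2"}:  2,
		{"node", "node-a"}: 1,
	}
	domains := make(map[string]int, len(matchNum))
	for tp := range matchNum {
		domains[tp.key]++
	}
	fmt.Println(domains["zone"], domains["node"]) // 2 1
}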
@@ -341,7 +335,7 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C
 
 		// judging criteria:
 		// 'existing matching num' + 'if self-match (1 or 0)' - 'global minimum' <= 'maxSkew'
-		minMatchNum, err := s.minMatchNum(tpKey, c.MinDomains, pl.enableMinDomainsInPodTopologySpread)
+		minMatchNum, err := s.minMatchNum(tpKey, c.MinDomains)
 		if err != nil {
 			logger.Error(err, "Internal error occurred while retrieving value precalculated in PreFilter", "topologyKey", tpKey, "paths", s.TpKeyToCriticalPaths)
 			continue
@ -68,7 +68,6 @@ func (p *criticalPaths) sort() {
|
||||
}
|
||||
|
||||
func TestPreFilterState(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pod *v1.Pod
|
||||
@ -78,7 +77,6 @@ func TestPreFilterState(t *testing.T) {
|
||||
defaultConstraints []v1.TopologySpreadConstraint
|
||||
want *preFilterState
|
||||
wantPrefilterStatus *framework.Status
|
||||
enableMinDomains bool
|
||||
enableNodeInclusionPolicy bool
|
||||
enableMatchLabelKeys bool
|
||||
}{
|
||||
@ -104,6 +102,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 0}, {"zone2", 0}},
|
||||
},
|
||||
@ -142,6 +141,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 2}, {"zone1", 3}},
|
||||
},
|
||||
@ -180,6 +180,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 0}, {"zone1", 0}},
|
||||
},
|
||||
@ -220,6 +221,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone3", 0}, {"zone2", 2}},
|
||||
},
|
||||
@ -259,6 +261,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 1}, {"zone1", 2}},
|
||||
},
|
||||
@ -308,6 +311,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 4},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 3}, {"zone2", 4}},
|
||||
"node": {{"node-x", 0}, {"node-b", 1}},
|
||||
@ -363,6 +367,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 3}, {"zone2", 4}},
|
||||
"node": {{"node-b", 1}, {"node-a", 2}},
|
||||
@ -410,6 +415,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 0}, {"zone1", 1}},
|
||||
"node": {{"node-a", 0}, {"node-y", 0}},
|
||||
@ -462,6 +468,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 3}, {"zone2", 4}},
|
||||
"node": {{"node-b", 0}, {"node-a", 1}},
|
||||
@ -516,6 +523,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 3}, {"zone2", 4}},
|
||||
"node": {{"node-b", 1}, {"node-a", 2}},
|
||||
@ -559,6 +567,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": newCriticalPaths(),
|
||||
"rack": newCriticalPaths(),
|
||||
@ -600,6 +609,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": newCriticalPaths(),
|
||||
},
|
||||
@ -638,7 +648,6 @@ func TestPreFilterState(t *testing.T) {
|
||||
st.MakePod().Name("p-y3").Node("node-y").Label("foo", "").Obj(),
|
||||
st.MakePod().Name("p-y4").Node("node-y").Label("foo", "").Obj(),
|
||||
},
|
||||
enableMinDomains: true,
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{
|
||||
{
|
||||
@ -704,6 +713,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-a", 1}, {"node-b", 2}},
|
||||
},
|
||||
@ -742,6 +752,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-a", 1}, {"node-b", 2}},
|
||||
},
|
||||
@ -780,6 +791,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-c", 0}, {"node-a", 1}},
|
||||
},
|
||||
@ -819,6 +831,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-a", 1}, {"node-b", 2}},
|
||||
},
|
||||
@ -857,6 +870,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-c", 0}, {"node-a", 1}},
|
||||
},
|
||||
@ -895,6 +909,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-c", 0}, {"node-a", 1}},
|
||||
},
|
||||
@ -933,6 +948,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-c", 0}, {"node-a", 1}},
|
||||
},
|
||||
@ -971,6 +987,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyHonor,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-a", 1}, {"node-b", 2}},
|
||||
},
|
||||
@ -1009,6 +1026,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyHonor,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-c", 0}, {"node-a", 1}},
|
||||
},
|
||||
@ -1056,6 +1074,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 0}, {"zone2", 1}},
|
||||
"node": {{"node-a", 0}, {"node-x", 1}},
|
||||
@ -1105,6 +1124,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 0}, {"zone2", 1}},
|
||||
"node": {{"node-a", 0}, {"node-x", 1}},
|
||||
@ -1157,6 +1177,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 0}, {"zone2", 1}},
|
||||
"node": {{"node-b", 0}, {"node-x", 1}},
|
||||
@ -1209,6 +1230,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyHonor,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 2}, {"zone2", 3}},
|
||||
"node": {{"node-y", 1}, {"node-x", 2}},
|
||||
@ -1251,6 +1273,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 2}, {"zone1", 3}},
|
||||
},
|
||||
@ -1290,6 +1313,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 1}, {"zone1", 1}},
|
||||
},
|
||||
@ -1329,6 +1353,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 2}, {"zone1", 3}},
|
||||
},
|
||||
@ -1368,6 +1393,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 0}, {"zone1", 0}},
|
||||
},
|
||||
@ -1407,6 +1433,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 0}, {"zone1", 0}},
|
||||
},
|
||||
@ -1445,6 +1472,7 @@ func TestPreFilterState(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 2}, {"zone1", 3}},
|
||||
},
|
||||
@ -1475,7 +1503,6 @@ func TestPreFilterState(t *testing.T) {
|
||||
}
|
||||
|
||||
p := plugintesting.SetupPluginWithInformers(ctx, t, topologySpreadFunc, args, cache.NewSnapshot(tt.existingPods, tt.nodes), tt.objs)
|
||||
p.(*PodTopologySpread).enableMinDomainsInPodTopologySpread = tt.enableMinDomains
|
||||
p.(*PodTopologySpread).enableNodeInclusionPolicyInPodTopologySpread = tt.enableNodeInclusionPolicy
|
||||
p.(*PodTopologySpread).enableMatchLabelKeysInPodTopologySpread = tt.enableMatchLabelKeys
|
||||
|
||||
@ -1534,7 +1561,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{nodeConstraint},
|
||||
Constraints: []topologySpreadConstraint{nodeConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-b", 0}, {"node-a", 1}},
|
||||
},
|
||||
@ -1559,7 +1587,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{nodeConstraint},
|
||||
Constraints: []topologySpreadConstraint{nodeConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-a", 1}, {"node-b", 1}},
|
||||
},
|
||||
@ -1584,7 +1613,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{nodeConstraint},
|
||||
Constraints: []topologySpreadConstraint{nodeConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-a", 0}, {"node-b", 1}},
|
||||
},
|
||||
@ -1609,7 +1639,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{nodeConstraint},
|
||||
Constraints: []topologySpreadConstraint{nodeConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"node": {{"node-a", 0}, {"node-b", 2}},
|
||||
},
|
||||
@ -1633,7 +1664,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-x").Label("zone", "zone2").Label("node", "node-x").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint, nodeConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint, nodeConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 0}, {"zone1", 1}},
|
||||
"node": {{"node-x", 0}, {"node-a", 1}},
|
||||
@ -1662,7 +1694,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-x").Label("zone", "zone2").Label("node", "node-x").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint, nodeConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint, nodeConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 1}, {"zone2", 1}},
|
||||
"node": {{"node-a", 1}, {"node-x", 1}},
|
||||
@ -1694,7 +1727,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-x").Label("zone", "zone2").Label("node", "node-x").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint, nodeConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint, nodeConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 1}, {"zone1", 3}},
|
||||
"node": {{"node-a", 1}, {"node-x", 1}},
|
||||
@ -1738,6 +1772,7 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 1}, {"zone1", 2}},
|
||||
"node": {{"node-a", 0}, {"node-b", 1}},
|
||||
@ -1781,6 +1816,7 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
|
||||
},
|
||||
},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 1}, {"zone2", 1}},
|
||||
"node": {{"node-a", 1}, {"node-b", 1}},
|
||||
@ -1810,7 +1846,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone2").Label("node", "node-b").Label("foo", "").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 1},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 1}, {MatchNum: math.MaxInt32}},
|
||||
},
|
||||
@ -1836,7 +1873,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone2").Label("node", "node-b").Label("foo", "").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 1},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 1}, {MatchNum: math.MaxInt32}},
|
||||
},
|
||||
@ -1862,7 +1900,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone2").Label("node", "node-b").Label("foo", "").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 1}, {"zone2", 2}},
|
||||
},
|
||||
@ -1889,7 +1928,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone2").Label("node", "node-b").Label("foo", "").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 1}, {"zone2", 2}},
|
||||
},
|
||||
@ -1916,7 +1956,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone2").Label("node", "node-b").Label("foo", "").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 1}, {"zone2", 1}},
|
||||
},
|
||||
@ -1943,7 +1984,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone2").Label("node", "node-b").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 1}, {"zone2", 2}},
|
||||
},
|
||||
@ -1970,7 +2012,8 @@ func TestPreFilterStateAddPod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone2").Label("node", "node-b").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 1}, {"zone2", 2}},
|
||||
},
|
||||
@ -2054,7 +2097,8 @@ func TestPreFilterStateRemovePod(t *testing.T) {
|
||||
deletedPodIdx: 0, // remove pod "p-a1"
|
||||
nodeIdx: 0, // node-a
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 1}, {"zone2", 1}},
|
||||
},
|
||||
@ -2084,7 +2128,8 @@ func TestPreFilterStateRemovePod(t *testing.T) {
|
||||
deletedPodIdx: 0, // remove pod "p-a1"
|
||||
nodeIdx: 0, // node-a
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 1}, {"zone2", 2}},
|
||||
},
|
||||
@ -2115,7 +2160,8 @@ func TestPreFilterStateRemovePod(t *testing.T) {
|
||||
deletedPodIdx: 0, // remove pod "p-a0"
|
||||
nodeIdx: 0, // node-a
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 2}, {"zone2", 2}},
|
||||
},
|
||||
@ -2146,7 +2192,8 @@ func TestPreFilterStateRemovePod(t *testing.T) {
|
||||
deletedPod: st.MakePod().Name("p-a0").Node("node-a").Label("bar", "").Obj(),
|
||||
nodeIdx: 0, // node-a
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone1", 2}, {"zone2", 2}},
|
||||
},
|
||||
@ -2177,7 +2224,8 @@ func TestPreFilterStateRemovePod(t *testing.T) {
|
||||
deletedPodIdx: 3, // remove pod "p-x1"
|
||||
nodeIdx: 2, // node-x
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint, nodeConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint, nodeConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2, "node": 3},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 1}, {"zone1", 3}},
|
||||
"node": {{"node-b", 1}, {"node-x", 1}},
|
||||
@ -2207,7 +2255,8 @@ func TestPreFilterStateRemovePod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone2").Label("node", "node-b").Label("foo", "").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 1},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 1}, {MatchNum: math.MaxInt32}},
|
||||
},
|
||||
@ -2233,7 +2282,8 @@ func TestPreFilterStateRemovePod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone2").Label("node", "node-b").Label("foo", "").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 1},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 1}, {MatchNum: math.MaxInt32}},
|
||||
},
|
||||
@ -2259,7 +2309,8 @@ func TestPreFilterStateRemovePod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone2").Label("node", "node-b").Label("foo", "").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 0}, {"zone1", 1}},
|
||||
},
|
||||
@ -2286,7 +2337,8 @@ func TestPreFilterStateRemovePod(t *testing.T) {
|
||||
st.MakeNode().Name("node-b").Label("zone", "zone2").Label("node", "node-b").Label("foo", "").Obj(),
|
||||
},
|
||||
want: &preFilterState{
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
Constraints: []topologySpreadConstraint{zoneConstraint},
|
||||
TpKeyToDomainsNum: map[string]int{"zone": 2},
|
||||
TpKeyToCriticalPaths: map[string]*criticalPaths{
|
||||
"zone": {{"zone2", 0}, {"zone1", 1}},
|
||||
},
|
||||
@ -2416,7 +2468,6 @@ func TestSingleConstraint(t *testing.T) {
|
||||
nodes []*v1.Node
|
||||
existingPods []*v1.Pod
|
||||
wantStatusCode map[string]framework.Code
|
||||
enableMinDomains bool
|
||||
enableNodeInclusionPolicy bool
|
||||
}{
|
||||
{
|
||||
@ -2748,7 +2799,6 @@ func TestSingleConstraint(t *testing.T) {
|
||||
st.MakePod().Name("p-b2").Node("node-b").Label("foo", "").Obj(),
|
||||
st.MakePod().Name("p-c1").Node("node-c").Label("foo", "").Obj(),
|
||||
},
|
||||
enableMinDomains: true,
|
||||
wantStatusCode: map[string]framework.Code{
|
||||
"node-a": framework.Unschedulable,
|
||||
"node-b": framework.Unschedulable,
|
||||
@ -2779,7 +2829,6 @@ func TestSingleConstraint(t *testing.T) {
|
||||
st.MakePod().Name("p-b2").Node("node-b").Label("foo", "").Obj(),
|
||||
st.MakePod().Name("p-c1").Node("node-c").Label("foo", "").Obj(),
|
||||
},
|
||||
enableMinDomains: true,
|
||||
wantStatusCode: map[string]framework.Code{
|
||||
"node-a": framework.Success,
|
||||
"node-b": framework.Success,
|
||||
@ -2809,7 +2858,6 @@ func TestSingleConstraint(t *testing.T) {
|
||||
st.MakePod().Name("p-b2").Node("node-b").Label("foo", "").Obj(),
|
||||
st.MakePod().Name("p-y1").Node("node-y").Label("foo", "").Obj(),
|
||||
},
|
||||
enableMinDomains: true,
|
||||
wantStatusCode: map[string]framework.Code{
|
||||
"node-a": framework.Unschedulable,
|
||||
"node-b": framework.Unschedulable,
|
||||
@ -2840,7 +2888,6 @@ func TestSingleConstraint(t *testing.T) {
|
||||
st.MakePod().Name("p-b2").Node("node-b").Label("foo", "").Obj(),
|
||||
st.MakePod().Name("p-y1").Node("node-y").Label("foo", "").Obj(),
|
||||
},
|
||||
enableMinDomains: true,
|
||||
wantStatusCode: map[string]framework.Code{
|
||||
"node-a": framework.Success,
|
||||
"node-b": framework.Success,
|
||||
@ -3009,7 +3056,6 @@ func TestSingleConstraint(t *testing.T) {
|
||||
snapshot := cache.NewSnapshot(tt.existingPods, tt.nodes)
|
||||
pl := plugintesting.SetupPlugin(ctx, t, topologySpreadFunc, &config.PodTopologySpreadArgs{DefaultingType: config.ListDefaulting}, snapshot)
|
||||
p := pl.(*PodTopologySpread)
|
||||
p.enableMinDomainsInPodTopologySpread = tt.enableMinDomains
|
||||
p.enableNodeInclusionPolicyInPodTopologySpread = tt.enableNodeInclusionPolicy
|
||||
state := framework.NewCycleState()
|
||||
if _, s := p.PreFilter(ctx, state, tt.pod); !s.IsSuccess() {
|
||||
|
@@ -67,7 +67,6 @@ type PodTopologySpread struct {
 	replicationCtrls                             corelisters.ReplicationControllerLister
 	replicaSets                                  appslisters.ReplicaSetLister
 	statefulSets                                 appslisters.StatefulSetLister
-	enableMinDomainsInPodTopologySpread          bool
 	enableNodeInclusionPolicyInPodTopologySpread bool
 	enableMatchLabelKeysInPodTopologySpread      bool
 }
@@ -99,10 +98,9 @@ func New(_ context.Context, plArgs runtime.Object, h framework.Handle, fts featu
 		return nil, err
 	}
 	pl := &PodTopologySpread{
-		parallelizer:                        h.Parallelizer(),
-		sharedLister:                        h.SnapshotSharedLister(),
-		defaultConstraints:                  args.DefaultConstraints,
-		enableMinDomainsInPodTopologySpread: fts.EnableMinDomainsInPodTopologySpread,
+		parallelizer:       h.Parallelizer(),
+		sharedLister:       h.SnapshotSharedLister(),
+		defaultConstraints: args.DefaultConstraints,
 		enableNodeInclusionPolicyInPodTopologySpread: fts.EnableNodeInclusionPolicyInPodTopologySpread,
 		enableMatchLabelKeysInPodTopologySpread:      fts.EnableMatchLabelKeysInPodTopologySpread,
 	}
@@ -48,7 +48,6 @@ func NewInTreeRegistry() runtime.Registry {
 	fts := plfeature.Features{
 		EnableDynamicResourceAllocation:              feature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation),
 		EnableVolumeCapacityPriority:                 feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority),
-		EnableMinDomainsInPodTopologySpread:          feature.DefaultFeatureGate.Enabled(features.MinDomainsInPodTopologySpread),
 		EnableNodeInclusionPolicyInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread),
 		EnableMatchLabelKeysInPodTopologySpread:      feature.DefaultFeatureGate.Enabled(features.MatchLabelKeysInPodTopologySpread),
 		EnablePodDisruptionConditions:                feature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions),
@@ -6046,8 +6046,6 @@ message TopologySpreadConstraint {
   // In this situation, new pod with the same labelSelector cannot be scheduled,
   // because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
   // it will violate MaxSkew.
-  //
-  // This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).
   // +optional
   optional int32 minDomains = 5;
 
@@ -4007,8 +4007,6 @@ type TopologySpreadConstraint struct {
 	// In this situation, new pod with the same labelSelector cannot be scheduled,
 	// because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
 	// it will violate MaxSkew.
-	//
-	// This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).
 	// +optional
 	MinDomains *int32 `json:"minDomains,omitempty" protobuf:"varint,5,opt,name=minDomains"`
 	// NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
@ -1363,7 +1363,6 @@ func TestPodTopologySpreadFilter(t *testing.T) {
|
||||
fits bool
|
||||
nodes []*v1.Node
|
||||
candidateNodes []string // nodes expected to schedule onto
|
||||
enableMinDomains bool
|
||||
enableNodeInclusionPolicy bool
|
||||
enableMatchLabelKeys bool
|
||||
}{
|
||||
@ -1483,10 +1482,9 @@ func TestPodTopologySpreadFilter(t *testing.T) {
|
||||
st.MakePod().ZeroTerminationGracePeriod().Name("p2b").Node("node-2").Label("foo", "").Container(pause).Obj(),
|
||||
st.MakePod().ZeroTerminationGracePeriod().Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
|
||||
},
|
||||
fits: true,
|
||||
nodes: defaultNodes,
|
||||
candidateNodes: []string{"node-3"},
|
||||
enableMinDomains: true,
|
||||
fits: true,
|
||||
nodes: defaultNodes,
|
||||
candidateNodes: []string{"node-3"},
|
||||
},
|
||||
{
|
||||
name: "pods spread across nodes as 2/2/1, maxSkew is 2, and the number of domains > minDomains, then the all nodes fit",
|
||||
@ -1510,10 +1508,9 @@ func TestPodTopologySpreadFilter(t *testing.T) {
|
||||
st.MakePod().ZeroTerminationGracePeriod().Name("p2b").Node("node-2").Label("foo", "").Container(pause).Obj(),
|
||||
st.MakePod().ZeroTerminationGracePeriod().Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
|
||||
},
|
||||
fits: true,
|
||||
nodes: defaultNodes,
|
||||
candidateNodes: []string{"node-1", "node-2", "node-3"},
|
||||
enableMinDomains: true,
|
||||
fits: true,
|
||||
nodes: defaultNodes,
|
||||
candidateNodes: []string{"node-1", "node-2", "node-3"},
|
||||
},
|
||||
{
|
||||
name: "pods spread across zone as 2/1, maxSkew is 2 and the number of domains < minDomains, then the third and fourth nodes fit",
|
||||
@ -1533,10 +1530,9 @@ func TestPodTopologySpreadFilter(t *testing.T) {
|
||||
st.MakePod().Name("p2a").Node("node-1").Label("foo", "").Container(pause).Obj(),
|
||||
st.MakePod().Name("p3a").Node("node-2").Label("foo", "").Container(pause).Obj(),
|
||||
},
|
||||
fits: true,
|
||||
nodes: defaultNodes,
|
||||
candidateNodes: []string{"node-2", "node-3"},
|
||||
enableMinDomains: true,
|
||||
fits: true,
|
||||
nodes: defaultNodes,
|
||||
candidateNodes: []string{"node-2", "node-3"},
|
||||
},
|
||||
{
|
||||
name: "pods spread across zone as 2/1, maxSkew is 2 and the number of domains < minDomains, then the all nodes fit",
|
||||
@ -1556,10 +1552,9 @@ func TestPodTopologySpreadFilter(t *testing.T) {
|
||||
st.MakePod().Name("p2a").Node("node-2").Label("foo", "").Container(pause).Obj(),
|
||||
st.MakePod().Name("p3a").Node("node-3").Label("foo", "").Container(pause).Obj(),
|
||||
},
|
||||
fits: true,
|
||||
nodes: defaultNodes,
|
||||
candidateNodes: []string{"node-0", "node-1", "node-2", "node-3"},
|
||||
enableMinDomains: true,
|
||||
fits: true,
|
||||
nodes: defaultNodes,
|
||||
candidateNodes: []string{"node-0", "node-1", "node-2", "node-3"},
|
||||
},
|
||||
{
|
||||
name: "NodeAffinityPolicy honored with labelSelectors, pods spread across zone as 2/1",
|
||||
@ -1772,7 +1767,6 @@ func TestPodTopologySpreadFilter(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MinDomainsInPodTopologySpread, tt.enableMinDomains)()
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeInclusionPolicyInPodTopologySpread, tt.enableNodeInclusionPolicy)()
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodTopologySpread, tt.enableMatchLabelKeys)()
|
||||
|
||||
|