EvenPodsSpread: auto-gen files

Wei Huang 2019-07-18 11:01:17 -07:00
parent 49da505a9a
commit eafd4e96d9
7 changed files with 1459 additions and 950 deletions


@@ -9924,6 +9924,20 @@
},
"type": "array"
},
"topologySpreadConstraints": {
"description": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.",
"items": {
"$ref": "#/definitions/io.k8s.api.core.v1.TopologySpreadConstraint"
},
"type": "array",
"x-kubernetes-list-map-keys": [
"topologyKey",
"whenUnsatisfiable"
],
"x-kubernetes-list-type": "map",
"x-kubernetes-patch-merge-key": "topologyKey",
"x-kubernetes-patch-strategy": "merge"
},
"volumes": {
"description": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
"items": {
@@ -11494,6 +11508,34 @@
},
"type": "object"
},
"io.k8s.api.core.v1.TopologySpreadConstraint": {
"description": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
"properties": {
"labelSelector": {
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector",
"description": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain."
},
"maxSkew": {
"description": "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It's a required field. Default value is 1 and 0 is not allowed.",
"format": "int32",
"type": "integer"
},
"topologyKey": {
"description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.",
"type": "string"
},
"whenUnsatisfiable": {
"description": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.",
"type": "string"
}
},
"required": [
"maxSkew",
"topologyKey",
"whenUnsatisfiable"
],
"type": "object"
},
"io.k8s.api.core.v1.TypedLocalObjectReference": {
"description": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.",
"properties": {


@@ -1930,6 +1930,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.TopologySpreadConstraint)(nil), (*core.TopologySpreadConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(a.(*v1.TopologySpreadConstraint), b.(*core.TopologySpreadConstraint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.TopologySpreadConstraint)(nil), (*v1.TopologySpreadConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(a.(*core.TopologySpreadConstraint), b.(*v1.TopologySpreadConstraint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.TypedLocalObjectReference)(nil), (*core.TypedLocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(a.(*v1.TypedLocalObjectReference), b.(*core.TypedLocalObjectReference), scope)
}); err != nil {
@@ -5661,6 +5671,7 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s
out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks))
out.PreemptionPolicy = (*core.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy))
out.Overhead = *(*core.ResourceList)(unsafe.Pointer(&in.Overhead))
out.TopologySpreadConstraints = *(*[]core.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints))
return nil
}
@@ -5710,6 +5721,7 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s
out.RuntimeClassName = (*string)(unsafe.Pointer(in.RuntimeClassName))
out.Overhead = *(*v1.ResourceList)(unsafe.Pointer(&in.Overhead))
out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks))
out.TopologySpreadConstraints = *(*[]v1.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints))
return nil
}
@@ -7348,6 +7360,32 @@ func Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in *core.Topol
return autoConvert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in, out, s)
}
func autoConvert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in *v1.TopologySpreadConstraint, out *core.TopologySpreadConstraint, s conversion.Scope) error {
out.MaxSkew = in.MaxSkew
out.TopologyKey = in.TopologyKey
out.WhenUnsatisfiable = core.UnsatisfiableConstraintAction(in.WhenUnsatisfiable)
out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
return nil
}
// Convert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint is an autogenerated conversion function.
func Convert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in *v1.TopologySpreadConstraint, out *core.TopologySpreadConstraint, s conversion.Scope) error {
return autoConvert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in, out, s)
}
func autoConvert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in *core.TopologySpreadConstraint, out *v1.TopologySpreadConstraint, s conversion.Scope) error {
out.MaxSkew = in.MaxSkew
out.TopologyKey = in.TopologyKey
out.WhenUnsatisfiable = v1.UnsatisfiableConstraintAction(in.WhenUnsatisfiable)
out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
return nil
}
// Convert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint is an autogenerated conversion function.
func Convert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in *core.TopologySpreadConstraint, out *v1.TopologySpreadConstraint, s conversion.Scope) error {
return autoConvert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in, out, s)
}
func autoConvert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in *v1.TypedLocalObjectReference, out *core.TypedLocalObjectReference, s conversion.Scope) error {
out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
out.Kind = in.Kind
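
The PodSpec conversions above cast the whole TopologySpreadConstraints slice through unsafe.Pointer rather than converting element by element; this is sound only because the generated v1 and core structs share an identical memory layout. A self-contained sketch of the same pattern with two toy mirrored types (names made up for illustration):

package main

import (
	"fmt"
	"unsafe"
)

// external and internal stand in for v1.TopologySpreadConstraint and
// core.TopologySpreadConstraint: same fields, same order, same layout.
type external struct {
	MaxSkew     int32
	TopologyKey string
}

type internal struct {
	MaxSkew     int32
	TopologyKey string
}

func main() {
	in := []external{{MaxSkew: 1, TopologyKey: "zone"}}
	// Zero-copy reinterpretation of the slice header; valid only while
	// the two element types remain layout-identical.
	out := *(*[]internal)(unsafe.Pointer(&in))
	fmt.Println(out[0].MaxSkew, out[0].TopologyKey)
}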


@@ -3663,6 +3663,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
*out = new(bool)
**out = **in
}
if in.TopologySpreadConstraints != nil {
in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
*out = make([]TopologySpreadConstraint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@@ -5189,6 +5196,27 @@ func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) {
*out = *in
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint.
func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint {
if in == nil {
return nil
}
out := new(TopologySpreadConstraint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) {
*out = *in
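
The element-wise DeepCopyInto loop above is needed because TopologySpreadConstraint carries a pointer field (LabelSelector): copying slice elements by plain assignment would leave both copies sharing one LabelSelector. A minimal sketch of the aliasing bug the generated code avoids, using toy types rather than the real API structs:

package main

import "fmt"

type selector struct{ MatchLabels map[string]string }

type constraint struct {
	MaxSkew  int32
	Selector *selector // pointer field: shallow copies alias it
}

func main() {
	orig := []constraint{{MaxSkew: 1, Selector: &selector{MatchLabels: map[string]string{"app": "web"}}}}

	shallow := make([]constraint, len(orig))
	copy(shallow, orig) // copies the pointer only
	shallow[0].Selector.MatchLabels["app"] = "db"
	fmt.Println(orig[0].Selector.MatchLabels) // mutated: map[app:db]

	// What the generated DeepCopyInto does instead: clone what the pointer reaches.
	deep := make([]constraint, len(orig))
	for i := range orig {
		deep[i] = orig[i]
		if orig[i].Selector != nil {
			clone := selector{MatchLabels: map[string]string{}}
			for k, v := range orig[i].Selector.MatchLabels {
				clone.MatchLabels[k] = v
			}
			deep[i].Selector = &clone
		}
	}
	deep[0].Selector.MatchLabels["app"] = "cache"
	fmt.Println(orig[0].Selector.MatchLabels) // unchanged: map[app:db]
}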

File diff suppressed because it is too large


@@ -3246,6 +3246,19 @@ message PodSpec {
// This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.
// +optional
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> overhead = 32;
// TopologySpreadConstraints describes how a group of pods ought to spread across topology
// domains. Scheduler will schedule pods in a way which abides by the constraints.
// This field is alpha-level and is only honored by clusters that enable the EvenPodsSpread
// feature.
// All topologySpreadConstraints are ANDed.
// +optional
// +patchMergeKey=topologyKey
// +patchStrategy=merge
// +listType=map
// +listMapKey=topologyKey
// +listMapKey=whenUnsatisfiable
repeated TopologySpreadConstraint topologySpreadConstraints = 33;
}
// PodStatus represents information about the status of a pod. Status may trail the actual
@@ -4642,6 +4655,59 @@ message TopologySelectorTerm {
repeated TopologySelectorLabelRequirement matchLabelExpressions = 1;
}
// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
message TopologySpreadConstraint {
// MaxSkew describes the degree to which pods may be unevenly distributed.
// It's the maximum permitted difference between the number of matching pods in
// any two topology domains of a given topology type.
// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
// labelSelector spread as 1/1/0:
// +-------+-------+-------+
// | zone1 | zone2 | zone3 |
// +-------+-------+-------+
// | P | P | |
// +-------+-------+-------+
// - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1;
// scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2)
// violate MaxSkew(1).
// - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
// It's a required field. Default value is 1 and 0 is not allowed.
optional int32 maxSkew = 1;
// TopologyKey is the key of node labels. Nodes that have a label with this key
// and identical values are considered to be in the same topology.
// We consider each <key, value> as a "bucket", and try to put a balanced number
// of pods into each bucket.
// It's a required field.
optional string topologyKey = 2;
// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
// the spread constraint.
// - DoNotSchedule (default) tells the scheduler not to schedule it
// - ScheduleAnyway tells the scheduler to still schedule it
// It's considered as "Unsatisfiable" if and only if placing incoming pod on any
// topology violates "MaxSkew".
// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
// labelSelector spread as 3/1/1:
// +-------+-------+-------+
// | zone1 | zone2 | zone3 |
// +-------+-------+-------+
// | P P P | P | P |
// +-------+-------+-------+
// If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
// to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
// MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
// won't make it *more* imbalanced.
// It's a required field.
optional string whenUnsatisfiable = 3;
// LabelSelector is used to find matching pods.
// Pods that match this label selector are counted to determine the number of pods
// in their corresponding topology domain.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4;
}
// TypedLocalObjectReference contains enough information to let you locate the
// typed referenced object inside the same namespace.
message TypedLocalObjectReference {
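
The MaxSkew/WhenUnsatisfiable semantics spelled out above boil down to one comparison per candidate domain: the skew after placing the pod is (matching pods in that domain + 1) minus the global minimum of matching pods, and DoNotSchedule rejects any domain where that exceeds maxSkew. A hedged sketch of that arithmetic (an approximation for illustration, not the scheduler's actual code), replaying the 3/1/1 example from the comment:

package main

import "fmt"

// fits reports whether placing one more matching pod in domain keeps the
// post-placement skew within maxSkew. counts maps topology value -> matching pods.
func fits(counts map[string]int, domain string, maxSkew int) bool {
	min := counts[domain]
	for _, c := range counts {
		if c < min {
			min = c
		}
	}
	return counts[domain]+1-min <= maxSkew
}

func main() {
	counts := map[string]int{"zone1": 3, "zone2": 1, "zone3": 1}
	for _, z := range []string{"zone1", "zone2", "zone3"} {
		fmt.Println(z, fits(counts, z, 1))
	}
	// zone1 false, zone2 true, zone3 true: with DoNotSchedule the incoming pod
	// may only land in zone2 or zone3, matching the example above.
}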


@@ -1568,6 +1568,7 @@ var map_PodSpec = map[string]string{
"enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
"preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.",
"overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.",
"topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.",
}
func (PodSpec) SwaggerDoc() map[string]string {
@@ -2252,6 +2253,18 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string {
return map_TopologySelectorTerm
}
var map_TopologySpreadConstraint = map[string]string{
"": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
"maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ",
"topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.",
"whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ",
"labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
}
func (TopologySpreadConstraint) SwaggerDoc() map[string]string {
return map_TopologySpreadConstraint
}
var map_TypedLocalObjectReference = map[string]string{
"": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.",
"apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.",


@@ -3661,6 +3661,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
(*out)[key] = val.DeepCopy()
}
}
if in.TopologySpreadConstraints != nil {
in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
*out = make([]TopologySpreadConstraint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@@ -5204,6 +5211,27 @@ func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) {
*out = *in
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint.
func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint {
if in == nil {
return nil
}
out := new(TopologySpreadConstraint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) {
*out = *in