mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-31 23:37:01 +00:00
Update types from api review
parent d419bdfcd9
commit 10c514725b
@@ -47,28 +47,26 @@ type RuntimeClass struct {
 	// immutable.
 	Handler string
 
-	// Topology describes the set of nodes in the cluster that support this
-	// RuntimeClass. The rules are applied applied to pods running with this
-	// RuntimeClass and semantically merged with other scheduling constraints on
-	// the pod.
-	// If topology is nil, this RuntimeClass is assumed to be supported by all
+	// Scheduling holds the scheduling constraints to ensure that pods running
+	// with this RuntimeClass are scheduled to nodes that support it.
+	// If scheduling is nil, this RuntimeClass is assumed to be supported by all
 	// nodes.
 	// +optional
-	Topology *Topology
+	Scheduling *Scheduling
 }
 
-// Topology specifies the scheduling constraints for nodes supporting a
+// Scheduling specifies the scheduling constraints for nodes supporting a
 // RuntimeClass.
-type Topology struct {
-	// NodeSelector selects the set of nodes that support this RuntimeClass.
-	// Pods using this RuntimeClass can only be scheduled to a node matched by
-	// this selector. The NodeSelector is intersected (AND) with a pod's other
-	// NodeAffinity or NodeSelector requirements.
-	// A nil NodeSelector selects all nodes.
+type Scheduling struct {
+	// nodeSelector lists labels that must be present on nodes that support this
+	// RuntimeClass. Pods using this RuntimeClass can only be scheduled to a
+	// node matched by this selector. The RuntimeClass nodeSelector is merged
+	// with a pod's existing nodeSelector. Any conflicts will cause the pod to
+	// be rejected in admission.
 	// +optional
-	NodeSelector *core.NodeSelector
+	NodeSelector map[string]string
 
-	// Tolerations are appended (excluding duplicates) to pods running with this
+	// tolerations are appended (excluding duplicates) to pods running with this
 	// RuntimeClass during admission, effectively unioning the set of nodes
 	// tolerated by the pod and the RuntimeClass.
 	// +optional
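The merge behaviour the new Scheduling comments describe is: the RuntimeClass nodeSelector labels are added to the pod's nodeSelector, and any conflicting key rejects the pod in admission. Below is a minimal Go sketch of that rule; mergeNodeSelector and the example label keys are hypothetical illustrations, not the actual admission-plugin code.

package main

import "fmt"

// mergeNodeSelector sketches the documented semantics: RuntimeClass labels are
// merged into the pod's nodeSelector; a key present in both with a different
// value is a conflict and the pod is rejected.
func mergeNodeSelector(podSelector, rcSelector map[string]string) (map[string]string, error) {
	merged := map[string]string{}
	for k, v := range podSelector {
		merged[k] = v
	}
	for k, v := range rcSelector {
		if existing, ok := merged[k]; ok && existing != v {
			return nil, fmt.Errorf("conflict for label %q: pod wants %q, RuntimeClass wants %q", k, existing, v)
		}
		merged[k] = v
	}
	return merged, nil
}

func main() {
	merged, err := mergeNodeSelector(
		map[string]string{"kubernetes.io/arch": "amd64"},
		map[string]string{"runtime-supported": "true"},
	)
	fmt.Println(merged, err)
}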
@@ -33,9 +33,9 @@ func addConversionFuncs(s *runtime.Scheme) error {
 func Convert_v1alpha1_RuntimeClass_To_node_RuntimeClass(in *v1alpha1.RuntimeClass, out *node.RuntimeClass, s conversion.Scope) error {
 	out.ObjectMeta = in.ObjectMeta
 	out.Handler = in.Spec.RuntimeHandler
-	if in.Spec.Topology != nil {
-		out.Topology = new(node.Topology)
-		autoConvert_v1alpha1_Topology_To_node_Topology(in.Spec.Topology, out.Topology, s)
+	if in.Spec.Scheduling != nil {
+		out.Scheduling = new(node.Scheduling)
+		autoConvert_v1alpha1_Scheduling_To_node_Scheduling(in.Spec.Scheduling, out.Scheduling, s)
 	}
 	return nil
 }
@@ -43,9 +43,9 @@ func Convert_v1alpha1_RuntimeClass_To_node_RuntimeClas
 func Convert_node_RuntimeClass_To_v1alpha1_RuntimeClass(in *node.RuntimeClass, out *v1alpha1.RuntimeClass, s conversion.Scope) error {
 	out.ObjectMeta = in.ObjectMeta
 	out.Spec.RuntimeHandler = in.Handler
-	if in.Topology != nil {
-		out.Spec.Topology = new(v1alpha1.Topology)
-		autoConvert_node_Topology_To_v1alpha1_Topology(in.Topology, out.Spec.Topology, s)
+	if in.Scheduling != nil {
+		out.Spec.Scheduling = new(v1alpha1.Scheduling)
+		autoConvert_node_Scheduling_To_v1alpha1_Scheduling(in.Scheduling, out.Spec.Scheduling, s)
 	}
 	return nil
 }
@@ -41,15 +41,8 @@ func TestRuntimeClassConversion(t *testing.T) {
 			internal: &node.RuntimeClass{
 				ObjectMeta: metav1.ObjectMeta{Name: name},
 				Handler:    handler,
-				Topology: &node.Topology{
-					NodeSelector: &core.NodeSelector{
-						NodeSelectorTerms: []core.NodeSelectorTerm{{
-							MatchExpressions: []core.NodeSelectorRequirement{{
-								Key:      "extra-soft",
-								Operator: core.NodeSelectorOpExists,
-							}},
-						}},
-					},
+				Scheduling: &node.Scheduling{
+					NodeSelector: map[string]string{"extra-soft": "true"},
 					Tolerations: []core.Toleration{{
 						Key:      "stinky",
 						Operator: core.TolerationOpExists,
@@ -61,15 +54,8 @@ func TestRuntimeClassConversion(t *testing.T) {
 				ObjectMeta: metav1.ObjectMeta{Name: name},
 				Spec: v1alpha1.RuntimeClassSpec{
 					RuntimeHandler: handler,
-					Topology: &v1alpha1.Topology{
-						NodeSelector: &corev1.NodeSelector{
-							NodeSelectorTerms: []corev1.NodeSelectorTerm{{
-								MatchExpressions: []corev1.NodeSelectorRequirement{{
-									Key:      "extra-soft",
-									Operator: corev1.NodeSelectorOpExists,
-								}},
-							}},
-						},
+					Scheduling: &v1alpha1.Scheduling{
+						NodeSelector: map[string]string{"extra-soft": "true"},
 						Tolerations: []corev1.Toleration{{
 							Key:      "stinky",
 							Operator: corev1.TolerationOpExists,
@@ -79,17 +65,17 @@ func TestRuntimeClassConversion(t *testing.T) {
 				},
 			},
 		},
-		"empty-topology": {
+		"empty-scheduling": {
 			internal: &node.RuntimeClass{
 				ObjectMeta: metav1.ObjectMeta{Name: name},
 				Handler:    handler,
-				Topology:   &node.Topology{},
+				Scheduling: &node.Scheduling{},
 			},
 			external: &v1alpha1.RuntimeClass{
 				ObjectMeta: metav1.ObjectMeta{Name: name},
 				Spec: v1alpha1.RuntimeClassSpec{
 					RuntimeHandler: handler,
-					Topology:       &v1alpha1.Topology{},
+					Scheduling:     &v1alpha1.Scheduling{},
 				},
 			},
 		},
@@ -18,6 +18,7 @@ package validation
 
 import (
 	apivalidation "k8s.io/apimachinery/pkg/api/validation"
+	unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	corevalidation "k8s.io/kubernetes/pkg/apis/core/validation"
 	"k8s.io/kubernetes/pkg/apis/node"
@@ -31,8 +32,8 @@ func ValidateRuntimeClass(rc *node.RuntimeClass) field.ErrorList {
 		allErrs = append(allErrs, field.Invalid(field.NewPath("handler"), rc.Handler, msg))
 	}
 
-	if rc.Topology != nil {
-		allErrs = append(allErrs, validateTopology(rc.Topology, field.NewPath("topology"))...)
+	if rc.Scheduling != nil {
+		allErrs = append(allErrs, validateScheduling(rc.Scheduling, field.NewPath("scheduling"))...)
 	}
 
 	return allErrs
@@ -47,11 +48,11 @@ func ValidateRuntimeClassUpdate(new, old *node.RuntimeClass) field.ErrorList {
 	return allErrs
 }
 
-func validateTopology(t *node.Topology, fldPath *field.Path) field.ErrorList {
+func validateScheduling(s *node.Scheduling, fldPath *field.Path) field.ErrorList {
 	var allErrs field.ErrorList
-	if t.NodeSelector != nil {
-		allErrs = append(allErrs, corevalidation.ValidateNodeSelector(t.NodeSelector, fldPath.Child("nodeSelector"))...)
+	if s.NodeSelector != nil {
+		allErrs = append(allErrs, unversionedvalidation.ValidateLabels(s.NodeSelector, fldPath.Child("nodeSelector"))...)
 	}
-	allErrs = append(allErrs, corevalidation.ValidateTolerations(t.Tolerations, fldPath.Child("tolerations"))...)
+	allErrs = append(allErrs, corevalidation.ValidateTolerations(s.Tolerations, fldPath.Child("tolerations"))...)
 	return allErrs
 }
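Because nodeSelector is now a plain map[string]string, validation switches from corevalidation.ValidateNodeSelector to unversionedvalidation.ValidateLabels, which checks each key/value pair as an ordinary Kubernetes label. A small standalone sketch of that call follows, assuming the apimachinery module is available; the field path and label key are illustrative only.

package main

import (
	"fmt"

	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// An invalid label key produces a validation error, mirroring the
	// "invalid nodeSelector" case (expectErrs: 1) in the test changes below.
	errs := metav1validation.ValidateLabels(
		map[string]string{"not a valid key!!!": "nope"},
		field.NewPath("scheduling", "nodeSelector"),
	)
	fmt.Println(len(errs))
}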
@@ -128,22 +128,15 @@ func TestValidateRuntimeUpdate(t *testing.T) {
 	}
 }
 
-func TestValidateTopology(t *testing.T) {
+func TestValidateScheduling(t *testing.T) {
 	tests := []struct {
 		name       string
-		topology   *node.Topology
+		scheduling *node.Scheduling
 		expectErrs int
 	}{{
-		name: "valid topology",
-		topology: &node.Topology{
-			NodeSelector: &core.NodeSelector{
-				NodeSelectorTerms: []core.NodeSelectorTerm{{
-					MatchExpressions: []core.NodeSelectorRequirement{{
-						Key:      "valid",
-						Operator: core.NodeSelectorOpExists,
-					}},
-				}},
-			},
+		name: "valid scheduling",
+		scheduling: &node.Scheduling{
+			NodeSelector: map[string]string{"valid": "yes"},
 			Tolerations: []core.Toleration{{
 				Key:      "valid",
 				Operator: core.TolerationOpExists,
@@ -151,24 +144,17 @@ func TestValidateTopology(t *testing.T) {
 			}},
 		},
 	}, {
-		name:     "empty topology",
-		topology: &node.Topology{},
+		name:       "empty scheduling",
+		scheduling: &node.Scheduling{},
 	}, {
 		name: "invalid nodeSelector",
-		topology: &node.Topology{
-			NodeSelector: &core.NodeSelector{
-				NodeSelectorTerms: []core.NodeSelectorTerm{{
-					MatchExpressions: []core.NodeSelectorRequirement{{
-						Key:      "not a valid key!!!",
-						Operator: core.NodeSelectorOpExists,
-					}},
-				}},
-			},
+		scheduling: &node.Scheduling{
+			NodeSelector: map[string]string{"not a valid key!!!": "nope"},
 		},
 		expectErrs: 1,
 	}, {
 		name: "invalid toleration",
-		topology: &node.Topology{
+		scheduling: &node.Scheduling{
 			Tolerations: []core.Toleration{{
 				Key:      "valid",
 				Operator: core.TolerationOpExists,
@@ -181,16 +167,9 @@ func TestValidateTopology(t *testing.T) {
 		},
 		expectErrs: 1,
 	}, {
-		name: "invalid topology",
-		topology: &node.Topology{
-			NodeSelector: &core.NodeSelector{
-				NodeSelectorTerms: []core.NodeSelectorTerm{{
-					MatchExpressions: []core.NodeSelectorRequirement{{
-						Key:      "not a valid label key!!!",
-						Operator: core.NodeSelectorOpExists,
-					}},
-				}},
-			},
+		name: "invalid scheduling",
+		scheduling: &node.Scheduling{
+			NodeSelector: map[string]string{"not a valid key!!!": "nope"},
 			Tolerations: []core.Toleration{{
 				Key:      "valid",
 				Operator: core.TolerationOpExists,
@@ -209,7 +188,7 @@ func TestValidateTopology(t *testing.T) {
 			rc := &node.RuntimeClass{
 				ObjectMeta: metav1.ObjectMeta{Name: "foo"},
 				Handler:    "bar",
-				Topology:   test.topology,
+				Scheduling: test.scheduling,
 			}
 			assert.Len(t, ValidateRuntimeClass(rc), test.expectErrs)
 		})
@@ -60,28 +60,26 @@ type RuntimeClassSpec struct {
 	// and is immutable.
 	RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"`
 
-	// Topology describes the set of nodes in the cluster that support this
-	// RuntimeClass. The rules are applied applied to pods running with this
-	// RuntimeClass and semantically merged with other scheduling constraints on
-	// the pod.
-	// If topology is nil, this RuntimeClass is assumed to be supported by all
+	// Scheduling holds the scheduling constraints to ensure that pods running
+	// with this RuntimeClass are scheduled to nodes that support it.
+	// If scheduling is nil, this RuntimeClass is assumed to be supported by all
 	// nodes.
 	// +optional
-	Topology *Topology `json:"topology,omitempty" protobuf:"bytes,3,opt,name=topology"`
+	Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,3,opt,name=scheduling"`
 }
 
-// Topology specifies the scheduling constraints for nodes supporting a
+// Scheduling specifies the scheduling constraints for nodes supporting a
 // RuntimeClass.
-type Topology struct {
-	// NodeSelector selects the set of nodes that support this RuntimeClass.
-	// Pods using this RuntimeClass can only be scheduled to a node matched by
-	// this selector. The NodeSelector is intersected (AND) with a pod's other
-	// NodeAffinity or NodeSelector requirements.
-	// A nil NodeSelector selects all nodes.
+type Scheduling struct {
+	// nodeSelector lists labels that must be present on nodes that support this
+	// RuntimeClass. Pods using this RuntimeClass can only be scheduled to a
+	// node matched by this selector. The RuntimeClass nodeSelector is merged
+	// with a pod's existing nodeSelector. Any conflicts will cause the pod to
+	// be rejected in admission.
 	// +optional
-	NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
+	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
 
-	// Tolerations are appended (excluding duplicates) to pods running with this
+	// tolerations are appended (excluding duplicates) to pods running with this
 	// RuntimeClass during admission, effectively unioning the set of nodes
 	// tolerated by the pod and the RuntimeClass.
 	// +optional
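For reference, the renamed json tags give the spec the wire shape sketched below. The snippet marshals local stand-in structs that only mirror the tags shown in this hunk; the gvisor handler value and the runtime label are made-up examples, and these are not the real k8s.io/api types.

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-ins mirroring the json tags from the diff above.
type scheduling struct {
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
}

type runtimeClassSpec struct {
	RuntimeHandler string      `json:"runtimeHandler"`
	Scheduling     *scheduling `json:"scheduling,omitempty"`
}

func main() {
	spec := runtimeClassSpec{
		RuntimeHandler: "gvisor",
		Scheduling:     &scheduling{NodeSelector: map[string]string{"runtime": "gvisor"}},
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
	// {
	//   "runtimeHandler": "gvisor",
	//   "scheduling": {
	//     "nodeSelector": {
	//       "runtime": "gvisor"
	//     }
	//   }
	// }
}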
@@ -50,28 +50,26 @@ type RuntimeClass struct {
 	// immutable.
 	Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"`
 
-	// Topology describes the set of nodes in the cluster that support this
-	// RuntimeClass. The rules are applied applied to pods running with this
-	// RuntimeClass and semantically merged with other scheduling constraints on
-	// the pod.
-	// If topology is nil, this RuntimeClass is assumed to be supported by all
+	// Scheduling holds the scheduling constraints to ensure that pods running
+	// with this RuntimeClass are scheduled to nodes that support it.
+	// If scheduling is nil, this RuntimeClass is assumed to be supported by all
 	// nodes.
 	// +optional
-	Topology *Topology `json:"topology,omitempty" protobuf:"bytes,3,opt,name=topology"`
+	Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,3,opt,name=scheduling"`
 }
 
-// Topology specifies the scheduling constraints for nodes supporting a
+// Scheduling specifies the scheduling constraints for nodes supporting a
 // RuntimeClass.
-type Topology struct {
-	// NodeSelector selects the set of nodes that support this RuntimeClass.
-	// Pods using this RuntimeClass can only be scheduled to a node matched by
-	// this selector. The NodeSelector is intersected (AND) with a pod's other
-	// NodeAffinity or NodeSelector requirements.
-	// A nil NodeSelector selects all nodes.
+type Scheduling struct {
+	// nodeSelector lists labels that must be present on nodes that support this
+	// RuntimeClass. Pods using this RuntimeClass can only be scheduled to a
+	// node matched by this selector. The RuntimeClass nodeSelector is merged
+	// with a pod's existing nodeSelector. Any conflicts will cause the pod to
+	// be rejected in admission.
 	// +optional
-	NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
+	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
 
-	// Tolerations are appended (excluding duplicates) to pods running with this
+	// tolerations are appended (excluding duplicates) to pods running with this
 	// RuntimeClass during admission, effectively unioning the set of nodes
 	// tolerated by the pod and the RuntimeClass.
 	// +optional
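The tolerations comment in both staging copies promises append-with-deduplication, "effectively unioning the set of nodes tolerated by the pod and the RuntimeClass". Below is a minimal sketch of that union rule using a local stand-in Toleration type and a hypothetical appendTolerations helper, not the actual admission code.

package main

import "fmt"

// Toleration is a local stand-in for core.Toleration, for illustration only.
type Toleration struct {
	Key      string
	Operator string
	Value    string
	Effect   string
}

// appendTolerations adds the RuntimeClass tolerations to the pod's tolerations,
// skipping any that are already present verbatim.
func appendTolerations(pod, rc []Toleration) []Toleration {
	seen := map[Toleration]bool{}
	out := append([]Toleration{}, pod...)
	for _, t := range pod {
		seen[t] = true
	}
	for _, t := range rc {
		if !seen[t] {
			out = append(out, t)
			seen[t] = true
		}
	}
	return out
}

func main() {
	pod := []Toleration{{Key: "stinky", Operator: "Exists"}}
	rc := []Toleration{{Key: "stinky", Operator: "Exists"}, {Key: "sandboxed", Operator: "Exists"}}
	fmt.Println(appendTolerations(pod, rc)) // the duplicate "stinky" toleration is not added twice
}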