Merge pull request #97989 from Danil-Grigorev/atomic-label-selectors

Make selectors atomic
This commit is contained in:
Kubernetes Prow Robot 2021-04-08 14:27:31 -07:00 committed by GitHub
commit 1e05d25890
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 436 additions and 28 deletions

View File

@ -8891,7 +8891,8 @@
"type": "string"
},
"description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
"type": "object"
"type": "object",
"x-kubernetes-map-type": "atomic"
},
"overhead": {
"additionalProperties": {
@ -9520,7 +9521,8 @@
"type": "string"
},
"description": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
"type": "object"
"type": "object",
"x-kubernetes-map-type": "atomic"
},
"template": {
"$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec",
@ -10441,7 +10443,8 @@
"type": "string"
},
"description": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/",
"type": "object"
"type": "object",
"x-kubernetes-map-type": "atomic"
},
"sessionAffinity": {
"description": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
@ -13329,7 +13332,8 @@
"type": "string"
},
"description": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.",
"type": "object"
"type": "object",
"x-kubernetes-map-type": "atomic"
},
"tolerations": {
"description": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.",
@ -13451,7 +13455,8 @@
"type": "string"
},
"description": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.",
"type": "object"
"type": "object",
"x-kubernetes-map-type": "atomic"
},
"tolerations": {
"description": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.",
@ -13560,7 +13565,8 @@
"type": "string"
},
"description": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.",
"type": "object"
"type": "object",
"x-kubernetes-map-type": "atomic"
},
"tolerations": {
"description": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.",

View File

@ -592,6 +592,7 @@ message ScaleStatus {
// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
// +mapType=atomic
map<string, string> selector = 2;
// label selector for pods that should match the replicas count. This is a serialized

View File

@ -45,6 +45,7 @@ type ScaleStatus struct {
// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
// +mapType=atomic
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// label selector for pods that should match the replicas count. This is a serialized

View File

@ -3450,6 +3450,7 @@ message PodSpec {
// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
// +optional
// +mapType=atomic
map<string, string> nodeSelector = 7;
// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
@ -4138,6 +4139,7 @@ message ReplicationControllerSpec {
// controller, if empty defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
// +mapType=atomic
map<string, string> selector = 2;
// Template is the object that describes the pod that will be created if
@ -4841,6 +4843,7 @@ message ServiceSpec {
// Ignored if type is ExternalName.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/
// +optional
// +mapType=atomic
map<string, string> selector = 2;
// clusterIP is the IP address of the service and is usually assigned

View File

@ -3005,6 +3005,7 @@ type PodSpec struct {
// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
// +optional
// +mapType=atomic
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
@ -3785,6 +3786,7 @@ type ReplicationControllerSpec struct {
// controller, if empty defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
// +mapType=atomic
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// TemplateRef is a reference to an object that describes the pod that will be created if
@ -4083,6 +4085,7 @@ type ServiceSpec struct {
// Ignored if type is ExternalName.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/
// +optional
// +mapType=atomic
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// clusterIP is the IP address of the service and is usually assigned

View File

@ -1237,6 +1237,7 @@ message ScaleStatus {
// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
// +mapType=atomic
map<string, string> selector = 2;
// label selector for pods that should match the replicas count. This is a serialized

View File

@ -37,6 +37,7 @@ type ScaleStatus struct {
// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
// +mapType=atomic
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// label selector for pods that should match the replicas count. This is a serialized

View File

@ -97,6 +97,7 @@ message Scheduling {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
// +mapType=atomic
map<string, string> nodeSelector = 1;
// tolerations are appended (excluding duplicates) to pods running with this

View File

@ -82,6 +82,7 @@ type Scheduling struct {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
// +mapType=atomic
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
// tolerations are appended (excluding duplicates) to pods running with this

View File

@ -106,6 +106,7 @@ message Scheduling {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
// +mapType=atomic
map<string, string> nodeSelector = 1;
// tolerations are appended (excluding duplicates) to pods running with this

View File

@ -91,6 +91,7 @@ type Scheduling struct {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
// +mapType=atomic
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
// tolerations are appended (excluding duplicates) to pods running with this

View File

@ -96,6 +96,7 @@ message Scheduling {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
// +mapType=atomic
map<string, string> nodeSelector = 1;
// tolerations are appended (excluding duplicates) to pods running with this

View File

@ -83,6 +83,7 @@ type Scheduling struct {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
// +mapType=atomic
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
// tolerations are appended (excluding duplicates) to pods running with this

View File

@ -79,6 +79,7 @@ func (d *fakeObjectDefaulter) Default(in runtime.Object) {}
type TestFieldManager struct {
fieldManager *FieldManager
apiVersion string
emptyObj runtime.Object
liveObj runtime.Object
}
@ -122,6 +123,7 @@ func NewTestFieldManager(gvk schema.GroupVersionKind, ignoreManagedFieldsFromReq
}
return TestFieldManager{
fieldManager: NewFieldManager(f, ignoreManagedFieldsFromRequestObject),
apiVersion: gvk.GroupVersion().String(),
emptyObj: live,
liveObj: live.DeepCopyObject(),
}
@ -147,10 +149,18 @@ func NewFakeOpenAPIModels() proto.Models {
return m
}
func (f *TestFieldManager) APIVersion() string {
return f.apiVersion
}
func (f *TestFieldManager) Reset() {
f.liveObj = f.emptyObj.DeepCopyObject()
}
func (f *TestFieldManager) Get() runtime.Object {
return f.liveObj.DeepCopyObject()
}
func (f *TestFieldManager) Apply(obj runtime.Object, manager string, force bool) error {
out, err := f.fieldManager.Apply(f.liveObj, obj, manager, force)
if err == nil {

View File

@ -21,6 +21,7 @@ import (
"reflect"
"testing"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -30,19 +31,21 @@ import (
"sigs.k8s.io/yaml"
)
type testArgs struct {
lastApplied []byte
original []byte
applied []byte
fieldManager string
expectConflictSet *fieldpath.Set
}
// TestApplyUsingLastAppliedAnnotation tests that applying to an object
// created with the client-side apply last-applied annotation
// will not give conflicts
func TestApplyUsingLastAppliedAnnotation(t *testing.T) {
f := NewDefaultTestFieldManager(schema.FromAPIVersionAndKind("apps/v1", "Deployment"))
tests := []struct {
lastApplied []byte
original []byte
applied []byte
fieldManager string
expectConflictSet *fieldpath.Set
}{
tests := []testArgs{
{
fieldManager: "kubectl",
lastApplied: []byte(`
@ -556,6 +559,362 @@ spec:
},
}
testConflicts(t, f, tests)
}
func TestServiceApply(t *testing.T) {
f := NewDefaultTestFieldManager(schema.FromAPIVersionAndKind("v1", "Service"))
tests := []testArgs{
{
fieldManager: "kubectl",
original: []byte(`
apiVersion: v1
kind: Service
metadata:
name: test
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: 8443
selector:
old: test
`),
applied: []byte(`
# All accepted while using the same field manager
apiVersion: v1
kind: Service
metadata:
name: test
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: 8444
selector:
new: test
`),
},
{
fieldManager: "kubectl",
original: []byte(`
apiVersion: v1
kind: Service
metadata:
name: test
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: 8443
selector:
old: test
`),
applied: []byte(`
# Allowed to remove selectors while using the same field manager
apiVersion: v1
kind: Service
metadata:
name: test
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: 8444
selector: {}
`),
},
{
fieldManager: "not_kubectl",
original: []byte(`
apiVersion: v1
kind: Service
metadata:
name: test
spec:
ports:
- name: https
port: 443
protocol: TCP # TODO: issue - this is a defaulted field, should not be required in a new spec
targetPort: 8443
selector:
old: test
`),
applied: []byte(`
# test selector update not allowed by last-applied
apiVersion: v1
kind: Service
metadata:
name: test
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: 8444
selector:
new: test
`),
expectConflictSet: fieldpath.NewSet(
fieldpath.MakePathOrDie("spec", "selector"), // selector is atomic
fieldpath.MakePathOrDie("spec", "ports", fieldpath.KeyByFields("port", 443, "protocol", "TCP"), "targetPort"),
),
},
}
testConflicts(t, f, tests)
}
func TestReplicationControllerApply(t *testing.T) {
f := NewDefaultTestFieldManager(schema.FromAPIVersionAndKind("v1", "ReplicationController"))
tests := []testArgs{
{
fieldManager: "kubectl",
original: []byte(`
apiVersion: v1
kind: ReplicationController
metadata:
name: test
spec:
replicas: 0
selector:
old: test
`),
applied: []byte(`
# All accepted while using the same field manager
apiVersion: v1
kind: ReplicationController
metadata:
name: test
spec:
replicas: 3
selector:
new: test
`),
},
{
fieldManager: "not_kubectl",
original: []byte(`
apiVersion: v1
kind: ReplicationController
metadata:
name: test
spec:
replicas: 0
selector:
old: test
`),
applied: []byte(`
# test selector update not allowed by last-applied
apiVersion: v1
kind: ReplicationController
metadata:
name: test
spec:
replicas: 3
selector:
new: test
`),
expectConflictSet: fieldpath.NewSet(
fieldpath.MakePathOrDie("spec", "selector"), // selector is atomic
fieldpath.MakePathOrDie("spec", "replicas"),
),
},
}
testConflicts(t, f, tests)
}
func TestPodApply(t *testing.T) {
f := NewDefaultTestFieldManager(schema.FromAPIVersionAndKind("v1", "Pod"))
tests := []testArgs{
{
fieldManager: "kubectl",
original: []byte(`
apiVersion: v1
kind: Pod
metadata:
name: test
namespace: test
spec:
containers:
- args:
- -v=2
command:
- controller
image: some.registry/app:latest
name: doJob
nodeName: definetlyControlPlane
nodeSelector:
node-role.kubernetes.io/master: ""
`),
applied: []byte(`
# All accepted while using the same field manager
apiVersion: v1
kind: Pod
metadata:
name: test
namespace: test
spec:
containers:
- args:
- -v=2
command:
- controller
image: some.registry/app:latest
name: doJob
nodeSelector:
node-role.kubernetes.io/worker: ""
`),
},
{
fieldManager: "not_kubectl",
original: []byte(`
apiVersion: v1
kind: Pod
metadata:
name: test
namespace: test
spec:
containers:
- args:
- -v=2
command:
- controller
image: some.registry/app:latest
name: doJob
nodeName: definetlyControlPlane
nodeSelector:
node-role.kubernetes.io/master: ""
`),
applied: []byte(`
# test selector update not allowed by last-applied
apiVersion: v1
kind: Pod
metadata:
name: test
namespace: test
spec:
containers:
- args:
- -v=2
command:
- controller
image: some.registry/app:latest
name: doJob
nodeName: definetlyControlPlane
nodeSelector:
node-role.kubernetes.io/master: ""
otherNodeType: ""
`),
expectConflictSet: fieldpath.NewSet(
fieldpath.MakePathOrDie("spec", "nodeSelector"), // selector is atomic
),
},
{
fieldManager: "not_kubectl",
original: []byte(`
apiVersion: v1
kind: Pod
metadata:
name: test
namespace: test
spec:
containers:
- args:
- -v=2
command:
- controller
image: some.registry/app:latest
name: doJob
nodeName: definetlyControlPlane
nodeSelector:
node-role.kubernetes.io/master: ""
`),
applied: []byte(`
# purging selector not allowed for different manager
apiVersion: v1
kind: Pod
metadata:
name: test
namespace: test
spec:
containers:
- args:
- -v=2
command:
- controller
image: some.registry/app:latest
name: doJob
nodeName: another
nodeSelector: {}
`),
expectConflictSet: fieldpath.NewSet(
fieldpath.MakePathOrDie("spec", "nodeSelector"), // selector is atomic
fieldpath.MakePathOrDie("spec", "nodeName"),
),
},
{
fieldManager: "kubectl",
original: []byte(`
apiVersion: v1
kind: Pod
metadata:
name: test
namespace: test
spec:
containers:
- args:
- -v=2
command:
- controller
image: some.registry/app:latest
name: doJob
nodeName: definetlyControlPlane
nodeSelector:
node-role.kubernetes.io/master: ""
`),
applied: []byte(`
# same manager could purge nodeSelector
apiVersion: v1
kind: Pod
metadata:
name: test
namespace: test
spec:
containers:
- args:
- -v=2
command:
- controller
image: some.registry/app:latest
name: doJob
nodeName: another
nodeSelector: {}
`),
},
}
testConflicts(t, f, tests)
}
func testConflicts(t *testing.T, f TestFieldManager, tests []testArgs) {
for i, test := range tests {
t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
f.Reset()
@ -587,23 +946,34 @@ spec:
if err != nil {
t.Errorf("expected no error but got %v", err)
}
return
}
} else {
if err == nil || !apierrors.IsConflict(err) {
t.Errorf("expected to get conflicts but got %v", err)
}
if err == nil || !apierrors.IsConflict(err) {
t.Errorf("expected to get conflicts but got %v", err)
}
expectedConflicts := merge.Conflicts{}
test.expectConflictSet.Iterate(func(p fieldpath.Path) {
expectedConflicts = append(expectedConflicts, merge.Conflict{
Manager: `{"manager":"test_client_side_apply","operation":"Update","apiVersion":"apps/v1"}`,
Path: p,
expectedConflicts := merge.Conflicts{}
test.expectConflictSet.Iterate(func(p fieldpath.Path) {
expectedConflicts = append(expectedConflicts, merge.Conflict{
Manager: fmt.Sprintf(`{"manager":"test_client_side_apply","operation":"Update","apiVersion":"%s"}`, f.APIVersion()),
Path: p,
})
})
})
expectedConflictErr := internal.NewConflictError(expectedConflicts)
if !reflect.DeepEqual(expectedConflictErr, err) {
t.Errorf("expected to get\n%+v\nbut got\n%+v", expectedConflictErr, err)
expectedConflictErr := internal.NewConflictError(expectedConflicts)
if !reflect.DeepEqual(expectedConflictErr, err) {
t.Errorf("expected to get\n%+v\nbut got\n%+v", expectedConflictErr, err)
}
// Yet force should resolve all conflicts
err = f.Apply(appliedObj, test.fieldManager, true)
if err != nil {
t.Errorf("unexpected error during force ownership apply: %v", err)
}
}
// Eventually resource should contain applied changes
if !apiequality.Semantic.DeepDerivative(appliedObj, f.Get()) {
t.Errorf("expected equal resource: \n%#v, got: \n%#v", appliedObj, f.Get())
}
})
}

View File

@ -5290,6 +5290,7 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
elementRelationship: atomic
- name: overhead
type:
map:
@ -5673,6 +5674,7 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
elementRelationship: atomic
- name: template
type:
namedType: io.k8s.api.core.v1.PodTemplateSpec
@ -6195,6 +6197,7 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
elementRelationship: atomic
- name: sessionAffinity
type:
scalar: string
@ -8859,6 +8862,7 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
elementRelationship: atomic
- name: tolerations
type:
list:
@ -8911,6 +8915,7 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
elementRelationship: atomic
- name: tolerations
type:
list:
@ -8956,6 +8961,7 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
elementRelationship: atomic
- name: tolerations
type:
list: