api: resource.k8s.io PodScheduling -> PodSchedulingContext

The name "PodScheduling" was unusual because in contrast to most other names,
it was impossible to put an article in front of it. Now PodSchedulingContext is
used instead.
Patrick Ohly
2023-03-06 20:57:35 +01:00
parent ee18f60252
commit fec5233668
24 changed files with 675 additions and 669 deletions
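
For orientation, a minimal sketch (not part of this commit) of how the rename looks to API consumers, using the apimachinery schema types; the group, version, kind, and resource names below are taken from the diffs that follow. The group and version stay at resource.k8s.io/v1alpha2; only the kind and the resource name change.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Before this commit: resource "podschedulings", kind "PodScheduling".
	oldGVR := schema.GroupVersionResource{Group: "resource.k8s.io", Version: "v1alpha2", Resource: "podschedulings"}

	// After this commit: resource "podschedulingcontexts", kind "PodSchedulingContext".
	newGVR := schema.GroupVersionResource{Group: "resource.k8s.io", Version: "v1alpha2", Resource: "podschedulingcontexts"}
	newGVK := schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1alpha2", Kind: "PodSchedulingContext"}

	fmt.Println(oldGVR.String(), "->", newGVR.String(), "kind", newGVK.Kind)
}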


@@ -64,7 +64,7 @@ var resetFieldsStatusData = map[schema.GroupVersionResource]string{
gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": false}}`,
gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulings.
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulingcontexts.
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`,
gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
// standard for []metav1.Condition
@@ -88,10 +88,10 @@ var noConflicts = map[string]struct{}{
// namespaces only have a spec.finalizers field which is also skipped,
// thus it will never have a conflict.
"namespaces": {},
-// podschedulings.status only has a list which contains items with a list,
+// podschedulingcontexts.status only has a list which contains items with a list,
// therefore apply works because it simply merges either the outer or
// the inner list.
"podschedulings": {},
"podschedulingcontexts": {},
}
var image2 = image.GetE2EImage(image.Etcd)
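
To illustrate the noConflicts comment above: the status is an outer list of per-claim entries, each carrying its own inner list of node names, so the two test patches that set unsuitableNodes to ["node1"] and ["node2"] for "my-claim" simply get merged instead of conflicting. A minimal sketch, with field names assumed from the JSON keys in the test data rather than copied from the upstream Go types:

package main

import (
	"encoding/json"
	"fmt"
)

// Field names mirror the JSON keys used in statusData/resetFieldsStatusData;
// they are an assumption for illustration, not the upstream API types.
type resourceClaimSchedulingStatus struct {
	Name            string   `json:"name"`
	UnsuitableNodes []string `json:"unsuitableNodes"`
}

type podSchedulingContextStatus struct {
	ResourceClaims []resourceClaimSchedulingStatus `json:"resourceClaims"`
}

func main() {
	// What the server ends up storing after both test patches were applied:
	// the entry for "my-claim" carries both nodes, hence no conflict.
	merged := podSchedulingContextStatus{
		ResourceClaims: []resourceClaimSchedulingStatus{
			{Name: "my-claim", UnsuitableNodes: []string{"node1", "node2"}},
		},
	}
	out, _ := json.Marshal(merged)
	fmt.Println(string(out))
}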
@@ -148,7 +148,7 @@ var resetFieldsSpecData = map[schema.GroupVersionResource]string{
gvr("awesome.bears.com", "v3", "pandas"): `{"spec": {"replicas": 302}}`,
gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
gvr("apiregistration.k8s.io", "v1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"spec": {"selectedNode": "node2name"}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"spec": {"selectedNode": "node2name"}}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclasses"): `{"driverName": "other.example.com"}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test.
gvr("resource.k8s.io", "v1alpha2", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`,


@@ -54,7 +54,7 @@ var statusData = map[schema.GroupVersionResource]string{
gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": true}}`,
gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "example.com"}}`,
gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
// standard for []metav1.Condition


@@ -469,9 +469,9 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes
Stub: `{"metadata": {"name": "claimtemplate1name"}, "spec": {"spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}}`,
ExpectedEtcdPath: "/registry/resourceclaimtemplates/" + namespace + "/claimtemplate1name",
},
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): {
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): {
Stub: `{"metadata": {"name": "pod1name"}, "spec": {"selectedNode": "node1name", "potentialNodes": ["node1name", "node2name"]}}`,
ExpectedEtcdPath: "/registry/podschedulings/" + namespace + "/pod1name",
ExpectedEtcdPath: "/registry/podschedulingcontexts/" + namespace + "/pod1name",
},
// --
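
The etcd storage entry above pins both the stub object and the new registry key. Below is a small sketch of that stub as an unstructured object together with the key it is expected to land under; apiVersion and kind are assumed from the GVR, and "<namespace>" stands in for the per-test namespace supplied by GetEtcdStorageDataForNamespace.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// The stub from the test data above, expressed as an unstructured object.
	stub := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "resource.k8s.io/v1alpha2",
		"kind":       "PodSchedulingContext",
		"metadata":   map[string]interface{}{"name": "pod1name"},
		"spec": map[string]interface{}{
			"selectedNode":   "node1name",
			"potentialNodes": []interface{}{"node1name", "node2name"},
		},
	}}

	// After the rename the object is stored under the new registry prefix.
	namespace := "<namespace>"
	fmt.Println("/registry/podschedulingcontexts/" + namespace + "/" + stub.GetName())
}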