Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 11:21:47 +00:00
Merge pull request #55103 from ConnorDoyle/remove-oir
Automatic merge from submit-queue (batch tested with PRs 55103, 56036, 56186). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Removed opaque integer resources (deprecated in v1.8)

**What this PR does / why we need it**:
* Remove opaque integer resources (OIR) support from the code base. This feature was deprecated in v1.8 and replaced by Extended Resources (ER).

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*: Fixes #55102

**Release note**:
```release-note
Remove opaque integer resources (OIR) support (deprecated in v1.8.)
```
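For orientation, here is a minimal, self-contained Go sketch of the naming rule this PR leaves behind. It re-implements the two helpers touched in the first hunk below as standalone functions; the "no slash, or `kubernetes.io/` domain" check for the default namespace is an assumption recalled from the surrounding code (only its second half is visible in the diff), so treat this as an illustration rather than code from this commit.

```go
package main

import (
	"fmt"
	"strings"
)

// resourceDefaultNamespacePrefix mirrors the ResourceDefaultNamespacePrefix constant kept by the diff.
const resourceDefaultNamespacePrefix = "kubernetes.io/"

// isDefaultNamespaceResource is an assumed reconstruction of IsDefaultNamespaceResource:
// a name with no domain prefix, or whose domain contains "kubernetes.io/", is in the default namespace.
func isDefaultNamespaceResource(name string) bool {
	return !strings.Contains(name, "/") ||
		strings.Contains(name, resourceDefaultNamespacePrefix)
}

// isExtendedResourceName matches the post-removal logic in the first hunk: an extended
// resource is simply any resource name outside the default namespace; the special case
// for the "pod.alpha.kubernetes.io/opaque-int-resource-" prefix is gone.
func isExtendedResourceName(name string) bool {
	return !isDefaultNamespaceResource(name)
}

func main() {
	for _, name := range []string{
		"cpu",               // default namespace -> not extended
		"kubernetes.io/foo", // default namespace -> not extended
		"example.com/a",     // vendor domain -> extended resource
		"pod.alpha.kubernetes.io/opaque-int-resource-foo", // old OIR name: previously matched via the removed clause, now reports false
	} {
		fmt.Printf("%-50s extended=%v\n", name, isExtendedResourceName(name))
	}
}
```

Running the sketch shows that vendor-domain names such as `example.com/a` count as extended resources, while the old OIR-prefixed names no longer get special treatment after this change.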
This commit is contained in commit 2a18a2aadf.
@@ -147,10 +147,9 @@ func IsStandardContainerResourceName(str string) bool {
}

// IsExtendedResourceName returns true if the resource name is not in the
// default namespace, or it has the opaque integer resource prefix.
// default namespace.
func IsExtendedResourceName(name core.ResourceName) bool {
// TODO: Remove OIR part following deprecation.
return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name)
return !IsDefaultNamespaceResource(name)
}

// IsDefaultNamespaceResource returns true if the resource name is in the

@@ -161,22 +160,6 @@ func IsDefaultNamespaceResource(name core.ResourceName) bool {
strings.Contains(string(name), core.ResourceDefaultNamespacePrefix)
}

// IsOpaqueIntResourceName returns true if the resource name has the opaque
// integer resource prefix.
func IsOpaqueIntResourceName(name core.ResourceName) bool {
return strings.HasPrefix(string(name), core.ResourceOpaqueIntPrefix)
}

// OpaqueIntResourceName returns a ResourceName with the canonical opaque
// integer prefix prepended. If the argument already has the prefix, it is
// returned unmodified.
func OpaqueIntResourceName(name string) core.ResourceName {
if IsOpaqueIntResourceName(core.ResourceName(name)) {
return core.ResourceName(name)
}
return core.ResourceName(fmt.Sprintf("%s%s", core.ResourceOpaqueIntPrefix, name))
}

var overcommitBlacklist = sets.NewString(string(core.ResourceNvidiaGPU))

// IsOvercommitAllowed returns true if the resource is in the default
@@ -3553,8 +3553,6 @@ const (
)

const (
// Namespace prefix for opaque counted resources (alpha).
ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-"
// Default namespace prefix.
ResourceDefaultNamespacePrefix = "kubernetes.io/"
// Name prefix for huge page resources (alpha).
@@ -30,10 +30,9 @@ import (
)

// IsExtendedResourceName returns true if the resource name is not in the
// default namespace, or it has the opaque integer resource prefix.
// default namespace.
func IsExtendedResourceName(name v1.ResourceName) bool {
// TODO: Remove OIR part following deprecation.
return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name)
return !IsDefaultNamespaceResource(name)
}

// IsDefaultNamespaceResource returns true if the resource name is in the

@@ -69,22 +68,6 @@ func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, erro
return resource.ParseQuantity(pageSize)
}

// IsOpaqueIntResourceName returns true if the resource name has the opaque
// integer resource prefix.
func IsOpaqueIntResourceName(name v1.ResourceName) bool {
return strings.HasPrefix(string(name), v1.ResourceOpaqueIntPrefix)
}

// OpaqueIntResourceName returns a ResourceName with the canonical opaque
// integer prefix prepended. If the argument already has the prefix, it is
// returned unmodified.
func OpaqueIntResourceName(name string) v1.ResourceName {
if IsOpaqueIntResourceName(v1.ResourceName(name)) {
return v1.ResourceName(name)
}
return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceOpaqueIntPrefix, name))
}

var overcommitBlacklist = sets.NewString(string(v1.ResourceNvidiaGPU))

// IsOvercommitAllowed returns true if the resource is in the default
@@ -152,63 +152,6 @@ func TestIsOvercommitAllowed(t *testing.T) {
})
}
}
func TestIsOpaqueIntResourceName(t *testing.T) { // resourceName input with the correct OpaqueIntResourceName prefix ("pod.alpha.kubernetes.io/opaque-int-resource-") should pass
testCases := []struct {
resourceName v1.ResourceName
expectVal bool
}{
{
resourceName: "pod.alpha.kubernetes.io/opaque-int-resource-foo",
expectVal: true, // resourceName should pass because the resourceName has the correct prefix.
},
{
resourceName: "foo",
expectVal: false, // resourceName should fail because the resourceName has the wrong prefix.
},
{
resourceName: "",
expectVal: false, // resourceName should fail, empty resourceName.
},
}

for _, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("resourceName input=%s, expected value=%v", tc.resourceName, tc.expectVal), func(t *testing.T) {
t.Parallel()
v := IsOpaqueIntResourceName(tc.resourceName)
if v != tc.expectVal {
t.Errorf("Got %v but expected %v", v, tc.expectVal)
}
})
}
}

func TestOpaqueIntResourceName(t *testing.T) { // each output should have the correct appended prefix ("pod.alpha.kubernetes.io/opaque-int-resource-") for opaque counted resources.
testCases := []struct {
name string
expectVal v1.ResourceName
}{
{
name: "foo",
expectVal: "pod.alpha.kubernetes.io/opaque-int-resource-foo", // append prefix to input string foo
},
{
name: "",
expectVal: "pod.alpha.kubernetes.io/opaque-int-resource-", // append prefix to input empty string
},
}

for _, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("name input=%s, expected value=%s", tc.name, tc.expectVal), func(t *testing.T) {
t.Parallel()
v := OpaqueIntResourceName(tc.name)
if v != tc.expectVal {
t.Errorf("Got %v but expected %v", v, tc.expectVal)
}
})
}
}

func TestAddToNodeAddresses(t *testing.T) {
testCases := []struct {
@@ -1,19 +1,13 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = ["validation.go"],
importpath = "k8s.io/kubernetes/pkg/apis/core/v1/validation",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/helper:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",

@@ -22,6 +16,18 @@ go_library(
],
)

go_test(
name = "go_default_test",
srcs = ["validation_test.go"],
importpath = "k8s.io/kubernetes/pkg/apis/core/v1/validation",
library = ":go_default_library",
deps = [
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),

@@ -33,16 +39,5 @@ filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

go_test(
name = "go_default_test",
srcs = ["validation_test.go"],
importpath = "k8s.io/kubernetes/pkg/apis/core/v1/validation",
library = ":go_default_library",
deps = [
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
visibility = ["//visibility:public"],
)
@@ -20,8 +20,6 @@ import (
"fmt"
"strings"

"github.com/golang/glog"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"

@@ -105,12 +103,6 @@ func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) f
// Validate compute resource typename.
// Refer to docs/design/resources.md for more details.
func validateResourceName(value string, fldPath *field.Path) field.ErrorList {
// Opaque integer resources (OIR) deprecation began in v1.8
// TODO: Remove warning after OIR deprecation cycle.
if v1helper.IsOpaqueIntResourceName(v1.ResourceName(value)) {
glog.Errorf("DEPRECATION WARNING! Opaque integer resources are deprecated starting with v1.8: %s", value)
}

allErrs := field.ErrorList{}
for _, msg := range validation.IsQualifiedName(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
@@ -3944,12 +3944,6 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
// Validate compute resource typename.
// Refer to docs/design/resources.md for more details.
func validateResourceName(value string, fldPath *field.Path) field.ErrorList {
// Opaque integer resources (OIR) deprecation began in v1.8
// TODO: Remove warning after OIR deprecation cycle.
if helper.IsOpaqueIntResourceName(core.ResourceName(value)) {
glog.Errorf("DEPRECATION WARNING! Opaque integer resources are deprecated starting with v1.8: %s", value)
}

allErrs := field.ErrorList{}
for _, msg := range validation.IsQualifiedName(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
@@ -6088,20 +6088,20 @@ func TestValidatePod(t *testing.T) {
},
Spec: validPodSpec(nil),
},
{ // valid opaque integer resources for init container
ObjectMeta: metav1.ObjectMeta{Name: "valid-opaque-int", Namespace: "ns"},
{ // valid extended resources for init container
ObjectMeta: metav1.ObjectMeta{Name: "valid-extended", Namespace: "ns"},
Spec: core.PodSpec{
InitContainers: []core.Container{
{
Name: "valid-opaque-int",
Name: "valid-extended",
Image: "image",
ImagePullPolicy: "IfNotPresent",
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("10"),
core.ResourceName("example.com/a"): resource.MustParse("10"),
},
Limits: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("20"),
core.ResourceName("example.com/a"): resource.MustParse("10"),
},
},
TerminationMessagePolicy: "File",

@@ -6112,21 +6112,21 @@ func TestValidatePod(t *testing.T) {
DNSPolicy: core.DNSClusterFirst,
},
},
{ // valid opaque integer resources for regular container
ObjectMeta: metav1.ObjectMeta{Name: "valid-opaque-int", Namespace: "ns"},
{ // valid extended resources for regular container
ObjectMeta: metav1.ObjectMeta{Name: "valid-extended", Namespace: "ns"},
Spec: core.PodSpec{
InitContainers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
Containers: []core.Container{
{
Name: "valid-opaque-int",
Name: "valid-extended",
Image: "image",
ImagePullPolicy: "IfNotPresent",
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("10"),
core.ResourceName("example.com/a"): resource.MustParse("10"),
},
Limits: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("20"),
core.ResourceName("example.com/a"): resource.MustParse("10"),
},
},
TerminationMessagePolicy: "File",

@@ -6738,8 +6738,8 @@ func TestValidatePod(t *testing.T) {
Spec: validPodSpec(nil),
},
},
"invalid opaque integer resource requirement: request must be <= limit": {
expectedError: "must be less than or equal to pod.alpha.kubernetes.io/opaque-int-resource-A",
"invalid extended resource requirement: request must be == limit": {
expectedError: "must be equal to example.com/a",
spec: core.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},
Spec: core.PodSpec{
@@ -6750,10 +6750,10 @@ func TestValidatePod(t *testing.T) {
ImagePullPolicy: "IfNotPresent",
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("2"),
core.ResourceName("example.com/a"): resource.MustParse("2"),
},
Limits: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("1"),
core.ResourceName("example.com/a"): resource.MustParse("1"),
},
},
},

@@ -6763,7 +6763,7 @@ func TestValidatePod(t *testing.T) {
},
},
},
"invalid fractional opaque integer resource in container request": {
"invalid fractional extended resource in container request": {
expectedError: "must be an integer",
spec: core.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},

@@ -6775,7 +6775,7 @@ func TestValidatePod(t *testing.T) {
ImagePullPolicy: "IfNotPresent",
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("500m"),
core.ResourceName("example.com/a"): resource.MustParse("500m"),
},
},
},

@@ -6785,7 +6785,7 @@ func TestValidatePod(t *testing.T) {
},
},
},
"invalid fractional opaque integer resource in init container request": {
"invalid fractional extended resource in init container request": {
expectedError: "must be an integer",
spec: core.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},

@@ -6797,7 +6797,7 @@ func TestValidatePod(t *testing.T) {
ImagePullPolicy: "IfNotPresent",
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("500m"),
core.ResourceName("example.com/a"): resource.MustParse("500m"),
},
},
},

@@ -6808,7 +6808,7 @@ func TestValidatePod(t *testing.T) {
},
},
},
"invalid fractional opaque integer resource in container limit": {
"invalid fractional extended resource in container limit": {
expectedError: "must be an integer",
spec: core.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},

@@ -6820,10 +6820,10 @@ func TestValidatePod(t *testing.T) {
ImagePullPolicy: "IfNotPresent",
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("5"),
core.ResourceName("example.com/a"): resource.MustParse("5"),
},
Limits: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("2.5"),
core.ResourceName("example.com/a"): resource.MustParse("2.5"),
},
},
},

@@ -6833,7 +6833,7 @@ func TestValidatePod(t *testing.T) {
},
},
},
"invalid fractional opaque integer resource in init container limit": {
"invalid fractional extended resource in init container limit": {
expectedError: "must be an integer",
spec: core.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},

@@ -6845,10 +6845,10 @@ func TestValidatePod(t *testing.T) {
ImagePullPolicy: "IfNotPresent",
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("5"),
core.ResourceName("example.com/a"): resource.MustParse("2.5"),
},
Limits: core.ResourceList{
helper.OpaqueIntResourceName("A"): resource.MustParse("2.5"),
core.ResourceName("example.com/a"): resource.MustParse("2.5"),
},
},
},
@@ -9611,55 +9611,55 @@ func TestValidateNodeUpdate(t *testing.T) {
}, false},
{core.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "valid-opaque-int-resources",
Name: "valid-extended-resources",
},
}, core.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "valid-opaque-int-resources",
Name: "valid-extended-resources",
},
Status: core.NodeStatus{
Capacity: core.ResourceList{
core.ResourceName(core.ResourceCPU): resource.MustParse("10"),
core.ResourceName(core.ResourceMemory): resource.MustParse("10G"),
helper.OpaqueIntResourceName("A"): resource.MustParse("5"),
helper.OpaqueIntResourceName("B"): resource.MustParse("10"),
core.ResourceName("example.com/a"): resource.MustParse("5"),
core.ResourceName("example.com/b"): resource.MustParse("10"),
},
},
}, true},
{core.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "invalid-fractional-opaque-int-capacity",
Name: "invalid-fractional-extended-capacity",
},
}, core.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "invalid-fractional-opaque-int-capacity",
Name: "invalid-fractional-extended-capacity",
},
Status: core.NodeStatus{
Capacity: core.ResourceList{
core.ResourceName(core.ResourceCPU): resource.MustParse("10"),
core.ResourceName(core.ResourceMemory): resource.MustParse("10G"),
helper.OpaqueIntResourceName("A"): resource.MustParse("500m"),
core.ResourceName("example.com/a"): resource.MustParse("500m"),
},
},
}, false},
{core.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "invalid-fractional-opaque-int-allocatable",
Name: "invalid-fractional-extended-allocatable",
},
}, core.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "invalid-fractional-opaque-int-allocatable",
Name: "invalid-fractional-extended-allocatable",
},
Status: core.NodeStatus{
Capacity: core.ResourceList{
core.ResourceName(core.ResourceCPU): resource.MustParse("10"),
core.ResourceName(core.ResourceMemory): resource.MustParse("10G"),
helper.OpaqueIntResourceName("A"): resource.MustParse("5"),
core.ResourceName("example.com/a"): resource.MustParse("5"),
},
Allocatable: core.ResourceList{
core.ResourceName(core.ResourceCPU): resource.MustParse("10"),
core.ResourceName(core.ResourceMemory): resource.MustParse("10G"),
helper.OpaqueIntResourceName("A"): resource.MustParse("4.5"),
core.ResourceName("example.com/a"): resource.MustParse("4.5"),
},
},
}, false},
@@ -75,32 +75,32 @@ func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.Pe
}

var (
opaqueResourceA = v1helper.OpaqueIntResourceName("AAA")
opaqueResourceB = v1helper.OpaqueIntResourceName("BBB")
extendedResourceA = v1.ResourceName("example.com/aaa")
extendedResourceB = v1.ResourceName("example.com/bbb")
hugePageResourceA = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
)

func makeResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, storage, hugePageA int64) v1.NodeResources {
func makeResources(milliCPU, memory, nvidiaGPUs, pods, extendedA, storage, hugePageA int64) v1.NodeResources {
return v1.NodeResources{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
v1.ResourceNvidiaGPU: *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI),
opaqueResourceA: *resource.NewQuantity(opaqueA, resource.DecimalSI),
extendedResourceA: *resource.NewQuantity(extendedA, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
hugePageResourceA: *resource.NewQuantity(hugePageA, resource.BinarySI),
},
}
}

func makeAllocatableResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, storage, hugePageA int64) v1.ResourceList {
func makeAllocatableResources(milliCPU, memory, nvidiaGPUs, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
return v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
v1.ResourceNvidiaGPU: *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI),
opaqueResourceA: *resource.NewQuantity(opaqueA, resource.DecimalSI),
extendedResourceA: *resource.NewQuantity(extendedA, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
hugePageResourceA: *resource.NewQuantity(hugePageA, resource.BinarySI),
}
@@ -240,99 +240,99 @@ func TestPodFitsResources(t *testing.T) {
test: "equal edge case for init container",
},
{
pod: newResourcePod(schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
pod: newResourcePod(schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})),
fits: true,
test: "opaque resource fits",
test: "extended resource fits",
},
{
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})),
fits: true,
test: "opaque resource fits for init container",
test: "extended resource fits for init container",
},
{
pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
fits: false,
test: "opaque resource capacity enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 10, 0, 5)},
test: "extended resource capacity enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
},
{
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
fits: false,
test: "opaque resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 10, 0, 5)},
test: "extended resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
},
{
pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
fits: false,
test: "opaque resource allocatable enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 1, 5, 5)},
test: "extended resource allocatable enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
},
{
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
fits: false,
test: "opaque resource allocatable enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 1, 5, 5)},
test: "extended resource allocatable enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
},
{
pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
fits: false,
test: "opaque resource allocatable enforced for multiple containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 6, 2, 5)},
test: "extended resource allocatable enforced for multiple containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
},
{
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
fits: true,
test: "opaque resource allocatable admits multiple init containers",
test: "extended resource allocatable admits multiple init containers",
},
{
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 6}},
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}},
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
fits: false,
test: "opaque resource allocatable enforced for multiple init containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 6, 2, 5)},
test: "extended resource allocatable enforced for multiple init containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
},
{
pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})),
fits: false,
test: "opaque resource allocatable enforced for unknown resource",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceB, 1, 0, 0)},
test: "extended resource allocatable enforced for unknown resource",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
},
{
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})),
fits: false,
test: "opaque resource allocatable enforced for unknown resource for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceB, 1, 0, 0)},
test: "extended resource allocatable enforced for unknown resource for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
},
{
pod: newResourcePod(
@@ -1,10 +1,4 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",

@@ -15,6 +9,7 @@ go_library(
"util.go",
],
importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library",

@@ -35,7 +30,6 @@ go_test(
importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache",
library = ":go_default_library",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library",
"//plugin/pkg/scheduler/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",

@@ -58,4 +52,5 @@ filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
@@ -29,7 +29,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util"
)

@@ -53,9 +52,9 @@ func TestAssumePodScheduled(t *testing.T) {
makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
makeBasePod(t, nodeName, "test-nonzero", "", "", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
makeBasePod(t, nodeName, "test", "100m", "500", "oir-foo:3", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "oir-foo:5", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
makeBasePod(t, nodeName, "test", "100m", "500", "random-invalid-oir-key:100", []v1.ContainerPort{{}}),
makeBasePod(t, nodeName, "test", "100m", "500", "example.com/foo:3", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "example.com/foo:5", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
makeBasePod(t, nodeName, "test", "100m", "500", "random-invalid-extended-key:100", []v1.ContainerPort{{}}),
}

tests := []struct {

@@ -113,7 +112,7 @@ func TestAssumePodScheduled(t *testing.T) {
requestedResource: &Resource{
MilliCPU: 100,
Memory: 500,
ScalarResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 3},
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3},
},
nonzeroRequest: &Resource{
MilliCPU: 100,

@@ -129,7 +128,7 @@ func TestAssumePodScheduled(t *testing.T) {
requestedResource: &Resource{
MilliCPU: 300,
Memory: 1524,
ScalarResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 8},
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8},
},
nonzeroRequest: &Resource{
MilliCPU: 300,

@@ -689,7 +688,7 @@ func TestNodeOperators(t *testing.T) {
mem_100m := resource.MustParse("100m")
cpu_half := resource.MustParse("500m")
mem_50m := resource.MustParse("50m")
resourceFooName := "pod.alpha.kubernetes.io/opaque-int-resource-foo"
resourceFooName := "example.com/foo"
resourceFoo := resource.MustParse("1")

tests := []struct {

@@ -896,25 +895,19 @@ type testingMode interface {
Fatalf(format string, args ...interface{})
}

func makeBasePod(t testingMode, nodeName, objName, cpu, mem, oir string, ports []v1.ContainerPort) *v1.Pod {
func makeBasePod(t testingMode, nodeName, objName, cpu, mem, extended string, ports []v1.ContainerPort) *v1.Pod {
req := v1.ResourceList{}
if cpu != "" {
req = v1.ResourceList{
v1.ResourceCPU: resource.MustParse(cpu),
v1.ResourceMemory: resource.MustParse(mem),
}
if oir != "" {
if len(strings.Split(oir, ":")) != 2 {
t.Fatalf("Invalid OIR string")
if extended != "" {
parts := strings.Split(extended, ":")
if len(parts) != 2 {
t.Fatalf("Invalid extended resource string: \"%s\"", extended)
}
var name v1.ResourceName
if strings.Split(oir, ":")[0] != "random-invalid-oir-key" {
name = v1helper.OpaqueIntResourceName(strings.Split(oir, ":")[0])
} else {
name = v1.ResourceName(strings.Split(oir, ":")[0])
}
quantity := resource.MustParse(strings.Split(oir, ":")[1])
req[name] = quantity
req[v1.ResourceName(parts[0])] = resource.MustParse(parts[1])
}
}
return &v1.Pod{
@@ -3981,8 +3981,6 @@ const (
)

const (
// Namespace prefix for opaque counted resources (alpha).
ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-"
// Default namespace prefix.
ResourceDefaultNamespacePrefix = "kubernetes.io/"
// Name prefix for huge page resources (alpha).
@@ -1,10 +1,4 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",

@@ -14,7 +8,6 @@ go_library(
"framework.go",
"limit_range.go",
"nvidia-gpus.go",
"opaque_resource.go",
"predicates.go",
"preemption.go",
"priorities.go",

@@ -22,13 +15,12 @@ go_library(
"resource_quota.go",
],
importpath = "k8s.io/kubernetes/test/e2e/scheduling",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/version:go_default_library",
"//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library",
"//test/e2e/common:go_default_library",

@@ -45,7 +37,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",

@@ -54,25 +45,11 @@ go_library(
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

go_test(
name = "go_default_test",
srcs = ["taints_test.go"],
importpath = "k8s.io/kubernetes/test/e2e/scheduling",
library = ":go_default_library",
tags = ["e2e"],
deps = [
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",

@@ -87,3 +64,17 @@ go_test(
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
@@ -1,305 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduling

import (
"fmt"
"strings"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/util/system"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

var _ = SIGDescribe("Opaque resources [Feature:OpaqueResources]", func() {
f := framework.NewDefaultFramework("opaque-resource")
opaqueResName := v1helper.OpaqueIntResourceName("foo")
var node *v1.Node

BeforeEach(func() {
if node == nil {
// Priming invocation; select the first non-master node.
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes.Items {
if !system.IsMasterNode(n.Name) {
node = &n
break
}
}
if node == nil {
framework.Failf("unable to select a non-master node")
}
}

addOpaqueResource(f, node.Name, opaqueResName)
})

// TODO: The suite times out if removeOpaqueResource is called as part of
// an AfterEach closure. For now, it is the last statement in each
// It block.
// AfterEach(func() {
// removeOpaqueResource(f, node.Name, opaqueResName)
// })

It("should not break pods that do not consume opaque integer resources.", func() {
defer removeOpaqueResource(f, node.Name, opaqueResName)

By("Creating a vanilla pod")
requests := v1.ResourceList{v1.ResourceCPU: resource.MustParse("0.1")}
limits := v1.ResourceList{v1.ResourceCPU: resource.MustParse("0.2")}
pod := f.NewTestPod("without-oir", requests, limits)

By("Observing an event that indicates the pod was scheduled")
action := func() error {
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
return err
}
// Here we don't check for the bound node name since it can land on
// any one (this pod doesn't require any of the opaque resource.)
predicate := scheduleSuccessEvent(pod.Name, "")
success, err := common.ObserveEventAfterAction(f, predicate, action)
Expect(err).NotTo(HaveOccurred())
Expect(success).To(Equal(true))
})
It("should schedule pods that do consume opaque integer resources.", func() {
|
||||
defer removeOpaqueResource(f, node.Name, opaqueResName)
|
||||
|
||||
By("Creating a pod that requires less of the opaque resource than is allocatable on a node.")
|
||||
requests := v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("0.1"),
|
||||
opaqueResName: resource.MustParse("1"),
|
||||
}
|
||||
limits := v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("0.2"),
|
||||
opaqueResName: resource.MustParse("2"),
|
||||
}
|
||||
pod := f.NewTestPod("min-oir", requests, limits)
|
||||
|
||||
By("Observing an event that indicates the pod was scheduled")
|
||||
action := func() error {
|
||||
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
return err
|
||||
}
|
||||
predicate := scheduleSuccessEvent(pod.Name, node.Name)
|
||||
success, err := common.ObserveEventAfterAction(f, predicate, action)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(success).To(Equal(true))
|
||||
})
|
||||
|
||||
It("should not schedule pods that exceed the available amount of opaque integer resource.", func() {
|
||||
defer removeOpaqueResource(f, node.Name, opaqueResName)
|
||||
|
||||
By("Creating a pod that requires more of the opaque resource than is allocatable on any node")
|
||||
requests := v1.ResourceList{opaqueResName: resource.MustParse("6")}
|
||||
limits := v1.ResourceList{}
|
||||
|
||||
By("Observing an event that indicates the pod was not scheduled")
|
||||
action := func() error {
|
||||
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(f.NewTestPod("over-max-oir", requests, limits))
|
||||
return err
|
||||
}
|
||||
predicate := scheduleFailureEvent("over-max-oir")
|
||||
success, err := common.ObserveEventAfterAction(f, predicate, action)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(success).To(Equal(true))
|
||||
})
|
||||
|
||||
It("should account opaque integer resources in pods with multiple containers.", func() {
|
||||
defer removeOpaqueResource(f, node.Name, opaqueResName)
|
||||
|
||||
By("Creating a pod with two containers that together require less of the opaque resource than is allocatable on a node")
|
||||
requests := v1.ResourceList{opaqueResName: resource.MustParse("1")}
|
||||
limits := v1.ResourceList{}
|
||||
image := framework.GetPauseImageName(f.ClientSet)
|
||||
// This pod consumes 2 "foo" resources.
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "mult-container-oir",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "pause",
|
||||
Image: image,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: requests,
|
||||
Limits: limits,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "pause-sidecar",
|
||||
Image: image,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: requests,
|
||||
Limits: limits,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By("Observing an event that indicates the pod was scheduled")
|
||||
action := func() error {
|
||||
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
return err
|
||||
}
|
||||
predicate := scheduleSuccessEvent(pod.Name, node.Name)
|
||||
success, err := common.ObserveEventAfterAction(f, predicate, action)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(success).To(Equal(true))
|
||||
|
||||
By("Creating a pod with two containers that together require more of the opaque resource than is allocatable on any node")
|
||||
requests = v1.ResourceList{opaqueResName: resource.MustParse("3")}
|
||||
limits = v1.ResourceList{}
|
||||
// This pod consumes 6 "foo" resources.
|
||||
pod = &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "mult-container-over-max-oir",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "pause",
|
||||
Image: image,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: requests,
|
||||
Limits: limits,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "pause-sidecar",
|
||||
Image: image,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: requests,
|
||||
Limits: limits,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By("Observing an event that indicates the pod was not scheduled")
|
||||
action = func() error {
|
||||
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
return err
|
||||
}
|
||||
predicate = scheduleFailureEvent(pod.Name)
|
||||
success, err = common.ObserveEventAfterAction(f, predicate, action)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(success).To(Equal(true))
|
||||
})
|
||||
|
||||
It("should schedule pods that initially do not fit after enough opaque integer resources are freed.", func() {
|
||||
defer removeOpaqueResource(f, node.Name, opaqueResName)
|
||||
|
||||
By("Creating a pod that requires less of the opaque resource than is allocatable on a node.")
|
||||
requests := v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("0.1"),
|
||||
opaqueResName: resource.MustParse("3"),
|
||||
}
|
||||
limits := v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("0.2"),
|
||||
opaqueResName: resource.MustParse("3"),
|
||||
}
|
||||
pod1 := f.NewTestPod("oir-1", requests, limits)
|
||||
pod2 := f.NewTestPod("oir-2", requests, limits)
|
||||
|
||||
By("Observing an event that indicates one pod was scheduled")
|
||||
action := func() error {
|
||||
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod1)
|
||||
return err
|
||||
}
|
||||
predicate := scheduleSuccessEvent(pod1.Name, node.Name)
|
||||
success, err := common.ObserveEventAfterAction(f, predicate, action)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(success).To(Equal(true))
|
||||
|
||||
By("Observing an event that indicates a subsequent pod was not scheduled")
|
||||
action = func() error {
|
||||
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
|
||||
return err
|
||||
}
|
||||
predicate = scheduleFailureEvent(pod2.Name)
|
||||
success, err = common.ObserveEventAfterAction(f, predicate, action)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(success).To(Equal(true))
|
||||
|
||||
By("Observing an event that indicates the second pod was scheduled after deleting the first pod")
|
||||
action = func() error {
|
||||
err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod1.Name, nil)
|
||||
return err
|
||||
}
|
||||
predicate = scheduleSuccessEvent(pod2.Name, node.Name)
|
||||
success, err = common.ObserveEventAfterAction(f, predicate, action)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(success).To(Equal(true))
|
||||
})
|
||||
})
|
||||
|
||||
// Adds the opaque resource to a node.
func addOpaqueResource(f *framework.Framework, nodeName string, opaqueResName v1.ResourceName) {
action := func() error {
By(fmt.Sprintf("Adding OIR to node [%s]", nodeName))
patch := []byte(fmt.Sprintf(`[{"op": "add", "path": "/status/capacity/%s", "value": "5"}]`, escapeForJSONPatch(opaqueResName)))
return f.ClientSet.CoreV1().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do().Error()
}
predicate := func(n *v1.Node) bool {
capacity, foundCap := n.Status.Capacity[opaqueResName]
allocatable, foundAlloc := n.Status.Allocatable[opaqueResName]
By(fmt.Sprintf("Node [%s] has OIR capacity: [%t] (%s), has OIR allocatable: [%t] (%s)", n.Name, foundCap, capacity.String(), foundAlloc, allocatable.String()))
return foundCap && capacity.MilliValue() == int64(5000) &&
foundAlloc && allocatable.MilliValue() == int64(5000)
}
success, err := common.ObserveNodeUpdateAfterAction(f, nodeName, predicate, action)
Expect(err).NotTo(HaveOccurred())
Expect(success).To(Equal(true))
}

// Removes the opaque resource from a node.
func removeOpaqueResource(f *framework.Framework, nodeName string, opaqueResName v1.ResourceName) {
action := func() error {
By(fmt.Sprintf("Removing OIR from node [%s]", nodeName))
patch := []byte(fmt.Sprintf(`[{"op": "remove", "path": "/status/capacity/%s"}]`, escapeForJSONPatch(opaqueResName)))
f.ClientSet.CoreV1().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do()
return nil // Ignore error -- the opaque resource may not exist.
}
predicate := func(n *v1.Node) bool {
capacity, foundCap := n.Status.Capacity[opaqueResName]
allocatable, foundAlloc := n.Status.Allocatable[opaqueResName]
By(fmt.Sprintf("Node [%s] has OIR capacity: [%t] (%s), has OIR allocatable: [%t] (%s)", n.Name, foundCap, capacity.String(), foundAlloc, allocatable.String()))
return (!foundCap || capacity.IsZero()) && (!foundAlloc || allocatable.IsZero())
}
success, err := common.ObserveNodeUpdateAfterAction(f, nodeName, predicate, action)
Expect(err).NotTo(HaveOccurred())
Expect(success).To(Equal(true))
}

func escapeForJSONPatch(resName v1.ResourceName) string {
// Escape forward slashes in the resource name per the JSON Pointer spec.
// See https://tools.ietf.org/html/rfc6901#section-3
return strings.Replace(string(resName), "/", "~1", -1)
}