mirror of https://github.com/k3s-io/kubernetes.git

commit 702646df24 (parent 11678fb1c0)

test(runtimeclass): add e2e tests for runtimeclass scheduling
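In outline, the commit does three things: it moves the preconfigured-handler lookup out of the common e2e tests into the framework package, wires the new test file into the Bazel build, and adds test/e2e/node/runtimeclass.go with two scheduling tests. Both tests revolve around the node.k8s.io/v1beta1 RuntimeClass object. The sketch below is not part of the diff; it only shows the shape of the object the tests exercise (function name and field values are illustrative), with the Scheduling block that gets merged into any pod referencing the class via runtimeClassName.

package node

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/api/node/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleRuntimeClass builds a RuntimeClass like the ones the new tests
// create: a handler name plus scheduling constraints (a nodeSelector and
// tolerations) that are merged into referencing pods. Values here are
// illustrative, not taken from the diff.
func exampleRuntimeClass() *v1beta1.RuntimeClass {
	return &v1beta1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Handler:    "test-handler",
		Scheduling: &v1beta1.Scheduling{
			NodeSelector: map[string]string{"foo": "bar"},
			Tolerations: []v1.Toleration{{
				Key:      "foo",
				Operator: v1.TolerationOpEqual,
				Value:    "bar",
				Effect:   v1.TaintEffectNoSchedule,
			}},
		},
	}
}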
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"time"

-	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	v1 "k8s.io/api/core/v1"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -37,15 +37,6 @@ import (
 	"github.com/onsi/gomega"
 )

-const (
-	// PreconfiguredRuntimeHandler is the name of the runtime handler that is expected to be
-	// preconfigured in the test environment.
-	PreconfiguredRuntimeHandler = "test-handler"
-	// DockerRuntimeHandler is a hardcoded runtime handler that is accepted by dockershim, and
-	// treated equivalently to a nil runtime handler.
-	DockerRuntimeHandler = "docker"
-)
-
 var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
 	f := framework.NewDefaultFramework("runtimeclass")

@@ -64,10 +55,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
 	// This test requires that the PreconfiguredRuntimeHandler has already been set up on nodes.
 	ginkgo.It("should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]", func() {
-		// The built-in docker runtime does not support configuring runtime handlers.
-		handler := PreconfiguredRuntimeHandler
-		if framework.TestContext.ContainerRuntime == "docker" {
-			handler = DockerRuntimeHandler
-		}
+		handler := framework.PreconfiguredRuntimeClassHandler()

 		rcName := createRuntimeClass(f, "preconfigured-handler", handler)
 		pod := f.PodClient().Create(newRuntimeClassPod(rcName))
@@ -31,7 +31,7 @@ import (
 	"sync"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -884,3 +884,18 @@ func GetLogToFileFunc(file *os.File) func(format string, args ...interface{}) {
 		writer.Flush()
 	}
 }
+
+const (
+	// preconfiguredRuntimeHandler is the name of the runtime handler that is expected to be
+	// preconfigured in the test environment.
+	preconfiguredRuntimeHandler = "test-handler"
+)
+
+// PreconfiguredRuntimeClassHandler returns the runtime handler configured for the test environment.
+func PreconfiguredRuntimeClassHandler() string {
+	if TestContext.ContainerRuntime == "docker" {
+		return TestContext.ContainerRuntime
+	}
+
+	return preconfiguredRuntimeHandler
+}
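The new helper centralizes the docker special case that the removed const block documented: dockershim accepts the hardcoded handler name "docker" and treats it as equivalent to a nil runtime handler, while every other runtime is expected to have "test-handler" preconfigured on the nodes. A minimal unit-test sketch for the helper, assuming it runs inside the framework package with `import "testing"` (this test is not part of the commit):

func TestPreconfiguredRuntimeClassHandler(t *testing.T) {
	// Under docker, the helper falls back to the "docker" handler, which
	// dockershim treats like a nil handler.
	TestContext.ContainerRuntime = "docker"
	if got := PreconfiguredRuntimeClassHandler(); got != "docker" {
		t.Errorf("docker runtime: got %q, want %q", got, "docker")
	}

	// Any other runtime is expected to have "test-handler" preconfigured.
	TestContext.ContainerRuntime = "containerd"
	if got := PreconfiguredRuntimeClassHandler(); got != "test-handler" {
		t.Errorf("containerd runtime: got %q, want %q", got, "test-handler")
	}
}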
@@ -14,6 +14,7 @@ go_library(
         "pod_gc.go",
         "pods.go",
         "pre_stop.go",
+        "runtimeclass.go",
         "security_context.go",
         "ssh.go",
         "ttlafterfinished.go",
@@ -22,10 +23,13 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
+        "//pkg/kubelet/runtimeclass/testing:go_default_library",
         "//pkg/master/ports:go_default_library",
         "//pkg/util/slice:go_default_library",
         "//staging/src/k8s.io/api/batch/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/node/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
@@ -46,10 +50,12 @@ go_library(
         "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
         "//test/e2e/perftype:go_default_library",
+        "//test/e2e/scheduling:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
+        "//vendor/k8s.io/utils/pointer:go_default_library",
     ],
 )

test/e2e/node/runtimeclass.go (new file, 143 lines)
@@ -0,0 +1,143 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/api/node/v1beta1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtimeclasstest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/scheduling"
	imageutils "k8s.io/kubernetes/test/utils/image"
	utilpointer "k8s.io/utils/pointer"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
	f := framework.NewDefaultFramework("runtimeclass")

	ginkgo.It("should reject a Pod requesting a RuntimeClass with conflicting node selector", func() {
		scheduling := &v1beta1.Scheduling{
			NodeSelector: map[string]string{
				"foo": "conflict",
			},
		}

		runtimeClass := newRuntimeClass(f.Namespace.Name, "conflict-runtimeclass")
		runtimeClass.Scheduling = scheduling
		rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(runtimeClass)
		framework.ExpectNoError(err, "failed to create RuntimeClass resource")

		pod := newRuntimeClassPod(rc.GetName())
		pod.Spec.NodeSelector = map[string]string{
			"foo": "bar",
		}
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
		framework.ExpectError(err, "should be forbidden")
		gomega.Expect(apierrs.IsForbidden(err)).To(gomega.BeTrue(), "should be forbidden error")
	})

	ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling [NodeFeature:RuntimeHandler]", func() {
		nodeName := scheduling.GetNodeThatCanRunPod(f)
		nodeSelector := map[string]string{
			"foo":  "bar",
			"fizz": "buzz",
		}
		tolerations := []v1.Toleration{
			{
				Key:      "foo",
				Operator: v1.TolerationOpEqual,
				Value:    "bar",
				Effect:   v1.TaintEffectNoSchedule,
			},
		}
		scheduling := &v1beta1.Scheduling{
			NodeSelector: nodeSelector,
			Tolerations:  tolerations,
		}

		ginkgo.By("Trying to apply a label on the found node.")
		for key, value := range nodeSelector {
			framework.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, key, value)
			framework.ExpectNodeHasLabel(f.ClientSet, nodeName, key, value)
			defer framework.RemoveLabelOffNode(f.ClientSet, nodeName, key)
		}

		ginkgo.By("Trying to apply taint on the found node.")
		taint := v1.Taint{
			Key:    "foo",
			Value:  "bar",
			Effect: v1.TaintEffectNoSchedule,
		}
		framework.AddOrUpdateTaintOnNode(f.ClientSet, nodeName, taint)
		framework.ExpectNodeHasTaint(f.ClientSet, nodeName, &taint)
		defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, taint)

		ginkgo.By("Trying to create runtimeclass and pod")
		runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass")
		runtimeClass.Scheduling = scheduling
		rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(runtimeClass)
		framework.ExpectNoError(err, "failed to create RuntimeClass resource")

		pod := newRuntimeClassPod(rc.GetName())
		pod.Spec.NodeSelector = map[string]string{
			"foo": "bar",
		}
		pod = f.PodClient().Create(pod)

		framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, f.Namespace.Name, pod.Name))

		// Check that the pod was scheduled on the specified node.
		scheduledPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		framework.ExpectEqual(nodeName, scheduledPod.Spec.NodeName)
		framework.ExpectEqual(nodeSelector, pod.Spec.NodeSelector)
		gomega.Expect(pod.Spec.Tolerations).To(gomega.ContainElement(tolerations[0]))
	})
})

// newRuntimeClass returns a test runtime class.
func newRuntimeClass(namespace, name string) *v1beta1.RuntimeClass {
	uniqueName := fmt.Sprintf("%s-%s", namespace, name)
	return runtimeclasstest.NewRuntimeClass(uniqueName, framework.PreconfiguredRuntimeClassHandler())
}

// newRuntimeClassPod returns a test pod with the given runtimeClassName.
func newRuntimeClassPod(runtimeClassName string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: fmt.Sprintf("test-runtimeclass-%s-", runtimeClassName),
		},
		Spec: v1.PodSpec{
			RuntimeClassName: &runtimeClassName,
			Containers: []v1.Container{{
				Name:    "test",
				Image:   imageutils.GetE2EImage(imageutils.BusyBox),
				Command: []string{"true"},
			}},
			RestartPolicy:                v1.RestartPolicyNever,
			AutomountServiceAccountToken: utilpointer.BoolPtr(false),
		},
	}
}
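Why the first test expects a Forbidden error: when a pod references a RuntimeClass, the class's scheduling nodeSelector is merged into the pod's, and a pod that already sets the same key to a different value cannot be merged, so the create request is rejected. A minimal sketch of that merge rule, assuming `import "fmt"` (an illustration, not the actual admission code):

// mergeNodeSelector illustrates the conflict rule the "conflicting node
// selector" test relies on: a shared key with different values is an error,
// which the API server surfaces to the client as Forbidden.
func mergeNodeSelector(podSelector, rcSelector map[string]string) (map[string]string, error) {
	merged := make(map[string]string, len(podSelector)+len(rcSelector))
	for k, v := range podSelector {
		merged[k] = v
	}
	for k, v := range rcSelector {
		if existing, ok := merged[k]; ok && existing != v {
			// e.g. the pod sets foo=bar while the RuntimeClass requires foo=conflict.
			return nil, fmt.Errorf("conflict for label %q: pod has %q, RuntimeClass requires %q", k, existing, v)
		}
		merged[k] = v
	}
	return merged, nil
}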