Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #82094 from tallclair/runtime-class-admission
Enable the RuntimeClass admission controller on GCE & CI
Commit 7a7b8a7305
@@ -356,7 +356,7 @@ fi
 CUSTOM_INGRESS_YAML="${CUSTOM_INGRESS_YAML:-}"
 
 # Admission Controllers to invoke prior to persisting objects in cluster
-ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection
+ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection,RuntimeClass
 
 if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
   ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
@@ -387,7 +387,7 @@ fi
 CUSTOM_INGRESS_YAML="${CUSTOM_INGRESS_YAML:-}"
 
 if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then
-  ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection,PersistentVolumeClaimResize"
+  ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection,PersistentVolumeClaimResize,RuntimeClass"
   if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
     ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
   fi
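With RuntimeClass appended to both admission lists above (the default list and the one used when KUBE_ADMISSION_CONTROL is not set), the API server validates a Pod's runtimeClassName at create time instead of leaving the failure to the kubelet. The snippet below is only a rough sketch of that check, not the plugin's actual code; the function names and the lookup callback are illustrative assumptions.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1beta1 "k8s.io/api/node/v1beta1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// lookupFunc stands in for the informer-backed lister a real admission plugin would use.
type lookupFunc func(name string) (*nodev1beta1.RuntimeClass, error)

// validatePodRuntimeClass mimics the admission-time check: a Pod that references
// a RuntimeClass which cannot be found is rejected with a forbidden error.
func validatePodRuntimeClass(pod *corev1.Pod, lookup lookupFunc) error {
	name := pod.Spec.RuntimeClassName
	if name == nil || *name == "" {
		return nil // no RuntimeClass requested, nothing to validate
	}
	if _, err := lookup(*name); err != nil {
		return apierrs.NewForbidden(
			schema.GroupResource{Resource: "pods"},
			pod.Name,
			fmt.Errorf("pod rejected: RuntimeClass %q not found: %v", *name, err),
		)
	}
	return nil
}

func main() {
	rcName := "nonexistent-handler"
	pod := &corev1.Pod{Spec: corev1.PodSpec{RuntimeClassName: &rcName}}

	// Simulate a cluster in which no RuntimeClass objects exist.
	missing := func(name string) (*nodev1beta1.RuntimeClass, error) {
		return nil, fmt.Errorf("runtimeclass.node.k8s.io %q not found", name)
	}

	err := validatePodRuntimeClass(pod, missing)
	fmt.Println(apierrs.IsForbidden(err), err) // true, plus the forbidden message
}

A rejection from this path reaches clients as a 403 Forbidden on the Pod create call, which is what the e2e changes below start asserting.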
@@ -22,6 +22,7 @@ import (
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -33,6 +34,7 @@ import (
 	utilpointer "k8s.io/utils/pointer"
 
 	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )
 
 const (
@@ -49,14 +51,13 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
 
 	ginkgo.It("should reject a Pod requesting a non-existent RuntimeClass", func() {
 		rcName := f.Namespace.Name + "-nonexistent"
-		pod := createRuntimeClassPod(f, rcName)
-		expectSandboxFailureEvent(f, pod, fmt.Sprintf("\"%s\" not found", rcName))
+		expectPodRejection(f, newRuntimeClassPod(rcName))
 	})
 
 	ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler", func() {
 		handler := f.Namespace.Name + "-handler"
 		rcName := createRuntimeClass(f, "unconfigured-handler", handler)
-		pod := createRuntimeClassPod(f, rcName)
+		pod := f.PodClient().Create(newRuntimeClassPod(rcName))
 		expectSandboxFailureEvent(f, pod, handler)
 	})
 
@@ -69,7 +70,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
 		}
 
 		rcName := createRuntimeClass(f, "preconfigured-handler", handler)
-		pod := createRuntimeClassPod(f, rcName)
+		pod := f.PodClient().Create(newRuntimeClassPod(rcName))
 		expectPodSuccess(f, pod)
 	})
 
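The createRuntimeClass helper called here is not shown in this diff. As orientation, here is a minimal sketch of what it plausibly does, assuming it creates a node.k8s.io/v1beta1 RuntimeClass with a generated name and the given handler through the typed client, using the context-free client-go signatures of this vintage; the function name and details are assumptions, not the test's actual code.

package sketch

import (
	nodev1beta1 "k8s.io/api/node/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createRuntimeClassSketch creates a RuntimeClass named "<base>-<random suffix>"
// whose handler is expected to match a handler configured in the node's CRI
// runtime, and returns the server-generated name so a Pod can reference it via
// spec.runtimeClassName.
func createRuntimeClassSketch(client kubernetes.Interface, base, handler string) (string, error) {
	rc := &nodev1beta1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{GenerateName: base + "-"},
		Handler:    handler,
	}
	created, err := client.NodeV1beta1().RuntimeClasses().Create(rc)
	if err != nil {
		return "", err
	}
	return created.GetName(), nil
}

A Pod whose spec.runtimeClassName is set to the returned name schedules normally, and the kubelet asks the CRI runtime for that handler when it creates the sandbox, which is why the "preconfigured-handler" case above only succeeds on nodes whose runtime actually has such a handler configured.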
@@ -94,8 +95,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
 			}))
 		})
 
-		pod := createRuntimeClassPod(f, rcName)
-		expectSandboxFailureEvent(f, pod, fmt.Sprintf("\"%s\" not found", rcName))
+		expectPodRejection(f, newRuntimeClassPod(rcName))
 	})
 })
 
@@ -109,9 +109,9 @@ func createRuntimeClass(f *framework.Framework, name, handler string) string {
 	return rc.GetName()
 }
 
-// createRuntimeClass creates a test pod with the given runtimeClassName.
-func createRuntimeClassPod(f *framework.Framework, runtimeClassName string) *v1.Pod {
-	pod := &v1.Pod{
+// newRuntimeClassPod generates a test pod with the given runtimeClassName.
+func newRuntimeClassPod(runtimeClassName string) *v1.Pod {
+	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: fmt.Sprintf("test-runtimeclass-%s-", runtimeClassName),
 		},
@@ -126,7 +126,19 @@ func createRuntimeClassPod(f *framework.Framework, runtimeClassName string) *v1.
 			AutomountServiceAccountToken: utilpointer.BoolPtr(false),
 		},
 	}
-	return f.PodClient().Create(pod)
+}
+
+func expectPodRejection(f *framework.Framework, pod *v1.Pod) {
+	// The Node E2E doesn't run the RuntimeClass admission controller, so we expect the rejection to
+	// happen by the Kubelet.
+	if framework.TestContext.NodeE2E {
+		pod = f.PodClient().Create(pod)
+		expectSandboxFailureEvent(f, pod, fmt.Sprintf("\"%s\" not found", *pod.Spec.RuntimeClassName))
+	} else {
+		_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		framework.ExpectError(err, "should be forbidden")
+		gomega.Expect(apierrs.IsForbidden(err)).To(gomega.BeTrue(), "should be forbidden error")
+	}
 }
 
 // expectPodSuccess waits for the given pod to terminate successfully.
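expectSandboxFailureEvent, used on the kubelet-rejection branch above, is also outside this diff. Below is a rough approximation of what such a helper could look like, assuming it polls the Pod's events for the kubelet's FailedCreatePodSandBox reason and matches the expected message; the function name, field selector, and polling intervals are assumptions, and only the context-free client-go calls mirror the surrounding code.

package sketch

import (
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForSandboxFailureEvent polls the Pod's namespace for a FailedCreatePodSandBox
// event on the given Pod whose message contains msg, returning an error on timeout.
func waitForSandboxFailureEvent(client kubernetes.Interface, pod *v1.Pod, msg string) error {
	selector := fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.name":      pod.Name,
		"involvedObject.namespace": pod.Namespace,
		"reason":                   "FailedCreatePodSandBox", // event reason emitted by the kubelet
	}.AsSelector().String()

	return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		events, err := client.CoreV1().Events(pod.Namespace).List(metav1.ListOptions{FieldSelector: selector})
		if err != nil {
			return false, err
		}
		for _, e := range events.Items {
			if strings.Contains(e.Message, msg) {
				return true, nil
			}
		}
		return false, nil // keep polling until the event shows up or we time out
	})
}

With the admission controller now enabled on GCE and CI, this event-based path is effectively exercised only by the Node E2E suite; elsewhere the Pod create request is denied before a sandbox is ever attempted.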