fix(test::npd): fix node problem detector test

Since the insecure port of the apiserver has been disabled in the e2e node
tests, create a service account for the node problem detector in the test
and bind the cluster role `system:node-problem-detector` to that service
account, so the detector talks to the apiserver with in-cluster credentials.

Signed-off-by: knight42 <anonymousknight96@gmail.com>
knight42 2020-10-30 22:32:10 +08:00
parent f78d095d52
commit 186be6f0d2
2 changed files with 47 additions and 9 deletions
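In short: the test now provisions its own RBAC instead of relying on the apiserver's insecure port. It creates a ServiceAccount, binds it to the built-in `system:node-problem-detector` ClusterRole, runs the detector pod under that ServiceAccount, and switches `--apiserver-override` to `<host>?inClusterConfig=true` so the detector authenticates with the mounted service account token. A minimal client-go sketch of that RBAC setup (the helper name `setupNPDRBAC`, its arguments, and the package name are illustrative, not part of this commit; the actual changes are in the diff below):

// Condensed, illustrative sketch of the RBAC setup this commit performs in
// the test; not the test code itself.
package npdtest

import (
	"context"

	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// setupNPDRBAC is a hypothetical helper; ns and name stand in for the test
// namespace and the node-problem-detector name used in the real test.
func setupNPDRBAC(ctx context.Context, c clientset.Interface, ns, name string) error {
	// ServiceAccount the node-problem-detector pod will run as.
	if _, err := c.CoreV1().ServiceAccounts(ns).Create(ctx, &v1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
	}, metav1.CreateOptions{}); err != nil {
		return err
	}

	// Bind the existing system:node-problem-detector ClusterRole to that
	// ServiceAccount so the detector can update node conditions and post events.
	_, err := c.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "ClusterRole",
			Name:     "system:node-problem-detector",
		},
		Subjects: []rbacv1.Subject{{
			Kind:      rbacv1.ServiceAccountKind,
			Name:      name,
			Namespace: ns,
		}},
	}, metav1.CreateOptions{})
	return err
}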


@@ -66,6 +66,7 @@ go_library(
     ] + select({
         "@io_bazel_rules_go//go/platform:android": [
             "//pkg/util/procfs:go_default_library",
+            "//staging/src/k8s.io/api/rbac/v1:go_default_library",
             "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
             "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
             "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -85,6 +86,7 @@ go_library(
         ],
         "@io_bazel_rules_go//go/platform:linux": [
             "//pkg/util/procfs:go_default_library",
+            "//staging/src/k8s.io/api/rbac/v1:go_default_library",
             "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
             "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
             "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",


@@ -25,7 +25,10 @@ import (
 	"path"
 	"time"
 
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 	v1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
@@ -33,13 +36,11 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/kubernetes/pkg/kubelet/util"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	testutils "k8s.io/kubernetes/test/utils"
-	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )
@@ -152,6 +153,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector] [Serial]", func() {
 				}
 			]
 		}`
+
 		ginkgo.By("Generate event list options")
 		selector := fields.Set{
 			"involvedObject.kind": "Node",
@@ -160,24 +162,54 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector] [Serial]", func() {
 			"source": source,
 		}.AsSelector().String()
 		eventListOptions = metav1.ListOptions{FieldSelector: selector}
-		ginkgo.By("Create the test log file")
-		framework.ExpectNoError(err)
+
 		ginkgo.By("Create config map for the node problem detector")
 		_, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), &v1.ConfigMap{
 			ObjectMeta: metav1.ObjectMeta{Name: configName},
 			Data: map[string]string{path.Base(configFile): config},
 		}, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
+
ginkgo.By("Create the service account for node problem detector")
_, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
}, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Bind the cluster role system:node-problem-detector with the service account for node problem detector")
_, err = f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: "system:node-problem-detector",
},
Subjects: []rbacv1.Subject{
{
Kind: rbacv1.ServiceAccountKind,
Name: name,
Namespace: ns,
},
},
}, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Create the node problem detector") ginkgo.By("Create the node problem detector")
hostPathType := new(v1.HostPathType) hostPathType := new(v1.HostPathType)
*hostPathType = v1.HostPathType(string(v1.HostPathFileOrCreate)) *hostPathType = v1.HostPathFileOrCreate
pod := f.PodClient().CreateSync(&v1.Pod{ pod := f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: v1.PodSpec{ Spec: v1.PodSpec{
HostNetwork: true, HostNetwork: true,
SecurityContext: &v1.PodSecurityContext{}, SecurityContext: &v1.PodSecurityContext{},
ServiceAccountName: name,
Volumes: []v1.Volume{ Volumes: []v1.Volume{
{ {
Name: configVolume, Name: configVolume,
@@ -207,7 +239,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector] [Serial]", func() {
 					{
 						Name: name,
 						Image: image,
-						Command: []string{"sh", "-c", "touch " + logFile + " && /node-problem-detector --logtostderr --system-log-monitors=" + configFile + fmt.Sprintf(" --apiserver-override=%s?inClusterConfig=false", framework.TestContext.Host)},
+						Command: []string{"sh", "-c", "touch " + logFile + " && /node-problem-detector --logtostderr --system-log-monitors=" + configFile + fmt.Sprintf(" --apiserver-override=%s?inClusterConfig=true", framework.TestContext.Host)},
 						Env: []v1.EnvVar{
 							{
 								Name: "NODE_NAME",
@@ -378,6 +410,10 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector] [Serial]", func() {
 		gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed())
 		ginkgo.By("Delete the config map")
 		c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, metav1.DeleteOptions{})
+		ginkgo.By("Delete the service account")
+		c.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
+		ginkgo.By("Delete the clusterRoleBinding")
+		c.RbacV1().ClusterRoleBindings().Delete(context.TODO(), name, metav1.DeleteOptions{})
 		ginkgo.By("Clean up the events")
 		gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(context.TODO(), *metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed())
 		ginkgo.By("Clean up the node condition")