mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-07 03:03:59 +00:00
get rid of e2e/framework -> k/k/pkg/kubelet dependency
It's conceptually wrong to have dependencies on k/k/pkg in the e2e framework code. They should be moved to the corresponding packages, in this particular case to test/e2e_node.
This commit is contained in:
parent 139a2c54a2
commit 867be8fc3e
@@ -1,13 +1,6 @@
 rules:
   # The core E2E framework is meant to be a normal Kubernetes client,
-  # which means that it shouldn't depend on internal
-  # code. But we are not there yet, so some exceptions
-  # have to be allowed. Over time the list of allowed
-  # packages should get shorter, not longer.
-  - selectorRegexp: ^k8s[.]io/kubernetes/pkg/
-    allowedPrefixes:
-      - k8s.io/kubernetes/pkg/kubelet/apis/
-
+  # which means that it shouldn't depend on internal code.
   # The following packages are okay to use:
   #
   # public API

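Rules files like this one are consumed by Kubernetes' import verifier, which walks the framework's import graph and flags any package matching the selector regexp that is not covered by an allowed prefix; with the kubelet/apis allowance deleted, reintroducing a k/k/pkg import fails verification rather than compiling quietly. As a toy illustration of what such a check does, a sketch in Go, assuming a single source file rather than a whole package tree:

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"os"
	"regexp"
	"strings"
)

// Reject imports of internal k8s.io/kubernetes/pkg/ code unless they
// carry an allowed prefix, mirroring the shape of the rules file above.
var (
	selector        = regexp.MustCompile(`^k8s\.io/kubernetes/pkg/`)
	allowedPrefixes = []string{} // this commit empties the list
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, os.Args[1], nil, parser.ImportsOnly)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, imp := range f.Imports {
		path := strings.Trim(imp.Path.Value, `"`)
		if !selector.MatchString(path) {
			continue // not an internal package, nothing to check
		}
		allowed := false
		for _, p := range allowedPrefixes {
			if strings.HasPrefix(path, p) {
				allowed = true
			}
		}
		if !allowed {
			fmt.Printf("forbidden import: %s\n", path)
		}
	}
}
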
@@ -41,7 +41,6 @@ import (
 	cliflag "k8s.io/component-base/cli/flag"
 	"k8s.io/klog/v2"
 
-	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/test/e2e/framework/internal/junit"
 	"k8s.io/kubernetes/test/utils/image"
 	"k8s.io/kubernetes/test/utils/kubeconfig"

@@ -244,8 +243,6 @@ type NodeTestContextType struct {
 	NodeConformance bool
 	// PrepullImages indicates whether node e2e framework should prepull images.
 	PrepullImages bool
-	// KubeletConfig is the kubelet configuration the test is running against.
-	KubeletConfig kubeletconfig.KubeletConfiguration
 	// ImageDescription is the description of the image on which the test is running.
 	ImageDescription string
 	// RuntimeConfig is a map of API server runtime configuration values.

@@ -248,6 +248,13 @@ var _ = ginkgo.SynchronizedBeforeSuite(func(ctx context.Context) []byte {
 	framework.TestContext.BearerToken = string(token)
 	// update test context with node configuration.
 	gomega.Expect(updateTestContext(ctx)).To(gomega.Succeed(), "update test context with node config.")
+
+	// Store current Kubelet configuration in the package variable
+	// This assumes all tests which dynamically change kubelet configuration
+	// must: 1) run in serial; 2) restore kubelet configuration after test.
+	var err error
+	kubeletCfg, err = getCurrentKubeletConfig(ctx)
+	framework.ExpectNoError(err)
 })
 
 // Tear down the kubelet on the node

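getCurrentKubeletConfig is not part of this diff; in the node suite it reads the effective configuration back from the running kubelet. A minimal sketch of that idea, assuming the kubelet's /configz endpoint (which wraps the configuration under a "kubeletconfig" key), decoding only a few fields used below, and skipping the authentication and TLS setup a real client needs:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// configzPayload decodes the /configz response shape. Port 10250 and
// the plain http.Get are assumptions for this sketch; the real endpoint
// requires TLS and credentials.
type configzPayload struct {
	KubeletConfig struct {
		CgroupDriver  string `json:"cgroupDriver"`
		CgroupsPerQOS bool   `json:"cgroupsPerQOS"`
		StaticPodPath string `json:"staticPodPath"`
	} `json:"kubeletconfig"`
}

func getCurrentKubeletConfigSketch() (*configzPayload, error) {
	resp, err := http.Get("https://127.0.0.1:10250/configz")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var payload configzPayload
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return nil, err
	}
	return &payload, nil
}

func main() {
	cfg, err := getCurrentKubeletConfigSketch()
	if err != nil {
		fmt.Println("configz fetch failed:", err)
		return
	}
	fmt.Println("cgroup driver:", cfg.KubeletConfig.CgroupDriver)
}
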
@@ -334,14 +341,6 @@ func updateTestContext(ctx context.Context) error {
 
 	framework.Logf("Node name: %s", framework.TestContext.NodeName)
 
-	// Update test context with current kubelet configuration.
-	// This assumes all tests which dynamically change kubelet configuration
-	// must: 1) run in serial; 2) restore kubelet configuration after test.
-	kubeletCfg, err := getCurrentKubeletConfig(ctx)
-	if err != nil {
-		return fmt.Errorf("failed to get kubelet configuration: %w", err)
-	}
-	framework.TestContext.KubeletConfig = *kubeletCfg // Set kubelet config
 	return nil
 }
 

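The comment that moves from updateTestContext into the suite setup encodes a contract worth spelling out: kubeletCfg is captured once per suite run, so any test that reconfigures the kubelet must run serially and restore the original configuration on exit. A self-contained sketch of that save/mutate/restore shape, with toy stand-ins for the suite's real configuration type and restart helpers:

package main

import "fmt"

// kubeletConfiguration is a toy stand-in; illustrative only.
type kubeletConfiguration struct {
	CgroupsPerQOS bool
}

// Package-level snapshot, as in the suite after this commit.
var kubeletCfg = &kubeletConfiguration{CgroupsPerQOS: true}

// applyConfig stands in for "write config and restart the kubelet".
func applyConfig(cfg kubeletConfiguration) { *kubeletCfg = cfg }

// withTemporaryKubeletConfig runs body under a mutated configuration
// and restores the original afterwards, even if body panics. Tests
// built this way must also run in serial, since kubeletCfg is shared.
func withTemporaryKubeletConfig(mutate func(*kubeletConfiguration), body func()) {
	oldCfg := *kubeletCfg // save a copy of the pristine config
	newCfg := oldCfg
	mutate(&newCfg)
	applyConfig(newCfg)
	defer applyConfig(oldCfg) // restore on the way out
	body()
}

func main() {
	withTemporaryKubeletConfig(func(cfg *kubeletConfiguration) {
		cfg.CgroupsPerQOS = false
	}, func() {
		fmt.Println("running with CgroupsPerQOS =", kubeletCfg.CgroupsPerQOS)
	})
	fmt.Println("restored CgroupsPerQOS =", kubeletCfg.CgroupsPerQOS)
}
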
@@ -70,7 +70,7 @@ func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity,
 	// convert the cgroup name to its literal form
 	cgroupName := cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup, baseName)
 	cgroupFsName := ""
-	if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
+	if kubeletCfg.CgroupDriver == "systemd" {
 		cgroupFsName = cgroupName.ToSystemd()
 	} else {
 		cgroupFsName = cgroupName.ToCgroupfs()

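ToSystemd and ToCgroupfs differ only in notation: with the systemd driver every nesting level becomes a .slice whose name chains all of its ancestors with dashes, while the cgroupfs driver simply joins the components with slashes. A toy rendering of both forms; the real cm package also escapes special characters in component names, which is omitted here:

package main

import (
	"fmt"
	"strings"
)

// toSystemd renders ["kubepods","burstable"] as
// "/kubepods.slice/kubepods-burstable.slice": each level is a slice
// whose name accumulates its ancestors joined by dashes.
func toSystemd(components []string) string {
	var b strings.Builder
	accumulated := ""
	for _, c := range components {
		if accumulated == "" {
			accumulated = c
		} else {
			accumulated = accumulated + "-" + c
		}
		b.WriteString("/" + accumulated + ".slice")
	}
	return b.String()
}

// toCgroupfs just joins the components with "/".
func toCgroupfs(components []string) string {
	return "/" + strings.Join(components, "/")
}

func main() {
	name := []string{"kubepods", "burstable"}
	fmt.Println(toSystemd(name))  // /kubepods.slice/kubepods-burstable.slice
	fmt.Println(toCgroupfs(name)) // /kubepods/burstable
}
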
@@ -48,7 +48,7 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() {
 		staticPodName = "graceful-pod-" + string(uuid.NewUUID())
 		mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
 
-		podPath = framework.TestContext.KubeletConfig.StaticPodPath
+		podPath = kubeletCfg.StaticPodPath
 
 		ginkgo.By("create the static pod")
 		err := createStaticPodWithGracePeriod(podPath, staticPodName, ns)

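podPath here is the kubelet's StaticPodPath, the directory the kubelet watches for pod manifests; dropping a file into it is all a createStaticPod* helper fundamentally does, after which the kubelet creates the mirror pod whose name is suffixed with the node name, as seen above. A sketch of that core step, with a deliberately minimal manifest and an assumed directory (the suite uses whatever path the kubelet was actually started with):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeStaticPodManifest drops a minimal pod manifest into the
// kubelet's static pod directory. Real helpers template more fields
// (grace periods, volumes, test images).
func writeStaticPodManifest(dir, podName, namespace string) error {
	manifest := fmt.Sprintf(`apiVersion: v1
kind: Pod
metadata:
  name: %s
  namespace: %s
spec:
  containers:
  - name: test
    image: registry.k8s.io/pause:3.9
`, podName, namespace)
	file := filepath.Join(dir, podName+".yaml")
	return os.WriteFile(file, []byte(manifest), 0644)
}

func main() {
	// /etc/kubernetes/manifests is a common default StaticPodPath,
	// assumed here for illustration.
	if err := writeStaticPodManifest("/etc/kubernetes/manifests", "static-pod-demo", "default"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
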
@@ -53,7 +53,7 @@ var _ = SIGDescribe("MirrorPod", func() {
 		staticPodName = "static-pod-" + string(uuid.NewUUID())
 		mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
 
-		podPath = framework.TestContext.KubeletConfig.StaticPodPath
+		podPath = kubeletCfg.StaticPodPath
 
 		ginkgo.By("create the static pod")
 		err := createStaticPod(podPath, staticPodName, ns,

@@ -157,7 +157,7 @@ var _ = SIGDescribe("MirrorPod", func() {
 		staticPodName = "static-pod-" + string(uuid.NewUUID())
 		mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
 
-		podPath = framework.TestContext.KubeletConfig.StaticPodPath
+		podPath = kubeletCfg.StaticPodPath
 		ginkgo.By("create the static pod")
 		err := createStaticPod(podPath, staticPodName, ns,
 			imageutils.GetE2EImage(imageutils.Nginx), v1.RestartPolicyAlways)

@@ -209,7 +209,7 @@ var _ = SIGDescribe("MirrorPod", func() {
 			e2evolume.TestServerCleanup(ctx, f, nfsTestConfig)
 		})
 
-		podPath = framework.TestContext.KubeletConfig.StaticPodPath
+		podPath = kubeletCfg.StaticPodPath
 		staticPodName = "static-pod-nfs-test-pod" + string(uuid.NewUUID())
 		mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
 

@@ -40,7 +40,7 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *v1.Pod {
 	// convert the cgroup name to its literal form
 	cgroupFsName := ""
 	cgroupName := cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup, baseName)
-	if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
+	if kubeletCfg.CgroupDriver == "systemd" {
 		cgroupFsName = cgroupName.ToSystemd()
 	} else {
 		cgroupFsName = cgroupName.ToCgroupfs()

@@ -171,7 +171,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
 	ginkgo.Describe("QOS containers", func() {
 		ginkgo.Context("On enabling QOS cgroup hierarchy", func() {
 			ginkgo.It("Top level QoS containers should have been created [NodeConformance]", func(ctx context.Context) {
-				if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
+				if !kubeletCfg.CgroupsPerQOS {
 					return
 				}
 				cgroupsToVerify := []string{burstableCgroup, bestEffortCgroup}

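With CgroupsPerQOS enabled, the kubelet creates top-level cgroups for the Burstable and BestEffort classes under the node allocatable cgroup, with Guaranteed pods placed directly beneath it; that layout is what this test verifies. A rough check along the same lines, assuming cgroup v2 with the systemd driver's usual paths (the suite resolves these via toCgroupFsName rather than hardcoding them):

package main

import (
	"fmt"
	"os"
)

// These paths are an assumption about a common systemd/cgroup v2
// layout, not the suite's actual lookup logic.
func main() {
	for _, p := range []string{
		"/sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice",
		"/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice",
	} {
		if _, err := os.Stat(p); err != nil {
			fmt.Printf("missing QoS cgroup: %s (%v)\n", p, err)
			continue
		}
		fmt.Printf("found QoS cgroup: %s\n", p)
	}
}
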
@@ -186,7 +186,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
 	ginkgo.Describe("Pod containers [NodeConformance]", func() {
 		ginkgo.Context("On scheduling a Guaranteed Pod", func() {
 			ginkgo.It("Pod containers should have been created under the cgroup-root", func(ctx context.Context) {
-				if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
+				if !kubeletCfg.CgroupsPerQOS {
 					return
 				}
 				var (

@@ -231,7 +231,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
 		})
 		ginkgo.Context("On scheduling a BestEffort Pod", func() {
 			ginkgo.It("Pod containers should have been created under the BestEffort cgroup", func(ctx context.Context) {
-				if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
+				if !kubeletCfg.CgroupsPerQOS {
 					return
 				}
 				var (

@@ -276,7 +276,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
 		})
 		ginkgo.Context("On scheduling a Burstable Pod", func() {
 			ginkgo.It("Pod containers should have been created under the Burstable cgroup", func(ctx context.Context) {
-				if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
+				if !kubeletCfg.CgroupsPerQOS {
 					return
 				}
 				var (

@@ -96,7 +96,7 @@ var _ = SIGDescribe("Kubelet PodOverhead handling [LinuxOnly]", func() {
 	ginkgo.Describe("PodOverhead cgroup accounting", func() {
 		ginkgo.Context("On running pod with PodOverhead defined", func() {
 			ginkgo.It("Pod cgroup should be sum of overhead and resource limits", func(ctx context.Context) {
-				if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
+				if !kubeletCfg.CgroupsPerQOS {
 					return
 				}
 

@@ -53,7 +53,7 @@ var _ = SIGDescribe("[Feature:StandaloneMode] ", func() {
 		ginkgo.It("the pod should be running", func(ctx context.Context) {
 			ns = f.Namespace.Name
 			staticPodName = "static-pod-" + string(uuid.NewUUID())
-			podPath = framework.TestContext.KubeletConfig.StaticPodPath
+			podPath = kubeletCfg.StaticPodPath
 
 			err := createBasicStaticPod(podPath, staticPodName, ns)
 			framework.ExpectNoError(err)

@@ -67,7 +67,7 @@ var _ = SIGDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFea
 			ginkgo.By("create a static system-node-critical pod")
 			staticPodName = "static-disk-hog-" + string(uuid.NewUUID())
 			mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
-			podPath = framework.TestContext.KubeletConfig.StaticPodPath
+			podPath = kubeletCfg.StaticPodPath
 			// define a static pod consuming disk gradually
 			// the upper limit is 1024 (iterations) * 10485760 bytes (10MB) = 10GB
 			err := createStaticSystemNodeCriticalPod(

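A quick sanity check on the limit quoted in that comment, since 10485760 bytes is 10 MiB:

package main

import "fmt"

func main() {
	const iterations, chunk = 1024, 10485760 // 10 MiB written per iteration
	total := iterations * chunk
	fmt.Println(total, "bytes =", total/(1<<30), "GiB") // 10737418240 bytes = 10 GiB
}
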
@@ -55,7 +55,7 @@ var _ = SIGDescribe("Unknown Pods [Serial] [Disruptive]", func() {
 		staticPodName = "unknown-test-pod-" + string(uuid.NewUUID())
 		mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
 
-		podPath = framework.TestContext.KubeletConfig.StaticPodPath
+		podPath = kubeletCfg.StaticPodPath
 
 		framework.Logf("create the static pod %v", staticPodName)
 		err := createStaticPodWithGracePeriod(podPath, staticPodName, ns)

@@ -87,9 +87,12 @@ const (
 	memoryManagerStateFile = "/var/lib/kubelet/memory_manager_state"
 )
 
-var kubeletHealthCheckURL = fmt.Sprintf("http://127.0.0.1:%d/healthz", ports.KubeletHealthzPort)
-var containerRuntimeUnitName = ""
+var (
+	kubeletHealthCheckURL    = fmt.Sprintf("http://127.0.0.1:%d/healthz", ports.KubeletHealthzPort)
+	containerRuntimeUnitName = ""
+	// KubeletConfig is the kubelet configuration the test is running against.
+	kubeletCfg *kubeletconfig.KubeletConfiguration
+)
 
 func getNodeSummary(ctx context.Context) (*stats.Summary, error) {
 	kubeletConfig, err := getCurrentKubeletConfig(ctx)

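kubeletHealthCheckURL targets the kubelet's local healthz endpoint; ports.KubeletHealthzPort defaults to 10248. A sketch of the kind of probe kubeletHealthCheck performs, assuming any HTTP 200 counts as healthy and a timeout chosen for illustration:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// healthy reports whether the local kubelet answers /healthz with 200.
func healthy(url string) bool {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}

func main() {
	fmt.Println(healthy("http://127.0.0.1:10248/healthz"))
}
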
@@ -482,7 +485,7 @@ func kubeletHealthCheck(url string) bool {
 }
 
 func toCgroupFsName(cgroupName cm.CgroupName) string {
-	if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
+	if kubeletCfg.CgroupDriver == "systemd" {
 		return cgroupName.ToSystemd()
 	}
 	return cgroupName.ToCgroupfs()