Mirror of https://github.com/k3s-io/kubernetes.git (synced 2026-01-05 07:27:21 +00:00)
e2e: adapt to moved code
This is the result of automatically editing source files like this:
go install golang.org/x/tools/cmd/goimports@latest
find ./test/e2e* -name "*.go" | xargs env PATH=$GOPATH/bin:$PATH ./e2e-framework-sed.sh
with e2e-framework-sed.sh containing this:
sed -i \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainer(/e2epod.ExecCommandInContainer(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainerWithFullOutput(/e2epod.ExecCommandInContainerWithFullOutput(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInContainer(/e2epod.ExecShellInContainer(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPod(/e2epod.ExecShellInPod(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPodWithFullOutput(/e2epod.ExecShellInPodWithFullOutput(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecWithOptions(/e2epod.ExecWithOptions(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.MatchContainerOutput(/e2eoutput.MatchContainerOutput(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClient(/e2epod.NewPodClient(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClientNS(/e2epod.PodClientNS(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutput(/e2eoutput.TestContainerOutput(\1, /" \
-e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutputRegexp(/e2eoutput.TestContainerOutputRegexp(\1, /" \
-e "s/framework.AddOrUpdateLabelOnNode\b/e2enode.AddOrUpdateLabelOnNode/" \
-e "s/framework.AllNodes\b/e2edebug.AllNodes/" \
-e "s/framework.AllNodesReady\b/e2enode.AllNodesReady/" \
-e "s/framework.ContainerResourceGatherer\b/e2edebug.ContainerResourceGatherer/" \
-e "s/framework.ContainerResourceUsage\b/e2edebug.ContainerResourceUsage/" \
-e "s/framework.CreateEmptyFileOnPod\b/e2eoutput.CreateEmptyFileOnPod/" \
-e "s/framework.DefaultPodDeletionTimeout\b/e2epod.DefaultPodDeletionTimeout/" \
-e "s/framework.DumpAllNamespaceInfo\b/e2edebug.DumpAllNamespaceInfo/" \
-e "s/framework.DumpDebugInfo\b/e2eoutput.DumpDebugInfo/" \
-e "s/framework.DumpNodeDebugInfo\b/e2edebug.DumpNodeDebugInfo/" \
-e "s/framework.EtcdUpgrade\b/e2eproviders.EtcdUpgrade/" \
-e "s/framework.EventsLister\b/e2edebug.EventsLister/" \
-e "s/framework.ExecOptions\b/e2epod.ExecOptions/" \
-e "s/framework.ExpectNodeHasLabel\b/e2enode.ExpectNodeHasLabel/" \
-e "s/framework.ExpectNodeHasTaint\b/e2enode.ExpectNodeHasTaint/" \
-e "s/framework.GCEUpgradeScript\b/e2eproviders.GCEUpgradeScript/" \
-e "s/framework.ImagePrePullList\b/e2epod.ImagePrePullList/" \
-e "s/framework.KubectlBuilder\b/e2ekubectl.KubectlBuilder/" \
-e "s/framework.LocationParamGKE\b/e2eproviders.LocationParamGKE/" \
-e "s/framework.LogSizeDataTimeseries\b/e2edebug.LogSizeDataTimeseries/" \
-e "s/framework.LogSizeGatherer\b/e2edebug.LogSizeGatherer/" \
-e "s/framework.LogsSizeData\b/e2edebug.LogsSizeData/" \
-e "s/framework.LogsSizeDataSummary\b/e2edebug.LogsSizeDataSummary/" \
-e "s/framework.LogsSizeVerifier\b/e2edebug.LogsSizeVerifier/" \
-e "s/framework.LookForStringInLog\b/e2eoutput.LookForStringInLog/" \
-e "s/framework.LookForStringInPodExec\b/e2eoutput.LookForStringInPodExec/" \
-e "s/framework.LookForStringInPodExecToContainer\b/e2eoutput.LookForStringInPodExecToContainer/" \
-e "s/framework.MasterAndDNSNodes\b/e2edebug.MasterAndDNSNodes/" \
-e "s/framework.MasterNodes\b/e2edebug.MasterNodes/" \
-e "s/framework.MasterUpgradeGKE\b/e2eproviders.MasterUpgradeGKE/" \
-e "s/framework.NewKubectlCommand\b/e2ekubectl.NewKubectlCommand/" \
-e "s/framework.NewLogsVerifier\b/e2edebug.NewLogsVerifier/" \
-e "s/framework.NewNodeKiller\b/e2enode.NewNodeKiller/" \
-e "s/framework.NewResourceUsageGatherer\b/e2edebug.NewResourceUsageGatherer/" \
-e "s/framework.NodeHasTaint\b/e2enode.NodeHasTaint/" \
-e "s/framework.NodeKiller\b/e2enode.NodeKiller/" \
-e "s/framework.NodesSet\b/e2edebug.NodesSet/" \
-e "s/framework.PodClient\b/e2epod.PodClient/" \
-e "s/framework.RemoveLabelOffNode\b/e2enode.RemoveLabelOffNode/" \
-e "s/framework.ResourceConstraint\b/e2edebug.ResourceConstraint/" \
-e "s/framework.ResourceGathererOptions\b/e2edebug.ResourceGathererOptions/" \
-e "s/framework.ResourceUsagePerContainer\b/e2edebug.ResourceUsagePerContainer/" \
-e "s/framework.ResourceUsageSummary\b/e2edebug.ResourceUsageSummary/" \
-e "s/framework.RunHostCmd\b/e2eoutput.RunHostCmd/" \
-e "s/framework.RunHostCmdOrDie\b/e2eoutput.RunHostCmdOrDie/" \
-e "s/framework.RunHostCmdWithFullOutput\b/e2eoutput.RunHostCmdWithFullOutput/" \
-e "s/framework.RunHostCmdWithRetries\b/e2eoutput.RunHostCmdWithRetries/" \
-e "s/framework.RunKubectl\b/e2ekubectl.RunKubectl/" \
-e "s/framework.RunKubectlInput\b/e2ekubectl.RunKubectlInput/" \
-e "s/framework.RunKubectlOrDie\b/e2ekubectl.RunKubectlOrDie/" \
-e "s/framework.RunKubectlOrDieInput\b/e2ekubectl.RunKubectlOrDieInput/" \
-e "s/framework.RunKubectlWithFullOutput\b/e2ekubectl.RunKubectlWithFullOutput/" \
-e "s/framework.RunKubemciCmd\b/e2ekubectl.RunKubemciCmd/" \
-e "s/framework.RunKubemciWithKubeconfig\b/e2ekubectl.RunKubemciWithKubeconfig/" \
-e "s/framework.SingleContainerSummary\b/e2edebug.SingleContainerSummary/" \
-e "s/framework.SingleLogSummary\b/e2edebug.SingleLogSummary/" \
-e "s/framework.TimestampedSize\b/e2edebug.TimestampedSize/" \
-e "s/framework.WaitForAllNodesSchedulable\b/e2enode.WaitForAllNodesSchedulable/" \
-e "s/framework.WaitForSSHTunnels\b/e2enode.WaitForSSHTunnels/" \
-e "s/framework.WorkItem\b/e2edebug.WorkItem/" \
"$@"
for i in "$@"; do
# Import all sub packages and let goimports figure out which of those
# are redundant (= already imported) or not needed.
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"' "$i"
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"' "$i"
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2enode "k8s.io/kubernetes/test/e2e/framework/node"' "$i"
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"' "$i"
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2epod "k8s.io/kubernetes/test/e2e/framework/pod"' "$i"
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"' "$i"
goimports -w "$i"
done
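
For illustration, the net effect at a typical call site looks like this (a minimal, hypothetical Go sketch assembled from the substitutions above, not a file taken from this commit): the sed pass rewrites the call, the inserted import brings in the e2epod package, and goimports then drops that import again in any file where it ends up unused.

// Hypothetical example; createTestPod is a placeholder name, not code from this commit.
package e2enode

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func createTestPod(f *framework.Framework, pod *v1.Pod) *v1.Pod {
	// before the rewrite this read: return f.PodClient().CreateSync(pod)
	return e2epod.NewPodClient(f).CreateSync(pod)
}
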
@@ -161,11 +161,11 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.
w := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector
- return f.PodClient().List(context.TODO(), options)
+ return e2epod.NewPodClient(f).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fieldSelector
- return f.PodClient().Watch(context.TODO(), options)
+ return e2epod.NewPodClient(f).Watch(context.TODO(), options)
},
}
preconditionFunc := func(store cache.Store) (bool, error) {
@@ -202,7 +202,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.
})
framework.ExpectNoError(err)
}
- p, err := f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{})
+ p, err := e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return p.Status
}
@@ -224,7 +224,7 @@ func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {
RestartPolicy: v1.RestartPolicyNever,
},
}
- return f.PodClient().Create(pod)
+ return e2epod.NewPodClient(f).Create(pod)
}

func expectSoftRejection(status v1.PodStatus) {

@@ -28,7 +28,7 @@ import (
"strconv"
"time"

- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"

@@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@@ -68,7 +69,7 @@ var _ = SIGDescribe("Checkpoint Container [NodeFeature:CheckpointContainer]", fu
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
ginkgo.It("will checkpoint a container out of a pod", func() {
ginkgo.By("creating a target pod")
- podClient := f.PodClient()
+ podClient := e2epod.NewPodClient(f)
pod := podClient.CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "checkpoint-container-pod"},
Spec: v1.PodSpec{

@@ -25,6 +25,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubelogs "k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/test/e2e/framework"
+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
admissionapi "k8s.io/pod-security-admission/api"

"github.com/onsi/ginkgo/v2"
@@ -70,7 +71,7 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func()
},
},
}
- pod = f.PodClient().CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(pod)
ginkgo.By("get container log path")
framework.ExpectEqual(len(pod.Status.ContainerStatuses), 1)
id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID

@@ -34,6 +34,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/test/e2e/framework"
+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"

@@ -106,7 +107,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
framework.ExpectNoError(err, "failed to list all pause processes on the node")
existingPausePIDSet := sets.NewInt(existingPausePIDs...)

- podClient := f.PodClient()
+ podClient := e2epod.NewPodClient(f)
podName := "besteffort" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -173,7 +174,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
})
})
ginkgo.It("guaranteed container's oom-score-adj should be -998", func() {
- podClient := f.PodClient()
+ podClient := e2epod.NewPodClient(f)
podName := "guaranteed" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -214,7 +215,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {

})
ginkgo.It("burstable container's oom-score-adj should be between [2, 1000)", func() {
- podClient := f.PodClient()
+ podClient := e2epod.NewPodClient(f)
podName := "burstable" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{

@@ -89,7 +89,7 @@ func deletePodSyncByName(f *framework.Framework, podName string) {
delOpts := metav1.DeleteOptions{
GracePeriodSeconds: &gp,
}
- f.PodClient().DeleteSync(podName, delOpts, framework.DefaultPodDeletionTimeout)
+ e2epod.NewPodClient(f).DeleteSync(podName, delOpts, e2epod.DefaultPodDeletionTimeout)
}

func deletePods(f *framework.Framework, podNames []string) {
@@ -243,7 +243,7 @@ func runGuPodTest(f *framework.Framework, cpuCount int) {
},
}
pod = makeCPUManagerPod("gu-pod", ctnAttrs)
- pod = f.PodClient().CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(pod)

ginkgo.By("checking if the expected cpuset was assigned")
// any full CPU is fine - we cannot nor we should predict which one, though
@@ -279,7 +279,7 @@ func runNonGuPodTest(f *framework.Framework, cpuCap int64) {
},
}
pod = makeCPUManagerPod("non-gu-pod", ctnAttrs)
- pod = f.PodClient().CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(pod)

ginkgo.By("checking if the expected cpuset was assigned")
expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1)
@@ -287,7 +287,7 @@ func runNonGuPodTest(f *framework.Framework, cpuCap int64) {
if cpuCap == 1 {
expAllowedCPUsListRegex = "^0\n$"
}
- err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
+ err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod.Spec.Containers[0].Name, pod.Name)

@@ -313,7 +313,7 @@ func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64
},
}
pod1 = makeCPUManagerPod("gu-pod", ctnAttrs)
- pod1 = f.PodClient().CreateSync(pod1)
+ pod1 = e2epod.NewPodClient(f).CreateSync(pod1)

ctnAttrs = []ctnAttribute{
{
@@ -323,7 +323,7 @@ func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64
},
}
pod2 = makeCPUManagerPod("non-gu-pod", ctnAttrs)
- pod2 = f.PodClient().CreateSync(pod2)
+ pod2 = e2epod.NewPodClient(f).CreateSync(pod2)

ginkgo.By("checking if the expected cpuset was assigned")
cpu1 = 1
@@ -337,7 +337,7 @@ func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64
}
}
expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
- err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
+ err = e2epod.NewPodClient(f).MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod1.Spec.Containers[0].Name, pod1.Name)

@@ -347,7 +347,7 @@ func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64
cpuListString = fmt.Sprintf("%s", cset.Difference(cpuset.NewCPUSet(cpu1)))
}
expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString)
- err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
+ err = e2epod.NewPodClient(f).MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod2.Spec.Containers[0].Name, pod2.Name)
ginkgo.By("by deleting the pods and waiting for container removal")
@@ -372,7 +372,7 @@ func runMultipleCPUGuPod(f *framework.Framework) {
},
}
pod = makeCPUManagerPod("gu-pod", ctnAttrs)
- pod = f.PodClient().CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(pod)

ginkgo.By("checking if the expected cpuset was assigned")
cpuListString = "1-2"
@@ -394,7 +394,7 @@ func runMultipleCPUGuPod(f *framework.Framework) {
}
}
expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString)
- err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
+ err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod.Spec.Containers[0].Name, pod.Name)

@@ -423,7 +423,7 @@ func runMultipleCPUContainersGuPod(f *framework.Framework) {
},
}
pod = makeCPUManagerPod("gu-pod", ctnAttrs)
- pod = f.PodClient().CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(pod)

ginkgo.By("checking if the expected cpuset was assigned")
cpu1, cpu2 = 1, 2
@@ -445,11 +445,11 @@ func runMultipleCPUContainersGuPod(f *framework.Framework) {
}
}
expAllowedCPUsListRegex = fmt.Sprintf("^%d|%d\n$", cpu1, cpu2)
- err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
+ err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod.Spec.Containers[0].Name, pod.Name)

- err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[1].Name, expAllowedCPUsListRegex)
+ err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[1].Name, expAllowedCPUsListRegex)
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod.Spec.Containers[1].Name, pod.Name)

@@ -475,7 +475,7 @@ func runMultipleGuPods(f *framework.Framework) {
},
}
pod1 = makeCPUManagerPod("gu-pod1", ctnAttrs)
- pod1 = f.PodClient().CreateSync(pod1)
+ pod1 = e2epod.NewPodClient(f).CreateSync(pod1)

ctnAttrs = []ctnAttribute{
{
@@ -485,7 +485,7 @@ func runMultipleGuPods(f *framework.Framework) {
},
}
pod2 = makeCPUManagerPod("gu-pod2", ctnAttrs)
- pod2 = f.PodClient().CreateSync(pod2)
+ pod2 = e2epod.NewPodClient(f).CreateSync(pod2)

ginkgo.By("checking if the expected cpuset was assigned")
cpu1, cpu2 = 1, 2
@@ -507,12 +507,12 @@ func runMultipleGuPods(f *framework.Framework) {
}
}
expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
- err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
+ err = e2epod.NewPodClient(f).MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod1.Spec.Containers[0].Name, pod1.Name)

expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu2)
- err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
+ err = e2epod.NewPodClient(f).MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod2.Spec.Containers[0].Name, pod2.Name)
ginkgo.By("by deleting the pods and waiting for container removal")
@@ -594,7 +594,7 @@ func runCPUManagerTests(f *framework.Framework) {
},
}
pod = makeCPUManagerPod("gu-pod-testremove", ctnAttrs)
- pod = f.PodClient().CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(pod)

ginkgo.By("checking if the expected cpuset was assigned")
cpu1 = 1
@@ -608,7 +608,7 @@ func runCPUManagerTests(f *framework.Framework) {
}
}
expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
- err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
+ err = e2epod.NewPodClient(f).MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
pod.Spec.Containers[0].Name, pod.Name)

@@ -691,7 +691,7 @@ func runSMTAlignmentNegativeTests(f *framework.Framework) {
}
pod := makeCPUManagerPod("gu-pod", ctnAttrs)
// CreateSync would wait for pod to become Ready - which will never happen if production code works as intended!
- pod = f.PodClient().Create(pod)
+ pod = e2epod.NewPodClient(f).Create(pod)

err := e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) {
if pod.Status.Phase != v1.PodPending {
@@ -700,7 +700,7 @@ func runSMTAlignmentNegativeTests(f *framework.Framework) {
return false, nil
})
framework.ExpectNoError(err)
- pod, err = f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{})
+ pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)

if pod.Status.Phase != v1.PodFailed {
@@ -731,7 +731,7 @@ func runSMTAlignmentPositiveTests(f *framework.Framework, smtLevel int) {
},
}
pod := makeCPUManagerPod("gu-pod", ctnAttrs)
- pod = f.PodClient().CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(pod)

for _, cnt := range pod.Spec.Containers {
ginkgo.By(fmt.Sprintf("validating the container %s on Gu pod %s", cnt.Name, pod.Name))

@@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/apis/scheduling"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"
+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"

@@ -72,8 +73,8 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]
}, node)

// Create pods, starting with non-critical so that the critical preempts the other pods.
- f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
- f.PodClientNS(kubeapi.NamespaceSystem).CreateSync(criticalPod)
+ e2epod.NewPodClient(f).CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
+ e2epod.PodClientNS(f, kubeapi.NamespaceSystem).CreateSync(criticalPod)

// Check that non-critical pods other than the besteffort have been evicted
updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
@@ -88,10 +89,10 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]
})
ginkgo.AfterEach(func() {
// Delete Pods
- f.PodClient().DeleteSync(guaranteedPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
- f.PodClient().DeleteSync(burstablePodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
- f.PodClient().DeleteSync(bestEffortPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
- f.PodClientNS(kubeapi.NamespaceSystem).DeleteSync(criticalPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+ e2epod.NewPodClient(f).DeleteSync(guaranteedPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+ e2epod.NewPodClient(f).DeleteSync(burstablePodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+ e2epod.NewPodClient(f).DeleteSync(bestEffortPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+ e2epod.PodClientNS(f, kubeapi.NamespaceSystem).DeleteSync(criticalPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
// Log Events
logPodEvents(f)
logNodeEvents(f)

@@ -44,6 +44,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@@ -69,7 +70,7 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() {

ginkgo.BeforeEach(func() {
// Start a standalone cadvisor pod using 'createSync', the pod is running when it returns
- f.PodClient().CreateSync(getCadvisorPod())
+ e2epod.NewPodClient(f).CreateSync(getCadvisorPod())
// Resource collector monitors fine-grain CPU/memory usage by a standalone Cadvisor with
// 1s housingkeeping interval
rc = NewResourceCollector(containerStatsPollingPeriod)
@@ -426,7 +427,7 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
ginkgo.By("Creating a batch of background pods")

// CreatBatch is synchronized, all pods are running when it returns
- f.PodClient().CreateBatch(bgPods)
+ e2epod.NewPodClient(f).CreateBatch(bgPods)

time.Sleep(sleepBeforeCreatePods)

@@ -453,7 +454,7 @@ func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, inter
for i := range pods {
pod := pods[i]
createTimes[pod.ObjectMeta.Name] = metav1.Now()
- go f.PodClient().Create(pod)
+ go e2epod.NewPodClient(f).Create(pod)
time.Sleep(interval)
}
return createTimes
@@ -546,7 +547,7 @@ func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod, podType st
for _, pod := range pods {
create := metav1.Now()
createTimes[pod.Name] = create
- p := f.PodClient().Create(pod)
+ p := e2epod.NewPodClient(f).Create(pod)
framework.ExpectNoError(wait.PollImmediate(2*time.Second, framework.PodStartTimeout, podWatchedRunning(watchTimes, p.Name)))
e2eLags = append(e2eLags,
e2emetrics.PodLatencyData{Name: pod.Name, Latency: watchTimes[pod.Name].Time.Sub(create.Time)})

@@ -85,7 +85,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur
podName := "gu-pod-rec-pre-1"
framework.Logf("creating pod %s attrs %v", podName, ctnAttrs)
pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs)
- pod = f.PodClient().CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(pod)

// now we need to simulate a node drain, so we remove all the pods, including the sriov device plugin.

@@ -131,7 +131,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur
framework.Logf("creating pod %s attrs %v", podName, ctnAttrs)
pod = makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs)

- pod = f.PodClient().Create(pod)
+ pod = e2epod.NewPodClient(f).Create(pod)
err = e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) {
if pod.Status.Phase != v1.PodPending {
return true, nil
@@ -139,7 +139,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur
return false, nil
})
framework.ExpectNoError(err)
- pod, err = f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{})
+ pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)

if pod.Status.Phase != v1.PodFailed {
@@ -205,7 +205,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur
podName := "gu-pod-rec-pre-1"
framework.Logf("creating pod %s attrs %v", podName, ctnAttrs)
pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs)
- pod = f.PodClient().CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(pod)

// now we need to simulate a node drain, so we remove all the pods, including the sriov device plugin.

@@ -123,7 +123,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
}
}
dptemplate = dp.DeepCopy()
- devicePluginPod = f.PodClient().CreateSync(dp)
+ devicePluginPod = e2epod.NewPodClient(f).CreateSync(dp)

ginkgo.By("Waiting for devices to become available on the local node")
gomega.Eventually(func() bool {
@@ -143,10 +143,10 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {

ginkgo.AfterEach(func() {
ginkgo.By("Deleting the device plugin pod")
- f.PodClient().DeleteSync(devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)
+ e2epod.NewPodClient(f).DeleteSync(devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)

ginkgo.By("Deleting any Pods created by the test")
- l, err := f.PodClient().List(context.TODO(), metav1.ListOptions{})
+ l, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
for _, p := range l.Items {
if p.Namespace != f.Namespace.Name {
@@ -154,7 +154,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
}

framework.Logf("Deleting pod: %s", p.Name)
- f.PodClient().DeleteSync(p.Name, metav1.DeleteOptions{}, 2*time.Minute)
+ e2epod.NewPodClient(f).DeleteSync(p.Name, metav1.DeleteOptions{}, 2*time.Minute)
}

restartKubelet(true)
@@ -170,7 +170,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {

ginkgo.It("Can schedule a pod that requires a device", func() {
podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60"
- pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
+ pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))
@@ -225,12 +225,12 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {

ginkgo.It("Keeps device plugin assignments across pod and kubelet restarts", func() {
podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60"
- pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
+ pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))

- pod1, err := f.PodClient().Get(context.TODO(), pod1.Name, metav1.GetOptions{})
+ pod1, err := e2epod.NewPodClient(f).Get(context.TODO(), pod1.Name, metav1.GetOptions{})
framework.ExpectNoError(err)

ensurePodContainerRestart(f, pod1.Name, pod1.Name)
@@ -243,7 +243,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
restartKubelet(true)

ginkgo.By("Wait for node to be ready again")
- framework.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute)
+ e2enode.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute)

ginkgo.By("Validating that assignment is kept")
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
@@ -254,30 +254,30 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {

ginkgo.It("Keeps device plugin assignments after the device plugin has been re-registered", func() {
podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60"
- pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
+ pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))

- pod1, err := f.PodClient().Get(context.TODO(), pod1.Name, metav1.GetOptions{})
+ pod1, err := e2epod.NewPodClient(f).Get(context.TODO(), pod1.Name, metav1.GetOptions{})
framework.ExpectNoError(err)

ginkgo.By("Restarting Kubelet")
restartKubelet(true)

ginkgo.By("Wait for node to be ready again")
- framework.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute)
+ e2enode.WaitForAllNodesSchedulable(f.ClientSet, 5*time.Minute)

ginkgo.By("Re-Register resources and delete the plugin pod")
gp := int64(0)
deleteOptions := metav1.DeleteOptions{
GracePeriodSeconds: &gp,
}
- f.PodClient().DeleteSync(devicePluginPod.Name, deleteOptions, time.Minute)
+ e2epod.NewPodClient(f).DeleteSync(devicePluginPod.Name, deleteOptions, time.Minute)
waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)

ginkgo.By("Recreating the plugin pod")
- devicePluginPod = f.PodClient().CreateSync(dptemplate)
+ devicePluginPod = e2epod.NewPodClient(f).CreateSync(dptemplate)

ginkgo.By("Confirming that after a kubelet and pod restart, fake-device assignment is kept")
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
@@ -293,7 +293,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())

ginkgo.By("Creating another pod")
- pod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
+ pod2 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD))

ginkgo.By("Checking that pod got a different fake device")
devID2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
@@ -331,13 +331,13 @@ func makeBusyboxPod(resourceName, cmd string) *v1.Pod {
func ensurePodContainerRestart(f *framework.Framework, podName string, contName string) {
var initialCount int32
var currentCount int32
- p, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
+ p, err := e2epod.NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil || len(p.Status.ContainerStatuses) < 1 {
framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
}
initialCount = p.Status.ContainerStatuses[0].RestartCount
gomega.Eventually(func() bool {
- p, err = f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
+ p, err = e2epod.NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil || len(p.Status.ContainerStatuses) < 1 {
return false
}

@@ -37,6 +37,7 @@ import (
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"
+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -532,7 +533,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
for _, spec := range testSpecs {
pods = append(pods, spec.pod)
}
- f.PodClient().CreateBatch(pods)
+ e2epod.NewPodClient(f).CreateBatch(pods)
})

ginkgo.It("should eventually evict all of the correct pods", func() {
@@ -603,7 +604,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
ginkgo.By("deleting pods")
for _, spec := range testSpecs {
ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
- f.PodClient().DeleteSync(spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute)
+ e2epod.NewPodClient(f).DeleteSync(spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute)
}

// In case a test fails before verifying that NodeCondition no longer exist on the node,
@@ -631,7 +632,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe

ginkgo.By("making sure we can start a new pod after the test")
podName := "test-admit-pod"
- f.PodClient().CreateSync(&v1.Pod{
+ e2epod.NewPodClient(f).CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},

@@ -28,6 +28,7 @@ import (
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"
+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
admissionapi "k8s.io/pod-security-admission/api"

"github.com/onsi/ginkgo/v2"
@@ -173,7 +174,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
ginkgo.Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() {
ginkgo.BeforeEach(func() {
realPods := getPods(test.testPods)
- f.PodClient().CreateBatch(realPods)
+ e2epod.NewPodClient(f).CreateBatch(realPods)
ginkgo.By("Making sure all containers restart the specified number of times")
gomega.Eventually(func() error {
for _, podSpec := range test.testPods {
@@ -248,7 +249,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
ginkgo.AfterEach(func() {
for _, pod := range test.testPods {
ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName))
- f.PodClient().DeleteSync(pod.podName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+ e2epod.NewPodClient(f).DeleteSync(pod.podName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}

ginkgo.By("Making sure all containers get cleaned up")

@@ -323,7 +323,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H
ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func() {
ginkgo.By("getting mounts for the test pod")
command := []string{"mount"}
- out := f.ExecCommandInContainer(testpod.Name, testpod.Spec.Containers[0].Name, command...)
+ out := e2epod.ExecCommandInContainer(f, testpod.Name, testpod.Spec.Containers[0].Name, command...)

for _, mount := range mounts {
ginkgo.By(fmt.Sprintf("checking that the hugetlb mount %s exists under the container", mount.MountPath))
@@ -337,7 +337,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H
resourceToCgroup[resourceName],
)
ginkgo.By("checking if the expected hugetlb settings were applied")
- f.PodClient().Create(verifyPod)
+ e2epod.NewPodClient(f).Create(verifyPod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
}
@@ -356,13 +356,13 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H
pod := getHugepagesTestPod(f, limits, mounts, volumes)

ginkgo.By("by running a test pod that requests hugepages")
- testpod = f.PodClient().CreateSync(pod)
+ testpod = e2epod.NewPodClient(f).CreateSync(pod)
})

// we should use JustAfterEach because framework will teardown the client under the AfterEach method
ginkgo.JustAfterEach(func() {
ginkgo.By(fmt.Sprintf("deleting test pod %s", testpod.Name))
- f.PodClient().DeleteSync(testpod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+ e2epod.NewPodClient(f).DeleteSync(testpod.Name, metav1.DeleteOptions{}, 2*time.Minute)

releaseHugepages()

@@ -23,6 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)
@@ -30,10 +31,10 @@ import (
var _ = SIGDescribe("ImageCredentialProvider [Feature:KubeletCredentialProviders]", func() {
f := framework.NewDefaultFramework("image-credential-provider")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
- var podClient *framework.PodClient
+ var podClient *e2epod.PodClient

ginkgo.BeforeEach(func() {
- podClient = f.PodClient()
+ podClient = e2epod.NewPodClient(f)
})

/*

@@ -18,7 +18,8 @@ package e2enode

import (
"context"
- "k8s.io/api/core/v1"
+
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -51,11 +52,11 @@ var _ = SIGDescribe("ImageID [NodeFeature: ImageID]", func() {
},
}

- pod := f.PodClient().Create(podDesc)
+ pod := e2epod.NewPodClient(f).Create(podDesc)

framework.ExpectNoError(e2epod.WaitTimeoutForPodNoLongerRunningInNamespace(
f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout))
- runningPod, err := f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{})
+ runningPod, err := e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)

status := runningPod.Status

@@ -31,9 +31,9 @@ import (
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
commontest "k8s.io/kubernetes/test/e2e/common"
- "k8s.io/kubernetes/test/e2e/framework"
e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest"
+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -68,34 +68,34 @@ var NodePrePullImageList = sets.NewString(
imageutils.GetE2EImage(imageutils.Etcd),
)

- // updateImageAllowList updates the framework.ImagePrePullList with
+ // updateImageAllowList updates the e2epod.ImagePrePullList with
// 1. the hard coded lists
// 2. the ones passed in from framework.TestContext.ExtraEnvs
// So this function needs to be called after the extra envs are applied.
func updateImageAllowList() {
// Union NodePrePullImageList and PrePulledImages into the framework image pre-pull list.
- framework.ImagePrePullList = NodePrePullImageList.Union(commontest.PrePulledImages)
+ e2epod.ImagePrePullList = NodePrePullImageList.Union(commontest.PrePulledImages)
// Images from extra envs
- framework.ImagePrePullList.Insert(getNodeProblemDetectorImage())
+ e2epod.ImagePrePullList.Insert(getNodeProblemDetectorImage())
if sriovDevicePluginImage, err := getSRIOVDevicePluginImage(); err != nil {
klog.Errorln(err)
} else {
- framework.ImagePrePullList.Insert(sriovDevicePluginImage)
+ e2epod.ImagePrePullList.Insert(sriovDevicePluginImage)
}
if gpuDevicePluginImage, err := getGPUDevicePluginImage(); err != nil {
klog.Errorln(err)
} else {
- framework.ImagePrePullList.Insert(gpuDevicePluginImage)
+ e2epod.ImagePrePullList.Insert(gpuDevicePluginImage)
}
if kubeVirtPluginImage, err := getKubeVirtDevicePluginImage(); err != nil {
klog.Errorln(err)
} else {
- framework.ImagePrePullList.Insert(kubeVirtPluginImage)
+ e2epod.ImagePrePullList.Insert(kubeVirtPluginImage)
}
if samplePluginImage, err := getSampleDevicePluginImage(); err != nil {
klog.Errorln(err)
} else {
- framework.ImagePrePullList.Insert(samplePluginImage)
+ e2epod.ImagePrePullList.Insert(samplePluginImage)
}
}

@@ -153,7 +153,7 @@ func PrePullAllImages() error {
if err != nil {
return err
}
- images := framework.ImagePrePullList.List()
+ images := e2epod.ImagePrePullList.List()
klog.V(4).Infof("Pre-pulling images with %s %+v", puller.Name(), images)

imageCh := make(chan int, len(images))

@@ -39,7 +39,7 @@ const (
var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() {
f := framework.NewDefaultFramework("kubelet-container-log-path")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
- var podClient *framework.PodClient
+ var podClient *e2epod.PodClient

ginkgo.Describe("Pod with a container", func() {
ginkgo.Context("printed log to stdout", func() {
@@ -117,7 +117,7 @@ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() {

var logPodName string
ginkgo.BeforeEach(func() {
- podClient = f.PodClient()
+ podClient = e2epod.NewPodClient(f)
logPodName = "log-pod-" + string(uuid.NewUUID())
err := createAndWaitPod(makeLogPod(logPodName, logString))
framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName)

@@ -351,7 +351,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
ginkgo.JustAfterEach(func() {
// delete the test pod
if testPod != nil && testPod.Name != "" {
- f.PodClient().DeleteSync(testPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+ e2epod.NewPodClient(f).DeleteSync(testPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
}

// release hugepages
@@ -441,7 +441,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager

ginkgo.It("should succeed to start the pod", func() {
ginkgo.By("Running the test pod")
- testPod = f.PodClient().CreateSync(testPod)
+ testPod = e2epod.NewPodClient(f).CreateSync(testPod)

// it no taste to verify NUMA pinning when the node has only one NUMA node
if !*isMultiNUMASupported {
@@ -466,7 +466,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager

ginkgo.It("should succeed to start the pod", func() {
ginkgo.By("Running the test pod")
- testPod = f.PodClient().CreateSync(testPod)
+ testPod = e2epod.NewPodClient(f).CreateSync(testPod)

// it no taste to verify NUMA pinning when the node has only one NUMA node
if !*isMultiNUMASupported {
@@ -497,10 +497,10 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager

ginkgo.It("should succeed to start all pods", func() {
ginkgo.By("Running the test pod and the test pod 2")
- testPod = f.PodClient().CreateSync(testPod)
+ testPod = e2epod.NewPodClient(f).CreateSync(testPod)

ginkgo.By("Running the test pod 2")
- testPod2 = f.PodClient().CreateSync(testPod2)
+ testPod2 = e2epod.NewPodClient(f).CreateSync(testPod2)

// it no taste to verify NUMA pinning when the node has only one NUMA node
if !*isMultiNUMASupported {
@@ -514,10 +514,10 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
ginkgo.It("should report memory data for each guaranteed pod and container during request to pod resources List", func() {
ginkgo.By("Running the test pod and the test pod 2")
- testPod = f.PodClient().CreateSync(testPod)
+ testPod = e2epod.NewPodClient(f).CreateSync(testPod)

ginkgo.By("Running the test pod 2")
- testPod2 = f.PodClient().CreateSync(testPod2)
+ testPod2 = e2epod.NewPodClient(f).CreateSync(testPod2)

endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
@@ -556,7 +556,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
ginkgo.JustAfterEach(func() {
// delete the test pod 2
if testPod2.Name != "" {
- f.PodClient().DeleteSync(testPod2.Name, metav1.DeleteOptions{}, 2*time.Minute)
+ e2epod.NewPodClient(f).DeleteSync(testPod2.Name, metav1.DeleteOptions{}, 2*time.Minute)
}
})
})
@@ -599,18 +599,18 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
}
workloadPod := makeMemoryManagerPod(workloadCtnAttrs[0].ctnName, initCtnParams, workloadCtnAttrs)

- workloadPod = f.PodClient().CreateSync(workloadPod)
+ workloadPod = e2epod.NewPodClient(f).CreateSync(workloadPod)
workloadPods = append(workloadPods, workloadPod)
}
})

ginkgo.It("should be rejected", func() {
ginkgo.By("Creating the pod")
- testPod = f.PodClient().Create(testPod)
+ testPod = e2epod.NewPodClient(f).Create(testPod)

ginkgo.By("Checking that pod failed to start because of admission error")
gomega.Eventually(func() bool {
- tmpPod, err := f.PodClient().Get(context.TODO(), testPod.Name, metav1.GetOptions{})
+ tmpPod, err := e2epod.NewPodClient(f).Get(context.TODO(), testPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)

if tmpPod.Status.Phase != v1.PodFailed {
@@ -635,7 +635,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
ginkgo.JustAfterEach(func() {
for _, workloadPod := range workloadPods {
if workloadPod.Name != "" {
- f.PodClient().DeleteSync(workloadPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+ e2epod.NewPodClient(f).DeleteSync(workloadPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
}
}
})
@@ -679,7 +679,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager

// TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945
ginkgo.It("should not report any memory data during request to pod resources List", func() {
- testPod = f.PodClient().CreateSync(testPod)
+ testPod = e2epod.NewPodClient(f).CreateSync(testPod)

endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
framework.ExpectNoError(err)
@@ -703,7 +703,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
})

ginkgo.It("should succeed to start the pod", func() {
- testPod = f.PodClient().CreateSync(testPod)
+ testPod = e2epod.NewPodClient(f).CreateSync(testPod)

// it no taste to verify NUMA pinning when the node has only one NUMA node
if !*isMultiNUMASupported {

@@ -103,7 +103,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
delOpts := metav1.DeleteOptions{
GracePeriodSeconds: &gp,
}
- f.PodClient().DeleteSync(pod.Name, delOpts, framework.DefaultPodDeletionTimeout)
+ e2epod.NewPodClient(f).DeleteSync(pod.Name, delOpts, e2epod.DefaultPodDeletionTimeout)

// We are going to give some more time for the CPU manager to do any clean
// up it needs to do now that the pod has been deleted. Otherwise we may
@@ -124,7 +124,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
// Make the pod for the workload.
pod = makeNodePerfPod(wl)
// Create the pod.
- pod = f.PodClient().CreateSync(pod)
+ pod = e2epod.NewPodClient(f).CreateSync(pod)
// Wait for pod success.
// but avoid using WaitForSuccess because we want the container logs upon failure #109295
podErr := e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), wl.Timeout(),

@@ -201,7 +201,7 @@ current-context: local-context
ginkgo.By("Create the node problem detector")
hostPathType := new(v1.HostPathType)
*hostPathType = v1.HostPathFileOrCreate
- pod := f.PodClient().CreateSync(&v1.Pod{
+ pod := e2epod.NewPodClient(f).CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
@@ -432,7 +432,7 @@ current-context: local-context
framework.Logf("Node Problem Detector logs:\n %s", log)
}
ginkgo.By("Delete the node problem detector")
- f.PodClient().Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
+ e2epod.NewPodClient(f).Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
ginkgo.By("Wait for the node problem detector to disappear")
gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed())
ginkgo.By("Delete the config map")

@@ -39,6 +39,7 @@ import (
"github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/test/e2e/framework"
+ e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

"github.com/godbus/dbus/v5"
v1 "k8s.io/api/core/v1"
@@ -99,9 +100,9 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
}

ginkgo.By("Creating batch pods")
- f.PodClient().CreateBatch(pods)
+ e2epod.NewPodClient(f).CreateBatch(pods)

- list, err := f.PodClient().List(context.TODO(), metav1.ListOptions{
+ list, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{
FieldSelector: nodeSelector,
})
framework.ExpectNoError(err)
@@ -149,7 +150,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
ginkgo.By("Verifying that non-critical pods are shutdown")
// Not critical pod should be shutdown
gomega.Eventually(func() error {
- list, err = f.PodClient().List(context.TODO(), metav1.ListOptions{
+ list, err = e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{
FieldSelector: nodeSelector,
})
if err != nil {
@@ -176,7 +177,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
ginkgo.By("Verifying that all pods are shutdown")
// All pod should be shutdown
gomega.Eventually(func() error {
- list, err = f.PodClient().List(context.TODO(), metav1.ListOptions{
+ list, err = e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{
FieldSelector: nodeSelector,
})
if err != nil {
@@ -368,9 +369,9 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
}

ginkgo.By("Creating batch pods")
- f.PodClient().CreateBatch(pods)
+ e2epod.NewPodClient(f).CreateBatch(pods)

- list, err := f.PodClient().List(context.TODO(), metav1.ListOptions{
+ list, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{
FieldSelector: nodeSelector,
})
framework.ExpectNoError(err)
@@ -391,7 +392,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut

for _, step := range downSteps {
gomega.Eventually(func() error {
- list, err = f.PodClient().List(context.TODO(), metav1.ListOptions{
+ list, err = e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{
FieldSelector: nodeSelector,
})
if err != nil {

@@ -28,6 +28,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"

 "k8s.io/kubernetes/test/e2e/framework"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )

 type numaPodResources struct {
@@ -107,7 +108,7 @@ func getCPUToNUMANodeMapFromEnv(f *framework.Framework, pod *v1.Pod, cnt *v1.Con

 cpusPerNUMA := make(map[int][]int)
 for numaNode := 0; numaNode < numaNodes; numaNode++ {
-nodeCPUList := f.ExecCommandInContainer(pod.Name, cnt.Name,
+nodeCPUList := e2epod.ExecCommandInContainer(f, pod.Name, cnt.Name,
 "/bin/cat", fmt.Sprintf("/sys/devices/system/node/node%d/cpulist", numaNode))

 cpus, err := cpuset.Parse(nodeCPUList)
@@ -152,7 +153,7 @@ func getPCIDeviceToNumaNodeMapFromEnv(f *framework.Framework, pod *v1.Pod, cnt *
 // a single plugin can allocate more than a single device
 pciDevs := strings.Split(value, ",")
 for _, pciDev := range pciDevs {
-pciDevNUMANode := f.ExecCommandInContainer(pod.Name, cnt.Name,
+pciDevNUMANode := e2epod.ExecCommandInContainer(f, pod.Name, cnt.Name,
 "/bin/cat", fmt.Sprintf("/sys/bus/pci/devices/%s/numa_node", pciDev))
 NUMAPerDev[pciDev] = numaNodeFromSysFsEntry(pciDevNUMANode)
 }
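The same mechanics apply to the exec helpers in these hunks: f.ExecCommandInContainer(pod, container, cmd...) becomes e2epod.ExecCommandInContainer(f, pod, container, cmd...). A minimal sketch, again illustrative only; the pod name, container name, and path are placeholders.

// Illustrative sketch, not part of the commit:
package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func readSysfsEntry(f *framework.Framework, podName, containerName, path string) string {
	// Before: f.ExecCommandInContainer(podName, containerName, "/bin/cat", path)
	return e2epod.ExecCommandInContainer(f, podName, containerName, "/bin/cat", path)
}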
@@ -34,6 +34,7 @@ import (
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 nodeutil "k8s.io/component-helpers/node/util"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 admissionapi "k8s.io/pod-security-admission/api"
 )

@@ -43,8 +44,8 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu
 ginkgo.Context("Kubelet", func() {
 ginkgo.It("should reconcile the OS and Arch labels when restarted", func() {
 node := getLocalNode(f)
-framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS)
-framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH)
+e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS)
+e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH)

 ginkgo.By("killing and restarting kubelet")
 // Let's kill the kubelet
@@ -57,7 +58,7 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu
 framework.ExpectNoError(err)
 // Restart kubelet
 startKubelet()
-framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.RestartNodeReadyAgainTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, framework.RestartNodeReadyAgainTimeout))
 // If this happens right, node should have all the labels reset properly
 err = waitForNodeLabels(f.ClientSet.CoreV1(), node.Name, 5*time.Minute)
 framework.ExpectNoError(err)
@@ -65,8 +66,8 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu
 ginkgo.It("should reconcile the OS and Arch labels when running", func() {

 node := getLocalNode(f)
-framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS)
-framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH)
+e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS)
+e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH)

 // Update labels
 newNode := node.DeepCopy()
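Node-label assertions follow the same route into e2enode, as the hunks above show. Illustrative sketch only, not part of the commit; the label key and value are taken from the diff context.

// Illustrative sketch, not part of the commit:
package example

import (
	"runtime"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

func checkOSLabel(f *framework.Framework, nodeName string) {
	// Before: framework.ExpectNodeHasLabel(f.ClientSet, nodeName, v1.LabelOSStable, runtime.GOOS)
	e2enode.ExpectNodeHasLabel(f.ClientSet, nodeName, v1.LabelOSStable, runtime.GOOS)
}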
@@ -89,7 +89,7 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *v1.Pod {
 func runPodPidsLimitTests(f *framework.Framework) {
 ginkgo.It("should set pids.max for Pod", func() {
 ginkgo.By("by creating a G pod")
-pod := f.PodClient().Create(&v1.Pod{
+pod := e2epod.NewPodClient(f).Create(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: "pod" + string(uuid.NewUUID()),
 Namespace: f.Namespace.Name,
@@ -112,7 +112,7 @@ func runPodPidsLimitTests(f *framework.Framework) {
 podUID := string(pod.UID)
 ginkgo.By("checking if the expected pids settings were applied")
 verifyPod := makePodToVerifyPids("pod"+podUID, resource.MustParse("1024"))
-f.PodClient().Create(verifyPod)
+e2epod.NewPodClient(f).Create(verifyPod)
 err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
 framework.ExpectNoError(err)
 })
@@ -89,7 +89,7 @@ func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, chec
 },
 }

-p = f.PodClient().Create(p)
+p = e2epod.NewPodClient(f).Create(p)

 ginkgo.By("waiting until kubelet has started trying to set up the pod and started to fail")

@@ -101,7 +101,7 @@ func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, chec
 }.AsSelector().String()
 e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, "MountVolume.SetUp failed for volume", framework.PodEventTimeout)

-p, err := f.PodClient().Get(context.TODO(), p.Name, metav1.GetOptions{})
+p, err := e2epod.NewPodClient(f).Get(context.TODO(), p.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)

 ginkgo.By("checking pod condition for a pod whose sandbox creation is blocked")
@@ -139,10 +139,10 @@ func runPodReadyConditionsTest(f *framework.Framework, hasInitContainers, checkP
 return func() {
 ginkgo.By("creating a pod that successfully comes up in a ready/running state")

-p := f.PodClient().Create(webserverPodSpec("pod-"+string(uuid.NewUUID()), "web1", "init1", hasInitContainers))
+p := e2epod.NewPodClient(f).Create(webserverPodSpec("pod-"+string(uuid.NewUUID()), "web1", "init1", hasInitContainers))
 e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout)

-p, err := f.PodClient().Get(context.TODO(), p.Name, metav1.GetOptions{})
+p, err := e2epod.NewPodClient(f).Get(context.TODO(), p.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 isReady, err := testutils.PodRunningReady(p)
 framework.ExpectNoError(err)
@@ -35,6 +35,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 imageutils "k8s.io/kubernetes/test/utils/image"

 "github.com/onsi/ginkgo/v2"
@@ -83,7 +84,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
 pod.Spec.Containers[0].Command = []string{"sh", "-c", "echo $(hostname)';'$(hostname -f)';'"}
 output := []string{fmt.Sprintf("%s;%s;", pod.ObjectMeta.Name, pod.ObjectMeta.Name)}
 // Create Pod
-f.TestContainerOutput("shortname only", pod, 0, output)
+e2eoutput.TestContainerOutput(f, "shortname only", pod, 0, output)
 })

 /*
@@ -100,7 +101,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
 pod.Spec.Containers[0].Command = []string{"sh", "-c", "echo $(hostname)';'$(hostname -f)';'"}
 output := []string{fmt.Sprintf("%s;%s;", pod.ObjectMeta.Name, pod.ObjectMeta.Name)}
 // Create Pod
-f.TestContainerOutput("shortname only", pod, 0, output)
+e2eoutput.TestContainerOutput(f, "shortname only", pod, 0, output)
 })

 /*
@@ -119,7 +120,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
 hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", pod.ObjectMeta.Name, subdomain, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
 output := []string{fmt.Sprintf("%s;%s;", pod.ObjectMeta.Name, hostFQDN)}
 // Create Pod
-f.TestContainerOutput("shortname and fqdn", pod, 0, output)
+e2eoutput.TestContainerOutput(f, "shortname and fqdn", pod, 0, output)
 })

 /*
@@ -144,7 +145,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
 framework.ExpectEqual(len(hostFQDN) < 65, true, fmt.Sprintf("The FQDN of the Pod cannot be longer than 64 characters, requested %s which is %d characters long.", hostFQDN, len(hostFQDN)))
 output := []string{fmt.Sprintf("%s;%s;", hostFQDN, hostFQDN)}
 // Create Pod
-f.TestContainerOutput("fqdn and fqdn", pod, 0, output)
+e2eoutput.TestContainerOutput(f, "fqdn and fqdn", pod, 0, output)
 })

 /*
@@ -170,9 +171,9 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
 setHostnameAsFQDN := true
 pod.Spec.SetHostnameAsFQDN = &setHostnameAsFQDN
 // Create Pod
-launchedPod := f.PodClient().Create(pod)
+launchedPod := e2epod.NewPodClient(f).Create(pod)
 // Ensure we delete pod
-defer f.PodClient().DeleteSync(launchedPod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+defer e2epod.NewPodClient(f).DeleteSync(launchedPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)

 // Pod should remain in the pending state generating events with reason FailedCreatePodSandBox
 // Expected Message Error Event
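The hostname hunks above show a third family of moves: the container-output assertions leave the framework receiver and land in the e2eoutput package (test/e2e/framework/pod/output). A minimal sketch, illustrative only; the scenario name, pod, and expected strings are placeholders.

// Illustrative sketch, not part of the commit:
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)

func checkOutput(f *framework.Framework, pod *v1.Pod, expected []string) {
	// Before: f.TestContainerOutput("shortname only", pod, 0, expected)
	e2eoutput.TestContainerOutput(f, "shortname only", pod, 0, expected)
}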
@@ -164,7 +164,7 @@ func newTestPodData() *testPodData {
 func (tpd *testPodData) createPodsForTest(f *framework.Framework, podReqs []podDesc) {
 for _, podReq := range podReqs {
 pod := makePodResourcesTestPod(podReq)
-pod = f.PodClient().CreateSync(pod)
+pod = e2epod.NewPodClient(f).CreateSync(pod)

 framework.Logf("created pod %s", podReq.podName)
 tpd.PodMap[podReq.podName] = pod
@@ -20,7 +20,7 @@ import (
 "context"
 "strings"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
@@ -176,7 +176,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
 }
 cgroupsToVerify := []string{burstableCgroup, bestEffortCgroup}
 pod := makePodToVerifyCgroups(cgroupsToVerify)
-f.PodClient().Create(pod)
+e2epod.NewPodClient(f).Create(pod)
 err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 framework.ExpectNoError(err)
 })
@@ -194,7 +194,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
 podUID string
 )
 ginkgo.By("Creating a Guaranteed pod in Namespace", func() {
-guaranteedPod = f.PodClient().Create(&v1.Pod{
+guaranteedPod = e2epod.NewPodClient(f).Create(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: "pod" + string(uuid.NewUUID()),
 Namespace: f.Namespace.Name,
@@ -214,16 +214,16 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
 ginkgo.By("Checking if the pod cgroup was created", func() {
 cgroupsToVerify := []string{"pod" + podUID}
 pod := makePodToVerifyCgroups(cgroupsToVerify)
-f.PodClient().Create(pod)
+e2epod.NewPodClient(f).Create(pod)
 err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 framework.ExpectNoError(err)
 })
 ginkgo.By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-err := f.PodClient().Delete(context.TODO(), guaranteedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
+err := e2epod.NewPodClient(f).Delete(context.TODO(), guaranteedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 framework.ExpectNoError(err)
 pod := makePodToVerifyCgroupRemoved("pod" + podUID)
-f.PodClient().Create(pod)
+e2epod.NewPodClient(f).Create(pod)
 err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 framework.ExpectNoError(err)
 })
@@ -239,7 +239,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
 bestEffortPod *v1.Pod
 )
 ginkgo.By("Creating a BestEffort pod in Namespace", func() {
-bestEffortPod = f.PodClient().Create(&v1.Pod{
+bestEffortPod = e2epod.NewPodClient(f).Create(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: "pod" + string(uuid.NewUUID()),
 Namespace: f.Namespace.Name,
@@ -259,16 +259,16 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
 ginkgo.By("Checking if the pod cgroup was created", func() {
 cgroupsToVerify := []string{"besteffort/pod" + podUID}
 pod := makePodToVerifyCgroups(cgroupsToVerify)
-f.PodClient().Create(pod)
+e2epod.NewPodClient(f).Create(pod)
 err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 framework.ExpectNoError(err)
 })
 ginkgo.By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-err := f.PodClient().Delete(context.TODO(), bestEffortPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
+err := e2epod.NewPodClient(f).Delete(context.TODO(), bestEffortPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 framework.ExpectNoError(err)
 pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID)
-f.PodClient().Create(pod)
+e2epod.NewPodClient(f).Create(pod)
 err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 framework.ExpectNoError(err)
 })
@@ -284,7 +284,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
 burstablePod *v1.Pod
 )
 ginkgo.By("Creating a Burstable pod in Namespace", func() {
-burstablePod = f.PodClient().Create(&v1.Pod{
+burstablePod = e2epod.NewPodClient(f).Create(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: "pod" + string(uuid.NewUUID()),
 Namespace: f.Namespace.Name,
@@ -304,16 +304,16 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
 ginkgo.By("Checking if the pod cgroup was created", func() {
 cgroupsToVerify := []string{"burstable/pod" + podUID}
 pod := makePodToVerifyCgroups(cgroupsToVerify)
-f.PodClient().Create(pod)
+e2epod.NewPodClient(f).Create(pod)
 err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 framework.ExpectNoError(err)
 })
 ginkgo.By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-err := f.PodClient().Delete(context.TODO(), burstablePod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
+err := e2epod.NewPodClient(f).Delete(context.TODO(), burstablePod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 framework.ExpectNoError(err)
 pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID)
-f.PodClient().Create(pod)
+e2epod.NewPodClient(f).Create(pod)
 err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 framework.ExpectNoError(err)
 })
@@ -379,7 +379,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 defer ginkgo.GinkgoRecover()
 defer wg.Done()

-err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
+err := e2epod.NewPodClient(f).Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
 if apierrors.IsNotFound(err) {
 framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err)
 }
@@ -24,6 +24,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 admissionapi "k8s.io/pod-security-admission/api"

@@ -49,7 +50,7 @@ var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() {
 ginkgo.By("Creating test pods to measure their resource usage")
 numRestarts := int32(1)
 pods := getSummaryTestPods(f, numRestarts, pod0, pod1)
-f.PodClient().CreateBatch(pods)
+e2epod.NewPodClient(f).CreateBatch(pods)

 ginkgo.By("restarting the containers to ensure container metrics are still being gathered after a container is restarted")
 gomega.Eventually(func() error {
@@ -113,8 +114,8 @@ var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() {
 ginkgo.AfterEach(func() {
 ginkgo.By("Deleting test pods")
 var zero int64 = 0
-f.PodClient().DeleteSync(pod0, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute)
-f.PodClient().DeleteSync(pod1, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute)
+e2epod.NewPodClient(f).DeleteSync(pod0, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute)
+e2epod.NewPodClient(f).DeleteSync(pod1, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute)
 if !ginkgo.CurrentSpecReport().Failed() {
 return
 }
@@ -29,6 +29,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
 e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 imageutils "k8s.io/kubernetes/test/utils/image"
 admissionapi "k8s.io/pod-security-admission/api"

@@ -55,7 +56,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
 // The Cadvsior of Kubelet has a housekeeping interval of 10s, which is too long to
 // show the resource usage spikes. But changing its interval increases the overhead
 // of kubelet. Hence we use a Cadvisor pod.
-f.PodClient().CreateSync(getCadvisorPod())
+e2epod.NewPodClient(f).CreateSync(getCadvisorPod())
 rc = NewResourceCollector(containerStatsPollingPeriod)
 })

@@ -155,7 +156,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
 defer rc.Stop()

 ginkgo.By("Creating a batch of Pods")
-f.PodClient().CreateBatch(pods)
+e2epod.NewPodClient(f).CreateBatch(pods)

 // wait for a while to let the node be steady
 time.Sleep(sleepAfterCreatePods)
@@ -29,6 +29,7 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/kubernetes/test/e2e/framework"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -42,7 +43,7 @@ import (
 // If the timeout is hit, it returns the list of currently running pods.
 func waitForPods(f *framework.Framework, podCount int, timeout time.Duration) (runningPods []*v1.Pod) {
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
-podList, err := f.PodClient().List(context.TODO(), metav1.ListOptions{})
+podList, err := e2epod.NewPodClient(f).List(context.TODO(), metav1.ListOptions{})
 if err != nil {
 framework.Logf("Failed to list pods on node: %v", err)
 continue
@@ -22,10 +22,11 @@ import (
 "path/filepath"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/kubernetes/pkg/kubelet/images"
 "k8s.io/kubernetes/test/e2e/common/node"
 "k8s.io/kubernetes/test/e2e/framework"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e_node/services"
 admissionapi "k8s.io/pod-security-admission/api"

@@ -70,7 +71,7 @@ var _ = SIGDescribe("Container Runtime Conformance Test", func() {
 name := "image-pull-test"
 command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
 container := node.ConformanceContainer{
-PodClient: f.PodClient(),
+PodClient: e2epod.NewPodClient(f),
 Container: v1.Container{
 Name: name,
 Image: testCase.image,
@@ -118,7 +118,7 @@ var _ = SIGDescribe("Kubelet PodOverhead handling [LinuxOnly]", func() {
 framework.ExpectNoError(err, "failed to create RuntimeClass resource")
 })
 ginkgo.By("Creating a Guaranteed pod with which has Overhead defined", func() {
-guaranteedPod = f.PodClient().CreateSync(&v1.Pod{
+guaranteedPod = e2epod.NewPodClient(f).CreateSync(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 GenerateName: "pod-with-overhead-",
 Namespace: f.Namespace.Name,
@@ -140,7 +140,7 @@ var _ = SIGDescribe("Kubelet PodOverhead handling [LinuxOnly]", func() {
 ginkgo.By("Checking if the pod cgroup was created appropriately", func() {
 cgroupsToVerify := []string{"pod" + podUID}
 pod := makePodToVerifyCgroupSize(cgroupsToVerify, "30000", "251658240")
-pod = f.PodClient().Create(pod)
+pod = e2epod.NewPodClient(f).Create(pod)
 err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 framework.ExpectNoError(err)
 })
@@ -27,6 +27,7 @@ import (
 "k8s.io/apimachinery/pkg/util/uuid"
 kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 "k8s.io/kubernetes/test/e2e/framework"
+e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 admissionapi "k8s.io/pod-security-admission/api"
 )

@@ -60,12 +61,12 @@ var _ = SIGDescribe("SeccompDefault [Serial] [Feature:SeccompDefault] [LinuxOnly

 ginkgo.It("should use the default seccomp profile when unspecified", func() {
 pod := newPod(nil)
-f.TestContainerOutput("SeccompDefault", pod, 0, []string{"2"})
+e2eoutput.TestContainerOutput(f, "SeccompDefault", pod, 0, []string{"2"})
 })

 ginkgo.It("should use unconfined when specified", func() {
 pod := newPod(&v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}})
-f.TestContainerOutput("SeccompDefault-unconfined", pod, 0, []string{"0"})
+e2eoutput.TestContainerOutput(f, "SeccompDefault-unconfined", pod, 0, []string{"0"})
 })
 })
 })
@@ -37,15 +37,15 @@ import (
 var _ = SIGDescribe("Security Context", func() {
 f := framework.NewDefaultFramework("security-context-test")
 f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
-var podClient *framework.PodClient
+var podClient *e2epod.PodClient
 ginkgo.BeforeEach(func() {
-podClient = f.PodClient()
+podClient = e2epod.NewPodClient(f)
 })

 ginkgo.Context("[NodeConformance][LinuxOnly] Container PID namespace sharing", func() {
 ginkgo.It("containers in pods using isolated PID namespaces should all receive PID 1", func() {
 ginkgo.By("Create a pod with isolated PID namespaces.")
-f.PodClient().CreateSync(&v1.Pod{
+e2epod.NewPodClient(f).CreateSync(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{Name: "isolated-pid-ns-test-pod"},
 Spec: v1.PodSpec{
 Containers: []v1.Container{
@@ -65,8 +65,8 @@ var _ = SIGDescribe("Security Context", func() {
 })

 ginkgo.By("Check if both containers receive PID 1.")
-pid1 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
-pid2 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-2", "/bin/pidof", "sleep")
+pid1 := e2epod.ExecCommandInContainer(f, "isolated-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
+pid2 := e2epod.ExecCommandInContainer(f, "isolated-pid-ns-test-pod", "test-container-2", "/bin/pidof", "sleep")
 if pid1 != "1" || pid2 != "1" {
 framework.Failf("PIDs of different containers are not all 1: test-container-1=%v, test-container-2=%v", pid1, pid2)
 }
@@ -74,7 +74,7 @@ var _ = SIGDescribe("Security Context", func() {

 ginkgo.It("processes in containers sharing a pod namespace should be able to see each other", func() {
 ginkgo.By("Create a pod with shared PID namespace.")
-f.PodClient().CreateSync(&v1.Pod{
+e2epod.NewPodClient(f).CreateSync(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"},
 Spec: v1.PodSpec{
 ShareProcessNamespace: &[]bool{true}[0],
@@ -95,8 +95,8 @@ var _ = SIGDescribe("Security Context", func() {
 })

 ginkgo.By("Check if the process in one container is visible to the process in the other.")
-pid1 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
-pid2 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top")
+pid1 := e2epod.ExecCommandInContainer(f, "shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
+pid2 := e2epod.ExecCommandInContainer(f, "shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top")
 if pid1 != pid2 {
 framework.Failf("PIDs are not the same in different containers: test-container-1=%v, test-container-2=%v", pid1, pid2)
 }
@@ -141,7 +141,7 @@ var _ = SIGDescribe("Security Context", func() {
 true,
 ))

-output := f.ExecShellInContainer(nginxPodName, nginxPodName,
+output := e2epod.ExecShellInContainer(f, nginxPodName, nginxPodName,
 "cat /var/run/nginx.pid")
 nginxPid = strings.TrimSpace(output)
 })
@@ -28,6 +28,7 @@ import (
 kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
 "k8s.io/kubernetes/test/e2e/framework"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
+e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 admissionapi "k8s.io/pod-security-admission/api"

@@ -59,7 +60,7 @@ var _ = SIGDescribe("Summary API [NodeConformance]", func() {
 ginkgo.By("Creating test pods")
 numRestarts := int32(1)
 pods := getSummaryTestPods(f, numRestarts, pod0, pod1)
-f.PodClient().CreateBatch(pods)
+e2epod.NewPodClient(f).CreateBatch(pods)

 ginkgo.By("restarting the containers to ensure container metrics are still being gathered after a container is restarted")
 gomega.Eventually(func() error {
@@ -21,7 +21,7 @@ import (
 "os"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/apimachinery/pkg/util/uuid"
 kubeapi "k8s.io/kubernetes/pkg/apis/core"
@@ -402,7 +402,7 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr
 podName := fmt.Sprintf("gu-pod-%d", podID)
 framework.Logf("creating pod %s attrs %v", podName, ctnAttrs)
 pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs)
-pod = f.PodClient().CreateSync(pod)
+pod = e2epod.NewPodClient(f).CreateSync(pod)
 framework.Logf("created pod %s", podName)
 podMap[podName] = pod
 }
@@ -444,7 +444,7 @@ func runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAtt
 framework.Logf("creating pod %s attrs %v", podName, ctnAttrs)
 pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs)

-pod = f.PodClient().Create(pod)
+pod = e2epod.NewPodClient(f).Create(pod)
 err := e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "Failed", 30*time.Second, func(pod *v1.Pod) (bool, error) {
 if pod.Status.Phase != v1.PodPending {
 return true, nil
@@ -452,7 +452,7 @@ func runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAtt
 return false, nil
 })
 framework.ExpectNoError(err)
-pod, err = f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{})
+pod, err = e2epod.NewPodClient(f).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)

 if pod.Status.Phase != v1.PodFailed {
@@ -20,7 +20,7 @@ import (
 "context"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -44,7 +44,7 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() {
 )
 ginkgo.By("Creating a pod with a memory backed volume that exits success without restart", func() {
 volumeName = "memory-volume"
-memoryBackedPod = f.PodClient().Create(&v1.Pod{
+memoryBackedPod = e2epod.NewPodClient(f).Create(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: "pod" + string(uuid.NewUUID()),
 Namespace: f.Namespace.Name,
@@ -83,7 +83,7 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() {
 for i := 0; i < 10; i++ {
 // need to create a new verification pod on each pass since updates
 //to the HostPath volume aren't propogated to the pod
-pod := f.PodClient().Create(&v1.Pod{
+pod := e2epod.NewPodClient(f).Create(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: "pod" + string(uuid.NewUUID()),
 Namespace: f.Namespace.Name,
@@ -117,7 +117,7 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() {
 })
 err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 gp := int64(1)
-f.PodClient().Delete(context.TODO(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
+e2epod.NewPodClient(f).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 if err == nil {
 break
 }