Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-07 03:03:59 +00:00
Fix "should be able to create and delete a critical pod" test

The namespace the critical pod was referring to was wrong: it used the
generated per-test namespace instead of `kube-system`. This, and the
resulting test condition, is now fixed. For now the test seems to run only
in `ci-crio-cgroupv1-node-e2e-flaky`.

Closes https://github.com/kubernetes/kubernetes/issues/109296

Signed-off-by: Sascha Grunert <sgrunert@redhat.com>
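The root cause is easy to reproduce: the client created the pod in `kube-system`, but the wait helpers resolved their namespace from the framework's generated per-test namespace, so lookups landed in the wrong place and polled until timeout. A minimal, runnable sketch of that failure mode against client-go's fake clientset (the pod and namespace names here are illustrative, not taken from the test):

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// One pod living in kube-system, standing in for the critical pod.
	client := fake.NewSimpleClientset(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "critical-pod", Namespace: "kube-system"},
	})
	ctx := context.Background()

	// Pre-fix lookup: the waits used f.Namespace.Name, the generated test namespace.
	_, err := client.CoreV1().Pods("critical-pod-test-1234").Get(ctx, "critical-pod", metav1.GetOptions{})
	fmt.Println("generated namespace:", err) // NotFound, so the wait polls until it times out

	// Post-fix lookup: the waits use the namespace the client created the pod in.
	p, err := client.CoreV1().Pods("kube-system").Get(ctx, "critical-pod", metav1.GetOptions{})
	fmt.Println("kube-system:", p.Name, err) // found immediately
}
```

The diff below fixes this by storing the target namespace on `PodClient` at construction time and using it in every helper.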
commit bcbc12cd79 (parent 0a82bdbfdb)
@@ -70,6 +70,7 @@ func NewPodClient(f *framework.Framework) *PodClient {
 	return &PodClient{
 		f:            f,
 		PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name),
+		namespace:    f.Namespace.Name,
 	}
 }
 
@@ -80,6 +81,7 @@ func PodClientNS(f *framework.Framework, namespace string) *PodClient {
 	return &PodClient{
 		f:            f,
 		PodInterface: f.ClientSet.CoreV1().Pods(namespace),
+		namespace:    namespace,
 	}
 }
 
@@ -87,6 +89,7 @@ func PodClientNS(f *framework.Framework, namespace string) *PodClient {
 type PodClient struct {
 	f *framework.Framework
 	v1core.PodInterface
+	namespace string
 }
 
 // Create creates a new pod according to the framework specifications (don't wait for it to start).
@@ -99,9 +102,8 @@ func (c *PodClient) Create(ctx context.Context, pod *v1.Pod) *v1.Pod {
 
 // CreateSync creates a new pod according to the framework specifications, and wait for it to start and be running and ready.
 func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod {
-	namespace := c.f.Namespace.Name
 	p := c.Create(ctx, pod)
-	framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(ctx, c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout))
+	framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(ctx, c.f.ClientSet, p.Name, c.namespace, framework.PodStartTimeout))
 	// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
 	p, err := c.Get(ctx, p.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err)
@@ -149,8 +151,6 @@ func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *v1.Pod)) {
 
 // AddEphemeralContainerSync adds an EphemeralContainer to a pod and waits for it to be running.
 func (c *PodClient) AddEphemeralContainerSync(ctx context.Context, pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error {
-	namespace := c.f.Namespace.Name
-
 	podJS, err := json.Marshal(pod)
 	framework.ExpectNoError(err, "error creating JSON for pod %q", FormatPod(pod))
 
@@ -167,7 +167,7 @@ func (c *PodClient) AddEphemeralContainerSync(ctx context.Context, pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error {
 		return err
 	}
 
-	framework.ExpectNoError(WaitForContainerRunning(ctx, c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
+	framework.ExpectNoError(WaitForContainerRunning(ctx, c.f.ClientSet, c.namespace, pod.Name, ec.Name, timeout))
 	return nil
 }
 
@@ -185,12 +185,11 @@ func FormatPod(pod *v1.Pod) string {
 // DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't
 // disappear before the timeout, it will fail the test.
 func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.DeleteOptions, timeout time.Duration) {
-	namespace := c.f.Namespace.Name
 	err := c.Delete(ctx, name, options)
 	if err != nil && !apierrors.IsNotFound(err) {
 		framework.Failf("Failed to delete pod %q: %v", name, err)
 	}
-	framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, name, namespace, timeout), "wait for pod %q to disappear", name)
+	framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, name, c.namespace, timeout), "wait for pod %q to disappear", name)
 }
 
 // mungeSpec apply test-suite specific transformations to the pod spec.
@@ -232,8 +231,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
 // WaitForSuccess waits for pod to succeed.
 // TODO(random-liu): Move pod wait function into this file
 func (c *PodClient) WaitForSuccess(ctx context.Context, name string, timeout time.Duration) {
-	f := c.f
-	gomega.Expect(WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
+	gomega.Expect(WaitForPodCondition(ctx, c.f.ClientSet, c.namespace, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
 		func(pod *v1.Pod) (bool, error) {
 			switch pod.Status.Phase {
 			case v1.PodFailed:
@@ -249,8 +247,7 @@ func (c *PodClient) WaitForSuccess(ctx context.Context, name string, timeout time.Duration) {
 
 // WaitForFinish waits for pod to finish running, regardless of success or failure.
 func (c *PodClient) WaitForFinish(ctx context.Context, name string, timeout time.Duration) {
-	f := c.f
-	gomega.Expect(WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
+	gomega.Expect(WaitForPodCondition(ctx, c.f.ClientSet, c.namespace, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
 		func(pod *v1.Pod) (bool, error) {
 			switch pod.Status.Phase {
 			case v1.PodFailed:
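With the namespace resolved once in the constructors, `NewPodClient(f)` is now just `PodClientNS(f, f.Namespace.Name)`, and no helper can wait in a different namespace than the one the pod was created in. The remaining hunks update the critical-pod node e2e test itself: the `[Flaky]` tag is dropped and the expected phase for preempted pods is corrected. For orientation, a hedged sketch of how a caller uses the fixed client end to end; `PodClientNS`, `CreateSync`, and `DeleteSync` are the helpers from the diff above, while `criticalPodSpec` and the timeout are placeholders:

```go
// A sketch, not the test's actual code: create a critical pod in kube-system
// and tear it down, relying on the helpers patched above to wait in the
// namespace the pod actually lives in.
pc := PodClientNS(f, "kube-system")
p := pc.CreateSync(ctx, criticalPodSpec)                        // waits in "kube-system", not f.Namespace.Name
pc.DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, time.Minute) // illustrative timeout
```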
@@ -46,7 +46,7 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() {
 	f := framework.NewDefaultFramework("critical-pod-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	ginkgo.Context("when we need to admit a critical pod", func() {
-		ginkgo.It("[Flaky] should be able to create and delete a critical pod", func(ctx context.Context) {
+		ginkgo.It("should be able to create and delete a critical pod", func(ctx context.Context) {
 			// because adminssion Priority enable, If the priority class is not found, the Pod is rejected.
 			node := getNodeName(ctx, f)
 			// Define test pods
@@ -84,7 +84,7 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() {
 			if p.Name == nonCriticalBestEffort.Name {
 				framework.ExpectEqual(p.Status.Phase, v1.PodRunning, fmt.Sprintf("pod: %v should not be preempted with status: %#v", p.Name, p.Status))
 			} else {
-				framework.ExpectEqual(p.Status.Phase, v1.PodFailed, fmt.Sprintf("pod: %v should be preempted with status: %#v", p.Name, p.Status))
+				framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded, fmt.Sprintf("pod: %v should be preempted with status: %#v", p.Name, p.Status))
 			}
 		}
 	})