Merge pull request #123587 from huww98/e2e-image-global

test/e2e: do not use global variable for image
Kubernetes Prow Robot, 2024-04-22 07:11:16 -07:00 (committed by GitHub)
commit b38cce123d
30 changed files with 81 additions and 104 deletions
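
The change is mechanical across the 30 files below: package-level image variables (framework.BusyBoxImage, framework.ServeHostnameImage, NetexecImageName, and per-file globals such as pauseImage or busyboxImage) are deleted, and every call site resolves the image through the shared registry helper at the point of use. A minimal sketch of the resulting pattern, for orientation only; busyBoxContainer is a hypothetical helper, not code from this commit:

// Sketch of the pattern applied throughout this PR: look the image up via the
// registry helper where it is used, instead of caching it in a package-level
// global that is evaluated during package init.
package example

import (
    v1 "k8s.io/api/core/v1"
    imageutils "k8s.io/kubernetes/test/utils/image"
)

// busyBoxContainer (hypothetical) builds a container spec, resolving the
// BusyBox e2e image at call time.
func busyBoxContainer(name string) v1.Container {
    return v1.Container{
        Name:    name,
        Image:   imageutils.GetE2EImage(imageutils.BusyBox),
        Command: []string{"sh", "-c", "sleep 240"},
    }
}

Resolving the image lazily means the lookup runs after test configuration (for example a custom registry or repo list) has been processed, rather than being frozen at package initialization; the commit message only states the mechanical change, so treat that motivation as an inference.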

View File

@@ -84,7 +84,7 @@ func etcdFailTest(ctx context.Context, f *framework.Framework, failCommand, fixC
     checkExistingRCRecovers(ctx, f)
-    apps.TestReplicationControllerServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage)
+    apps.TestReplicationControllerServeImageOrFail(ctx, f, "basic", imageutils.GetE2EImage(imageutils.Agnhost))
 }
 // For this duration, etcd will be failed by executing a failCommand on the master.

View File

@@ -68,7 +68,7 @@ var _ = SIGDescribe("ReplicationController", func() {
     Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP.
     */
     framework.ConformanceIt("should serve a basic image on each replica with a public image", func(ctx context.Context) {
-        TestReplicationControllerServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage)
+        TestReplicationControllerServeImageOrFail(ctx, f, "basic", imageutils.GetE2EImage(imageutils.Agnhost))
     })
     ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {

View File

@@ -110,7 +110,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
     Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried.
     */
     framework.ConformanceIt("should serve a basic image on each replica with a public image", func(ctx context.Context) {
-        testReplicaSetServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage)
+        testReplicaSetServeImageOrFail(ctx, f, "basic", imageutils.GetE2EImage(imageutils.Agnhost))
     })
     ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {

View File

@@ -27,6 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/kubernetes/test/e2e/framework"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+    imageutils "k8s.io/kubernetes/test/utils/image"
     admissionapi "k8s.io/pod-security-admission/api"
     "github.com/onsi/ginkgo/v2"
@@ -58,7 +59,7 @@ var _ = SIGDescribe("Kubelet", func() {
 RestartPolicy: v1.RestartPolicyNever,
 Containers: []v1.Container{
     {
-        Image: framework.BusyBoxImage,
+        Image: imageutils.GetE2EImage(imageutils.BusyBox),
         Name: podName,
         Command: []string{"sh", "-c", "echo 'Hello World' ; sleep 240"},
     },
@@ -92,7 +93,7 @@ var _ = SIGDescribe("Kubelet", func() {
 RestartPolicy: v1.RestartPolicyNever,
 Containers: []v1.Container{
     {
-        Image: framework.BusyBoxImage,
+        Image: imageutils.GetE2EImage(imageutils.BusyBox),
         Name: podName,
         Command: []string{"/bin/false"},
     },
@@ -191,7 +192,7 @@ var _ = SIGDescribe("Kubelet", func() {
 RestartPolicy: v1.RestartPolicyNever,
 Containers: []v1.Container{
     {
-        Image: framework.BusyBoxImage,
+        Image: imageutils.GetE2EImage(imageutils.BusyBox),
        Name: podName,
        Command: []string{"/bin/sh", "-c", "echo test > /file; sleep 240"},
        SecurityContext: &v1.SecurityContext{

View File

@@ -455,7 +455,7 @@ var _ = SIGDescribe("Pods", func() {
 Containers: []v1.Container{
     {
         Name: "srv",
-        Image: framework.ServeHostnameImage,
+        Image: imageutils.GetE2EImage(imageutils.Agnhost),
         Ports: []v1.ContainerPort{{ContainerPort: 9376}},
     },
 },

View File

@@ -53,7 +53,7 @@ var _ = SIGDescribe("Container Runtime", func() {
 restartCountVolumeName := "restart-count"
 restartCountVolumePath := "/restart-count"
 testContainer := v1.Container{
-    Image: framework.BusyBoxImage,
+    Image: imageutils.GetE2EImage(imageutils.BusyBox),
     VolumeMounts: []v1.VolumeMount{
         {
             MountPath: restartCountVolumePath,
@@ -173,7 +173,7 @@ while true; do sleep 1; done
 f.It("should report termination message if TerminationMessagePath is set", f.WithNodeConformance(), func(ctx context.Context) {
     container := v1.Container{
-        Image: framework.BusyBoxImage,
+        Image: imageutils.GetE2EImage(imageutils.BusyBox),
         Command: []string{"/bin/sh", "-c"},
         Args: []string{"/bin/echo -n DONE > /dev/termination-log"},
         TerminationMessagePath: "/dev/termination-log",
@@ -194,7 +194,7 @@ while true; do sleep 1; done
 */
 framework.ConformanceIt("should report termination message if TerminationMessagePath is set as non-root user and at a non-default path", f.WithNodeConformance(), func(ctx context.Context) {
     container := v1.Container{
-        Image: framework.BusyBoxImage,
+        Image: imageutils.GetE2EImage(imageutils.BusyBox),
         Command: []string{"/bin/sh", "-c"},
         Args: []string{"/bin/echo -n DONE > /dev/termination-custom-log"},
         TerminationMessagePath: "/dev/termination-custom-log",
@@ -215,7 +215,7 @@ while true; do sleep 1; done
 */
 framework.ConformanceIt("should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set", f.WithNodeConformance(), func(ctx context.Context) {
     container := v1.Container{
-        Image: framework.BusyBoxImage,
+        Image: imageutils.GetE2EImage(imageutils.BusyBox),
         Command: []string{"/bin/sh", "-c"},
         Args: []string{"/bin/echo -n DONE; /bin/false"},
         TerminationMessagePath: "/dev/termination-log",
@@ -231,7 +231,7 @@ while true; do sleep 1; done
 */
 framework.ConformanceIt("should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set", f.WithNodeConformance(), func(ctx context.Context) {
     container := v1.Container{
-        Image: framework.BusyBoxImage,
+        Image: imageutils.GetE2EImage(imageutils.BusyBox),
         Command: []string{"/bin/sh", "-c"},
         Args: []string{"/bin/echo -n DONE; /bin/true"},
         TerminationMessagePath: "/dev/termination-log",
@@ -247,7 +247,7 @@ while true; do sleep 1; done
 */
 framework.ConformanceIt("should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set", f.WithNodeConformance(), func(ctx context.Context) {
     container := v1.Container{
-        Image: framework.BusyBoxImage,
+        Image: imageutils.GetE2EImage(imageutils.BusyBox),
         Command: []string{"/bin/sh", "-c"},
         Args: []string{"/bin/echo -n OK > /dev/termination-log; /bin/echo DONE; /bin/true"},
         TerminationMessagePath: "/dev/termination-log",

View File

@@ -333,7 +333,7 @@ var _ = SIGDescribe("Security Context", func() {
 createAndWaitUserPod := func(ctx context.Context, userid int64) {
     podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID())
     podClient.Create(ctx, makeUserPod(podName,
-        framework.BusyBoxImage,
+        imageutils.GetE2EImage(imageutils.BusyBox),
         []string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)},
         userid,
     ))
@@ -454,7 +454,7 @@ var _ = SIGDescribe("Security Context", func() {
 createAndWaitUserPod := func(ctx context.Context, readOnlyRootFilesystem bool) string {
     podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID())
     podClient.Create(ctx, makeUserPod(podName,
-        framework.BusyBoxImage,
+        imageutils.GetE2EImage(imageutils.BusyBox),
         []string{"sh", "-c", "touch checkfile"},
         readOnlyRootFilesystem,
     ))
@@ -515,7 +515,7 @@ var _ = SIGDescribe("Security Context", func() {
 createAndWaitUserPod := func(ctx context.Context, privileged bool) string {
     podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID())
     podClient.Create(ctx, makeUserPod(podName,
-        framework.BusyBoxImage,
+        imageutils.GetE2EImage(imageutils.BusyBox),
         []string{"sh", "-c", "ip link add dummy0 type dummy || true"},
         privileged,
     ))

View File

@@ -154,7 +154,7 @@ func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePe
     }
     return c.CoreV1().ReplicationControllers(ns).Create(context.TODO(), rcByNamePort(
-        name, replicas, framework.ServeHostnameImage, containerArgs, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod), metav1.CreateOptions{})
+        name, replicas, imageutils.GetE2EImage(imageutils.Agnhost), containerArgs, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod), metav1.CreateOptions{})
 }
 // RestartNodes restarts specific nodes.

View File

@@ -76,10 +76,6 @@ const (
     crdNamePlural = "testcrds"
 )
-var (
-    resourceConsumerImage = imageutils.GetE2EImage(imageutils.ResourceConsumer)
-)
 var (
     // KindRC is the GVK for ReplicationController
     KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"}
@@ -144,7 +140,7 @@ func NewDynamicResourceConsumer(ctx context.Context, name, nsName string, kind s
 func getSidecarContainer(name string, cpuLimit, memLimit int64) v1.Container {
     container := v1.Container{
         Name: name + "-sidecar",
-        Image: resourceConsumerImage,
+        Image: imageutils.GetE2EImage(imageutils.ResourceConsumer),
         Command: []string{"/consumer", "-port=8081"},
         Ports: []v1.ContainerPort{{ContainerPort: 80}},
     }
@@ -628,7 +624,7 @@ func runServiceAndWorkloadForResourceConsumer(ctx context.Context, c clientset.I
 rcConfig := testutils.RCConfig{
     Client: c,
-    Image: resourceConsumerImage,
+    Image: imageutils.GetE2EImage(imageutils.ResourceConsumer),
     Name: name,
     Namespace: ns,
     Timeout: timeoutRC,

View File

@@ -71,7 +71,7 @@ func NewTestJobOnNode(behavior, name string, rPol v1.RestartPolicy, parallelism,
 Containers: []v1.Container{
     {
         Name: "c",
-        Image: framework.BusyBoxImage,
+        Image: imageutils.GetE2EImage(imageutils.BusyBox),
         Command: []string{},
         VolumeMounts: []v1.VolumeMount{
             {

View File

@@ -90,9 +90,6 @@ const (
     echoHostname = "hostname"
 )
-// NetexecImageName is the image name for agnhost.
-var NetexecImageName = imageutils.GetE2EImage(imageutils.Agnhost)
 // Option is used to configure the NetworkingTest object
 type Option func(*NetworkingTestConfig)
@@ -587,7 +584,7 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName, hostname stri
 Containers: []v1.Container{
     {
         Name: "webserver",
-        Image: NetexecImageName,
+        Image: imageutils.GetE2EImage(imageutils.Agnhost),
         ImagePullPolicy: v1.PullIfNotPresent,
         Args: netexecArgs,
         Ports: []v1.ContainerPort{
@@ -657,7 +654,7 @@ func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
 Containers: []v1.Container{
     {
         Name: "webserver",
-        Image: NetexecImageName,
+        Image: imageutils.GetE2EImage(imageutils.Agnhost),
         ImagePullPolicy: v1.PullIfNotPresent,
         Args: []string{
             "netexec",

View File

@@ -1046,7 +1046,7 @@ func (j *TestJig) CreateServicePods(ctx context.Context, replica int) error {
 config := testutils.RCConfig{
     Client: j.Client,
     Name: j.Name,
-    Image: framework.ServeHostnameImage,
+    Image: imageutils.GetE2EImage(imageutils.Agnhost),
     Command: []string{"/agnhost", "serve-hostname", "--http=false", "--tcp", "--udp"},
     Namespace: j.Namespace,
     Labels: j.Labels,

View File

@@ -53,7 +53,6 @@ import (
     "k8s.io/client-go/tools/clientcmd"
     clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
     watchtools "k8s.io/client-go/tools/watch"
-    imageutils "k8s.io/kubernetes/test/utils/image"
     netutils "k8s.io/utils/net"
 )
@@ -132,14 +131,8 @@ const (
 )
 var (
-    // BusyBoxImage is the image URI of BusyBox.
-    BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
     // ProvidersWithSSH are those providers where each node is accessible with SSH
     ProvidersWithSSH = []string{"gce", "gke", "aws", "local", "azure"}
-    // ServeHostnameImage is a serve hostname image name.
-    ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost)
 )
 // RunID is a unique identifier of the e2e run.

View File

@@ -114,11 +114,6 @@ func unknownFieldMetadataJSON(gvk schema.GroupVersionKind, name string) string {
 }
 var (
-    nautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
-    httpdImage = imageutils.GetE2EImage(imageutils.Httpd)
-    busyboxImage = imageutils.GetE2EImage(imageutils.BusyBox)
-    agnhostImage = imageutils.GetE2EImage(imageutils.Agnhost)
     // If this suite still flakes due to timeouts we should change this to framework.PodStartTimeout
     podRunningTimeoutArg = fmt.Sprintf("--pod-running-timeout=%s", framework.PodStartShortTimeout.String())
 )
@@ -344,7 +339,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 ginkgo.By("creating a replication controller")
 e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
-validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
+validateController(ctx, c, imageutils.GetE2EImage(imageutils.Nautilus), 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 })
 /*
@@ -354,6 +349,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 */
 framework.ConformanceIt("should scale a replication controller", func(ctx context.Context) {
     defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
+    nautilusImage := imageutils.GetE2EImage(imageutils.Nautilus)
     ginkgo.By("creating a replication controller")
     e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
@@ -695,12 +691,12 @@ metadata:
 ginkgo.Describe("Kubectl run", func() {
     ginkgo.It("running a successful command", func(ctx context.Context) {
-        _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec()
+        _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec()
         framework.ExpectNoError(err)
     })
     ginkgo.It("running a failing command", func(ctx context.Context) {
-        _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
+        _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
         ee, ok := err.(uexec.ExitError)
         if !ok {
             framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
@@ -709,7 +705,7 @@ metadata:
 })
 f.It(f.WithSlow(), "running a failing command without --restart=Never", func(ctx context.Context) {
-    _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
+    _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
         WithStdinData("abcd1234").
         Exec()
     ee, ok := err.(uexec.ExitError)
@@ -722,7 +718,7 @@ metadata:
 })
 f.It(f.WithSlow(), "running a failing command without --restart=Never, but with --rm", func(ctx context.Context) {
-    _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
+    _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
         WithStdinData("abcd1234").
         Exec()
     ee, ok := err.(uexec.ExitError)
@@ -736,7 +732,7 @@ metadata:
 })
 f.It(f.WithSlow(), "running a failing command with --leave-stdin-open", func(ctx context.Context) {
-    _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
+    _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
         WithStdinData("abcd1234").
         Exec()
     framework.ExpectNoError(err)
@@ -757,7 +753,7 @@ metadata:
 ginkgo.By("executing a command with run and attach with stdin")
 // We wait for a non-empty line so we know kubectl has attached
-e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
     WithStdinData("value\nabcd1234").
     ExecOrDie(ns)
@@ -774,7 +770,7 @@ metadata:
 // "stdin closed", but hasn't exited yet.
 // We wait 10 seconds before printing to give time to kubectl to attach
 // to the container, this does not solve the race though.
-e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
    WithStdinData("abcd1234").
    ExecOrDie(ns)
@@ -785,7 +781,7 @@ metadata:
 framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test-2", metav1.DeleteOptions{}))
 ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
-e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
    WithStdinData("abcd1234\n").
    ExecOrDie(ns)
@@ -814,7 +810,7 @@ metadata:
 ginkgo.By("executing a command with run and attach with stdin")
 // We wait for a non-empty line so we know kubectl has attached
-e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
    WithStdinData("value\nabcd1234").
    ExecOrDie(ns)
@@ -831,7 +827,7 @@ metadata:
 // "stdin closed", but hasn't exited yet.
 // We wait 10 seconds before printing to give time to kubectl to attach
 // to the container, this does not solve the race though.
-e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
    WithStdinData("abcd1234").
    ExecOrDie(ns)
@@ -842,7 +838,7 @@ metadata:
 framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test-2", metav1.DeleteOptions{}))
 ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
-e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
    WithStdinData("abcd1234\n").
    ExecOrDie(ns)
@@ -862,7 +858,7 @@ metadata:
 podName := "run-log-test"
 ginkgo.By("executing a command with run")
-e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
+e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
 if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, []string{podName}, framework.PodStartTimeout) {
     framework.Failf("Pod for run-log-test was not ready")
@@ -1051,15 +1047,15 @@ metadata:
 e2ekubectl.RunKubectlOrDieInput(ns, deployment, "create", "-f", "-")
 ginkgo.By("verify diff finds difference between live and declared image")
-deployment = strings.Replace(deployment, httpdImage, busyboxImage, 1)
-if !strings.Contains(deployment, busyboxImage) {
-    framework.Failf("Failed replacing image from %s to %s in:\n%s\n", httpdImage, busyboxImage, deployment)
+deployment = strings.Replace(deployment, imageutils.GetE2EImage(imageutils.Httpd), imageutils.GetE2EImage(imageutils.BusyBox), 1)
+if !strings.Contains(deployment, imageutils.GetE2EImage(imageutils.BusyBox)) {
+    framework.Failf("Failed replacing image from %s to %s in:\n%s\n", imageutils.GetE2EImage(imageutils.Httpd), imageutils.GetE2EImage(imageutils.BusyBox), deployment)
 }
 output, err := e2ekubectl.RunKubectlInput(ns, deployment, "diff", "-f", "-")
 if err, ok := err.(*exec.ExitError); ok && err.ExitCode() == 1 {
     framework.Failf("Expected kubectl diff exit code of 1, but got %d: %v\n", err.ExitCode(), err)
 }
-requiredItems := []string{httpdImage, busyboxImage}
+requiredItems := []string{imageutils.GetE2EImage(imageutils.Httpd), imageutils.GetE2EImage(imageutils.BusyBox)}
 for _, item := range requiredItems {
     if !strings.Contains(output, item) {
         framework.Failf("Missing %s in kubectl diff output:\n%s\n%v\n", item, output, err)
@@ -1077,12 +1073,13 @@ metadata:
 Description: The command 'kubectl run' must create a pod with the specified image name. After, the command 'kubectl patch pod -p {...} --dry-run=server' should update the Pod with the new image name and server-side dry-run enabled. The image name must not change.
 */
 framework.ConformanceIt("should check if kubectl can dry-run update Pods", func(ctx context.Context) {
+    httpdImage := imageutils.GetE2EImage(imageutils.Httpd)
     ginkgo.By("running the image " + httpdImage)
     podName := "e2e-test-httpd-pod"
     e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
     ginkgo.By("replace the image in the pod with server-side dry-run")
-    specImage := fmt.Sprintf(`{"spec":{"containers":[{"name": "%s","image": "%s"}]}}`, podName, busyboxImage)
+    specImage := fmt.Sprintf(`{"spec":{"containers":[{"name": "%s","image": "%s"}]}}`, podName, imageutils.GetE2EImage(imageutils.BusyBox))
     e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", podName, "-p", specImage, "--dry-run=server")
     ginkgo.By("verifying the pod " + podName + " has the right image " + httpdImage)
@@ -1414,7 +1411,7 @@ metadata:
 {"Status:", "Running"},
 {"IP:"},
 {"Controlled By:", "ReplicationController/agnhost-primary"},
-{"Image:", agnhostImage},
+{"Image:", imageutils.GetE2EImage(imageutils.Agnhost)},
 {"State:", "Running"},
 {"QoS Class:", "BestEffort"},
 }
@@ -1432,7 +1429,7 @@ metadata:
 {"Replicas:", "1 current", "1 desired"},
 {"Pods Status:", "1 Running", "0 Waiting", "0 Succeeded", "0 Failed"},
 {"Pod Template:"},
-{"Image:", agnhostImage},
+{"Image:", imageutils.GetE2EImage(imageutils.Agnhost)},
 {"Events:"}}
 checkKubectlOutputWithRetry(ns, requiredStrings, "describe", "rc", "agnhost-primary")
@@ -1757,6 +1754,7 @@ metadata:
 Description: Command 'kubectl run' MUST create a pod, when a image name is specified in the run command. After the run command there SHOULD be a pod that should exist with one container running the specified image.
 */
 framework.ConformanceIt("should create a pod from an image when restart is Never", func(ctx context.Context) {
+    httpdImage := imageutils.GetE2EImage(imageutils.Httpd)
     ginkgo.By("running the image " + httpdImage)
     e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage)
     ginkgo.By("verifying the pod " + podName + " was created")
@@ -1791,6 +1789,7 @@ metadata:
 Description: Command 'kubectl replace' on a existing Pod with a new spec MUST update the image of the container running in the Pod. A -f option to 'kubectl replace' SHOULD force to re-create the resource. The new Pod SHOULD have the container with new change to the image.
 */
 framework.ConformanceIt("should update a single-container pod's image", func(ctx context.Context) {
+    httpdImage := imageutils.GetE2EImage(imageutils.Httpd)
     ginkgo.By("running the image " + httpdImage)
     e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
@@ -1808,6 +1807,7 @@ metadata:
 }
 ginkgo.By("replace the image in the pod")
+busyboxImage := imageutils.GetE2EImage(imageutils.BusyBox)
 podJSON = strings.Replace(podJSON, httpdImage, busyboxImage, 1)
 e2ekubectl.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-")
@@ -1991,6 +1991,7 @@ metadata:
 ginkgo.Describe("Kubectl events", func() {
 ginkgo.It("should show event when pod is created", func(ctx context.Context) {
     podName := "e2e-test-httpd-pod"
+    httpdImage := imageutils.GetE2EImage(imageutils.Httpd)
     ginkgo.By("running the image " + httpdImage)
     e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)

View File

@@ -33,6 +33,7 @@ import (
     e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
+    imageutils "k8s.io/kubernetes/test/utils/image"
     admissionapi "k8s.io/pod-security-admission/api"
     "github.com/onsi/ginkgo/v2"
@@ -55,12 +56,12 @@ func testingPod(name, value, defaultContainerName string) v1.Pod {
 Containers: []v1.Container{
     {
         Name: "container-1",
-        Image: agnhostImage,
+        Image: imageutils.GetE2EImage(imageutils.Agnhost),
         Args: []string{"logs-generator", "--log-lines-total", "10", "--run-duration", "5s"},
     },
     {
         Name: defaultContainerName,
-        Image: agnhostImage,
+        Image: imageutils.GetE2EImage(imageutils.Agnhost),
         Args: []string{"logs-generator", "--log-lines-total", "20", "--run-duration", "5s"},
     },
 },
@@ -94,7 +95,7 @@ var _ = SIGDescribe("Kubectl logs", func() {
 ginkgo.BeforeEach(func() {
     ginkgo.By("creating an pod")
     // Agnhost image generates logs for a total of 100 lines over 20s.
-    e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+agnhostImage, "--restart=Never", podRunningTimeoutArg, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s")
+    e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+imageutils.GetE2EImage(imageutils.Agnhost), "--restart=Never", podRunningTimeoutArg, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s")
 })
 ginkgo.AfterEach(func() {
     e2ekubectl.RunKubectlOrDie(ns, "delete", "pod", podName)

View File

@@ -281,7 +281,7 @@ func StartServeHostnameService(ctx context.Context, c clientset.Interface, svc *
 maxContainerFailures := 0
 config := testutils.RCConfig{
     Client: c,
-    Image: framework.ServeHostnameImage,
+    Image: imageutils.GetE2EImage(imageutils.Agnhost),
     Command: []string{"/agnhost", "serve-hostname"},
     Name: name,
     Namespace: ns,

View File

@@ -29,6 +29,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+    imageutils "k8s.io/kubernetes/test/utils/image"
     admissionapi "k8s.io/pod-security-admission/api"
     "github.com/onsi/ginkgo/v2"
@@ -58,7 +59,7 @@ var _ = SIGDescribe("Events", func() {
 Containers: []v1.Container{
     {
         Name: "p",
-        Image: framework.ServeHostnameImage,
+        Image: imageutils.GetE2EImage(imageutils.Agnhost),
         Args: []string{"serve-hostname"},
         Ports: []v1.ContainerPort{{ContainerPort: 80}},
     },

View File

@@ -39,10 +39,6 @@ import (
     _ "github.com/stretchr/testify/assert"
 )
-var (
-    pauseImage = imageutils.GetE2EImage(imageutils.Pause)
-)
 const (
     testFinalizer = "example.com/test-finalizer"
 )
@@ -73,7 +69,7 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName,
 Containers: []v1.Container{
     {
         Name: "pause",
-        Image: pauseImage,
+        Image: imageutils.GetE2EImage(imageutils.Pause),
     },
 },
 },
@@ -92,7 +88,7 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName,
 Containers: []v1.Container{
     {
         Name: "pause",
-        Image: pauseImage,
+        Image: imageutils.GetE2EImage(imageutils.Pause),
     },
 },
 Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute}},
@@ -111,7 +107,7 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName,
 Containers: []v1.Container{
     {
         Name: "pause",
-        Image: pauseImage,
+        Image: imageutils.GetE2EImage(imageutils.Pause),
     },
 },
 // default - tolerate forever

View File

@@ -73,7 +73,7 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
 })
 f.It("should spread the pods of a replication controller across zones", f.WithSerial(), func(ctx context.Context) {
-    SpreadRCOrFail(ctx, f, int32(5*zoneCount), zoneNames, framework.ServeHostnameImage, []string{"serve-hostname"})
+    SpreadRCOrFail(ctx, f, int32(5*zoneCount), zoneNames, imageutils.GetE2EImage(imageutils.Agnhost), []string{"serve-hostname"})
 })
 })

View File

@@ -633,7 +633,7 @@ func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.Vol
 Containers: []v1.Container{
     {
         Name: "volume-tester",
-        Image: framework.BusyBoxImage,
+        Image: imageutils.GetE2EImage(imageutils.BusyBox),
         VolumeMounts: []v1.VolumeMount{
             {
                 Name: "my-volume",

View File

@@ -40,8 +40,6 @@ import (
 )
 var (
-    // BusyBoxImage is the image URI of BusyBox.
-    BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
     durationForStuckMount = 110 * time.Second
 )
@@ -211,7 +209,7 @@ func getFlexVolumePod(volumeSource v1.VolumeSource, nodeName string) *v1.Pod {
 Containers: []v1.Container{
     {
         Name: "flexvolume-detach-test" + "-client",
-        Image: BusyBoxImage,
+        Image: imageutils.GetE2EImage(imageutils.BusyBox),
         WorkingDir: "/opt",
         // An imperative and easily debuggable container which reads vol contents for
         // us to scan in the tests or by eye.

View File

@@ -54,7 +54,6 @@ var (
     probeFilePath = probeVolumePath + "/probe-file"
     fileName = "test-file"
     retryDuration = 20
-    mountImage = imageutils.GetE2EImage(imageutils.Agnhost)
 )
 type subPathTestSuite struct {
@@ -578,8 +577,8 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
 }
 func containerIsUnused(container *v1.Container) bool {
-    // mountImage with nil command and nil Args or with just "mounttest" as Args does nothing. Leave everything else
-    return container.Image == mountImage && container.Command == nil &&
+    // agnhost image with nil command and nil Args or with just "mounttest" as Args does nothing. Leave everything else
+    return container.Image == imageutils.GetE2EImage(imageutils.Agnhost) && container.Command == nil &&
         (container.Args == nil || (len(container.Args) == 1 && container.Args[0] == "mounttest"))
 }

View File

@@ -33,6 +33,7 @@ import (
     e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
     "k8s.io/kubernetes/test/e2e/storage/utils"
+    imageutils "k8s.io/kubernetes/test/utils/image"
     admissionapi "k8s.io/pod-security-admission/api"
 )
@@ -41,7 +42,7 @@ var _ = utils.SIGDescribe("Multi-AZ Cluster Volumes", func() {
 f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 var zoneCount int
 var err error
-image := framework.ServeHostnameImage
+image := imageutils.GetE2EImage(imageutils.Agnhost)
 ginkgo.BeforeEach(func(ctx context.Context) {
     e2eskipper.SkipUnlessProviderIs("gce", "gke")
     if zoneCount <= 0 {

View File

@@ -28,6 +28,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
     "k8s.io/kubernetes/test/e2e/upgrades"
+    imageutils "k8s.io/kubernetes/test/utils/image"
 )
 // DaemonSetUpgradeTest tests that a DaemonSet is running before and after
@@ -43,7 +44,7 @@ func (DaemonSetUpgradeTest) Name() string { return "[sig-apps] daemonset-upgrade
 func (t *DaemonSetUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
     daemonSetName := "ds1"
     labelSet := map[string]string{"ds-name": daemonSetName}
-    image := framework.ServeHostnameImage
+    image := imageutils.GetE2EImage(imageutils.Agnhost)
     ns := f.Namespace

View File

@@ -40,11 +40,6 @@ const (
     windowsOS = "windows"
 )
-var (
-    windowsBusyBoximage = imageutils.GetE2EImage(imageutils.Agnhost)
-    linuxBusyBoxImage = imageutils.GetE2EImage(imageutils.Nginx)
-)
 var _ = sigDescribe("Hybrid cluster network", skipUnlessWindows(func() {
     f := framework.NewDefaultFramework("hybrid-network")
     f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
@@ -57,11 +52,11 @@ var _ = sigDescribe("Hybrid cluster network", skipUnlessWindows(func() {
 ginkgo.It("should have stable networking for Linux and Windows pods", func(ctx context.Context) {
-    linuxPod := createTestPod(f, linuxBusyBoxImage, linuxOS)
+    linuxPod := createTestPod(f, imageutils.GetE2EImage(imageutils.Nginx), linuxOS)
     ginkgo.By("creating a linux pod and waiting for it to be running")
     linuxPod = e2epod.NewPodClient(f).CreateSync(ctx, linuxPod)
-    windowsPod := createTestPod(f, windowsBusyBoximage, windowsOS)
+    windowsPod := createTestPod(f, imageutils.GetE2EImage(imageutils.Agnhost), windowsOS)
     windowsPod.Spec.Containers[0].Args = []string{"test-webserver"}
     ginkgo.By("creating a windows pod and waiting for it to be running")
@@ -78,7 +73,7 @@ var _ = sigDescribe("Hybrid cluster network", skipUnlessWindows(func() {
 })
 f.It("should provide Internet connection for Linux containers", feature.NetworkingIPv4, func(ctx context.Context) {
-    linuxPod := createTestPod(f, linuxBusyBoxImage, linuxOS)
+    linuxPod := createTestPod(f, imageutils.GetE2EImage(imageutils.Nginx), linuxOS)
     ginkgo.By("creating a linux pod and waiting for it to be running")
     linuxPod = e2epod.NewPodClient(f).CreateSync(ctx, linuxPod)
@@ -89,7 +84,7 @@ var _ = sigDescribe("Hybrid cluster network", skipUnlessWindows(func() {
 })
 f.It("should provide Internet connection and DNS for Windows containers", feature.NetworkingIPv4, feature.NetworkingDNS, func(ctx context.Context) {
-    windowsPod := createTestPod(f, windowsBusyBoximage, windowsOS)
+    windowsPod := createTestPod(f, imageutils.GetE2EImage(imageutils.Agnhost), windowsOS)
     ginkgo.By("creating a windows pod and waiting for it to be running")
     windowsPod = e2epod.NewPodClient(f).CreateSync(ctx, windowsPod)

View File

@@ -139,7 +139,7 @@ var _ = sigDescribe(feature.Windows, "SecurityContext", skipUnlessWindows(func()
 // pod object to not have those security contexts. However the pod coming to running state is a sufficient
 // enough condition for us to validate since prior to https://github.com/kubernetes/kubernetes/pull/93475
 // the pod would have failed to come up.
-windowsPodWithSELinux := createTestPod(f, windowsBusyBoximage, windowsOS)
+windowsPodWithSELinux := createTestPod(f, imageutils.GetE2EImage(imageutils.Agnhost), windowsOS)
 windowsPodWithSELinux.Spec.Containers[0].Args = []string{"test-webserver-with-selinux"}
 windowsPodWithSELinux.Spec.SecurityContext = &v1.PodSecurityContext{}
 containerUserName := "ContainerAdministrator"

View File

@@ -30,6 +30,7 @@ import (
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
+    imageutils "k8s.io/kubernetes/test/utils/image"
     admissionapi "k8s.io/pod-security-admission/api"
     "github.com/onsi/ginkgo/v2"
@@ -75,7 +76,7 @@ var _ = sigDescribe("Services", skipUnlessWindows(func() {
 //using hybrid_network methods
 ginkgo.By("creating Windows testing Pod")
-testPod := createTestPod(f, windowsBusyBoximage, windowsOS)
+testPod := createTestPod(f, imageutils.GetE2EImage(imageutils.Agnhost), windowsOS)
 testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)
 ginkgo.By("verifying that pod has the correct nodeSelector")

View File

@@ -41,10 +41,6 @@ const (
     volumeName = "test-volume"
 )
-var (
-    image = imageutils.GetE2EImage(imageutils.Pause)
-)
 var _ = sigDescribe(feature.Windows, "Windows volume mounts", skipUnlessWindows(func() {
     f := framework.NewDefaultFramework("windows-volumes")
     f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
@@ -123,7 +119,7 @@ func doReadWriteReadOnlyTest(ctx context.Context, f *framework.Framework, source
 rwcontainer := v1.Container{
     Name: containerName + "-rw",
-    Image: image,
+    Image: imageutils.GetE2EImage(imageutils.Pause),
     VolumeMounts: []v1.VolumeMount{
         {
             Name: volumeName,
@@ -171,7 +167,7 @@ func testPodWithROVolume(podName string, source v1.VolumeSource, path string) *v
 Containers: []v1.Container{
     {
         Name: containerName,
-        Image: image,
+        Image: imageutils.GetE2EImage(imageutils.Pause),
         VolumeMounts: []v1.VolumeMount{
             {
                 Name: volumeName,

View File

@@ -118,7 +118,7 @@ var _ = SIGDescribe("Container Manager Misc", framework.WithSerial(), func() {
 Spec: v1.PodSpec{
     Containers: []v1.Container{
         {
-            Image: framework.ServeHostnameImage,
+            Image: imageutils.GetE2EImage(imageutils.Agnhost),
            Name: podName,
        },
    },

View File

@@ -39,8 +39,8 @@ import (
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/clientcmd"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
-    e2e "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/framework/service"
+    imageutils "k8s.io/kubernetes/test/utils/image"
     "k8s.io/klog/v2"
 )
@@ -205,7 +205,7 @@ func main() {
 Containers: []v1.Container{
     {
         Name: "serve-hostname",
-        Image: e2e.ServeHostnameImage,
+        Image: imageutils.GetE2EImage(imageutils.Agnhost),
         Ports: []v1.ContainerPort{{ContainerPort: 9376}},
     },
}, },