tests: Replaces images used with agnhost (part 3)

Quite a few images are used only a few times, in only a handful of tests. Those
images are therefore being consolidated into the agnhost image, reducing the
number of distinct images that the tests have to pull.

This PR replaces the usage of the following images with agnhost (the common before/after pattern is sketched below):

- audit-proxy
- crd-conversion-webhook
- entrypoint-tester
- inclusterclient
- iperf
- porter
- serve-hostname
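
In every file the change is the same mechanical swap: keep the container definition, replace the dedicated image with agnhost, and select the old image's behavior with a subcommand passed via Args (or via a Command such as "/agnhost serve-hostname" where a testutils.RCConfig builds the pod). A minimal before/after sketch of the pattern, shown here with illustrative helper functions that are not part of the change:

package example

import (
	v1 "k8s.io/api/core/v1"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// Before: a dedicated, single-purpose test image.
func porterContainerBefore() v1.Container {
	return v1.Container{
		Name: "porter",
		// imageutils.Porter exists only before this PR; the constant is removed in the diff below.
		Image: imageutils.GetE2EImage(imageutils.Porter),
	}
}

// After: the shared agnhost image, with the behavior selected by the "porter" subcommand.
func porterContainerAfter() v1.Container {
	return v1.Container{
		Name:  "porter",
		Image: imageutils.GetE2EImage(imageutils.Agnhost),
		Args:  []string{"porter"},
	}
}

Tests that build pods through testutils.RCConfig (the proxy and service framework changes below) use the equivalent Command form instead, e.g. []string{"/agnhost", "porter"} or []string{"/agnhost", "serve-hostname"}.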
Author: Claudiu Belu, 2019-05-20 19:40:25 -07:00
parent a78ae4ba74
commit c752ea8134
22 changed files with 74 additions and 75 deletions


@ -134,7 +134,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook", func() {
context = setupServerCert(f.Namespace.Name, serviceCRDName)
createAuthReaderRoleBindingForCRDConversion(f, f.Namespace.Name)
deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.CRDConversionWebhook), context)
deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.Agnhost), context)
})
ginkgo.AfterEach(func() {
@ -269,11 +269,11 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
Name: "sample-crd-conversion-webhook",
VolumeMounts: mounts,
Args: []string{
"crd-conversion-webhook",
"--tls-cert-file=/webhook.local.config/certificates/tls.crt",
"--tls-private-key-file=/webhook.local.config/certificates/tls.key",
"--alsologtostderr",
"-v=4",
"2>&1",
},
Image: image,
},


@ -184,7 +184,8 @@ func newTablePod(podName string) *v1.Pod {
Containers: []v1.Container{
{
Name: containerName,
Image: imageutils.GetE2EImage(imageutils.Porter),
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: []string{"porter"},
Env: []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
},


@ -266,7 +266,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
rsRevision := "3546343826724305832"
annotations := make(map[string]string)
annotations[deploymentutil.RevisionAnnotation] = rsRevision
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage)
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
rs.Annotations = annotations
e2elog.Logf("Creating replica set %q (going to be adopted)", rs.Name)
_, err := c.AppsV1().ReplicaSets(ns).Create(rs)
@ -346,7 +346,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
rsName := "test-cleanup-controller"
replicas := int32(1)
revisionHistoryLimit := utilpointer.Int32Ptr(0)
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage))
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil))
framework.ExpectNoError(err)
// Verify that the required pods have come up.
@ -417,7 +417,7 @@ func testRolloverDeployment(f *framework.Framework) {
rsName := "test-rollover-controller"
rsReplicas := int32(1)
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage))
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil))
framework.ExpectNoError(err)
// Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, podName, false, rsReplicas)


@ -87,6 +87,7 @@ func podOnNode(podName, nodeName string, image string) *v1.Pod {
{
Name: podName,
Image: image,
Args: []string{"serve-hostname"},
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},


@ -52,7 +52,7 @@ var _ = SIGDescribe("ReplicationController", func() {
ginkgo.It("should serve a basic image on each replica with a private image", func() {
// requires private images
framework.SkipUnlessProviderIs("gce", "gke")
privateimage := imageutils.GetConfig(imageutils.ServeHostname)
privateimage := imageutils.GetConfig(imageutils.Agnhost)
privateimage.SetRegistry(imageutils.PrivateRegistry)
TestReplicationControllerServeImageOrFail(f, "private", privateimage.GetE2EImage())
})
@ -85,7 +85,7 @@ var _ = SIGDescribe("ReplicationController", func() {
})
})
func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageName string, image string) *v1.ReplicationController {
func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageName string, image string, args []string) *v1.ReplicationController {
zero := int64(0)
return &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
@ -103,6 +103,7 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageNa
{
Name: imageName,
Image: image,
Args: args,
},
},
},
@ -123,7 +124,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
// The source for the Docker container kubernetes/serve_hostname is
// in contrib/for-demos/serve_hostname
ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC)
framework.ExpectNoError(err)
@ -200,7 +201,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
rc := newRC(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage)
rc := newRC(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil)
rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc)
framework.ExpectNoError(err)
@ -278,7 +279,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
ginkgo.By("When a replication controller with a matching selector is created")
replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
framework.ExpectNoError(err)
@ -307,7 +308,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
name := "pod-release"
ginkgo.By("Given a ReplicationController is created")
replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
framework.ExpectNoError(err)


@ -38,7 +38,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
)
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *appsv1.ReplicaSet {
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string, args []string) *appsv1.ReplicaSet {
zero := int64(0)
return &appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
@ -60,6 +60,7 @@ func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageNa
{
Name: imageName,
Image: image,
Args: args,
},
},
},
@ -96,7 +97,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
ginkgo.It("should serve a basic image on each replica with a private image", func() {
// requires private images
framework.SkipUnlessProviderIs("gce", "gke")
privateimage := imageutils.GetConfig(imageutils.ServeHostname)
privateimage := imageutils.GetConfig(imageutils.Agnhost)
privateimage.SetRegistry(imageutils.PrivateRegistry)
testReplicaSetServeImageOrFail(f, "private", privateimage.GetE2EImage())
})
@ -125,7 +126,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
// The source for the Docker container kubernetes/serve_hostname is
// in contrib/for-demos/serve_hostname
e2elog.Logf("Creating ReplicaSet %s", name)
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS)
framework.ExpectNoError(err)
@ -202,7 +203,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
rs := newRS(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage)
rs := newRS(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil)
rs, err = c.AppsV1().ReplicaSets(namespace).Create(rs)
framework.ExpectNoError(err)
@ -281,7 +282,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
ginkgo.By("When a replicaset with a matching selector is created")
replicas := int32(1)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rsSt)
framework.ExpectNoError(err)


@ -77,7 +77,8 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
Containers: []v1.Container{
{
Name: "proxy",
Image: imageutils.GetE2EImage(imageutils.AuditProxy),
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: []string{"audit-proxy"},
Ports: []v1.ContainerPort{
{
ContainerPort: 8080,


@ -41,7 +41,6 @@ import (
)
var mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
var inClusterClientImage = imageutils.GetE2EImage(imageutils.InClusterClient)
var _ = SIGDescribe("ServiceAccounts", func() {
f := framework.NewDefaultFramework("svcaccounts")
@ -436,7 +435,8 @@ var _ = SIGDescribe("ServiceAccounts", func() {
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "inclusterclient",
Image: inClusterClientImage,
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: []string{"inclusterclient"},
VolumeMounts: []v1.VolumeMount{{
MountPath: "/var/run/secrets/kubernetes.io/serviceaccount",
Name: "kube-api-access-e2e",


@ -17,10 +17,13 @@ limitations under the License.
package common
import (
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -33,9 +36,17 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
Description: Default command and arguments from the docker image entrypoint MUST be used when Pod does not specify the container command
*/
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() {
f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
"[/ep default arguments]",
})
pod := f.PodClient().Create(entrypointTestPod())
err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Expected pod %q to be running, got error: %v", pod.Name, err)
pollLogs := func() (string, error) {
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}
// The agnhost image's default entrypoint / args are: "/agnhost pause"
// which will print out "Paused".
gomega.Eventually(pollLogs, 3, framework.Poll).Should(gomega.ContainSubstring("Paused"))
})
/*
@ -45,10 +56,10 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
*/
framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) [NodeConformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Args = []string{"override", "arguments"}
pod.Spec.Containers[0].Args = []string{"entrypoint-tester", "override", "arguments"}
f.TestContainerOutput("override arguments", pod, 0, []string{
"[/ep override arguments]",
"[/agnhost entrypoint-tester override arguments]",
})
})
@ -61,10 +72,10 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
*/
framework.ConformanceIt("should be able to override the image's default command (docker entrypoint) [NodeConformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}
pod.Spec.Containers[0].Command = []string{"/agnhost-2", "entrypoint-tester"}
f.TestContainerOutput("override command", pod, 0, []string{
"[/ep-2]",
"[/agnhost-2 entrypoint-tester]",
})
})
@ -75,11 +86,11 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
*/
framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}
pod.Spec.Containers[0].Args = []string{"override", "arguments"}
pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
pod.Spec.Containers[0].Args = []string{"entrypoint-tester", "override", "arguments"}
f.TestContainerOutput("override all", pod, 0, []string{
"[/ep-2 override arguments]",
"[/agnhost-2 entrypoint-tester override arguments]",
})
})
})
@ -99,7 +110,7 @@ func entrypointTestPod() *v1.Pod {
Containers: []v1.Container{
{
Name: testContainerName,
Image: imageutils.GetE2EImage(imageutils.EntrypointTester),
Image: imageutils.GetE2EImage(imageutils.Agnhost),
},
},
RestartPolicy: v1.RestartPolicyNever,


@ -54,14 +54,11 @@ var CurrentSuite Suite
// TODO(random-liu): Change the image puller pod to use similar mechanism.
var CommonImageWhiteList = sets.NewString(
imageutils.GetE2EImage(imageutils.Agnhost),
imageutils.GetE2EImage(imageutils.AuditProxy),
imageutils.GetE2EImage(imageutils.BusyBox),
imageutils.GetE2EImage(imageutils.EntrypointTester),
imageutils.GetE2EImage(imageutils.IpcUtils),
imageutils.GetE2EImage(imageutils.Mounttest),
imageutils.GetE2EImage(imageutils.MounttestUser),
imageutils.GetE2EImage(imageutils.Nginx),
imageutils.GetE2EImage(imageutils.ServeHostname),
imageutils.GetE2EImage(imageutils.TestWebserver),
imageutils.GetE2EImage(imageutils.VolumeNFSServer),
imageutils.GetE2EImage(imageutils.VolumeGlusterServer),


@ -1236,6 +1236,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string
config := testutils.RCConfig{
Client: c,
Image: ServeHostnameImage,
Command: []string{"/agnhost", "serve-hostname"},
Name: name,
Namespace: ns,
PollInterval: 3 * time.Second,


@ -219,7 +219,7 @@ var (
}
// ServeHostnameImage is a serve hostname image name.
ServeHostnameImage = imageutils.GetE2EImage(imageutils.ServeHostname)
ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost)
)
// GetServicesProxyRequest returns a request for a service proxy.


@ -204,7 +204,7 @@ const (
addonNsName = metav1.NamespaceSystem
)
var serveHostnameImage = imageutils.GetE2EImage(imageutils.ServeHostname)
var serveHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost)
type stringPair struct {
data, fileName string


@ -640,7 +640,8 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
// Build the containers for the server pod.
containers = append(containers, v1.Container{
Name: fmt.Sprintf("%s-container-%d", podName, port),
Image: imageutils.GetE2EImage(imageutils.Porter),
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: []string{"porter"},
Env: []v1.EnvVar{
{
Name: fmt.Sprintf("SERVE_PORT_%d", port),


@ -69,7 +69,7 @@ func networkingIPerfTest(isIPv6 bool) {
return v1.PodSpec{
Containers: []v1.Container{{
Name: "iperf-server",
Image: imageutils.GetE2EImage(imageutils.Iperf),
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: []string{
"/bin/sh",
"-c",
@ -97,7 +97,7 @@ func networkingIPerfTest(isIPv6 bool) {
Containers: []v1.Container{
{
Name: "iperf-client",
Image: imageutils.GetE2EImage(imageutils.Iperf),
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: []string{
"/bin/sh",
"-c",


@ -125,7 +125,8 @@ var _ = SIGDescribe("Proxy", func() {
pods := []*v1.Pod{}
cfg := testutils.RCConfig{
Client: f.ClientSet,
Image: imageutils.GetE2EImage(imageutils.Porter),
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Command: []string{"/agnhost", "porter"},
Name: service.Name,
Namespace: f.Namespace.Name,
Replicas: 1,


@ -61,6 +61,7 @@ var _ = SIGDescribe("Events", func() {
{
Name: "p",
Image: framework.ServeHostnameImage,
Args: []string{"serve-hostname"},
Ports: []v1.ContainerPort{{ContainerPort: 80}},
},
},


@ -176,24 +176,25 @@ var _ = SIGDescribe("Load capacity", func() {
quotas bool
}
serveHostnameCmd := []string{"/agnhost", "serve-hostname"}
loadTests := []Load{
// The container will consume 1 cpu and 512mb of memory.
{podsPerNode: 3, image: "jess/stress", command: []string{"stress", "-c", "1", "-m", "2"}, kind: api.Kind("ReplicationController")},
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: api.Kind("ReplicationController")},
{podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: api.Kind("ReplicationController")},
// Tests for other resource types
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: extensions.Kind("Deployment")},
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: batch.Kind("Job")},
{podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: extensions.Kind("Deployment")},
{podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: batch.Kind("Job")},
// Test scheduling when daemons are preset
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
{podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
// Test with secrets
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
{podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
// Test with configmaps
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: extensions.Kind("Deployment"), configMapsPerPod: 2},
{podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: extensions.Kind("Deployment"), configMapsPerPod: 2},
// Special test case which randomizes created resources
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: randomKind},
{podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: randomKind},
// Test with quotas
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: api.Kind("ReplicationController"), quotas: true},
{podsPerNode: 30, image: framework.ServeHostnameImage, kind: randomKind, quotas: true},
{podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: api.Kind("ReplicationController"), quotas: true},
{podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: randomKind, quotas: true},
}
isCanonical := func(test *Load) bool {


@ -56,7 +56,7 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
})
ginkgo.It("should spread the pods of a replication controller across zones", func() {
SpreadRCOrFail(f, int32((2*zoneCount)+1), image)
SpreadRCOrFail(f, int32((2*zoneCount)+1), image, []string{"serve-hostname"})
})
})
@ -177,7 +177,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str
// SpreadRCOrFail Check that the pods comprising a replication
// controller get spread evenly across available zones
func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, args []string) {
name := "ubelite-spread-rc-" + string(uuid.NewUUID())
ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
@ -199,6 +199,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
{
Name: name,
Image: image,
Args: args,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},


@ -68,6 +68,7 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
{
Name: daemonSetName,
Image: image,
Args: []string{"serve-hostname"},
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
SecurityContext: &v1.SecurityContext{},
},


@ -44,13 +44,13 @@ const (
// NodeImageWhiteList is a list of images used in node e2e test. These images will be prepulled
// before test running so that the image pulling won't fail in actual test.
var NodeImageWhiteList = sets.NewString(
imageutils.GetE2EImage(imageutils.Agnhost),
"google/cadvisor:latest",
"k8s.gcr.io/stress:v1",
busyboxImage,
"k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff",
imageutils.GetE2EImage(imageutils.Nginx),
imageutils.GetE2EImage(imageutils.Perl),
imageutils.GetE2EImage(imageutils.ServeHostname),
imageutils.GetE2EImage(imageutils.Nonewprivs),
imageutils.GetPauseImageName(),
gpu.GetGPUDevicePluginImage(),


@ -103,18 +103,14 @@ var (
)
const (
// CRDConversionWebhook image
CRDConversionWebhook = iota
// Agnhost image
Agnhost
Agnhost = iota
// Alpine image
Alpine
// APIServer image
APIServer
// AppArmorLoader image
AppArmorLoader
// AuditProxy image
AuditProxy
// AuthenticatedAlpine image
AuthenticatedAlpine
// AuthenticatedWindowsNanoServer image
@ -133,8 +129,6 @@ const (
DebianBase
// EchoServer image
EchoServer
// EntrypointTester image
EntrypointTester
// Etcd image
Etcd
// GBFrontend image
@ -145,16 +139,12 @@ const (
Httpd
// HttpdNew image
HttpdNew
// InClusterClient image
InClusterClient
// Invalid image
Invalid
// InvalidRegistryImage image
InvalidRegistryImage
// IpcUtils image
IpcUtils
// Iperf image
Iperf
// JessieDnsutils image
JessieDnsutils
// Kitten image
@ -178,8 +168,6 @@ const (
Pause
// Perl image
Perl
// Porter image
Porter
// PrometheusDummyExporter image
PrometheusDummyExporter
// PrometheusToSd image
@ -192,8 +180,6 @@ const (
ResourceController
// SdDummyExporter image
SdDummyExporter
// ServeHostname image
ServeHostname
// StartupScript image
StartupScript
// TestWebserver image
@ -212,13 +198,11 @@ const (
func initImageConfigs() map[int]Config {
configs := map[int]Config{}
configs[CRDConversionWebhook] = Config{e2eRegistry, "crd-conversion-webhook", "1.13rev2"}
configs[Agnhost] = Config{e2eRegistry, "agnhost", "2.1"}
configs[Agnhost] = Config{e2eRegistry, "agnhost", "2.2"}
configs[Alpine] = Config{dockerLibraryRegistry, "alpine", "3.7"}
configs[AuthenticatedAlpine] = Config{gcAuthenticatedRegistry, "alpine", "3.7"}
configs[APIServer] = Config{e2eRegistry, "sample-apiserver", "1.10"}
configs[AppArmorLoader] = Config{e2eRegistry, "apparmor-loader", "1.0"}
configs[AuditProxy] = Config{e2eRegistry, "audit-proxy", "1.0"}
configs[BusyBox] = Config{dockerLibraryRegistry, "busybox", "1.29"}
configs[CheckMetadataConcealment] = Config{e2eRegistry, "metadata-concealment", "1.2"}
configs[CudaVectorAdd] = Config{e2eRegistry, "cuda-vector-add", "1.0"}
@ -226,17 +210,14 @@ func initImageConfigs() map[int]Config {
configs[Dnsutils] = Config{e2eRegistry, "dnsutils", "1.1"}
configs[DebianBase] = Config{googleContainerRegistry, "debian-base", "0.4.1"}
configs[EchoServer] = Config{e2eRegistry, "echoserver", "2.2"}
configs[EntrypointTester] = Config{e2eRegistry, "entrypoint-tester", "1.0"}
configs[Etcd] = Config{gcRegistry, "etcd", "3.3.10"}
configs[GBFrontend] = Config{sampleRegistry, "gb-frontend", "v6"}
configs[GBRedisSlave] = Config{sampleRegistry, "gb-redisslave", "v3"}
configs[Httpd] = Config{dockerLibraryRegistry, "httpd", "2.4.38-alpine"}
configs[HttpdNew] = Config{dockerLibraryRegistry, "httpd", "2.4.39-alpine"}
configs[InClusterClient] = Config{e2eRegistry, "inclusterclient", "1.0"}
configs[Invalid] = Config{gcRegistry, "invalid-image", "invalid-tag"}
configs[InvalidRegistryImage] = Config{invalidRegistry, "alpine", "3.1"}
configs[IpcUtils] = Config{e2eRegistry, "ipc-utils", "1.0"}
configs[Iperf] = Config{e2eRegistry, "iperf", "1.0"}
configs[JessieDnsutils] = Config{e2eRegistry, "jessie-dnsutils", "1.0"}
configs[Kitten] = Config{e2eRegistry, "kitten", "1.0"}
configs[Mounttest] = Config{e2eRegistry, "mounttest", "1.0"}
@ -249,14 +230,12 @@ func initImageConfigs() map[int]Config {
// Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
configs[Pause] = Config{gcRegistry, "pause", "3.1"}
configs[Perl] = Config{dockerLibraryRegistry, "perl", "5.26"}
configs[Porter] = Config{e2eRegistry, "porter", "1.0"}
configs[PrometheusDummyExporter] = Config{e2eRegistry, "prometheus-dummy-exporter", "v0.1.0"}
configs[PrometheusToSd] = Config{e2eRegistry, "prometheus-to-sd", "v0.5.0"}
configs[Redis] = Config{e2eRegistry, "redis", "1.0"}
configs[ResourceConsumer] = Config{e2eRegistry, "resource-consumer", "1.5"}
configs[ResourceController] = Config{e2eRegistry, "resource-consumer-controller", "1.0"}
configs[SdDummyExporter] = Config{gcRegistry, "sd-dummy-exporter", "v0.2.0"}
configs[ServeHostname] = Config{e2eRegistry, "serve-hostname", "1.1"}
configs[StartupScript] = Config{googleContainerRegistry, "startup-script", "v1"}
configs[TestWebserver] = Config{e2eRegistry, "test-webserver", "1.0"}
configs[VolumeNFSServer] = Config{e2eRegistry, "volume/nfs", "1.0"}