Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-31 07:20:13 +00:00)
Merge pull request #27342 from Random-Liu/add-image-pulling-node-e2e
Automatic merge from submit-queue

Add image pulling node e2e

Fixes #27007. Based on #27309; will rebase after #27309 gets merged.

This PR adds all the tests mentioned in #27007:
* Pull an image from an invalid registry;
* Pull an invalid image from gcr.io;
* Pull an image from gcr.io;
* Pull an image from Docker Hub;
* Pull an image that needs auth, with and without an image pull secret.

For the imagePullSecrets test, I created a new gcloud project "authenticated-image-pulling", and the service account used in the code only has the "Storage Object Viewer" permission.

/cc @pwittrock @vishh
This commit is contained in commit 364239a294.
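For background on the mechanism the secret-based pull test exercises: a kubernetes.io/dockerconfigjson secret is created in the test namespace and referenced from the pod spec's imagePullSecrets, so the kubelet authenticates to the registry when pulling the image. The following is a minimal, illustrative sketch of that wiring using the same API types that appear in the diff below; the helper name createPodWithPullSecret and its literal values are assumptions for illustration, not code from this PR.

package e2e_node

import (
	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// createPodWithPullSecret is an illustrative helper (not part of this PR): it stores
// registry credentials in a dockerconfigjson secret and references that secret from
// the pod spec, which is the same wiring the new image pulling tests rely on.
func createPodWithPullSecret(c *client.Client, ns, podName, image, dockerConfigJSON string) error {
	secret := &api.Secret{
		ObjectMeta: api.ObjectMeta{Name: podName + "-pull-secret"},
		Type:       api.SecretTypeDockerConfigJson,
		Data:       map[string][]byte{api.DockerConfigJsonKey: []byte(dockerConfigJSON)},
	}
	if _, err := c.Secrets(ns).Create(secret); err != nil {
		return err
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: podName, Namespace: ns},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Name: podName, Image: image, ImagePullPolicy: api.PullAlways},
			},
			// The kubelet uses these credentials when it pulls the container image.
			ImagePullSecrets: []api.LocalObjectReference{{Name: secret.Name}},
			RestartPolicy:    api.RestartPolicyNever,
		},
	}
	_, err := c.Pods(ns).Create(pod)
	return err
}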
@@ -17,58 +17,33 @@ limitations under the License.
 package e2e_node
 
 import (
-	"errors"
 	"fmt"
 
 	"k8s.io/kubernetes/pkg/api"
-	apierrs "k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/api/errors"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/util"
-
-	"github.com/onsi/gomega/format"
-	"github.com/onsi/gomega/types"
 )
 
 // One pod one container
 type ConformanceContainer struct {
-	Container     api.Container
-	Client        *client.Client
-	RestartPolicy api.RestartPolicy
-	Volumes       []api.Volume
-	NodeName      string
-	Namespace     string
+	Container        api.Container
+	Client           *client.Client
+	RestartPolicy    api.RestartPolicy
+	Volumes          []api.Volume
+	ImagePullSecrets []string
+	NodeName         string
+	Namespace        string
 
 	podName string
 }
 
-type ConformanceContainerEqualMatcher struct {
-	Expected interface{}
-}
-
-func CContainerEqual(expected interface{}) types.GomegaMatcher {
-	return &ConformanceContainerEqualMatcher{
-		Expected: expected,
-	}
-}
-
-func (matcher *ConformanceContainerEqualMatcher) Match(actual interface{}) (bool, error) {
-	if actual == nil && matcher.Expected == nil {
-		return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
-	}
-	val := api.Semantic.DeepDerivative(matcher.Expected, actual)
-	return val, nil
-}
-
-func (matcher *ConformanceContainerEqualMatcher) FailureMessage(actual interface{}) (message string) {
-	return format.Message(actual, "to equal", matcher.Expected)
-}
-
-func (matcher *ConformanceContainerEqualMatcher) NegatedFailureMessage(actual interface{}) (message string) {
-	return format.Message(actual, "not to equal", matcher.Expected)
-}
-
 func (cc *ConformanceContainer) Create() error {
 	cc.podName = cc.Container.Name + string(util.NewUUID())
+	imagePullSecrets := []api.LocalObjectReference{}
+	for _, s := range cc.ImagePullSecrets {
+		imagePullSecrets = append(imagePullSecrets, api.LocalObjectReference{Name: s})
+	}
 	pod := &api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name: cc.podName,
@@ -80,7 +55,8 @@ func (cc *ConformanceContainer) Create() error {
 			Containers: []api.Container{
 				cc.Container,
 			},
-			Volumes: cc.Volumes,
+			Volumes:          cc.Volumes,
+			ImagePullSecrets: imagePullSecrets,
 		},
 	}
 
@@ -88,26 +64,8 @@ func (cc *ConformanceContainer) Create() error {
 	return err
 }
 
-//Same with 'delete'
-func (cc *ConformanceContainer) Stop() error {
-	return cc.Client.Pods(cc.Namespace).Delete(cc.podName, &api.DeleteOptions{})
-}
-
 func (cc *ConformanceContainer) Delete() error {
-	return cc.Client.Pods(cc.Namespace).Delete(cc.podName, &api.DeleteOptions{})
-}
-
-func (cc *ConformanceContainer) Get() (ConformanceContainer, error) {
-	pod, err := cc.Client.Pods(cc.Namespace).Get(cc.podName)
-	if err != nil {
-		return ConformanceContainer{}, err
-	}
-
-	containers := pod.Spec.Containers
-	if containers == nil || len(containers) != 1 {
-		return ConformanceContainer{}, errors.New("Failed to get container")
-	}
-	return ConformanceContainer{containers[0], cc.Client, pod.Spec.RestartPolicy, pod.Spec.Volumes, pod.Spec.NodeName, cc.Namespace, cc.podName}, nil
+	return cc.Client.Pods(cc.Namespace).Delete(cc.podName, api.NewDeleteOptions(0))
 }
 
 func (cc *ConformanceContainer) IsReady() (bool, error) {
@@ -143,7 +101,7 @@ func (cc *ConformanceContainer) Present() (bool, error) {
 	if err == nil {
 		return true, nil
 	}
-	if apierrs.IsNotFound(err) {
+	if errors.IsNotFound(err) {
 		return false, nil
 	}
 	return false, err
@@ -38,8 +38,10 @@ const (
 	pauseImage
 
 	// Images just used for explicitly testing pulling of images
-	pullTestExecHealthz
+	pullTestAlpine
 	pullTestAlpineWithBash
+	pullTestAuthenticatedAlpine
+	pullTestExecHealthz
 )
 
 var ImageRegistry = map[int]string{
@@ -51,9 +53,11 @@ var ImageRegistry = map[int]string{
 }
 
 // These are used by tests that explicitly test the ability to pull images
-var NoPullImagRegistry = map[int]string{
-	pullTestAlpineWithBash: "gcr.io/google_containers/alpine-with-bash:1.0",
-	pullTestExecHealthz:    "gcr.io/google_containers/exechealthz:1.0",
+var NoPullImageRegistry = map[int]string{
+	pullTestExecHealthz:         "gcr.io/google_containers/exechealthz:1.0",
+	pullTestAlpine:              "alpine:3.1",
+	pullTestAlpineWithBash:      "gcr.io/google_containers/alpine-with-bash:1.0",
+	pullTestAuthenticatedAlpine: "gcr.io/authenticated-image-pulling/alpine:3.1",
 }
 
 // Pre-fetch all images tests depend on so that we don't fail in an actual test
@@ -21,23 +21,16 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
-	apierrs "k8s.io/kubernetes/pkg/api/errors"
-	"k8s.io/kubernetes/pkg/client/restclient"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 )
 
-var _ = Describe("Kubelet Container Manager", func() {
-	var cl *client.Client
-	BeforeEach(func() {
-		// Setup the apiserver client
-		cl = client.NewOrDie(&restclient.Config{Host: *apiServerAddress})
-	})
+var _ = framework.KubeDescribe("Kubelet Container Manager", func() {
+	f := NewDefaultFramework("kubelet-container-manager")
 	Describe("oom score adjusting", func() {
-		namespace := "oom-adj"
 		Context("when scheduling a busybox command that always fails in a pod", func() {
 			var podName string
 
@@ -46,7 +39,7 @@ var _ = Describe("Kubelet Container Manager", func() {
 				pod := &api.Pod{
 					ObjectMeta: api.ObjectMeta{
 						Name:      podName,
-						Namespace: namespace,
+						Namespace: f.Namespace.Name,
 					},
 					Spec: api.PodSpec{
 						// Force the Pod to schedule to the node without a scheduler running
@@ -63,13 +56,13 @@ var _ = Describe("Kubelet Container Manager", func() {
 					},
 				}
 
-				_, err := cl.Pods(namespace).Create(pod)
+				_, err := f.Client.Pods(f.Namespace.Name).Create(pod)
 				Expect(err).To(BeNil(), fmt.Sprintf("Error creating Pod %v", err))
 			})
 
-			It("it should have an error terminated reason", func() {
+			It("should have an error terminated reason", func() {
 				Eventually(func() error {
-					podData, err := cl.Pods(namespace).Get(podName)
+					podData, err := f.Client.Pods(f.Namespace.Name).Get(podName)
 					if err != nil {
 						return err
 					}
@@ -87,22 +80,10 @@ var _ = Describe("Kubelet Container Manager", func() {
 				}, time.Minute, time.Second*4).Should(BeNil())
 			})
 
-			It("it should be possible to delete", func() {
-				err := cl.Pods(namespace).Delete(podName, &api.DeleteOptions{})
+			It("should be possible to delete", func() {
+				err := f.Client.Pods(f.Namespace.Name).Delete(podName, &api.DeleteOptions{})
 				Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
 			})
-
-			AfterEach(func() {
-				cl.Pods(namespace).Delete(podName, &api.DeleteOptions{})
-				Eventually(func() bool {
-					_, err := cl.Pods(namespace).Get(podName)
-					if err != nil && apierrs.IsNotFound(err) {
-						return true
-					}
-					return false
-				}, time.Minute, time.Second*4).Should(BeTrue())
-			})
 
 		})
 	})
@@ -19,9 +19,6 @@ package e2e_node
 import (
 	"time"
 
-	"k8s.io/kubernetes/pkg/client/restclient"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
-
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 )
@@ -34,20 +31,13 @@ const (
 )
 
 var _ = Describe("Image Container Conformance Test", func() {
-	var cl *client.Client
-
-	BeforeEach(func() {
-		// Setup the apiserver client
-		cl = client.NewOrDie(&restclient.Config{Host: *apiServerAddress})
-	})
-
 	Describe("[FLAKY] image conformance blackbox test", func() {
 		Context("when testing images that exist", func() {
 			var conformImages []ConformanceImage
 			BeforeEach(func() {
 				existImageTags := []string{
-					NoPullImagRegistry[pullTestExecHealthz],
-					NoPullImagRegistry[pullTestAlpineWithBash],
+					NoPullImageRegistry[pullTestExecHealthz],
+					NoPullImageRegistry[pullTestAlpineWithBash],
 				}
 				for _, existImageTag := range existImageTags {
 					conformImage, _ := NewConformanceImage("docker", existImageTag)
@@ -38,9 +38,8 @@ import (
 )
 
 var _ = framework.KubeDescribe("Kubelet", func() {
+	f := NewDefaultFramework("kubelet-test")
 	Context("when scheduling a busybox command in a pod", func() {
-		// Setup the framework
-		f := NewDefaultFramework("pod-scheduling")
 		podName := "busybox-scheduling-" + string(util.NewUUID())
 		It("it should print the output to logs", func() {
 			podClient := f.Client.Pods(f.Namespace.Name)
@@ -81,7 +80,6 @@ var _ = framework.KubeDescribe("Kubelet", func() {
 	})
 
 	Context("when scheduling a read only busybox container", func() {
-		f := NewDefaultFramework("pod-scheduling")
 		podName := "busybox-readonly-fs" + string(util.NewUUID())
 		It("it should not write to root filesystem", func() {
 			podClient := f.Client.Pods(f.Namespace.Name)
@@ -123,8 +121,6 @@ var _ = framework.KubeDescribe("Kubelet", func() {
 		})
 	})
 	Describe("metrics api", func() {
-		// Setup the framework
-		f := NewDefaultFramework("kubelet-metrics-api")
 		Context("when querying /stats/summary", func() {
 			It("it should report resource usage through the stats api", func() {
 				podNamePrefix := "stats-busybox-" + string(util.NewUUID())
|
@ -24,25 +24,21 @@ import (
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("MirrorPod", func() {
|
||||
var cl *client.Client
|
||||
BeforeEach(func() {
|
||||
// Setup the apiserver client
|
||||
cl = client.NewOrDie(&restclient.Config{Host: *apiServerAddress})
|
||||
})
|
||||
ns := "mirror-pod"
|
||||
var _ = framework.KubeDescribe("MirrorPod", func() {
|
||||
f := NewDefaultFramework("mirror-pod")
|
||||
Context("when create a mirror pod ", func() {
|
||||
var staticPodName, mirrorPodName string
|
||||
BeforeEach(func() {
|
||||
ns := f.Namespace.Name
|
||||
staticPodName = "static-pod-" + string(util.NewUUID())
|
||||
mirrorPodName = staticPodName + "-" + e2es.nodeName
|
||||
|
||||
@ -52,12 +48,13 @@ var _ = Describe("MirrorPod", func() {
|
||||
|
||||
By("wait for the mirror pod to be running")
|
||||
Eventually(func() error {
|
||||
return checkMirrorPodRunning(cl, mirrorPodName, ns)
|
||||
return checkMirrorPodRunning(f.Client, mirrorPodName, ns)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
It("should be updated when static pod updated", func() {
|
||||
ns := f.Namespace.Name
|
||||
By("get mirror pod uid")
|
||||
pod, err := cl.Pods(ns).Get(mirrorPodName)
|
||||
pod, err := f.Client.Pods(ns).Get(mirrorPodName)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
uid := pod.UID
|
||||
|
||||
@ -68,53 +65,56 @@ var _ = Describe("MirrorPod", func() {
|
||||
|
||||
By("wait for the mirror pod to be updated")
|
||||
Eventually(func() error {
|
||||
return checkMirrorPodRecreatedAndRunnig(cl, mirrorPodName, ns, uid)
|
||||
return checkMirrorPodRecreatedAndRunnig(f.Client, mirrorPodName, ns, uid)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
|
||||
By("check the mirror pod container image is updated")
|
||||
pod, err = cl.Pods(ns).Get(mirrorPodName)
|
||||
pod, err = f.Client.Pods(ns).Get(mirrorPodName)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
Expect(len(pod.Spec.Containers)).Should(Equal(1))
|
||||
Expect(pod.Spec.Containers[0].Image).Should(Equal(image))
|
||||
})
|
||||
It("should be recreated when mirror pod gracefully deleted", func() {
|
||||
ns := f.Namespace.Name
|
||||
By("get mirror pod uid")
|
||||
pod, err := cl.Pods(ns).Get(mirrorPodName)
|
||||
pod, err := f.Client.Pods(ns).Get(mirrorPodName)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
uid := pod.UID
|
||||
|
||||
By("delete the mirror pod with grace period 30s")
|
||||
err = cl.Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(30))
|
||||
err = f.Client.Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(30))
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("wait for the mirror pod to be recreated")
|
||||
Eventually(func() error {
|
||||
return checkMirrorPodRecreatedAndRunnig(cl, mirrorPodName, ns, uid)
|
||||
return checkMirrorPodRecreatedAndRunnig(f.Client, mirrorPodName, ns, uid)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
It("should be recreated when mirror pod forcibly deleted", func() {
|
||||
ns := f.Namespace.Name
|
||||
By("get mirror pod uid")
|
||||
pod, err := cl.Pods(ns).Get(mirrorPodName)
|
||||
pod, err := f.Client.Pods(ns).Get(mirrorPodName)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
uid := pod.UID
|
||||
|
||||
By("delete the mirror pod with grace period 0s")
|
||||
err = cl.Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(0))
|
||||
err = f.Client.Pods(ns).Delete(mirrorPodName, api.NewDeleteOptions(0))
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("wait for the mirror pod to be recreated")
|
||||
Eventually(func() error {
|
||||
return checkMirrorPodRecreatedAndRunnig(cl, mirrorPodName, ns, uid)
|
||||
return checkMirrorPodRecreatedAndRunnig(f.Client, mirrorPodName, ns, uid)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
AfterEach(func() {
|
||||
ns := f.Namespace.Name
|
||||
By("delete the static pod")
|
||||
err := deleteStaticPod(e2es.kubeletStaticPodDir, staticPodName, ns)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
By("wait for the mirror pod to disappear")
|
||||
Eventually(func() error {
|
||||
return checkMirrorPodDisappear(cl, mirrorPodName, ns)
|
||||
return checkMirrorPodDisappear(f.Client, mirrorPodName, ns)
|
||||
}, 2*time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
})
|
||||
|
@ -24,40 +24,23 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
consistentCheckTimeout = time.Second * 10
|
||||
consistentCheckTimeout = time.Second * 5
|
||||
retryTimeout = time.Minute * 5
|
||||
pollInterval = time.Second * 5
|
||||
pollInterval = time.Second * 1
|
||||
)
|
||||
|
||||
type testCase struct {
|
||||
Name string
|
||||
RestartPolicy api.RestartPolicy
|
||||
Phase api.PodPhase
|
||||
State ContainerState
|
||||
RestartCountOper string
|
||||
RestartCount int32
|
||||
Ready bool
|
||||
}
|
||||
|
||||
var _ = Describe("Container Runtime Conformance Test", func() {
|
||||
var cl *client.Client
|
||||
|
||||
BeforeEach(func() {
|
||||
// Setup the apiserver client
|
||||
cl = client.NewOrDie(&restclient.Config{Host: *apiServerAddress})
|
||||
})
|
||||
var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
|
||||
f := NewDefaultFramework("runtime-conformance")
|
||||
|
||||
Describe("container runtime conformance blackbox test", func() {
|
||||
namespace := "runtime-conformance"
|
||||
|
||||
Context("when starting a container that exits", func() {
|
||||
It("it should run with the expected status [Conformance]", func() {
|
||||
restartCountVolumeName := "restart-count"
|
||||
@ -81,10 +64,17 @@ var _ = Describe("Container Runtime Conformance Test", func() {
|
||||
},
|
||||
},
|
||||
}
|
||||
testCases := []testCase{
|
||||
{"terminate-cmd-rpa", api.RestartPolicyAlways, api.PodRunning, ContainerStateRunning, "==", 2, true},
|
||||
{"terminate-cmd-rpof", api.RestartPolicyOnFailure, api.PodSucceeded, ContainerStateTerminated, "==", 1, false},
|
||||
{"terminate-cmd-rpn", api.RestartPolicyNever, api.PodFailed, ContainerStateTerminated, "==", 0, false},
|
||||
testCases := []struct {
|
||||
Name string
|
||||
RestartPolicy api.RestartPolicy
|
||||
Phase api.PodPhase
|
||||
State ContainerState
|
||||
RestartCount int32
|
||||
Ready bool
|
||||
}{
|
||||
{"terminate-cmd-rpa", api.RestartPolicyAlways, api.PodRunning, ContainerStateRunning, 2, true},
|
||||
{"terminate-cmd-rpof", api.RestartPolicyOnFailure, api.PodSucceeded, ContainerStateTerminated, 1, false},
|
||||
{"terminate-cmd-rpn", api.RestartPolicyNever, api.PodFailed, ContainerStateTerminated, 0, false},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
tmpFile, err := ioutil.TempFile("", "restartCount")
|
||||
@ -108,11 +98,11 @@ while true; do sleep 1; done
|
||||
testContainer.Command = []string{"sh", "-c", tmpCmd}
|
||||
terminateContainer := ConformanceContainer{
|
||||
Container: testContainer,
|
||||
Client: cl,
|
||||
Client: f.Client,
|
||||
RestartPolicy: testCase.RestartPolicy,
|
||||
Volumes: testVolumes,
|
||||
NodeName: *nodeName,
|
||||
Namespace: namespace,
|
||||
Namespace: f.Namespace.Name,
|
||||
}
|
||||
Expect(terminateContainer.Create()).To(Succeed())
|
||||
defer terminateContainer.Delete()
|
||||
@ -121,7 +111,7 @@ while true; do sleep 1; done
|
||||
Eventually(func() (int32, error) {
|
||||
status, err := terminateContainer.GetStatus()
|
||||
return status.RestartCount, err
|
||||
}, retryTimeout, pollInterval).Should(BeNumerically(testCase.RestartCountOper, testCase.RestartCount))
|
||||
}, retryTimeout, pollInterval).Should(Equal(testCase.RestartCount))
|
||||
|
||||
By("it should get the expected 'Phase'")
|
||||
Eventually(terminateContainer.GetPhase, retryTimeout, pollInterval).Should(Equal(testCase.Phase))
|
||||
@@ -142,43 +132,110 @@ while true; do sleep 1; done
 				})
 			})
 
-		Context("when running a container with invalid image", func() {
-			It("it should run with the expected status [Conformance]", func() {
-				testContainer := api.Container{
-					Image:   "foo.com/foo/foo",
-					Command: []string{"false"},
-				}
-				testCase := testCase{"invalid-image-rpa", api.RestartPolicyAlways, api.PodPending, ContainerStateWaiting, "==", 0, false}
-				testContainer.Name = testCase.Name
-				invalidImageContainer := ConformanceContainer{
-					Container:     testContainer,
-					Client:        cl,
-					RestartPolicy: testCase.RestartPolicy,
-					NodeName:      *nodeName,
-					Namespace:     namespace,
-				}
-				Expect(invalidImageContainer.Create()).To(Succeed())
-				defer invalidImageContainer.Delete()
+		Context("when running a container with a new image", func() {
+			// The service account only has pull permission
+			auth := `
{
"auths": {
"https://gcr.io": {
"auth": "X2pzb25fa2V5OnsKICAidHlwZSI6ICJzZXJ2aWNlX2FjY291bnQiLAogICJwcm9qZWN0X2lkIjogImF1dGhlbnRpY2F0ZWQtaW1hZ2UtcHVsbGluZyIsCiAgInByaXZhdGVfa2V5X2lkIjogImI5ZjJhNjY0YWE5YjIwNDg0Y2MxNTg2MDYzZmVmZGExOTIyNGFjM2IiLAogICJwcml2YXRlX2tleSI6ICItLS0tLUJFR0lOIFBSSVZBVEUgS0VZLS0tLS1cbk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzdTSG5LVEVFaVlMamZcbkpmQVBHbUozd3JCY2VJNTBKS0xxS21GWE5RL3REWGJRK2g5YVl4aldJTDhEeDBKZTc0bVovS01uV2dYRjVLWlNcbm9BNktuSU85Yi9SY1NlV2VpSXRSekkzL1lYVitPNkNjcmpKSXl4anFWam5mVzJpM3NhMzd0OUE5VEZkbGZycm5cbjR6UkpiOWl4eU1YNGJMdHFGR3ZCMDNOSWl0QTNzVlo1ODhrb1FBZmgzSmhhQmVnTWorWjRSYko0aGVpQlFUMDNcbnZVbzViRWFQZVQ5RE16bHdzZWFQV2dydDZOME9VRGNBRTl4bGNJek11MjUzUG4vSzgySFpydEx4akd2UkhNVXhcbng0ZjhwSnhmQ3h4QlN3Z1NORit3OWpkbXR2b0wwRmE3ZGducFJlODZWRDY2ejNZenJqNHlLRXRqc2hLZHl5VWRcbkl5cVhoN1JSQWdNQkFBRUNnZ0VBT3pzZHdaeENVVlFUeEFka2wvSTVTRFVidi9NazRwaWZxYjJEa2FnbmhFcG9cbjFJajJsNGlWMTByOS9uenJnY2p5VlBBd3pZWk1JeDFBZVF0RDdoUzRHWmFweXZKWUc3NkZpWFpQUm9DVlB6b3VcbmZyOGRDaWFwbDV0enJDOWx2QXNHd29DTTdJWVRjZmNWdDdjRTEyRDNRS3NGNlo3QjJ6ZmdLS251WVBmK0NFNlRcbmNNMHkwaCtYRS9kMERvSERoVy96YU1yWEhqOFRvd2V1eXRrYmJzNGYvOUZqOVBuU2dET1lQd2xhbFZUcitGUWFcbkpSd1ZqVmxYcEZBUW14M0Jyd25rWnQzQ2lXV2lGM2QrSGk5RXRVYnRWclcxYjZnK1JRT0licWFtcis4YlJuZFhcbjZWZ3FCQWtKWjhSVnlkeFVQMGQxMUdqdU9QRHhCbkhCbmM0UW9rSXJFUUtCZ1FEMUNlaWN1ZGhXdGc0K2dTeGJcbnplanh0VjFONDFtZHVjQnpvMmp5b1dHbzNQVDh3ckJPL3lRRTM0cU9WSi9pZCs4SThoWjRvSWh1K0pBMDBzNmdcblRuSXErdi9kL1RFalk4MW5rWmlDa21SUFdiWHhhWXR4UjIxS1BYckxOTlFKS2ttOHRkeVh5UHFsOE1veUdmQ1dcbjJ2aVBKS05iNkhabnY5Q3lqZEo5ZzJMRG5RS0JnUUREcVN2eURtaGViOTIzSW96NGxlZ01SK205Z2xYVWdTS2dcbkVzZlllbVJmbU5XQitDN3ZhSXlVUm1ZNU55TXhmQlZXc3dXRldLYXhjK0krYnFzZmx6elZZdFpwMThNR2pzTURcbmZlZWZBWDZCWk1zVXQ3Qmw3WjlWSjg1bnRFZHFBQ0xwWitaLzN0SVJWdWdDV1pRMWhrbmxHa0dUMDI0SkVFKytcbk55SDFnM2QzUlFLQmdRQ1J2MXdKWkkwbVBsRklva0tGTkh1YTBUcDNLb1JTU1hzTURTVk9NK2xIckcxWHJtRjZcbkMwNGNTKzQ0N0dMUkxHOFVUaEpKbTRxckh0Ti9aK2dZOTYvMm1xYjRIakpORDM3TVhKQnZFYTN5ZUxTOHEvK1JcbjJGOU1LamRRaU5LWnhQcG84VzhOSlREWTVOa1BaZGh4a2pzSHdVNGRTNjZwMVRESUU0MGd0TFpaRFFLQmdGaldcbktyblFpTnEzOS9iNm5QOFJNVGJDUUFKbmR3anhTUU5kQTVmcW1rQTlhRk9HbCtqamsxQ1BWa0tNSWxLSmdEYkpcbk9heDl2OUc2Ui9NSTFIR1hmV3QxWU56VnRocjRIdHNyQTB0U3BsbWhwZ05XRTZWejZuQURqdGZQSnMyZUdqdlhcbmpQUnArdjhjY21MK3dTZzhQTGprM3ZsN2VlNXJsWWxNQndNdUdjUHhBb0dBZWRueGJXMVJMbVZubEFpSEx1L0xcbmxtZkF3RFdtRWlJMFVnK1BMbm9Pdk81dFE1ZDRXMS94RU44bFA0cWtzcGtmZk1Rbk5oNFNZR0VlQlQzMlpxQ1RcbkpSZ2YwWGpveXZ2dXA5eFhqTWtYcnBZL3ljMXpmcVRaQzBNTzkvMVVjMWJSR2RaMmR5M2xSNU5XYXA3T1h5Zk9cblBQcE5Gb1BUWGd2M3FDcW5sTEhyR3pNPVxuLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLVxuIiwKICAiY2xpZW50X2VtYWlsIjogImltYWdlLXB1bGxpbmdAYXV0aGVudGljYXRlZC1pbWFnZS1wdWxsaW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAiY2xpZW50X2lkIjogIjExMzc5NzkxNDUzMDA3MzI3ODcxMiIsCiAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L2ltYWdlLXB1bGxpbmclNDBhdXRoZW50aWNhdGVkLWltYWdlLXB1bGxpbmcuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iCn0=",
"email": "image-pulling@authenticated-image-pulling.iam.gserviceaccount.com"
}
}
}`
+			secret := &api.Secret{
+				Data: map[string][]byte{api.DockerConfigJsonKey: []byte(auth)},
+				Type: api.SecretTypeDockerConfigJson,
+			}
+			for _, testCase := range []struct {
+				description string
+				image       string
+				secret      bool
+				phase       api.PodPhase
+				state       ContainerState
+			}{
+				{
+					description: "should not be able to pull image from invalid registry",
+					image:       "invalid.com/invalid/alpine:3.1",
+					phase:       api.PodPending,
+					state:       ContainerStateWaiting,
+				},
+				{
+					description: "should not be able to pull non-existing image from gcr.io",
+					image:       "gcr.io/google_containers/invalid-image:invalid-tag",
+					phase:       api.PodPending,
+					state:       ContainerStateWaiting,
+				},
+				{
+					description: "should be able to pull image from gcr.io",
+					image:       NoPullImageRegistry[pullTestAlpineWithBash],
+					phase:       api.PodRunning,
+					state:       ContainerStateRunning,
+				},
+				{
+					description: "should be able to pull image from docker hub",
+					image:       NoPullImageRegistry[pullTestAlpine],
+					phase:       api.PodRunning,
+					state:       ContainerStateRunning,
+				},
+				{
+					description: "should not be able to pull from private registry without secret",
+					image:       NoPullImageRegistry[pullTestAuthenticatedAlpine],
+					phase:       api.PodPending,
+					state:       ContainerStateWaiting,
+				},
+				{
+					description: "should be able to pull from private registry with secret",
+					image:       NoPullImageRegistry[pullTestAuthenticatedAlpine],
+					secret:      true,
+					phase:       api.PodRunning,
+					state:       ContainerStateRunning,
+				},
+			} {
+				testCase := testCase
+				It(testCase.description, func() {
+					name := "image-pull-test"
+					command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
+					container := ConformanceContainer{
+						Container: api.Container{
+							Name:    name,
+							Image:   testCase.image,
+							Command: command,
+							// PullAlways makes sure that the image will always be pulled even if it is present before the test.
+							ImagePullPolicy: api.PullAlways,
+						},
+						Client:        f.Client,
+						RestartPolicy: api.RestartPolicyNever,
+						NodeName:      *nodeName,
+						Namespace:     f.Namespace.Name,
+					}
+					if testCase.secret {
+						secret.Name = "image-pull-secret-" + string(util.NewUUID())
+						By("create image pull secret")
+						_, err := f.Client.Secrets(f.Namespace.Name).Create(secret)
+						Expect(err).NotTo(HaveOccurred())
+						defer f.Client.Secrets(f.Namespace.Name).Delete(secret.Name)
+						container.ImagePullSecrets = []string{secret.Name}
+					}
 
-				Eventually(invalidImageContainer.GetPhase, retryTimeout, pollInterval).Should(Equal(testCase.Phase))
-				Consistently(invalidImageContainer.GetPhase, consistentCheckTimeout, pollInterval).Should(Equal(testCase.Phase))
+					By("create the container")
+					Expect(container.Create()).To(Succeed())
+					defer container.Delete()
 
-				status, err := invalidImageContainer.GetStatus()
-				Expect(err).NotTo(HaveOccurred())
+					By("check the pod phase")
+					Eventually(container.GetPhase, retryTimeout, pollInterval).Should(Equal(testCase.phase))
+					Consistently(container.GetPhase, consistentCheckTimeout, pollInterval).Should(Equal(testCase.phase))
 
-				By("it should get the expected 'RestartCount'")
-				Expect(status.RestartCount).To(BeNumerically(testCase.RestartCountOper, testCase.RestartCount))
+					By("check the container state")
+					status, err := container.GetStatus()
+					Expect(err).NotTo(HaveOccurred())
+					Expect(GetContainerState(status.State)).To(Equal(testCase.state))
 
-				By("it should get the expected 'Ready' status")
-				Expect(status.Ready).To(Equal(testCase.Ready))
-
-				By("it should get the expected 'State'")
-				Expect(GetContainerState(status.State)).To(Equal(testCase.State))
-
-				By("it should be possible to delete [Conformance]")
-				Expect(invalidImageContainer.Delete()).To(Succeed())
-				Eventually(invalidImageContainer.Present, retryTimeout, pollInterval).Should(BeFalse())
-			})
+					By("it should be possible to delete")
+					Expect(container.Delete()).To(Succeed())
+					Eventually(container.Present, retryTimeout, pollInterval).Should(BeFalse())
+				})
+			}
 		})
 	})
 })