Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #69583 from audreylim/annotate-apimachinery-e2e-test-errors
Annotate errors in apimachinery e2e tests
Commit 808557e468
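The change is mechanical: every bare `Expect(err).NotTo(HaveOccurred())` (and `Expect(err).To(HaveOccurred())`) assertion in the apimachinery e2e tests gains a formatted description, so a failure reports which resource, namespace, and operation was involved instead of only the raw error value. Where the message needs a value that was previously inlined (a pod name, list options, a patch body), that value is first pulled out into a named variable. The sketch below is not part of the diff — the package, test name, and namespace value are made up for illustration — but it shows the Gomega mechanism the annotations rely on: the optional trailing arguments to `To`/`NotTo` are a format string plus values that Gomega prepends to the failure output.

```go
package example

import (
	"testing"

	. "github.com/onsi/gomega"
)

// Minimal sketch of the annotation pattern applied throughout this PR.
// The namespace value and the nil error are stand-ins for illustration only.
func TestAnnotatedAssertion(t *testing.T) {
	g := NewGomegaWithT(t)

	ns := "e2e-tests-example"
	var err error // imagine this came from client.List(opts)

	// Bare form: on failure Gomega only reports the unexpected error value.
	g.Expect(err).NotTo(HaveOccurred())

	// Annotated form, as introduced by this change: the optional description
	// arguments (a fmt.Sprintf-style format string and its values) are prepended
	// to the failure message, so the report also says what was being attempted.
	g.Expect(err).NotTo(HaveOccurred(), "failed to list pod templates in namespace: %s", ns)
}
```

In the e2e suite itself `Expect` comes from the Ginkgo/Gomega dot imports, so only the extra description arguments change; the assertions behave exactly as before when they pass.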
@@ -80,7 +80,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 for {
 opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
 list, err := client.List(opts)
-Expect(err).ToNot(HaveOccurred())
+Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
 Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
 
@@ -101,8 +101,9 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 }
 
 By("retrieving those results all at once")
-list, err := client.List(metav1.ListOptions{Limit: numberOfTotalResources + 1})
-Expect(err).ToNot(HaveOccurred())
+opts := metav1.ListOptions{Limit: numberOfTotalResources + 1}
+list, err := client.List(opts)
+Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 Expect(list.Items).To(HaveLen(numberOfTotalResources))
 })
 
@@ -116,7 +117,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 opts := metav1.ListOptions{}
 opts.Limit = oneTenth
 list, err := client.List(opts)
-Expect(err).ToNot(HaveOccurred())
+Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 firstToken := list.Continue
 firstRV := list.ResourceVersion
 framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)
@@ -149,7 +150,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 By("retrieving the second page again with the token received with the error message")
 opts.Continue = inconsistentToken
 list, err = client.List(opts)
-Expect(err).ToNot(HaveOccurred())
+Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
 Expect(list.ResourceVersion).ToNot(Equal(firstRV))
 Expect(len(list.Items)).To(BeNumerically("==", opts.Limit))
 found := oneTenth
@@ -163,7 +164,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 lastRV := list.ResourceVersion
 for {
 list, err := client.List(opts)
-Expect(err).ToNot(HaveOccurred())
+Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
 Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
 Expect(list.ResourceVersion).To(Equal(lastRV))
@@ -80,35 +80,35 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
 noxuResourceClient := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition)
 
 watchA, err := watchCRWithName(noxuResourceClient, watchCRNameA)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to watch custom resource: %s", watchCRNameA)
 
 watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to watch custom resource: %s", watchCRNameB)
 
 testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA)
 testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB)
 
 By("Creating first CR ")
 testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to instantiate custom resource: %+v", testCrA)
 expectEvent(watchA, watch.Added, testCrA)
 expectNoEvent(watchB, watch.Added, testCrA)
 
 By("Creating second CR")
 testCrB, err = instantiateCustomResource(testCrB, noxuResourceClient, noxuDefinition)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to instantiate custom resource: %+v", testCrB)
 expectEvent(watchB, watch.Added, testCrB)
 expectNoEvent(watchA, watch.Added, testCrB)
 
 By("Deleting first CR")
 err = deleteCustomResource(noxuResourceClient, watchCRNameA)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to delete custom resource: %s", watchCRNameA)
 expectEvent(watchA, watch.Deleted, nil)
 expectNoEvent(watchB, watch.Deleted, nil)
 
 By("Deleting second CR")
 err = deleteCustomResource(noxuResourceClient, watchCRNameB)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to delete custom resource: %s", watchCRNameB)
 expectEvent(watchB, watch.Deleted, nil)
 expectNoEvent(watchA, watch.Deleted, nil)
 })
@@ -94,8 +94,9 @@ func doEtcdFailure(failCommand, fixCommand string) {
 }
 
 func masterExec(cmd string) {
-result, err := framework.SSH(cmd, framework.GetMasterHost()+":22", framework.TestContext.Provider)
-Expect(err).NotTo(HaveOccurred())
+host := framework.GetMasterHost() + ":22"
+result, err := framework.SSH(cmd, host, framework.TestContext.Provider)
+Expect(err).NotTo(HaveOccurred(), "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
 if result.Code != 0 {
 framework.LogSSHResult(result)
 framework.Failf("master exec command returned non-zero")
@@ -120,7 +121,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
 }
 for _, pod := range pods.Items {
 err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
 }
 framework.Logf("apiserver has recovered")
 return true, nil
@@ -130,7 +131,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
 framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
 options := metav1.ListOptions{LabelSelector: rcSelector.String()}
 pods, err := podClient.List(options)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
 for _, pod := range pods.Items {
 if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {
 return true, nil
@@ -737,12 +737,12 @@ var _ = SIGDescribe("Garbage collector", func() {
 }
 By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
 pods, err := podClient.List(metav1.ListOptions{})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", f.Namespace.Name)
 patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID)
 for i := 0; i < halfReplicas; i++ {
 pod := pods.Items[i]
 _, err := podClient.Patch(pod.Name, types.StrategicMergePatchType, []byte(patch))
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch)
 }
 
 By(fmt.Sprintf("delete the rc %s", rc1Name))
@@ -816,33 +816,39 @@ var _ = SIGDescribe("Garbage collector", func() {
 framework.ConformanceIt("should not be blocked by dependency circle", func() {
 clientSet := f.ClientSet
 podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
-pod1 := newGCPod("pod1")
+pod1Name := "pod1"
+pod1 := newGCPod(pod1Name)
 pod1, err := podClient.Create(pod1)
-Expect(err).NotTo(HaveOccurred())
-pod2 := newGCPod("pod2")
+Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name)
+pod2Name := "pod2"
+pod2 := newGCPod(pod2Name)
 pod2, err = podClient.Create(pod2)
-Expect(err).NotTo(HaveOccurred())
-pod3 := newGCPod("pod3")
+Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name)
+pod3Name := "pod3"
+pod3 := newGCPod(pod3Name)
 pod3, err = podClient.Create(pod3)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name)
 // create circular dependency
 addRefPatch := func(name string, uid types.UID) []byte {
 return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid))
 }
-pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, addRefPatch(pod3.Name, pod3.UID))
-Expect(err).NotTo(HaveOccurred())
+patch1 := addRefPatch(pod3.Name, pod3.UID)
+pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1)
+Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
 framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
-pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, addRefPatch(pod1.Name, pod1.UID))
-Expect(err).NotTo(HaveOccurred())
+patch2 := addRefPatch(pod1.Name, pod1.UID)
+pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2)
+Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
 framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
-pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, addRefPatch(pod2.Name, pod2.UID))
-Expect(err).NotTo(HaveOccurred())
+patch3 := addRefPatch(pod2.Name, pod2.UID)
+pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3)
+Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
 framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
 // delete one pod, should result in the deletion of all pods
 deleteOptions := getForegroundOptions()
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
 err = podClient.Delete(pod1.ObjectMeta.Name, deleteOptions)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name)
 var pods *v1.PodList
 var err2 error
 // TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
@@ -1073,7 +1079,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 By("Create the cronjob")
 cronJob := newCronJob("simple", "*/1 * * * ?")
 cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(cronJob)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)
 
 By("Wait for the CronJob to create new Job")
 err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) {
@@ -52,8 +52,9 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 
 ch := make(chan struct{})
 go func() {
-_, err := c.CoreV1().Pods(ns).Create(newUninitializedPod(podName))
-Expect(err).NotTo(HaveOccurred())
+pod := newUninitializedPod(podName)
+_, err := c.CoreV1().Pods(ns).Create(pod)
+Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
 close(ch)
 }()
 
@@ -72,34 +73,35 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 
 // verify that we can update an initializing pod
 pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
 pod.Annotations = map[string]string{"update-1": "test"}
 pod, err = c.CoreV1().Pods(ns).Update(pod)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update pod %s in namespace %s to: %+v", pod.Name, ns, pod)
 
 // verify the list call filters out uninitialized pods
-pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{IncludeUninitialized: true})
-Expect(err).NotTo(HaveOccurred())
+listOptions := metav1.ListOptions{IncludeUninitialized: true}
+pods, err := c.CoreV1().Pods(ns).List(listOptions)
+Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s, given list options: %+v", ns, listOptions)
 Expect(pods.Items).To(HaveLen(1))
 pods, err = c.CoreV1().Pods(ns).List(metav1.ListOptions{})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", ns)
 Expect(pods.Items).To(HaveLen(0))
 
 // clear initializers
 pod.Initializers = nil
 pod, err = c.CoreV1().Pods(ns).Update(pod)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update pod %s in namespace %s to: %+v", pod.Name, ns, pod)
 
 // pod should now start running
 err = framework.WaitForPodRunningInNamespace(c, pod)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, pod.Namespace)
 
 // ensure create call returns
 <-ch
 
 // verify that we cannot start the pod initializing again
 pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
 pod.Initializers = &metav1.Initializers{
 Pending: []metav1.Initializer{{Name: "Other"}},
 }
@@ -119,7 +121,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 // create and register an initializer
 initializerName := "pod.test.e2e.kubernetes.io"
 initializerConfigName := "e2e-test-initializer"
-_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(&v1alpha1.InitializerConfiguration{
+initializerConfig := &v1alpha1.InitializerConfiguration{
 ObjectMeta: metav1.ObjectMeta{Name: initializerConfigName},
 Initializers: []v1alpha1.Initializer{
 {
@@ -129,11 +131,12 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 },
 },
 },
-})
+}
+_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(initializerConfig)
 if errors.IsNotFound(err) {
 framework.Skipf("dynamic configuration of initializers requires the alpha admissionregistration.k8s.io group to be enabled")
 }
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create and register initializer with config: %+v", initializerConfig)
 
 // we must remove the initializer when the test is complete and ensure no pods are pending for that initializer
 defer cleanupInitializer(c, initializerConfigName, initializerName)
@@ -145,8 +148,9 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 ch := make(chan struct{})
 go func() {
 defer close(ch)
-_, err := c.CoreV1().Pods(ns).Create(newInitPod(podName))
-Expect(err).NotTo(HaveOccurred())
+pod := newInitPod(podName)
+_, err := c.CoreV1().Pods(ns).Create(pod)
+Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
 }()
 
 // wait until the pod shows up uninitialized
@@ -162,7 +166,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 }
 return true, nil
 })
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to get pod %s from namespace: %s", podName, ns)
 Expect(pod.Initializers).NotTo(BeNil())
 Expect(pod.Initializers.Pending).To(HaveLen(1))
 Expect(pod.Initializers.Pending[0].Name).To(Equal(initializerName))
@@ -171,14 +175,14 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 By("Completing initialization")
 pod.Initializers = nil
 pod, err = c.CoreV1().Pods(ns).Update(pod)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update pod %s in namespace %s to: %+v", pod.Name, ns, pod)
 
 // ensure create call returns
 <-ch
 
 // pod should now start running
 err = framework.WaitForPodRunningInNamespace(c, pod)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, pod.Namespace)
 
 // bypass initialization by explicitly passing an empty pending list
 By("Setting an empty initializer as an admin to bypass initialization")
@@ -186,7 +190,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 pod = newUninitializedPod(podName)
 pod.Initializers.Pending = nil
 pod, err = c.CoreV1().Pods(ns).Create(pod)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
 Expect(pod.Initializers).To(BeNil())
 
 // bypass initialization for mirror pods
@@ -198,7 +202,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 }
 pod.Spec.NodeName = "node-does-not-yet-exist"
 pod, err = c.CoreV1().Pods(ns).Create(pod)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
 Expect(pod.Initializers).To(BeNil())
 Expect(pod.Annotations[v1.MirrorPodAnnotationKey]).To(Equal("true"))
 })
@@ -213,7 +217,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 // create and register an initializer, without setting up a controller to handle it.
 initializerName := "pod.test.e2e.kubernetes.io"
 initializerConfigName := "e2e-test-initializer"
-_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(&v1alpha1.InitializerConfiguration{
+initializerConfig := &v1alpha1.InitializerConfiguration{
 ObjectMeta: metav1.ObjectMeta{Name: initializerConfigName},
 Initializers: []v1alpha1.Initializer{
 {
@@ -223,11 +227,12 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 },
 },
 },
-})
+}
+_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(initializerConfig)
 if errors.IsNotFound(err) {
 framework.Skipf("dynamic configuration of initializers requires the alpha admissionregistration.k8s.io group to be enabled")
 }
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create and register initializer with config: %+v", initializerConfig)
 
 // we must remove the initializer when the test is complete and ensure no pods are pending for that initializer
 defer cleanupInitializer(c, initializerConfigName, initializerName)
@@ -236,31 +241,32 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 time.Sleep(3 * time.Second)
 
 // create a replicaset
-persistedRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newReplicaset())
-Expect(err).NotTo(HaveOccurred())
+rs := newReplicaset()
+persistedRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(rs)
+Expect(err).NotTo(HaveOccurred(), "failed to create replicaset %s in namespace: %s", persistedRS.Name, ns)
 // wait for replicaset controller to confirm that it has handled the creation
 err = waitForRSObservedGeneration(c, persistedRS.Namespace, persistedRS.Name, persistedRS.Generation)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "replicaset %s failed to observe generation: %d", persistedRS.Name, persistedRS.Generation)
 
 // update the replicaset spec to trigger a resync
 patch := []byte(`{"spec":{"minReadySeconds":5}}`)
 persistedRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Patch(persistedRS.Name, types.StrategicMergePatchType, patch)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to apply to replicaset %s in namespace %s a strategic merge patch: %s", persistedRS.Name, ns, patch)
 
 // wait for replicaset controller to confirm that it has handle the spec update
 err = waitForRSObservedGeneration(c, persistedRS.Namespace, persistedRS.Name, persistedRS.Generation)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "replicaset %s failed to observe generation: %d", persistedRS.Name, persistedRS.Generation)
 
 // verify that the replicaset controller doesn't create extra pod
 selector, err := metav1.LabelSelectorAsSelector(persistedRS.Spec.Selector)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to convert label selector %+v of LabelSelector api type into a struct that implements labels.Selector", persistedRS.Spec.Selector)
 
 listOptions := metav1.ListOptions{
 LabelSelector: selector.String(),
 IncludeUninitialized: true,
 }
 pods, err := c.CoreV1().Pods(ns).List(listOptions)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s, given list options: %+v", ns, listOptions)
 Expect(len(pods.Items)).Should(Equal(1))
 })
 
@@ -277,13 +283,13 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
 framework.Failf("expect err to be timeout error, got %v", err)
 }
 uninitializedPod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
 Expect(uninitializedPod.Initializers).NotTo(BeNil())
 Expect(len(uninitializedPod.Initializers.Pending)).Should(Equal(1))
 
 patch := fmt.Sprintf(`{"metadata":{"initializers":{"pending":[{"$patch":"delete","name":"%s"}]}}}`, uninitializedPod.Initializers.Pending[0].Name)
 patchedPod, err := c.CoreV1().Pods(ns).Patch(uninitializedPod.Name, types.StrategicMergePatchType, []byte(patch))
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s a strategic merge patch: %s", uninitializedPod.Name, ns, patch)
 Expect(patchedPod.Initializers).To(BeNil())
 })
 })
@@ -45,8 +45,9 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
 go func(n int) {
 defer wg.Done()
 defer GinkgoRecover()
-_, err = f.CreateNamespace(fmt.Sprintf("nslifetest-%v", n), nil)
-Expect(err).NotTo(HaveOccurred())
+ns := fmt.Sprintf("nslifetest-%v", n)
+_, err = f.CreateNamespace(ns, nil)
+Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", ns)
 }(n)
 }
 wg.Wait()
@@ -54,8 +55,9 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
 //Wait 10 seconds, then SEND delete requests for all the namespaces.
 By("Waiting 10 seconds")
 time.Sleep(time.Duration(10 * time.Second))
-deleted, err := framework.DeleteNamespaces(f.ClientSet, []string{"nslifetest"}, nil /* skipFilter */)
-Expect(err).NotTo(HaveOccurred())
+deleteFilter := []string{"nslifetest"}
+deleted, err := framework.DeleteNamespaces(f.ClientSet, deleteFilter, nil /* skipFilter */)
+Expect(err).NotTo(HaveOccurred(), "failed to delete namespace(s) containing: %s", deleteFilter)
 Expect(len(deleted)).To(Equal(totalNS))
 
 By("Waiting for namespaces to vanish")
@@ -93,23 +95,25 @@ func waitForPodInNamespace(c clientset.Interface, ns, podName string) *v1.Pod {
 }
 return true, nil
 })
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
 return pod
 }
 
 func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
 By("Creating a test namespace")
-namespace, err := f.CreateNamespace("nsdeletetest", nil)
-Expect(err).NotTo(HaveOccurred())
+namespaceName := "nsdeletetest"
+namespace, err := f.CreateNamespace(namespaceName, nil)
+Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
 
 By("Waiting for a default service account to be provisioned in namespace")
 err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
 
 By("Creating a pod in the namespace")
+podName := "test-pod"
 pod := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
-Name: "test-pod",
+Name: podName,
 },
 Spec: v1.PodSpec{
 Containers: []v1.Container{
@@ -121,7 +125,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
 },
 }
 pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, namespace.Name)
 
 By("Waiting for the pod to have running status")
 framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
@@ -150,7 +154,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
 
 By("Deleting the namespace")
 err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to delete namespace: %s", namespace.Name)
 
 By("Waiting for the namespace to be removed.")
 maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
@@ -164,26 +168,27 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
 }))
 
 By("Recreating the namespace")
-namespace, err = f.CreateNamespace("nsdeletetest", nil)
-Expect(err).NotTo(HaveOccurred())
+namespace, err = f.CreateNamespace(namespaceName, nil)
+Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
 
 By("Verifying there are no pods in the namespace")
 _, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
-Expect(err).To(HaveOccurred())
+Expect(err).To(HaveOccurred(), "failed to get pod %s in namespace: %s", pod.Name, namespace.Name)
 _, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(podB.Name, metav1.GetOptions{IncludeUninitialized: true})
-Expect(err).To(HaveOccurred())
+Expect(err).To(HaveOccurred(), "failed to get pod %s in namespace: %s", podB.Name, namespace.Name)
 }
 
 func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
 var err error
 
 By("Creating a test namespace")
-namespace, err := f.CreateNamespace("nsdeletetest", nil)
-Expect(err).NotTo(HaveOccurred())
+namespaceName := "nsdeletetest"
+namespace, err := f.CreateNamespace(namespaceName, nil)
+Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
 
 By("Waiting for a default service account to be provisioned in namespace")
 err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
 
 By("Creating a service in the namespace")
 serviceName := "test-service"
@@ -204,11 +209,11 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
 },
 }
 service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create service %s in namespace %s", serviceName, namespace.Name)
 
 By("Deleting the namespace")
 err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to delete namespace: %s", namespace.Name)
 
 By("Waiting for the namespace to be removed.")
 maxWaitSeconds := int64(60)
@@ -222,12 +227,12 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
 }))
 
 By("Recreating the namespace")
-namespace, err = f.CreateNamespace("nsdeletetest", nil)
-Expect(err).NotTo(HaveOccurred())
+namespace, err = f.CreateNamespace(namespaceName, nil)
+Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
 
 By("Verifying there is no service in the namespace")
 _, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})
-Expect(err).To(HaveOccurred())
+Expect(err).To(HaveOccurred(), "failed to get service %s in namespace: %s", service.Name, namespace.Name)
 }
 
 // This test must run [Serial] due to the impact of running other parallel
@@ -55,11 +55,11 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
 framework.Logf("Creating pod %s", podName)
 
 _, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
 
 table := &metav1beta1.Table{}
 err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in Table form in namespace: %s", podName, ns)
 framework.Logf("Table: %#v", table)
 
 Expect(len(table.ColumnDefinitions)).To(BeNumerically(">", 2))
@@ -107,7 +107,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
 VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec).
 SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
 Do().Into(pagedTable)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
 Expect(len(pagedTable.Rows)).To(Equal(2))
 Expect(pagedTable.ResourceVersion).ToNot(Equal(""))
 Expect(pagedTable.SelfLink).ToNot(Equal(""))
@@ -119,7 +119,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
 VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec).
 SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
 Do().Into(pagedTable)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
 Expect(len(pagedTable.Rows)).To(BeNumerically(">", 0))
 Expect(pagedTable.Rows[0].Cells[0]).To(Equal("template-0002"))
 })
@@ -129,7 +129,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
 
 table := &metav1beta1.Table{}
 err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to get nodes in Table form across all namespaces")
 framework.Logf("Table: %#v", table)
 
 Expect(len(table.ColumnDefinitions)).To(BeNumerically(">=", 2))
@@ -157,7 +157,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
 },
 }
 err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Body(sar).Do().Into(table)
-Expect(err).To(HaveOccurred())
+Expect(err).To(HaveOccurred(), "failed to return error when posting self subject access review: %+v, to a backend that does not implement metadata", sar)
 Expect(err.(errors.APIStatus).Status().Code).To(Equal(int32(406)))
 })
 })
@@ -166,7 +166,7 @@ func printTable(table *metav1beta1.Table) string {
 buf := &bytes.Buffer{}
 tw := tabwriter.NewWriter(buf, 5, 8, 1, ' ', 0)
 err := printers.PrintTable(table, tw, printers.PrintOptions{})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to print table: %+v", table)
 tw.Flush()
 return buf.String()
 }
@@ -57,15 +57,15 @@ var _ = SIGDescribe("Watchers", func() {
 
 By("creating a watch on configmaps with label A")
 watchA, err := watchConfigMaps(f, "", multipleWatchersLabelValueA)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA)
 
 By("creating a watch on configmaps with label B")
 watchB, err := watchConfigMaps(f, "", multipleWatchersLabelValueB)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB)
 
 By("creating a watch on configmaps with label A or B")
 watchAB, err := watchConfigMaps(f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
 
 testConfigMapA := &v1.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
@@ -86,7 +86,7 @@ var _ = SIGDescribe("Watchers", func() {
 
 By("creating a configmap with label A and ensuring the correct watchers observe the notification")
 testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapA)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns)
 expectEvent(watchA, watch.Added, testConfigMapA)
 expectEvent(watchAB, watch.Added, testConfigMapA)
 expectNoEvent(watchB, watch.Added, testConfigMapA)
@@ -95,7 +95,7 @@ var _ = SIGDescribe("Watchers", func() {
 testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
 setConfigMapData(cm, "mutation", "1")
 })
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
 expectEvent(watchA, watch.Modified, testConfigMapA)
 expectEvent(watchAB, watch.Modified, testConfigMapA)
 expectNoEvent(watchB, watch.Modified, testConfigMapA)
@@ -104,28 +104,28 @@ var _ = SIGDescribe("Watchers", func() {
 testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
 setConfigMapData(cm, "mutation", "2")
 })
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
 expectEvent(watchA, watch.Modified, testConfigMapA)
 expectEvent(watchAB, watch.Modified, testConfigMapA)
 expectNoEvent(watchB, watch.Modified, testConfigMapA)
 
 By("deleting configmap A and ensuring the correct watchers observe the notification")
 err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapA.GetName(), nil)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
 expectEvent(watchA, watch.Deleted, nil)
 expectEvent(watchAB, watch.Deleted, nil)
 expectNoEvent(watchB, watch.Deleted, nil)
 
 By("creating a configmap with label B and ensuring the correct watchers observe the notification")
 testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapB)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMapB, ns)
 expectEvent(watchB, watch.Added, testConfigMapB)
 expectEvent(watchAB, watch.Added, testConfigMapB)
 expectNoEvent(watchA, watch.Added, testConfigMapB)
 
 By("deleting configmap B and ensuring the correct watchers observe the notification")
 err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapB.GetName(), nil)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns)
 expectEvent(watchB, watch.Deleted, nil)
 expectEvent(watchAB, watch.Deleted, nil)
 expectNoEvent(watchA, watch.Deleted, nil)
@@ -151,27 +151,27 @@ var _ = SIGDescribe("Watchers", func() {
 
 By("creating a new configmap")
 testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns)
 
 By("modifying the configmap once")
 testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
 setConfigMapData(cm, "mutation", "1")
 })
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns)
 
 By("modifying the configmap a second time")
 testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
 setConfigMapData(cm, "mutation", "2")
 })
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns)
 
 By("deleting the configmap")
 err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns)
 
 By("creating a watch on configmaps from the resource version returned by the first update")
 testWatch, err := watchConfigMaps(f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion)
 
 By("Expecting to observe notifications for all changes to the configmap after the first update")
 expectEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
@@ -188,9 +188,10 @@ var _ = SIGDescribe("Watchers", func() {
c := f.ClientSet
ns := f.Namespace.Name

+configMapName := "e2e-watch-test-watch-closed"
testConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
-Name: "e2e-watch-test-watch-closed",
+Name: configMapName,
Labels: map[string]string{
watchConfigMapLabelKey: watchRestartedLabelValue,
},
@@ -199,17 +200,17 @@ var _ = SIGDescribe("Watchers", func() {

By("creating a watch on configmaps")
testWatchBroken, err := watchConfigMaps(f, "", watchRestartedLabelValue)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmap with label: %s", watchRestartedLabelValue)

By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)

By("modifying the configmap once")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)

By("closing the watch once it receives two notifications")
expectEvent(testWatchBroken, watch.Added, testConfigMap)
@@ -223,7 +224,7 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)

By("creating a new watch on configmaps from the last resource version observed by the first watch")
lastEventConfigMap, ok := lastEvent.Object.(*v1.ConfigMap)
@@ -231,11 +232,11 @@ var _ = SIGDescribe("Watchers", func() {
framework.Failf("Expected last notfication to refer to a configmap but got: %v", lastEvent)
}
testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)

By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)

By("Expecting to observe notifications for all changes to the configmap since the first watch closed")
expectEvent(testWatchRestarted, watch.Modified, testConfigMapSecondUpdate)
@@ -252,9 +253,10 @@ var _ = SIGDescribe("Watchers", func() {
c := f.ClientSet
ns := f.Namespace.Name

+configMapName := "e2e-watch-test-label-changed"
testConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
-Name: "e2e-watch-test-label-changed",
+Name: configMapName,
Labels: map[string]string{
watchConfigMapLabelKey: toBeChangedLabelValue,
},
@@ -263,23 +265,23 @@ var _ = SIGDescribe("Watchers", func() {

By("creating a watch on configmaps with a certain label")
testWatch, err := watchConfigMaps(f, "", toBeChangedLabelValue)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmap with label: %s", toBeChangedLabelValue)

By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)

By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)

By("changing the label value of the configmap")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value"
})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value", configMapName, ns)

By("Expecting to observe a delete notification for the watched object")
expectEvent(testWatch, watch.Added, testConfigMap)
@@ -290,7 +292,7 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)

By("Expecting not to observe a notification because the object no longer meets the selector's requirements")
expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
@@ -299,17 +301,17 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapLabelRestored, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue
})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns)

By("modifying the configmap a third time")
testConfigMapThirdUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "3")
})
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a third time", configMapName, ns)

By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)

By("Expecting to observe an add notification for the watched object when the label value was restored")
expectEvent(testWatch, watch.Added, testConfigMapLabelRestored)
@@ -620,7 +620,7 @@ func testWebhook(f *framework.Framework) {
// Creating the pod, the request should be rejected
pod := nonCompliantPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
-Expect(err).NotTo(BeNil())
+Expect(err).To(HaveOccurred(), "create pod %s in namespace %s should have been denied by webhook", pod.Name, f.Namespace.Name)
expectedErrMsg1 := "the pod contains unwanted container name"
if !strings.Contains(err.Error(), expectedErrMsg1) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
@@ -635,7 +635,7 @@ func testWebhook(f *framework.Framework) {
// Creating the pod, the request should be rejected
pod = hangingPod(f)
_, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod)
-Expect(err).NotTo(BeNil())
+Expect(err).To(HaveOccurred(), "create pod %s in namespace %s should have caused webhook to hang", pod.Name, f.Namespace.Name)
expectedTimeoutErr := "request did not complete within"
if !strings.Contains(err.Error(), expectedTimeoutErr) {
framework.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error())
@@ -645,7 +645,7 @@ func testWebhook(f *framework.Framework) {
// Creating the configmap, the request should be rejected
configmap := nonCompliantConfigMap(f)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
-Expect(err).NotTo(BeNil())
+Expect(err).To(HaveOccurred(), "create configmap %s in namespace %s should have been denied by the webhook", configmap.Name, f.Namespace.Name)
expectedErrMsg := "the configmap contains unwanted key and value"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
@@ -662,7 +662,7 @@ func testWebhook(f *framework.Framework) {
},
}
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)

By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook")
toNonCompliantFn := func(cm *v1.ConfigMap) {
@@ -672,7 +672,7 @@ func testWebhook(f *framework.Framework) {
cm.Data["webhook-e2e-test"] = "webhook-disallow"
}
_, err = updateConfigMap(client, f.Namespace.Name, allowedConfigMapName, toNonCompliantFn)
-Expect(err).NotTo(BeNil())
+Expect(err).To(HaveOccurred(), "update (PUT) admitted configmap %s in namespace %s to a non-compliant one should be rejected by webhook", allowedConfigMapName, f.Namespace.Name)
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
@@ -680,7 +680,7 @@ func testWebhook(f *framework.Framework) {
By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook")
patch := nonCompliantConfigMapPatch()
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(allowedConfigMapName, types.StrategicMergePatchType, []byte(patch))
-Expect(err).NotTo(BeNil())
+Expect(err).To(HaveOccurred(), "update admitted configmap %s in namespace %s by strategic merge patch to a non-compliant one should be rejected by webhook. Patch: %+v", allowedConfigMapName, f.Namespace.Name, patch)
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
@@ -699,7 +699,7 @@ func testWebhook(f *framework.Framework) {
By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
configmap = nonCompliantConfigMap(f)
_, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(configmap)
-Expect(err).To(BeNil())
+Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName)
}

func testAttachingPodWebhook(f *framework.Framework) {
@@ -707,15 +707,15 @@ func testAttachingPodWebhook(f *framework.Framework) {
client := f.ClientSet
pod := toBeAttachedPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
-Expect(err).To(BeNil())
+Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name)
err = framework.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+Expect(err).NotTo(HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name)

By("'kubectl attach' the pod, should be denied by the webhook")
timer := time.NewTimer(30 * time.Second)
defer timer.Stop()
_, err = framework.NewKubectlCommand("attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
-Expect(err).NotTo(BeNil())
+Expect(err).To(HaveOccurred(), "'kubectl attach' the pod, should be denied by the webhook")
if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
}
@@ -804,7 +804,7 @@ func testFailClosedWebhook(f *framework.Framework) {
},
}
_, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap)
-Expect(err).To(HaveOccurred())
+Expect(err).To(HaveOccurred(), "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName)
if !errors.IsInternalError(err) {
framework.Failf("expect an internal error, got %#v", err)
}
@@ -1242,12 +1242,13 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, context *c

func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
By("Creating a custom resource that should be denied by the webhook")
+crInstanceName := "cr-instance-1"
crInstance := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": crd.Spec.Names.Kind,
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
"metadata": map[string]interface{}{
-"name": "cr-instance-1",
+"name": crInstanceName,
"namespace": f.Namespace.Name,
},
"data": map[string]interface{}{
@@ -1256,7 +1257,7 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1
},
}
_, err := customResourceClient.Create(crInstance, metav1.CreateOptions{})
-Expect(err).NotTo(BeNil())
+Expect(err).To(HaveOccurred(), "create custom resource %s in namespace %s should be denied by webhook", crInstanceName, f.Namespace.Name)
expectedErrMsg := "the custom resource contains unwanted data"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
@@ -1265,12 +1266,13 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1

func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
By("Creating a custom resource that should be mutated by the webhook")
+crName := "cr-instance-1"
cr := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": crd.Spec.Names.Kind,
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
"metadata": map[string]interface{}{
-"name": "cr-instance-1",
+"name": crName,
"namespace": f.Namespace.Name,
},
"data": map[string]interface{}{
@@ -1279,7 +1281,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension
},
}
mutatedCR, err := customResourceClient.Create(cr, metav1.CreateOptions{})
-Expect(err).To(BeNil())
+Expect(err).NotTo(HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
expectedCRData := map[string]interface{}{
"mutation-start": "yes",
"mutation-stage-1": "yes",
@@ -1382,7 +1384,7 @@ func testCRDDenyWebhook(f *framework.Framework) {

// create CRD
_, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd)
-Expect(err).NotTo(BeNil())
+Expect(err).To(HaveOccurred(), "create custom resource definition %s should be denied by webhook", testcrd.GetMetaName())
expectedErrMsg := "the crd contains unwanted label"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
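
Every change in this diff applies the same Gomega pattern: passing an optional description after the matcher, so that a failed assertion reports what the test was attempting. A minimal, self-contained sketch of that pattern is below; the package, namespace string, and error values are illustrative assumptions (the e2e tests use these same Gomega calls inside Ginkgo blocks, while the sketch uses plain testing.T for brevity).

package example_test

import (
	"fmt"
	"testing"

	. "github.com/onsi/gomega"
)

func TestAnnotatedAssertions(t *testing.T) {
	g := NewGomegaWithT(t)

	ns := "demo-namespace"                       // illustrative namespace, not from the commit
	createErr := error(nil)                      // stands in for the error returned by a client call
	rejectErr := fmt.Errorf("denied by webhook") // stands in for an expected rejection

	// Bare assertion: on failure Gomega only prints the unexpected error.
	g.Expect(createErr).NotTo(HaveOccurred())

	// Annotated assertion: the extra arguments are formatted like fmt.Sprintf
	// and added to the failure message, which is the change this commit makes.
	g.Expect(createErr).NotTo(HaveOccurred(), "failed to create configmap in namespace: %s", ns)

	// The same optional description works when an error is expected.
	g.Expect(rejectErr).To(HaveOccurred(), "create in namespace %s should have been denied by webhook", ns)
}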