Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)

Commit 607c5daabd

Merge pull request #93405 from liggitt/revert-pod-lifecycle-flake

Revert "Merge pull request #90942 from ii/ii-create-pod+podstatus-resource-lifecycle-test"

This revert removes the "should run the lifecycle of a Deployment" and "should run through the lifecycle of Pods and PodStatus" e2e tests, together with the imports and BUILD dependencies they introduced.
@@ -18,7 +18,6 @@ package apps
 
 import (
     "context"
-    "encoding/json"
     "fmt"
     "math/rand"
     "time"
@@ -33,15 +32,11 @@ import (
     v1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/labels"
-    "k8s.io/apimachinery/pkg/runtime"
-    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/apimachinery/pkg/watch"
-    "k8s.io/client-go/dynamic"
     clientset "k8s.io/client-go/kubernetes"
     appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
     watchtools "k8s.io/client-go/tools/watch"
@@ -55,7 +50,6 @@ import (
     e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
     testutil "k8s.io/kubernetes/test/utils"
-    imageutils "k8s.io/kubernetes/test/utils/image"
     utilpointer "k8s.io/utils/pointer"
 )
 
@@ -73,7 +67,6 @@ var (
 var _ = SIGDescribe("Deployment", func() {
     var ns string
     var c clientset.Interface
-    var dc dynamic.Interface
 
     ginkgo.AfterEach(func() {
         failureTrap(c, ns)
@@ -84,7 +77,6 @@ var _ = SIGDescribe("Deployment", func() {
     ginkgo.BeforeEach(func() {
         c = f.ClientSet
         ns = f.Namespace.Name
-        dc = f.DynamicClient
     })
 
     ginkgo.It("deployment reaping should cascade to its replica sets and pods", func() {
@@ -142,283 +134,6 @@ var _ = SIGDescribe("Deployment", func() {
     })
     // TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
     // See https://github.com/kubernetes/kubernetes/issues/29229
 
-    ginkgo.It("should run the lifecycle of a Deployment", func() {
-        deploymentResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
-        testNamespaceName := f.Namespace.Name
-        testDeploymentName := "test-deployment"
-        testDeploymentInitialImage := imageutils.GetE2EImage(imageutils.Agnhost)
-        testDeploymentPatchImage := imageutils.GetE2EImage(imageutils.Pause)
-        testDeploymentUpdateImage := imageutils.GetE2EImage(imageutils.Httpd)
-        testDeploymentDefaultReplicas := int32(3)
-        testDeploymentMinimumReplicas := int32(1)
-        testDeploymentNoReplicas := int32(0)
-        testDeploymentLabels := map[string]string{"test-deployment-static": "true"}
-        testDeploymentLabelsFlat := "test-deployment-static=true"
-        testDeploymentLabelSelectors := metav1.LabelSelector{
-            MatchLabels: testDeploymentLabels,
-        }
-        w := &cache.ListWatch{
-            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-                options.LabelSelector = testDeploymentLabelsFlat
-                return f.ClientSet.AppsV1().Deployments(testNamespaceName).Watch(context.TODO(), options)
-            },
-        }
-        deploymentsList, err := f.ClientSet.AppsV1().Deployments("").List(context.TODO(), metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
-        framework.ExpectNoError(err, "failed to list Deployments")
-
-        ginkgo.By("creating a Deployment")
-        testDeployment := appsv1.Deployment{
-            ObjectMeta: metav1.ObjectMeta{
-                Name:   testDeploymentName,
-                Labels: map[string]string{"test-deployment-static": "true"},
-            },
-            Spec: appsv1.DeploymentSpec{
-                Replicas: &testDeploymentDefaultReplicas,
-                Selector: &testDeploymentLabelSelectors,
-                Template: v1.PodTemplateSpec{
-                    ObjectMeta: metav1.ObjectMeta{
-                        Labels: testDeploymentLabelSelectors.MatchLabels,
-                    },
-                    Spec: v1.PodSpec{
-                        Containers: []v1.Container{{
-                            Name:  testDeploymentName,
-                            Image: testDeploymentInitialImage,
-                        }},
-                    },
-                },
-            },
-        }
-        _, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(context.TODO(), &testDeployment, metav1.CreateOptions{})
-        framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName)
-
-        ginkgo.By("waiting for Deployment to be created")
-        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            switch event.Type {
-            case watch.Added:
-                if deployment, ok := event.Object.(*appsv1.Deployment); ok {
-                    found := deployment.ObjectMeta.Name == testDeployment.Name &&
-                        deployment.Labels["test-deployment-static"] == "true"
-                    return found, nil
-                }
-            default:
-                framework.Logf("observed event type %v", event.Type)
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see %v event", watch.Added)
-
-        ginkgo.By("waiting for all Replicas to be Ready")
-        ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            if deployment, ok := event.Object.(*appsv1.Deployment); ok {
-                found := deployment.ObjectMeta.Name == testDeployment.Name &&
-                    deployment.Labels["test-deployment-static"] == "true" &&
-                    deployment.Status.AvailableReplicas == testDeploymentDefaultReplicas &&
-                    deployment.Status.ReadyReplicas == testDeploymentDefaultReplicas
-                return found, nil
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentDefaultReplicas)
-
-        ginkgo.By("patching the Deployment")
-        deploymentPatch, err := json.Marshal(map[string]interface{}{
-            "metadata": map[string]interface{}{
-                "labels": map[string]string{"test-deployment": "patched"},
-            },
-            "spec": map[string]interface{}{
-                "replicas": testDeploymentMinimumReplicas,
-                "template": map[string]interface{}{
-                    "spec": map[string]interface{}{
-                        "containers": [1]map[string]interface{}{{
-                            "name":    testDeploymentName,
-                            "image":   testDeploymentPatchImage,
-                            "command": []string{"/bin/sleep", "100000"},
-                        }},
-                    },
-                },
-            },
-        })
-        framework.ExpectNoError(err, "failed to Marshal Deployment JSON patch")
-        _, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Patch(context.TODO(), testDeploymentName, types.StrategicMergePatchType, []byte(deploymentPatch), metav1.PatchOptions{})
-        framework.ExpectNoError(err, "failed to patch Deployment")
-        ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            switch event.Type {
-            case watch.Modified:
-                if deployment, ok := event.Object.(*appsv1.Deployment); ok {
-                    found := deployment.ObjectMeta.Name == testDeployment.Name &&
-                        deployment.Labels["test-deployment-static"] == "true"
-                    return found, nil
-                }
-            default:
-                framework.Logf("observed event type %v", event.Type)
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
-
-        ginkgo.By("waiting for Replicas to scale")
-        ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            if deployment, ok := event.Object.(*appsv1.Deployment); ok {
-                found := deployment.ObjectMeta.Name == testDeployment.Name &&
-                    deployment.Labels["test-deployment-static"] == "true" &&
-                    deployment.Status.AvailableReplicas == testDeploymentMinimumReplicas &&
-                    deployment.Status.ReadyReplicas == testDeploymentMinimumReplicas &&
-                    deployment.Spec.Template.Spec.Containers[0].Image == testDeploymentPatchImage
-                return found, nil
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentMinimumReplicas)
-
-        ginkgo.By("listing Deployments")
-        deploymentsList, err = f.ClientSet.AppsV1().Deployments("").List(context.TODO(), metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
-        framework.ExpectNoError(err, "failed to list Deployments")
-        foundDeployment := false
-        for _, deploymentItem := range deploymentsList.Items {
-            if deploymentItem.ObjectMeta.Name == testDeploymentName &&
-                deploymentItem.ObjectMeta.Namespace == testNamespaceName &&
-                deploymentItem.ObjectMeta.Labels["test-deployment-static"] == "true" {
-                foundDeployment = true
-                break
-            }
-        }
-        framework.ExpectEqual(foundDeployment, true, "unable to find the Deployment in list", deploymentsList)
-
-        ginkgo.By("updating the Deployment")
-        testDeploymentUpdate := testDeployment
-        testDeploymentUpdate.ObjectMeta.Labels["test-deployment"] = "updated"
-        testDeploymentUpdate.Spec.Template.Spec.Containers[0].Image = testDeploymentUpdateImage
-        testDeploymentDefaultReplicasPointer := &testDeploymentDefaultReplicas
-        testDeploymentUpdate.Spec.Replicas = testDeploymentDefaultReplicasPointer
-        testDeploymentUpdateUnstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&testDeploymentUpdate)
-        framework.ExpectNoError(err, "failed to convert to unstructured")
-        testDeploymentUpdateUnstructured := unstructuredv1.Unstructured{
-            Object: testDeploymentUpdateUnstructuredMap,
-        }
-        // currently this hasn't been able to hit the endpoint replaceAppsV1NamespacedDeploymentStatus
-        _, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Update(context.TODO(), &testDeploymentUpdateUnstructured, metav1.UpdateOptions{}) //, "status")
-        framework.ExpectNoError(err, "failed to update the DeploymentStatus")
-        ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            switch event.Type {
-            case watch.Modified:
-                if deployment, ok := event.Object.(*appsv1.Deployment); ok {
-                    found := deployment.ObjectMeta.Name == testDeployment.Name &&
-                        deployment.Labels["test-deployment-static"] == "true"
-                    return found, nil
-                }
-            default:
-                framework.Logf("observed event type %v", event.Type)
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
-
-        ginkgo.By("fetching the DeploymentStatus")
-        deploymentGetUnstructured, err := dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(context.TODO(), testDeploymentName, metav1.GetOptions{}, "status")
-        framework.ExpectNoError(err, "failed to fetch the Deployment")
-        deploymentGet := appsv1.Deployment{}
-        err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet)
-        framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment")
-        framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image")
-        framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels")
-
-        ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            if deployment, ok := event.Object.(*appsv1.Deployment); ok {
-                found := deployment.ObjectMeta.Name == testDeployment.Name &&
-                    deployment.Labels["test-deployment-static"] == "true" &&
-                    deployment.Status.AvailableReplicas == testDeploymentDefaultReplicas &&
-                    deployment.Status.ReadyReplicas == testDeploymentDefaultReplicas
-                return found, nil
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentDefaultReplicas)
-
-        ginkgo.By("patching the DeploymentStatus")
-        deploymentStatusPatch, err := json.Marshal(map[string]interface{}{
-            "metadata": map[string]interface{}{
-                "labels": map[string]string{"test-deployment": "patched-status"},
-            },
-            "status": map[string]interface{}{
-                "readyReplicas": testDeploymentNoReplicas,
-            },
-        })
-        framework.ExpectNoError(err, "failed to Marshal Deployment JSON patch")
-        dc.Resource(deploymentResource).Namespace(testNamespaceName).Patch(context.TODO(), testDeploymentName, types.StrategicMergePatchType, []byte(deploymentStatusPatch), metav1.PatchOptions{}, "status")
-        ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            switch event.Type {
-            case watch.Modified:
-                if deployment, ok := event.Object.(*appsv1.Deployment); ok {
-                    found := deployment.ObjectMeta.Name == testDeployment.Name &&
-                        deployment.Labels["test-deployment-static"] == "true"
-                    return found, nil
-                }
-            default:
-                framework.Logf("observed event type %v", event.Type)
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
-
-        ginkgo.By("fetching the DeploymentStatus")
-        deploymentGetUnstructured, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(context.TODO(), testDeploymentName, metav1.GetOptions{}, "status")
-        framework.ExpectNoError(err, "failed to fetch the DeploymentStatus")
-        deploymentGet = appsv1.Deployment{}
-        err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet)
-        framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment")
-        framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image")
-        framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels")
-        ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            if deployment, ok := event.Object.(*appsv1.Deployment); ok {
-                found := deployment.ObjectMeta.Name == testDeployment.Name &&
-                    deployment.Labels["test-deployment-static"] == "true" &&
-                    deployment.Status.AvailableReplicas == testDeploymentDefaultReplicas &&
-                    deployment.Status.ReadyReplicas == testDeploymentDefaultReplicas &&
-                    deployment.Spec.Template.Spec.Containers[0].Image == testDeploymentUpdateImage
-                return found, nil
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentDefaultReplicas)
-
-        ginkgo.By("deleting the Deployment")
-        err = f.ClientSet.AppsV1().Deployments(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
-        framework.ExpectNoError(err, "failed to delete Deployment via collection")
-
-        ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            switch event.Type {
-            case watch.Deleted:
-                if deployment, ok := event.Object.(*appsv1.Deployment); ok {
-                    found := deployment.ObjectMeta.Name == testDeployment.Name &&
-                        deployment.Labels["test-deployment-static"] == "true"
-                    return found, nil
-                }
-            default:
-                framework.Logf("observed event type %v", event.Type)
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see %v event", watch.Deleted)
-    })
 })
 
 func failureTrap(c clientset.Interface, ns string) {
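For context, the reverted Deployment test verified each lifecycle step with a label-scoped cache.ListWatch driven through watchtools.Until (wait for the Added event, for ready replicas, for the scale-down after a patch, and finally for the Deleted event). The following is a minimal sketch of that pattern, not part of this commit; the package name, the waitForDeploymentReady helper, and the 30-second timeout are illustrative assumptions.

package e2esketch

import (
    "context"
    "fmt"
    "time"

    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/watch"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    watchtools "k8s.io/client-go/tools/watch"
)

// waitForDeploymentReady blocks until a Deployment matching labelSelector in ns
// reports the requested number of ready and available replicas, or the watch times out.
func waitForDeploymentReady(c clientset.Interface, ns, labelSelector string, replicas int32) error {
    // List first so the watch starts from a known resourceVersion and misses no events.
    list, err := c.AppsV1().Deployments(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
    if err != nil {
        return fmt.Errorf("failed to list Deployments: %w", err)
    }

    // The same label-scoped watch the reverted test built.
    w := &cache.ListWatch{
        WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
            options.LabelSelector = labelSelector
            return c.AppsV1().Deployments(ns).Watch(context.TODO(), options)
        },
    }

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    // watchtools.Until replays events from list.ResourceVersion until the condition returns true.
    _, err = watchtools.Until(ctx, list.ResourceVersion, w, func(event watch.Event) (bool, error) {
        if d, ok := event.Object.(*appsv1.Deployment); ok {
            return d.Status.ReadyReplicas == replicas && d.Status.AvailableReplicas == replicas, nil
        }
        return false, nil
    })
    return err
}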
@@ -58,7 +58,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
@@ -67,7 +66,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
-        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
@@ -19,11 +19,8 @@ package common
 import (
     "bytes"
     "context"
-    "encoding/json"
     "fmt"
     "io"
-    "k8s.io/apimachinery/pkg/runtime/schema"
-    "k8s.io/client-go/dynamic"
     "runtime/debug"
     "strconv"
     "strings"
@@ -180,11 +177,9 @@ func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
 
 var _ = framework.KubeDescribe("Pods", func() {
     f := framework.NewDefaultFramework("pods")
-    var dc dynamic.Interface
     var podClient *framework.PodClient
     ginkgo.BeforeEach(func() {
         podClient = f.PodClient()
-        dc = f.DynamicClient
     })
 
     /*
@@ -878,154 +873,6 @@ var _ = framework.KubeDescribe("Pods", func() {
         err = wait.PollImmediate(podRetryPeriod, podRetryTimeout, checkPodListQuantity(f, "type=Testing", 0))
         framework.ExpectNoError(err, "found a pod(s)")
     })
 
-    ginkgo.It("should run through the lifecycle of Pods and PodStatus", func() {
-        podResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
-        testNamespaceName := f.Namespace.Name
-        testPodName := "pod-test"
-        testPodImage := imageutils.GetE2EImage(imageutils.Agnhost)
-        testPodImage2 := imageutils.GetE2EImage(imageutils.Httpd)
-        testPodLabels := map[string]string{"test-pod-static": "true"}
-        testPodLabelsFlat := "test-pod-static=true"
-
-        w := &cache.ListWatch{
-            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-                options.LabelSelector = testPodLabelsFlat
-                return f.ClientSet.CoreV1().Pods(testNamespaceName).Watch(context.TODO(), options)
-            },
-        }
-        podsList, err := f.ClientSet.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{LabelSelector: testPodLabelsFlat})
-        framework.ExpectNoError(err, "failed to list Pods")
-
-        testPod := v1.Pod{
-            ObjectMeta: metav1.ObjectMeta{
-                Name:   testPodName,
-                Labels: testPodLabels,
-            },
-            Spec: v1.PodSpec{
-                Containers: []v1.Container{
-                    {
-                        Name:  testPodName,
-                        Image: testPodImage,
-                    },
-                },
-            },
-        }
-        ginkgo.By("creating a Pod with a static label")
-        _, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Create(context.TODO(), &testPod, metav1.CreateOptions{})
-        framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, testNamespaceName)
-
-        ginkgo.By("watching for Pod to be ready")
-        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            if pod, ok := event.Object.(*v1.Pod); ok {
-                found := pod.ObjectMeta.Name == testPod.ObjectMeta.Name &&
-                    pod.ObjectMeta.Namespace == testNamespaceName &&
-                    pod.Labels["test-pod-static"] == "true" &&
-                    pod.Status.Phase == v1.PodRunning
-                if !found {
-                    framework.Logf("observed Pod %v in namespace %v in phase %v", pod.ObjectMeta.Name, pod.ObjectMeta.Namespace, pod.Status.Phase)
-                }
-                return found, nil
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see Pod %v in namespace %v running", testPod.ObjectMeta.Name, testNamespaceName)
-
-        ginkgo.By("patching the Pod with a new Label and updated data")
-        podPatch, err := json.Marshal(v1.Pod{
-            ObjectMeta: metav1.ObjectMeta{
-                Labels: map[string]string{"test-pod": "patched"},
-            },
-            Spec: v1.PodSpec{
-                Containers: []v1.Container{{
-                    Name:  testPodName,
-                    Image: testPodImage2,
-                }},
-            },
-        })
-        framework.ExpectNoError(err, "failed to marshal JSON patch for Pod")
-        _, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Patch(context.TODO(), testPodName, types.StrategicMergePatchType, []byte(podPatch), metav1.PatchOptions{})
-        framework.ExpectNoError(err, "failed to patch Pod %s in namespace %s", testPodName, testNamespaceName)
-        ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            switch event.Type {
-            case watch.Modified:
-                if pod, ok := event.Object.(*v1.Pod); ok {
-                    found := pod.ObjectMeta.Name == pod.Name &&
-                        pod.Labels["test-pod-static"] == "true"
-                    return found, nil
-                }
-            default:
-                framework.Logf("observed event type %v", event.Type)
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
-
-        ginkgo.By("getting the Pod and ensuring that it's patched")
-        pod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{})
-        framework.ExpectNoError(err, "failed to fetch Pod %s in namespace %s", testPodName, testNamespaceName)
-        framework.ExpectEqual(pod.ObjectMeta.Labels["test-pod"], "patched", "failed to patch Pod - missing label")
-        framework.ExpectEqual(pod.Spec.Containers[0].Image, testPodImage2, "failed to patch Pod - wrong image")
-
-        ginkgo.By("getting the PodStatus")
-        podStatusUnstructured, err := dc.Resource(podResource).Namespace(testNamespaceName).Get(context.TODO(), testPodName, metav1.GetOptions{}, "status")
-        framework.ExpectNoError(err, "failed to fetch PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName)
-        podStatusBytes, err := json.Marshal(podStatusUnstructured)
-        framework.ExpectNoError(err, "failed to marshal unstructured response")
-        var podStatus v1.Pod
-        err = json.Unmarshal(podStatusBytes, &podStatus)
-        framework.ExpectNoError(err, "failed to unmarshal JSON bytes to a Pod object type")
-
-        ginkgo.By("replacing the Pod's status Ready condition to False")
-        podStatusUpdated := podStatus
-        podStatusFieldPatchCount := 0
-        podStatusFieldPatchCountTotal := 2
-        for pos, cond := range podStatusUpdated.Status.Conditions {
-            if (cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue) || (cond.Type == v1.ContainersReady && cond.Status == v1.ConditionTrue) {
-                podStatusUpdated.Status.Conditions[pos].Status = v1.ConditionFalse
-                podStatusFieldPatchCount++
-            }
-        }
-        framework.ExpectEqual(podStatusFieldPatchCount, podStatusFieldPatchCountTotal, "failed to patch all relevant Pod conditions")
-        podStatusUpdate, err := f.ClientSet.CoreV1().Pods(testNamespaceName).UpdateStatus(context.TODO(), &podStatusUpdated, metav1.UpdateOptions{})
-        framework.ExpectNoError(err, "failed to update PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName)
-
-        ginkgo.By("check the Pod again to ensure its Ready conditions are False")
-        podStatusFieldPatchCount = 0
-        podStatusFieldPatchCountTotal = 2
-        for _, cond := range podStatusUpdate.Status.Conditions {
-            if (cond.Type == v1.PodReady && cond.Status == v1.ConditionFalse) || (cond.Type == v1.ContainersReady && cond.Status == v1.ConditionFalse) {
-                podStatusFieldPatchCount++
-            }
-        }
-        framework.ExpectEqual(podStatusFieldPatchCount, podStatusFieldPatchCountTotal, "failed to update PodStatus - field patch count doesn't match the total")
-
-        ginkgo.By("deleting the Pod via a Collection with a LabelSelector")
-        err = f.ClientSet.CoreV1().Pods(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: testPodLabelsFlat})
-        framework.ExpectNoError(err, "failed to delete Pod by collection")
-
-        ginkgo.By("watching for the Pod to be deleted")
-        ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
-        defer cancel()
-        _, err = watchtools.Until(ctx, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
-            switch event.Type {
-            case watch.Deleted:
-                if pod, ok := event.Object.(*v1.Pod); ok {
-                    found := pod.ObjectMeta.Name == pod.Name &&
-                        pod.Labels["test-pod-static"] == "true"
-                    return found, nil
-                }
-            default:
-                framework.Logf("observed event type %v", event.Type)
-            }
-            return false, nil
-        })
-        framework.ExpectNoError(err, "failed to see %v event", watch.Deleted)
-    })
 })
 
 func checkPodListQuantity(f *framework.Framework, label string, quantity int) func() (bool, error) {
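For context, the core of the reverted Pods test was exercising the status subresource: fetching "status" through the dynamic client, flipping the Ready/ContainersReady conditions, and writing the result back with UpdateStatus. The following is a minimal sketch of that flow, not part of this commit; the package name, the markPodNotReady helper, and its arguments are illustrative assumptions.

package e2esketch

import (
    "context"
    "encoding/json"
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    clientset "k8s.io/client-go/kubernetes"
)

// markPodNotReady reads the Pod status subresource via the dynamic client and
// sets any True Ready/ContainersReady conditions to False through UpdateStatus.
func markPodNotReady(dc dynamic.Interface, c clientset.Interface, ns, name string) error {
    podResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}

    // GET .../pods/<name>/status through the dynamic client.
    u, err := dc.Resource(podResource).Namespace(ns).Get(context.TODO(), name, metav1.GetOptions{}, "status")
    if err != nil {
        return fmt.Errorf("failed to fetch PodStatus: %w", err)
    }

    // Round-trip the unstructured response into a typed Pod, as the reverted test did.
    raw, err := json.Marshal(u)
    if err != nil {
        return err
    }
    var pod v1.Pod
    if err := json.Unmarshal(raw, &pod); err != nil {
        return err
    }

    // Flip the readiness conditions.
    for i, cond := range pod.Status.Conditions {
        if (cond.Type == v1.PodReady || cond.Type == v1.ContainersReady) && cond.Status == v1.ConditionTrue {
            pod.Status.Conditions[i].Status = v1.ConditionFalse
        }
    }

    // Persist the change through the typed client's status subresource.
    _, err = c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), &pod, metav1.UpdateOptions{})
    return err
}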