convert testOverlappingDeployment e2e test to integration test
parent 35e9784196
commit 131f7e1469
@@ -90,9 +90,7 @@ var _ = SIGDescribe("Deployment", func() {
 	It("scaled rollout deployment should not block on annotation check", func() {
 		testScaledRolloutDeployment(f)
 	})
-	It("overlapping deployment should not fight with each other", func() {
-		testOverlappingDeployment(f)
-	})
-
 	It("iterative rollouts should eventually progress", func() {
 		testIterativeDeployments(f)
 	})
@@ -776,43 +774,6 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	}
 }
 
-func testOverlappingDeployment(f *framework.Framework) {
-	ns := f.Namespace.Name
-	c := f.ClientSet
-
-	// Create first deployment.
-	deploymentName := "first-deployment"
-	podLabels := map[string]string{"name": RedisImageName}
-	replicas := int32(1)
-	framework.Logf("Creating deployment %q", deploymentName)
-	d := framework.NewDeployment(deploymentName, replicas, podLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
-	deploy, err := c.Extensions().Deployments(ns).Create(d)
-	Expect(err).NotTo(HaveOccurred(), "Failed creating the first deployment")
-
-	// Wait for it to be updated to revision 1
-	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploy.Name, "1", RedisImage)
-	Expect(err).NotTo(HaveOccurred(), "The first deployment failed to update to revision 1")
-
-	// Create second deployment with overlapping selector.
-	deploymentName = "second-deployment"
-	framework.Logf("Creating deployment %q with overlapping selector", deploymentName)
-	podLabels["other-label"] = "random-label"
-	d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
-	deployOverlapping, err := c.Extensions().Deployments(ns).Create(d)
-	Expect(err).NotTo(HaveOccurred(), "Failed creating the second deployment")
-
-	// Wait for it to be updated to revision 1
-	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deployOverlapping.Name, "1", NginxImage)
-	Expect(err).NotTo(HaveOccurred(), "The second deployment failed to update to revision 1")
-
-	// Both deployments should proceed independently.
-	framework.Logf("Checking each deployment creates its own replica set")
-	options := metav1.ListOptions{}
-	rsList, err := c.Extensions().ReplicaSets(ns).List(options)
-	Expect(err).NotTo(HaveOccurred(), "Failed listing all replica sets in namespace %s", ns)
-	Expect(rsList.Items).To(HaveLen(2))
-}
-
 func randomScale(d *extensions.Deployment, i int) {
 	switch r := rand.Float32(); {
 	case r < 0.3:
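The deleted e2e assertions carry over to the integration test added below: the Expect(rsList.Items).To(HaveLen(2)) check that each deployment creates its own replica set reappears as a direct UID comparison of the two replicasets, and the revision-1 waits become tester.waitForDeploymentRevisionAndImage("1", fakeImage).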
@@ -680,3 +680,84 @@ func TestFailedDeployment(t *testing.T) {
 		t.Fatal(err)
 	}
 }
+
+func TestOverlappingDeployments(t *testing.T) {
+	s, closeFn, rm, dc, informers, c := dcSetup(t)
+	defer closeFn()
+	name := "test-overlapping-deployments"
+	ns := framework.CreateTestingNamespace(name, s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	replicas := int32(1)
+	firstDeploymentName := "first-deployment"
+	secondDeploymentName := "second-deployment"
+	testers := []*deploymentTester{
+		{t: t, c: c, deployment: newDeployment(firstDeploymentName, ns.Name, replicas)},
+		{t: t, c: c, deployment: newDeployment(secondDeploymentName, ns.Name, replicas)},
+	}
+	// Start informers and controllers
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	informers.Start(stopCh)
+	go rm.Run(5, stopCh)
+	go dc.Run(5, stopCh)
+
+	// Create 2 deployments with overlapping selectors
+	var err error
+	var rss []*v1beta1.ReplicaSet
+	for _, tester := range testers {
+		tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
+		dname := tester.deployment.Name
+		if err != nil {
+			t.Fatalf("failed to create deployment %q: %v", dname, err)
+		}
+		// Wait for the deployment to be updated to revision 1
+		if err = tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
+			t.Fatalf("failed to update deployment %q to revision 1: %v", dname, err)
+		}
+		// Make sure the deployment completes while manually marking its pods as ready at the same time
+		if err = tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+			t.Fatalf("deployment %q failed to complete: %v", dname, err)
+		}
+		// Get the new replicaset of the deployment
+		newRS, err := tester.getNewReplicaSet()
+		if err != nil {
+			t.Fatalf("failed to get new replicaset of deployment %q: %v", dname, err)
+		}
+		if newRS == nil {
+			t.Fatalf("unable to find new replicaset of deployment %q", dname)
+		}
+		// Store the replicaset for later use
+		rss = append(rss, newRS)
+	}
+
+	// Both deployments should proceed independently, so their respective replicasets should not be the same replicaset
+	if rss[0].UID == rss[1].UID {
+		t.Fatalf("overlapping deployments should not share the same replicaset")
+	}
+
+	// Scale only the first deployment by 1
+	newReplicas := replicas + 1
+	testers[0].deployment, err = testers[0].updateDeployment(func(update *v1beta1.Deployment) {
+		update.Spec.Replicas = &newReplicas
+	})
+	if err != nil {
+		t.Fatalf("failed updating deployment %q: %v", firstDeploymentName, err)
+	}
+
+	// Make sure the first deployment completes after scaling
+	if err := testers[0].waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+		t.Fatalf("deployment %q failed to complete after scaling: %v", firstDeploymentName, err)
+	}
+
+	// Verify that the replicaset of each deployment has the expected number of replicas
+	for i, tester := range testers {
+		rs, err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(rss[i].Name, metav1.GetOptions{})
+		if err != nil {
+			t.Fatalf("failed to get replicaset %q: %v", rss[i].Name, err)
+		}
+		if *rs.Spec.Replicas != *tester.deployment.Spec.Replicas {
+			t.Errorf("expected replicaset %q of deployment %q to have %d replicas, but found %d replicas", rs.Name, tester.deployment.Name, *tester.deployment.Spec.Replicas, *rs.Spec.Replicas)
+		}
+	}
+}
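The newDeployment helper used above is defined elsewhere in this package and is not shown in the diff. A minimal sketch of its assumed shape follows, purely for orientation: the helper name, labels, and field choices here are illustrative, not the package's actual code. The property that matters for this test is that every deployment built this way carries the same pod labels, which is exactly what makes the two deployments' selectors overlap.

// Assumed sketch, not the package's actual newDeployment helper.
// Every deployment it returns shares the same selector labels, so any
// two deployments created through it overlap.
func newDeploymentSketch(name, ns string, replicas int32) *v1beta1.Deployment {
	labels := map[string]string{"name": "test"} // shared across all deployments
	return &v1beta1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: name},
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Strategy: v1beta1.DeploymentStrategy{
				Type: v1beta1.RollingUpdateDeploymentStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "fake", Image: fakeImage}},
				},
			},
		},
	}
}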
@@ -206,27 +206,23 @@ func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
 	return err
 }
 
-// markAllPodsReady manually updates all Deployment pods status to ready
-func (d *deploymentTester) markAllPodsReady() {
-	ns := d.deployment.Namespace
-	selector, err := metav1.LabelSelectorAsSelector(d.deployment.Spec.Selector)
-	if err != nil {
-		d.t.Fatalf("failed to parse Deployment selector: %v", err)
-	}
+// markUpdatedPodsReady manually marks updated Deployment pods status to ready
+func (d *deploymentTester) markUpdatedPodsReady() {
 	var readyPods int32
-	err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
+	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
 		readyPods = 0
-		pods, err := d.c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
+		pods, err := d.listUpdatedPods()
 		if err != nil {
-			d.t.Logf("failed to list Deployment pods, will retry later: %v", err)
+			d.t.Log(err)
 			return false, nil
 		}
-		if len(pods.Items) != int(*d.deployment.Spec.Replicas) {
-			d.t.Logf("%d/%d of deployment pods are created", len(pods.Items), *d.deployment.Spec.Replicas)
+		if len(pods) != int(*d.deployment.Spec.Replicas) {
+			d.t.Logf("%d/%d of deployment pods are created", len(pods), *d.deployment.Spec.Replicas)
 			return false, nil
 		}
-		for i := range pods.Items {
-			pod := pods.Items[i]
+		for i := range pods {
+			pod := pods[i]
 			if podutil.IsPodReady(&pod) {
 				readyPods++
 				continue
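A small detail in the hunk above: replacing d.t.Logf("failed to list Deployment pods, will retry later: %v", err) with the bare d.t.Log(err) loses nothing, because the new listUpdatedPods helper (added at the end of this file) already wraps its list failure with the same "will retry later" context before returning it.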
@@ -237,13 +233,10 @@ func (d *deploymentTester) markAllPodsReady() {
 				readyPods++
 			}
 		}
-		if readyPods >= *d.deployment.Spec.Replicas {
-			return true, nil
-		}
-		return false, nil
+		return readyPods >= *d.deployment.Spec.Replicas, nil
 	})
 	if err != nil {
-		d.t.Fatalf("failed to mark all Deployment pods to ready: %v", err)
+		d.t.Fatalf("failed to mark updated Deployment pods to ready: %v", err)
 	}
 }
 
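Both hunks above sit next to markPodReady, whose body is outside this diff; only its signature appears in the first hunk header. For orientation, a hedged sketch of what marking a pod ready involves in an integration test, where no kubelet runs and readiness must be written by hand. The body here is an assumption; only the signature is taken from the diff.

// Hedged sketch; only the signature matches the file's markPodReady.
// With no kubelet running, pods never gain a Ready condition on their
// own, so the tester writes Ready=True into pod status directly.
func markPodReadySketch(c clientset.Interface, ns string, pod *v1.Pod) error {
	pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{
		Type:               v1.PodReady,
		Status:             v1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
	})
	_, err := c.CoreV1().Pods(ns).UpdateStatus(pod)
	return err
}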
@@ -261,11 +254,11 @@ func (d *deploymentTester) waitForDeploymentComplete() error {
 }
 
 // waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady waits for the Deployment to complete
-// while marking all Deployment pods as ready at the same time.
+// while marking updated Deployment pods as ready at the same time.
 // Uses hard check to make sure rolling update strategy is not violated at any times.
 func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady() error {
-	// Manually mark all Deployment pods as ready in a separate goroutine
-	go d.markAllPodsReady()
+	// Manually mark updated Deployment pods as ready in a separate goroutine
+	go d.markUpdatedPodsReady()
 
 	// Wait for the Deployment status to complete while Deployment pods are becoming ready
 	err := d.waitForDeploymentCompleteAndCheckRolling()
@@ -276,10 +269,10 @@ func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady() error {
 }
 
 // waitForDeploymentCompleteAndMarkPodsReady waits for the Deployment to complete
-// while marking all Deployment pods as ready at the same time.
+// while marking updated Deployment pods as ready at the same time.
 func (d *deploymentTester) waitForDeploymentCompleteAndMarkPodsReady() error {
-	// Manually mark all Deployment pods as ready in a separate goroutine
-	go d.markAllPodsReady()
+	// Manually mark updated Deployment pods as ready in a separate goroutine
+	go d.markUpdatedPodsReady()
 
 	// Wait for the Deployment status to complete using soft check, while Deployment pods are becoming ready
 	err := d.waitForDeploymentComplete()
@@ -301,7 +294,11 @@ func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error {
 }
 
 func (d *deploymentTester) getNewReplicaSet() (*v1beta1.ReplicaSet, error) {
-	rs, err := deploymentutil.GetNewReplicaSet(d.deployment, d.c.ExtensionsV1beta1())
+	deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("failed retrieving deployment %s: %v", d.deployment.Name, err)
+	}
+	rs, err := deploymentutil.GetNewReplicaSet(deployment, d.c.ExtensionsV1beta1())
 	if err != nil {
 		return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err)
 	}
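The re-fetch above matters because deploymentutil.GetNewReplicaSet resolves the "new" replicaset by matching the deployment's pod template against its replicasets; handing it the tester's cached d.deployment, which can be stale after updates made through the API, risks matching against an outdated spec, so the live Deployment object is read first.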
@@ -351,3 +348,30 @@ func (d *deploymentTester) waitForDeploymentUpdatedReplicasLTE(minUpdatedReplica
 func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType v1beta1.DeploymentConditionType) error {
 	return testutil.WaitForDeploymentWithCondition(d.c, d.deployment.Namespace, d.deployment.Name, reason, condType, d.t.Logf, pollInterval, pollTimeout)
 }
+
+func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) {
+	selector, err := metav1.LabelSelectorAsSelector(d.deployment.Spec.Selector)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse deployment selector: %v", err)
+	}
+	pods, err := d.c.CoreV1().Pods(d.deployment.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
+	if err != nil {
+		return nil, fmt.Errorf("failed to list deployment pods, will retry later: %v", err)
+	}
+	newRS, err := d.getNewReplicaSet()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get new replicaset of deployment %q: %v", d.deployment.Name, err)
+	}
+	if newRS == nil {
+		return nil, fmt.Errorf("unable to find new replicaset of deployment %q", d.deployment.Name)
+	}
+
+	// Keep only the pods controlled by the new replicaset; GetControllerOf
+	// returns nil for ownerless pods, so guard before comparing UIDs
+	var ownedPods []v1.Pod
+	for _, pod := range pods.Items {
+		rs := metav1.GetControllerOf(&pod)
+		if rs != nil && rs.UID == newRS.UID {
+			ownedPods = append(ownedPods, pod)
+		}
+	}
+	return ownedPods, nil
+}
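Two closing notes on listUpdatedPods. First, the owner-reference guard: metav1.GetControllerOf returns nil for a pod with no controller, so comparing UIDs unconditionally would panic on ownerless pods; skipping them is the safe reading of "updated pods". Second, to exercise the new test, the usual integration-test entry point should be something like make test-integration WHAT=./test/integration/deployment KUBE_TEST_ARGS="-run TestOverlappingDeployments" (invocation hedged; the exact variables depend on the tree's Makefile).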