Mirror of https://github.com/k3s-io/kubernetes.git
move funcs of framework/deployment to e2e/apps
commit 10b173868a (parent 2fbe432d23)
test/e2e/apps/deployment.go

@@ -17,6 +17,7 @@ limitations under the License.
 package apps
 
 import (
+	"context"
 	"fmt"
 	"math/rand"
 	"time"
@@ -36,6 +37,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 	clientset "k8s.io/client-go/kubernetes"
+	watchtools "k8s.io/client-go/tools/watch"
 	appsinternal "k8s.io/kubernetes/pkg/apis/apps"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -48,6 +50,7 @@ import (
 )
 
 const (
+	poll          = 2 * time.Second
 	dRetryPeriod  = 2 * time.Second
 	dRetryTimeout = 5 * time.Minute
 )
@@ -333,7 +336,7 @@ func testRecreateDeployment(f *framework.Framework) {
 	framework.ExpectNoError(err)
 
 	framework.Logf("Watching deployment %q to verify that new pods will not run with old pods", deploymentName)
-	err = e2edeploy.WatchRecreateDeployment(c, deployment)
+	err = watchRecreateDeployment(c, deployment)
 	framework.ExpectNoError(err)
 }
@@ -403,7 +406,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 	framework.ExpectNoError(err)
 
 	ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
-	err = e2edeploy.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
+	err = waitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
 	framework.ExpectNoError(err)
 }
@@ -1018,3 +1021,71 @@ func setAffinities(d *appsv1.Deployment, setAffinity bool) {
 	}
 	d.Spec.Template.Spec.Affinity = affinity
 }
+
+// watchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same
+// time as old pods.
+func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error {
+	if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType {
+		return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
+	}
+
+	w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
+	if err != nil {
+		return err
+	}
+
+	status := d.Status
+
+	condition := func(event watch.Event) (bool, error) {
+		d := event.Object.(*appsv1.Deployment)
+		status = d.Status
+
+		if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
+			_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
+			newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
+			if err == nil && nerr == nil {
+				framework.Logf("%+v", d)
+				testutil.LogReplicaSetsOfDeployment(d, allOldRSs, newRS, framework.Logf)
+				testutil.LogPodsOfDeployment(c, d, append(allOldRSs, newRS), framework.Logf)
+			}
+			return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
+		}
+
+		return *(d.Spec.Replicas) == d.Status.Replicas &&
+			*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
+			d.Generation <= d.Status.ObservedGeneration, nil
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+	defer cancel()
+	_, err = watchtools.UntilWithoutRetry(ctx, w, condition)
+	if err == wait.ErrWaitTimeout {
+		err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
+	}
+	return err
+}
+
+// waitForDeploymentOldRSsNum waits for the deployment to clean up old replica sets.
+func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
+	var oldRSs []*appsv1.ReplicaSet
+	var d *appsv1.Deployment
+
+	pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
+		deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		d = deployment
+
+		_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
+		if err != nil {
+			return false, err
+		}
+		return len(oldRSs) == desiredRSNum, nil
+	})
+	if pollErr == wait.ErrWaitTimeout {
+		pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
+		testutil.LogReplicaSetsOfDeployment(d, oldRSs, nil, framework.Logf)
+	}
+	return pollErr
+}
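With this hunk the two helpers become package-private to e2e/apps. A minimal sketch of how a test in the package now drives them, assuming the pre-1.18 client-go signatures used throughout this diff; the deployment name, labels, and image are illustrative values, not taken from the commit:

package apps

import (
	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// sketchRecreateCheck is a hypothetical caller: it creates a Recreate
// deployment via the framework fixture (which stays exported) and then
// verifies the rollout with the helper this commit moved in-package.
func sketchRecreateCheck(c clientset.Interface, ns string) error {
	d := e2edeploy.NewDeployment("recreate-sketch", 1,
		map[string]string{"name": "sample-pod"}, "httpd", "httpd:2.4.38-alpine",
		appsv1.RecreateDeploymentStrategyType)
	deployment, err := c.AppsV1().Deployments(ns).Create(d)
	if err != nil {
		return err
	}
	framework.Logf("Watching deployment %q to verify that new pods will not run with old pods", deployment.Name)
	// watchRecreateDeployment needs no import here because the test and
	// the helper now share the apps package.
	return watchRecreateDeployment(c, deployment)
}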
test/e2e/framework/deployment/BUILD

@@ -6,7 +6,6 @@ go_library(
     name = "go_default_library",
     srcs = [
         "fixtures.go",
-        "logging.go",
         "wait.go",
     ],
     importpath = "k8s.io/kubernetes/test/e2e/framework/deployment",
@@ -17,9 +16,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
test/e2e/framework/deployment/fixtures.go

@@ -17,18 +17,13 @@ limitations under the License.
 package deployment
 
 import (
-	"context"
 	"fmt"
 	"time"
 
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apimachinery/pkg/watch"
 	clientset "k8s.io/client-go/kubernetes"
-	watchtools "k8s.io/client-go/tools/watch"
-	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -45,49 +40,6 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
 	return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
 }
 
-// WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same
-// time as old pods.
-func WatchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error {
-	if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType {
-		return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
-	}
-
-	w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
-	if err != nil {
-		return err
-	}
-
-	status := d.Status
-
-	condition := func(event watch.Event) (bool, error) {
-		d := event.Object.(*appsv1.Deployment)
-		status = d.Status
-
-		if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
-			_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
-			newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
-			if err == nil && nerr == nil {
-				framework.Logf("%+v", d)
-				logReplicaSetsOfDeployment(d, allOldRSs, newRS)
-				logPodsOfDeployment(c, d, append(allOldRSs, newRS))
-			}
-			return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
-		}
-
-		return *(d.Spec.Replicas) == d.Status.Replicas &&
-			*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
-			d.Generation <= d.Status.ObservedGeneration, nil
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
-	defer cancel()
-	_, err = watchtools.UntilWithoutRetry(ctx, w, condition)
-	if err == wait.ErrWaitTimeout {
-		err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
-	}
-	return err
-}
-
 // NewDeployment returns a deployment spec with the specified arguments.
 func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType appsv1.DeploymentStrategyType) *appsv1.Deployment {
 	zero := int64(0)
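What remains in fixtures.go after this removal is spec construction and revision checking. A minimal sketch of the two exported helpers still visible in this hunk; the deployment name, image, and revision "1" are illustrative, not from the commit:

package apps

import (
	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// sketchFixtureUsage is a hypothetical caller of the helpers that stay
// exported from framework/deployment after this commit.
func sketchFixtureUsage(c clientset.Interface, ns string) error {
	// Build a deployment spec; the fixture fills in labels, selector,
	// and a single container running the given image.
	d := e2edeploy.NewDeployment("webserver", 3,
		map[string]string{"name": "httpd"}, "httpd", "httpd:2.4.38-alpine",
		appsv1.RollingUpdateDeploymentStrategyType)
	if _, err := c.AppsV1().Deployments(ns).Create(d); err != nil {
		return err
	}
	// Assert the recorded revision annotation and container image; as the
	// hunk above shows, this delegates to test/utils.
	return e2edeploy.CheckDeploymentRevisionAndImage(c, ns, "webserver", "1", "httpd:2.4.38-alpine")
}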
test/e2e/framework/deployment/logging.go (deleted)

@@ -1,32 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package deployment
-
-import (
-	appsv1 "k8s.io/api/apps/v1"
-	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/test/e2e/framework"
-	testutils "k8s.io/kubernetes/test/utils"
-)
-
-func logReplicaSetsOfDeployment(deployment *appsv1.Deployment, allOldRSs []*appsv1.ReplicaSet, newRS *appsv1.ReplicaSet) {
-	testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, framework.Logf)
-}
-
-func logPodsOfDeployment(c clientset.Interface, deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) {
-	testutils.LogPodsOfDeployment(c, deployment, rsList, framework.Logf)
-}
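The deleted file held two one-line shims over test/utils, so nothing is lost: the moved code in e2e/apps now makes the equivalent direct calls with framework.Logf as the sink. A minimal sketch of that pattern; logDeploymentState is a hypothetical name, not part of the commit:

package apps

import (
	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	testutil "k8s.io/kubernetes/test/utils"
)

// logDeploymentState (hypothetical) shows the direct test/utils calls that
// replace the deleted logging.go wrappers.
func logDeploymentState(c clientset.Interface, d *appsv1.Deployment, allOldRSs []*appsv1.ReplicaSet, newRS *appsv1.ReplicaSet) {
	testutil.LogReplicaSetsOfDeployment(d, allOldRSs, newRS, framework.Logf)
	testutil.LogPodsOfDeployment(c, d, append(allOldRSs, newRS), framework.Logf)
}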
test/e2e/framework/deployment/wait.go

@@ -77,31 +77,6 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName
 	return testutils.WaitForDeploymentRollbackCleared(c, ns, deploymentName, poll, pollShortTimeout)
 }
 
-// WaitForDeploymentOldRSsNum waits for the deployment to clean up old replica sets.
-func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
-	var oldRSs []*appsv1.ReplicaSet
-	var d *appsv1.Deployment
-
-	pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
-		deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-		d = deployment
-
-		_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
-		if err != nil {
-			return false, err
-		}
-		return len(oldRSs) == desiredRSNum, nil
-	})
-	if pollErr == wait.ErrWaitTimeout {
-		pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
-		logReplicaSetsOfDeployment(d, oldRSs, nil)
-	}
-	return pollErr
-}
-
 // WaitForDeploymentRevision waits for the deployment to reach the target revision.
 func WaitForDeploymentRevision(c clientset.Interface, d *appsv1.Deployment, targetRevision string) error {
 	err := wait.PollImmediate(poll, pollLongTimeout, func() (bool, error) {
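The waiters that stay exported keep the same poll-based shape as the one removed above. A minimal sketch of a caller, assuming the signatures shown in this hunk; the target revision "2" is an illustrative value:

package apps

import (
	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// sketchWaiters (hypothetical) chains the two exported waiters visible in
// this hunk: first let any pending rollback clear, then wait for the
// deployment to report the target revision.
func sketchWaiters(c clientset.Interface, d *appsv1.Deployment, ns string) error {
	if err := e2edeploy.WaitForDeploymentRollbackCleared(c, ns, d.Name); err != nil {
		return err
	}
	return e2edeploy.WaitForDeploymentRevision(c, d, "2")
}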