Adding reaper for deployments

This commit is contained in:
parent eb5f707f24
commit 0ea31b56ed
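For orientation, a minimal sketch of how the new deployment reaper is driven. It mirrors the stopDeployment e2e helper added below; the wrapper function name and the pre-configured client are assumptions for illustration, not part of this commit.

// Sketch only: drives the new DeploymentReaper through the generic ReaperFor
// factory, mirroring the e2e helper added in this commit. Import paths follow
// the pre-1.3 tree layout used by the files in this diff.
package example

import (
    "time"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/apis/extensions"
    client "k8s.io/kubernetes/pkg/client/unversioned"
    "k8s.io/kubernetes/pkg/kubectl"
)

// deleteDeployment is a hypothetical helper, not part of this commit.
func deleteDeployment(c client.Interface, namespace, name string) error {
    // ReaperFor now knows about extensions.Kind("Deployment").
    reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), c)
    if err != nil {
        return err
    }
    // Stop scales the deployment down, waits for its pods to disappear,
    // reaps the owned ReplicaSets, and finally deletes the deployment itself.
    return reaper.Stop(namespace, name, 1*time.Minute, api.NewDeleteOptions(0))
}

Because kubectl's delete path looks reapers up through ReaperFor, wiring in this case is what lets deleting a deployment also clean up its ReplicaSets and pods; that is why the manual "kubectl delete rs --all" workarounds are removed from the CLI test script in the hunks below.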
@@ -679,7 +679,6 @@ runTests() {
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
   # Clean up
   kubectl delete deployment nginx "${kube_flags[@]}"
-  kubectl delete rc -l pod-template-hash "${kube_flags[@]}"

   ##############
   # Namespaces #
@@ -966,6 +965,7 @@ __EOF__
   kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
   # Clean-up
   kubectl delete job/pi "${kube_flags[@]}"
+
   # TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
   # ### Scale a deployment
   # kubectl create -f examples/extensions/deployment.yaml "${kube_flags[@]}"
@@ -975,8 +975,6 @@ __EOF__
   # kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
   # # Clean-up
   # kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
-  # # TODO: Remove once deployment reaping is implemented
-  # kubectl delete rs --all "${kube_flags[@]}"

   ### Expose a deployment as a service
   kubectl create -f docs/user-guide/deployment.yaml "${kube_flags[@]}"
@@ -988,8 +986,6 @@ __EOF__
   kube::test::get_object_assert 'service nginx-deployment' "{{$port_field}}" '80'
   # Clean-up
   kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
-  # TODO: Remove once deployment reaping is implemented
-  kubectl delete rs --all "${kube_flags[@]}"

   ### Expose replication controller as service
   kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
@@ -1106,7 +1102,6 @@ __EOF__
   # Clean up
   kubectl delete hpa nginx-deployment "${kube_flags[@]}"
   kubectl delete deployment nginx-deployment "${kube_flags[@]}"
-  kubectl delete rs -l pod-template-hash "${kube_flags[@]}"

   ### Rollback a deployment
   # Pre-condition: no deployment exists
@@ -78,6 +78,9 @@ func ReaperFor(kind unversioned.GroupKind, c client.Interface) (Reaper, error) {
     case extensions.Kind("Job"):
         return &JobReaper{c, Interval, Timeout}, nil

+    case extensions.Kind("Deployment"):
+        return &DeploymentReaper{c, Interval, Timeout}, nil
+
     }
     return nil, &NoSuchReaperError{kind}
 }
@@ -102,6 +105,10 @@ type JobReaper struct {
     client.Interface
     pollInterval, timeout time.Duration
 }
+type DeploymentReaper struct {
+    client.Interface
+    pollInterval, timeout time.Duration
+}
 type PodReaper struct {
     client.Interface
 }
@@ -191,10 +198,7 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout
             return err
         }
     }
-    if err := rc.Delete(name); err != nil {
-        return err
-    }
-    return nil
+    return rc.Delete(name)
 }

 // TODO(madhusudancs): Implement it when controllerRef is implemented - https://github.com/kubernetes/kubernetes/issues/2210
@@ -303,10 +307,7 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio
         return err
     }

-    if err := reaper.Extensions().DaemonSets(namespace).Delete(name); err != nil {
-        return err
-    }
-    return nil
+    return reaper.Extensions().DaemonSets(namespace).Delete(name)
 }

 func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
@@ -352,10 +353,92 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gra
         return utilerrors.NewAggregate(errList)
     }
     // once we have all the pods removed we can safely remove the job itself
-    if err := jobs.Delete(name, gracePeriod); err != nil {
-        return err
-    }
-    return nil
+    return jobs.Delete(name, gracePeriod)
+}
+
+func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
+    deployments := reaper.Extensions().Deployments(namespace)
+    replicaSets := reaper.Extensions().ReplicaSets(namespace)
+    rsReaper, _ := ReaperFor(extensions.Kind("ReplicaSet"), reaper)
+
+    deployment, err := deployments.Get(name)
+    if err != nil {
+        return err
+    }
+
+    // set deployment's history and scale to 0
+    // TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
+    zero := 0
+    deployment.Spec.RevisionHistoryLimit = &zero
+    deployment.Spec.Replicas = 0
+    // TODO: un-pausing should not be necessary, remove when this is fixed:
+    // https://github.com/kubernetes/kubernetes/issues/20966
+    // Instead deployment should be Paused at this point and not at next TODO.
+    deployment.Spec.Paused = false
+    deployment, err = deployments.Update(deployment)
+    if err != nil {
+        return err
+    }
+
+    // wait for total no of pods drop to 0
+    if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) {
+        curr, err := deployments.Get(name)
+        // if deployment was not found it must have been deleted, error out
+        if err != nil && errors.IsNotFound(err) {
+            return false, err
+        }
+        // if other errors happen, retry
+        if err != nil {
+            return false, nil
+        }
+        // check if deployment wasn't recreated with the same name
+        // TODO use generations when deployment will have them
+        if curr.UID != deployment.UID {
+            return false, errors.NewNotFound(extensions.Resource("Deployment"), name)
+        }
+        return curr.Status.Replicas == 0, nil
+    }); err != nil {
+        return err
+    }
+
+    // TODO: When deployments will allow running cleanup policy while being
+    // paused, move pausing to above update operation. Without it, we need to
+    // pause deployment before stopping RSs, to prevent creating new RSs.
+    // See https://github.com/kubernetes/kubernetes/issues/20966
+    deployment, err = deployments.Get(name)
+    if err != nil {
+        return err
+    }
+    deployment.Spec.Paused = true
+    deployment, err = deployments.Update(deployment)
+    if err != nil {
+        return err
+    }
+
+    // remove remaining RSs
+    selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
+    if err != nil {
+        return err
+    }
+    options := api.ListOptions{LabelSelector: selector}
+    rsList, err := replicaSets.List(options)
+    if err != nil {
+        return err
+    }
+    errList := []error{}
+    for _, rc := range rsList.Items {
+        if err := rsReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
+            if !errors.IsNotFound(err) {
+                errList = append(errList, err)
+            }
+        }
+    }
+    if len(errList) > 0 {
+        return utilerrors.NewAggregate(errList)
+    }
+
+    // and finally deployment
+    return deployments.Delete(name, gracePeriod)
 }

 func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
@@ -364,11 +447,7 @@ func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gra
     if err != nil {
         return err
     }
-    if err := pods.Delete(name, gracePeriod); err != nil {
-        return err
-    }
-
-    return nil
+    return pods.Delete(name, gracePeriod)
 }

 func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
@@ -377,8 +456,5 @@ func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration,
     if err != nil {
         return err
     }
-    if err := services.Delete(name); err != nil {
-        return err
-    }
-    return nil
+    return services.Delete(name)
 }
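The Reaper interface itself is not touched by this commit. Judging from the ReaperFor signature and the Stop methods in the hunks above, it is presumably the single-method contract sketched below (an inferred sketch, not code from this diff), which is what lets DeploymentReaper.Stop delegate to the ReplicaSet reaper returned by ReaperFor.

// Inferred sketch, not part of this commit: the contract every reaper in the
// hunks above appears to satisfy, based on their Stop signatures.
type Reaper interface {
    Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error
}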
@@ -29,6 +29,7 @@ import (
     client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/client/unversioned/testclient"
     "k8s.io/kubernetes/pkg/runtime"
+    deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
 )

 func TestReplicationControllerStop(t *testing.T) {
@@ -495,6 +496,133 @@ func TestJobStop(t *testing.T) {
     }
 }

+func TestDeploymentStop(t *testing.T) {
+    name := "foo"
+    ns := "default"
+    deployment := extensions.Deployment{
+        ObjectMeta: api.ObjectMeta{
+            Name: name,
+            Namespace: ns,
+        },
+        Spec: extensions.DeploymentSpec{
+            Replicas: 0,
+            Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}},
+        },
+        Status: extensions.DeploymentStatus{
+            Replicas: 0,
+        },
+    }
+    template := deploymentutil.GetNewReplicaSetTemplate(deployment)
+    tests := []struct {
+        Name string
+        Objs []runtime.Object
+        StopError error
+        ExpectedActions []string
+    }{
+        {
+            Name: "SimpleDeployment",
+            Objs: []runtime.Object{
+                &extensions.Deployment{ // GET
+                    ObjectMeta: api.ObjectMeta{
+                        Name: name,
+                        Namespace: ns,
+                    },
+                    Spec: extensions.DeploymentSpec{
+                        Replicas: 0,
+                        Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}},
+                    },
+                    Status: extensions.DeploymentStatus{
+                        Replicas: 0,
+                    },
+                },
+                &extensions.Scale{ // UPDATE
+                    ObjectMeta: api.ObjectMeta{
+                        Name: name,
+                        Namespace: ns,
+                    },
+                    Spec: extensions.ScaleSpec{
+                        Replicas: 0,
+                    },
+                    Status: extensions.ScaleStatus{
+                        Replicas: 0,
+                        Selector: map[string]string{"k1": "v1"},
+                    },
+                },
+            },
+            StopError: nil,
+            ExpectedActions: []string{"get:deployments", "update:deployments",
+                "get:deployments", "get:deployments", "update:deployments",
+                "list:replicasets", "delete:deployments"},
+        },
+        {
+            Name: "Deployment with single replicaset",
+            Objs: []runtime.Object{
+                &deployment, // GET
+                &extensions.Scale{ // UPDATE
+                    ObjectMeta: api.ObjectMeta{
+                        Name: name,
+                        Namespace: ns,
+                    },
+                    Spec: extensions.ScaleSpec{
+                        Replicas: 0,
+                    },
+                    Status: extensions.ScaleStatus{
+                        Replicas: 0,
+                        Selector: map[string]string{"k1": "v1"},
+                    },
+                },
+                &extensions.ReplicaSetList{ // LIST
+                    Items: []extensions.ReplicaSet{
+                        {
+                            ObjectMeta: api.ObjectMeta{
+                                Name: name,
+                                Namespace: ns,
+                            },
+                            Spec: extensions.ReplicaSetSpec{
+                                Template: &template,
+                            },
+                        },
+                    },
+                },
+            },
+            StopError: nil,
+            ExpectedActions: []string{"get:deployments", "update:deployments",
+                "get:deployments", "get:deployments", "update:deployments",
+                "list:replicasets", "get:replicasets", "get:replicasets",
+                "update:replicasets", "get:replicasets", "get:replicasets",
+                "delete:replicasets", "delete:deployments"},
+        },
+    }
+
+    for _, test := range tests {
+        fake := testclient.NewSimpleFake(test.Objs...)
+        reaper := DeploymentReaper{fake, time.Millisecond, time.Millisecond}
+        err := reaper.Stop(ns, name, 0, nil)
+        if !reflect.DeepEqual(err, test.StopError) {
+            t.Errorf("%s unexpected error: %v", test.Name, err)
+            continue
+        }
+
+        actions := fake.Actions()
+        if len(actions) != len(test.ExpectedActions) {
+            t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions))
+            continue
+        }
+        for i, expAction := range test.ExpectedActions {
+            action := strings.Split(expAction, ":")
+            if actions[i].GetVerb() != action[0] {
+                t.Errorf("%s unexpected verb: %+v, expected %s", test.Name, actions[i], expAction)
+            }
+            if actions[i].GetResource() != action[1] {
+                t.Errorf("%s unexpected resource: %+v, expected %s", test.Name, actions[i], expAction)
+            }
+            if len(action) == 3 && actions[i].GetSubresource() != action[2] {
+                t.Errorf("%s unexpected subresource: %+v, expected %s", test.Name, actions[i], expAction)
+            }
+        }
+    }
+}
+
 type noSuchPod struct {
     *testclient.FakePods
 }
@@ -21,12 +21,15 @@ import (
     "time"

     "k8s.io/kubernetes/pkg/api"
+    "k8s.io/kubernetes/pkg/api/errors"
     "k8s.io/kubernetes/pkg/api/unversioned"
     "k8s.io/kubernetes/pkg/apis/extensions"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/kubectl"
     deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
     "k8s.io/kubernetes/pkg/util/intstr"
+    "k8s.io/kubernetes/pkg/util/wait"

     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -65,6 +68,7 @@ var _ = Describe("Deployment [Feature:Deployment]", func() {
 })

 func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
+    zero := int64(0)
     return &extensions.ReplicaSet{
         ObjectMeta: api.ObjectMeta{
             Name: rsName,
@@ -77,6 +81,7 @@ func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName
                     Labels: rsPodLabels,
                 },
                 Spec: api.PodSpec{
+                    TerminationGracePeriodSeconds: &zero,
                     Containers: []api.Container{
                         {
                             Name: imageName,
@@ -90,6 +95,7 @@ func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName
 }

 func newDeployment(deploymentName string, replicas int, podLabels map[string]string, imageName string, image string, strategyType extensions.DeploymentStrategyType, revisionHistoryLimit *int) *extensions.Deployment {
+    zero := int64(0)
     return &extensions.Deployment{
         ObjectMeta: api.ObjectMeta{
             Name: deploymentName,
@@ -106,6 +112,7 @@ func newDeployment(deploymentName string, replicas int, podLabels map[string]str
                     Labels: podLabels,
                 },
                 Spec: api.PodSpec{
+                    TerminationGracePeriodSeconds: &zero,
                     Containers: []api.Container{
                         {
                             Name: imageName,
@@ -149,6 +156,43 @@ func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revisio
     return deployment, newRS
 }

+func stopDeployment(c *clientset.Clientset, oldC client.Interface, ns, deploymentName string) {
+    deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+    Expect(err).NotTo(HaveOccurred())
+
+    Logf("deleting deployment %s", deploymentName)
+    reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), oldC)
+    Expect(err).NotTo(HaveOccurred())
+    timeout := 1 * time.Minute
+    err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
+    Expect(err).NotTo(HaveOccurred())
+
+    Logf("ensuring deployment %s was deleted", deploymentName)
+    _, err = c.Extensions().Deployments(ns).Get(deployment.Name)
+    Expect(err).To(HaveOccurred())
+    Expect(errors.IsNotFound(err)).To(BeTrue())
+    Logf("ensuring deployment %s rcs were deleted", deploymentName)
+    selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
+    Expect(err).NotTo(HaveOccurred())
+    options := api.ListOptions{LabelSelector: selector}
+    rss, err := c.Extensions().ReplicaSets(ns).List(options)
+    Expect(err).NotTo(HaveOccurred())
+    Expect(rss.Items).Should(HaveLen(0))
+    Logf("ensuring deployment %s pods were deleted", deploymentName)
+    if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
+        pods, err := c.Core().Pods(ns).List(api.ListOptions{})
+        if err != nil {
+            return false, err
+        }
+        if len(pods.Items) == 0 {
+            return true, nil
+        }
+        return false, nil
+    }); err != nil {
+        Failf("Failed to remove deployment %s pods!", deploymentName)
+    }
+}
+
 func testNewDeployment(f *Framework) {
     ns := f.Namespace.Name
     // TODO: remove unversionedClient when the refactoring is done. Currently some
@@ -164,16 +208,8 @@ func testNewDeployment(f *Framework) {
     d.Annotations = map[string]string{"test": "should-copy-to-replica-set", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
     _, err := c.Extensions().Deployments(ns).Create(d)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)
     // Check that deployment is created fine.
     deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
     Expect(err).NotTo(HaveOccurred())
@@ -216,10 +252,6 @@ func testRollingUpdateDeployment(f *Framework) {
     replicas := 3
     _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        Logf("deleting replica set %s", rsName)
-        Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
-    }()
     // Verify that the required pods have come up.
     err = verifyPods(unversionedClient, ns, "sample-pod", false, 3)
     if err != nil {
@@ -232,16 +264,7 @@ func testRollingUpdateDeployment(f *Framework) {
     Logf("Creating deployment %s", deploymentName)
     _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)

     err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
     Expect(err).NotTo(HaveOccurred())
@@ -273,10 +296,6 @@ func testRollingUpdateDeploymentEvents(f *Framework) {

     _, err := c.Extensions().ReplicaSets(ns).Create(rs)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        Logf("deleting replica set %s", rsName)
-        Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
-    }()
     // Verify that the required pods have come up.
     err = verifyPods(unversionedClient, ns, "sample-pod-2", false, 1)
     if err != nil {
@@ -289,16 +308,7 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
     Logf("Creating deployment %s", deploymentName)
     _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)

     err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
     Expect(err).NotTo(HaveOccurred())
@@ -341,10 +351,6 @@ func testRecreateDeployment(f *Framework) {
     replicas := 3
     _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        Logf("deleting replica set %s", rsName)
-        Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
-    }()
     // Verify that the required pods have come up.
     err = verifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
     if err != nil {
@@ -357,16 +363,7 @@ func testRecreateDeployment(f *Framework) {
     Logf("Creating deployment %s", deploymentName)
     _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RecreateDeploymentStrategyType, nil))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)

     err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
     if err != nil {
@@ -426,16 +423,7 @@ func testDeploymentCleanUpPolicy(f *Framework) {
     Logf("Creating deployment %s", deploymentName)
     _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)

     err = waitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit)
     Expect(err).NotTo(HaveOccurred())
@@ -460,10 +448,6 @@ func testRolloverDeployment(f *Framework) {
     rsReplicas := 4
     _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, "nginx", "nginx"))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        Logf("deleting replica set %s", rsName)
-        Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
-    }()
     // Verify that the required pods have come up.
     err = verifyPods(unversionedClient, ns, podName, false, rsReplicas)
     if err != nil {
@@ -486,16 +470,8 @@ func testRolloverDeployment(f *Framework) {
     }
     _, err = c.Extensions().Deployments(ns).Create(newDeployment)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)
     // Verify that the pods were scaled up and down as expected. We use events to verify that.
     deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
     Expect(err).NotTo(HaveOccurred())
@@ -534,12 +510,7 @@ func testPausedDeployment(f *Framework) {
     Logf("Creating paused deployment %s", deploymentName)
     _, err := c.Extensions().Deployments(ns).Create(d)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        _, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)
     // Check that deployment is created fine.
     deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
     Expect(err).NotTo(HaveOccurred())
@@ -618,21 +589,8 @@ func testRollbackDeployment(f *Framework) {
     d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
     _, err := c.Extensions().Deployments(ns).Create(d)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-        oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        for _, oldRS := range oldRSs {
-            Expect(c.Extensions().ReplicaSets(ns).Delete(oldRS.Name, nil)).NotTo(HaveOccurred())
-        }
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)
     // Check that deployment is created fine.
     deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
     Expect(err).NotTo(HaveOccurred())
@@ -718,10 +676,6 @@ func testRollbackDeploymentRSNoRevision(f *Framework) {
     rs.Annotations["make"] = "difference"
     _, err := c.Extensions().ReplicaSets(ns).Create(rs)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        Logf("deleting replica set %s", rsName)
-        Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
-    }()

     // Create a deployment to create nginx pods, which have different template than the replica set created above.
     deploymentName, deploymentImageName := "nginx-deployment", "nginx"
@@ -732,21 +686,8 @@ func testRollbackDeploymentRSNoRevision(f *Framework) {
     d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
     _, err = c.Extensions().Deployments(ns).Create(d)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-        oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        for _, oldRS := range oldRSs {
-            Expect(c.Extensions().ReplicaSets(ns).Delete(oldRS.Name, nil)).NotTo(HaveOccurred())
-        }
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)
     // Check that deployment is created fine.
     deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
     Expect(err).NotTo(HaveOccurred())