Merge pull request #18077 from soltysh/stop_deployment

Auto commit by PR queue bot

Commit 19c8d73cac
@@ -636,13 +636,13 @@ runTests() {
   # Post-Condition: service "nginx" has configuration annotation
   [[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
   # Clean up
   kubectl delete rc,svc nginx
   ## 6. kubectl autoscale --save-config should generate configuration annotation
   # Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
   kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
   ! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
   # Command: autoscale rc "frontend"
   kubectl autoscale -f examples/guestbook/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
   # Post-Condition: hpa "frontend" has configuration annotation
   [[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
@@ -652,21 +652,21 @@ runTests() {
   ## kubectl apply should create the resource that doesn't exist yet
   # Pre-Condition: no POD exists
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command: apply a pod "test-pod" (doesn't exist) should create this pod
   kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
   # Post-Condition: pod "test-pod" is created
   kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
   # Post-Condition: pod "test-pod" has configuration annotation
   [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
   # Clean up
   kubectl delete pods test-pod "${kube_flags[@]}"

   ## kubectl run should create deployments or jobs
   # Pre-Condition: no Job exists
   kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
   kubectl run pi --generator=job/v1beta1 --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
   # Post-Condition: Job "pi" is created
   kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
   # Clean up
   kubectl delete jobs pi "${kube_flags[@]}"
@@ -674,11 +674,10 @@ runTests() {
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
   kubectl run nginx --image=nginx --generator=deployment/v1beta1 "${kube_flags[@]}"
   # Post-Condition: Deployment "nginx" is created
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
   # Clean up
   kubectl delete deployment nginx "${kube_flags[@]}"
-  kubectl delete rc -l pod-template-hash "${kube_flags[@]}"

   ##############
   # Namespaces #
@@ -720,7 +719,7 @@ runTests() {
   # Command
   kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0
   # Post-condition: valid-pod POD doesn't exist
   kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
   # Clean up
   kubectl delete namespace other

@@ -973,17 +972,15 @@ __EOF__
   kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
   # Clean-up
   kubectl delete job/pi "${kube_flags[@]}"
-  # TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
-  # ### Scale a deployment
-  # kubectl create -f examples/extensions/deployment.yaml "${kube_flags[@]}"
-  # # Command
-  # kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
-  # # Post-condition: 1 replica for nginx-deployment
-  # kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
-  # # Clean-up
-  # kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
-  # # TODO: Remove once deployment reaping is implemented
-  # kubectl delete rs --all "${kube_flags[@]}"

+  ### Scale a deployment
+  kubectl create -f docs/user-guide/deployment.yaml "${kube_flags[@]}"
+  # Command
+  kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
+  # Post-condition: 1 replica for nginx-deployment
+  kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
+  # Clean-up
+  kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
+
   ### Expose a deployment as a service
   kubectl create -f docs/user-guide/deployment.yaml "${kube_flags[@]}"
@@ -995,8 +992,6 @@ __EOF__
   kube::test::get_object_assert 'service nginx-deployment' "{{$port_field}}" '80'
   # Clean-up
   kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
-  # TODO: Remove once deployment reaping is implemented
-  kubectl delete rs --all "${kube_flags[@]}"

   ### Expose replication controller as service
   kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
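Editorial note: the `--current-replicas=3` flag exercised above only lets the scale proceed when the observed replica count matches. A minimal, self-contained sketch of that precondition logic (hedged: the `ScalePrecondition` type appears in the scale.go hunk later in this diff; the surrounding program is illustrative only):

package main

import "fmt"

// ScalePrecondition as used in this PR: a Size of -1 means "don't check".
type ScalePrecondition struct {
    Size            int
    ResourceVersion string
}

// validate mirrors the check behind kubectl scale --current-replicas.
func (p *ScalePrecondition) validate(currentReplicas int) error {
    if p.Size != -1 && currentReplicas != p.Size {
        return fmt.Errorf("expected replicas to be %d, was %d", p.Size, currentReplicas)
    }
    return nil
}

func main() {
    pre := &ScalePrecondition{Size: 3}
    fmt.Println(pre.validate(3)) // <nil>: the scale proceeds
    fmt.Println(pre.validate(5)) // error: kubectl scale fails the precondition
}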
@@ -1101,7 +1096,7 @@ __EOF__
   # Clean up
   kubectl delete rc frontend "${kube_flags[@]}"

   ### Auto scale deployment
   # Pre-condition: no deployment exists
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
@@ -1113,9 +1108,8 @@ __EOF__
   # Clean up
   kubectl delete hpa nginx-deployment "${kube_flags[@]}"
   kubectl delete deployment nginx-deployment "${kube_flags[@]}"
-  kubectl delete rs -l pod-template-hash "${kube_flags[@]}"

   ### Rollback a deployment
   # Pre-condition: no deployment exists
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
@@ -1337,7 +1331,7 @@ __EOF__
   # cleaning
   rm "${temp_editor}"
   # Command
   # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
   # fails on some, but not all, of the resources, retries will fail because it tries to modify
   # existing labels.
   kubectl-with-retry label -f $file labeled=true --overwrite "${kube_flags[@]}"
@@ -1356,7 +1350,7 @@ __EOF__
   fi
   # Command
   # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
   # fails on some, but not all, of the resources, retries will fail because it tries to modify
   # existing annotations.
   kubectl-with-retry annotate -f $file annotated=true --overwrite "${kube_flags[@]}"

@@ -48,9 +48,8 @@ func ScalerFor(kind unversioned.GroupKind, c client.Interface) (Scaler, error) {
         return &ReplicaSetScaler{c.Extensions()}, nil
     case extensions.Kind("Job"):
         return &JobScaler{c.Extensions()}, nil
-    // TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
-    // case extensions.Kind("Deployment"):
-    // return &DeploymentScaler{c.Extensions()}, nil
+    case extensions.Kind("Deployment"):
+        return &DeploymentScaler{c.Extensions()}, nil
     }
     return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
 }
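Editorial note: with the Deployment case re-enabled, callers obtain a deployment scaler through the same factory already used for replication controllers and jobs. A minimal caller-side sketch (hedged: `c` is assumed to be an already-configured `client.Interface`, and error handling is abbreviated):

// Sketch of a ScalerFor caller after this change. The Scale signature
// (namespace, name, newSize, preconditions, retry, waitForReplicas) is the
// one shown in the scale.go hunk below; the three nils mean "no
// precondition, try once, don't wait for status".
scaler, err := kubectl.ScalerFor(extensions.Kind("Deployment"), c)
if err != nil {
    return err // the kind has no registered scaler
}
if err := scaler.Scale("default", "nginx-deployment", 3, nil, nil, nil); err != nil {
    return err
}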
@@ -328,57 +327,55 @@ func (precondition *ScalePrecondition) ValidateDeployment(deployment *extensions
     return nil
 }

-// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
-// type DeploymentScaler struct {
-// c client.ExtensionsInterface
-// }
+type DeploymentScaler struct {
+    c client.ExtensionsInterface
+}

-// // ScaleSimple is responsible for updating a deployment's desired replicas count.
-// func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error {
-// deployment, err := scaler.c.Deployments(namespace).Get(name)
-// if err != nil {
-// return ScaleError{ScaleGetFailure, "Unknown", err}
-// }
-// if preconditions != nil {
-// if err := preconditions.ValidateDeployment(deployment); err != nil {
-// return err
-// }
-// }
-// scale, err := extensions.ScaleFromDeployment(deployment)
-// if err != nil {
-// return ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err}
-// }
-// scale.Spec.Replicas = int(newSize)
-// if _, err := scaler.c.Scales(namespace).Update("Deployment", scale); err != nil {
-// if errors.IsInvalid(err) {
-// return ScaleError{ScaleUpdateInvalidFailure, deployment.ResourceVersion, err}
-// }
-// return ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err}
-// }
-// return nil
-// }
+// ScaleSimple is responsible for updating a deployment's desired replicas count.
+func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error {
+    deployment, err := scaler.c.Deployments(namespace).Get(name)
+    if err != nil {
+        return ScaleError{ScaleGetFailure, "Unknown", err}
+    }
+    if preconditions != nil {
+        if err := preconditions.ValidateDeployment(deployment); err != nil {
+            return err
+        }
+    }

-// // Scale updates a deployment to a new size, with optional precondition check (if preconditions is not nil),
-// // optional retries (if retry is not nil), and then optionally waits for the status to reach desired count.
-// func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
-// if preconditions == nil {
-// preconditions = &ScalePrecondition{-1, ""}
-// }
-// if retry == nil {
-// // Make it try only once, immediately
-// retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
-// }
-// cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
-// if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
-// return err
-// }
-// if waitForReplicas != nil {
-// deployment, err := scaler.c.Deployments(namespace).Get(name)
-// if err != nil {
-// return err
-// }
-// return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout,
-// client.DeploymentHasDesiredReplicas(scaler.c, deployment))
-// }
-// return nil
-// }
+    // TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
+    // For now I'm falling back to regular Deployment update operation.
+    deployment.Spec.Replicas = int(newSize)
+    if _, err := scaler.c.Deployments(namespace).Update(deployment); err != nil {
+        if errors.IsInvalid(err) {
+            return ScaleError{ScaleUpdateInvalidFailure, deployment.ResourceVersion, err}
+        }
+        return ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err}
+    }
+    return nil
+}
+
+// Scale updates a deployment to a new size, with optional precondition check (if preconditions is not nil),
+// optional retries (if retry is not nil), and then optionally waits for the status to reach desired count.
+func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
+    if preconditions == nil {
+        preconditions = &ScalePrecondition{-1, ""}
+    }
+    if retry == nil {
+        // Make it try only once, immediately
+        retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
+    }
+    cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
+    if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
+        return err
+    }
+    if waitForReplicas != nil {
+        deployment, err := scaler.c.Deployments(namespace).Get(name)
+        if err != nil {
+            return err
+        }
+        return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout,
+            client.DeploymentHasDesiredReplicas(scaler.c, deployment))
+    }
+    return nil
+}
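Editorial note: `Scale` above delegates the actual update to `ScaleSimple` through `ScaleCondition`, which is what makes the retry loop work. That helper is not part of this diff; a hedged reconstruction of its shape, inferred from how the tests below use it:

// Hedged reconstruction of ScaleCondition (the real helper lives elsewhere
// in this file): it adapts ScaleSimple into a wait.ConditionFunc. A plain
// update failure (ScaleUpdateFailure) returns (false, nil) so wait.Poll
// retries; success returns (true, nil); any other error aborts the poll.
// This is also why TestDeploymentScaleRetry below expects pass == false
// together with a nil error.
func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint) wait.ConditionFunc {
    return func() (bool, error) {
        err := r.ScaleSimple(namespace, name, precondition, count)
        if err == nil {
            return true, nil
        }
        if e, ok := err.(ScaleError); ok && e.FailureType == ScaleUpdateFailure {
            return false, nil // retryable; poll again
        }
        return false, err
    }
}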
@@ -488,145 +488,118 @@ func TestValidateJob(t *testing.T) {
     }
 }

-// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
+type ErrorDeployments struct {
+    testclient.FakeDeployments
+    invalid bool
+}

-// type ErrorScales struct {
-// testclient.FakeScales
-// invalid bool
-// }
+func (c *ErrorDeployments) Update(deployment *extensions.Deployment) (*extensions.Deployment, error) {
+    if c.invalid {
+        return nil, kerrors.NewInvalid(extensions.Kind(deployment.Kind), deployment.Name, nil)
+    }
+    return nil, errors.New("deployment update failure")
+}

-// func (c *ErrorScales) Update(kind string, scale *extensions.Scale) (*extensions.Scale, error) {
-// if c.invalid {
-// return nil, kerrors.NewInvalid(extensions.Kind(scale.Kind), scale.Name, nil)
-// }
-// return nil, errors.New("scale update failure")
-// }
+func (c *ErrorDeployments) Get(name string) (*extensions.Deployment, error) {
+    return &extensions.Deployment{
+        Spec: extensions.DeploymentSpec{
+            Replicas: 0,
+        },
+    }, nil
+}

-// func (c *ErrorScales) Get(kind, name string) (*extensions.Scale, error) {
-// return &extensions.Scale{
-// Spec: extensions.ScaleSpec{
-// Replicas: 0,
-// },
-// }, nil
-// }
+type ErrorDeploymentClient struct {
+    testclient.FakeExperimental
+    invalid bool
+}

-// type ErrorDeployments struct {
-// testclient.FakeDeployments
-// invalid bool
-// }
+func (c *ErrorDeploymentClient) Deployments(namespace string) client.DeploymentInterface {
+    return &ErrorDeployments{testclient.FakeDeployments{Fake: &c.FakeExperimental, Namespace: namespace}, c.invalid}
+}

-// func (c *ErrorDeployments) Update(deployment *extensions.Deployment) (*extensions.Deployment, error) {
-// if c.invalid {
-// return nil, kerrors.NewInvalid(extensions.Kind(deployment.Kind), deployment.Name, nil)
-// }
-// return nil, errors.New("deployment update failure")
-// }
+func TestDeploymentScaleRetry(t *testing.T) {
+    fake := &ErrorDeploymentClient{FakeExperimental: testclient.FakeExperimental{Fake: &testclient.Fake{}}, invalid: false}
+    scaler := &DeploymentScaler{fake}
+    preconditions := &ScalePrecondition{-1, ""}
+    count := uint(3)
+    name := "foo"
+    namespace := "default"

-// func (c *ErrorDeployments) Get(name string) (*extensions.Deployment, error) {
-// return &extensions.Deployment{
-// Spec: extensions.DeploymentSpec{
-// Replicas: 0,
-// },
-// }, nil
-// }
+    scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count)
+    pass, err := scaleFunc()
+    if pass != false {
+        t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
+    }
+    if err != nil {
+        t.Errorf("Did not expect an error on update failure, got %v", err)
+    }
+    preconditions = &ScalePrecondition{3, ""}
+    scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count)
+    pass, err = scaleFunc()
+    if err == nil {
+        t.Errorf("Expected error on precondition failure")
+    }
+}

-// type ErrorDeploymentClient struct {
-// testclient.FakeExperimental
-// invalid bool
-// }
+func TestDeploymentScale(t *testing.T) {
+    fake := &testclient.FakeExperimental{Fake: &testclient.Fake{}}
+    scaler := DeploymentScaler{fake}
+    preconditions := ScalePrecondition{-1, ""}
+    count := uint(3)
+    name := "foo"
+    scaler.Scale("default", name, count, &preconditions, nil, nil)

-// func (c *ErrorDeploymentClient) Deployments(namespace string) client.DeploymentInterface {
-// return &ErrorDeployments{testclient.FakeDeployments{Fake: &c.FakeExperimental, Namespace: namespace}, c.invalid}
-// }
+    actions := fake.Actions()
+    if len(actions) != 2 {
+        t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
+    }
+    if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name {
+        t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name)
+    }
+    if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "deployments" || action.GetObject().(*extensions.Deployment).Spec.Replicas != int(count) {
+        t.Errorf("unexpected action %v, expected update-deployment with replicas = %d", actions[1], count)
+    }
+}

-// func (c *ErrorDeploymentClient) Scales(namespace string) client.ScaleInterface {
-// return &ErrorScales{testclient.FakeScales{Fake: &c.FakeExperimental, Namespace: namespace}, c.invalid}
-// }
+func TestDeploymentScaleInvalid(t *testing.T) {
+    fake := &ErrorDeploymentClient{FakeExperimental: testclient.FakeExperimental{Fake: &testclient.Fake{}}, invalid: true}
+    scaler := DeploymentScaler{fake}
+    preconditions := ScalePrecondition{-1, ""}
+    count := uint(3)
+    name := "foo"
+    namespace := "default"

-// func TestDeploymentScaleRetry(t *testing.T) {
-// fake := &ErrorDeploymentClient{FakeExperimental: testclient.FakeExperimental{Fake: &testclient.Fake{}}, invalid: false}
-// scaler := &DeploymentScaler{fake}
-// preconditions := &ScalePrecondition{-1, ""}
-// count := uint(3)
-// name := "foo"
-// namespace := "default"
+    scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count)
+    pass, err := scaleFunc()
+    if pass {
+        t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
+    }
+    e, ok := err.(ScaleError)
+    if err == nil || !ok || e.FailureType != ScaleUpdateInvalidFailure {
+        t.Errorf("Expected error on invalid update failure, got %v", err)
+    }
+}

-// scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count)
-// pass, err := scaleFunc()
-// if pass != false {
-// t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
-// }
-// if err != nil {
-// t.Errorf("Did not expect an error on update failure, got %v", err)
-// }
-// preconditions = &ScalePrecondition{3, ""}
-// scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count)
-// pass, err = scaleFunc()
-// if err == nil {
-// t.Errorf("Expected error on precondition failure")
-// }
-// }
+func TestDeploymentScaleFailsPreconditions(t *testing.T) {
+    fake := testclient.NewSimpleFake(&extensions.Deployment{
+        Spec: extensions.DeploymentSpec{
+            Replicas: 10,
+        },
+    })
+    scaler := DeploymentScaler{&testclient.FakeExperimental{fake}}
+    preconditions := ScalePrecondition{2, ""}
+    count := uint(3)
+    name := "foo"
+    scaler.Scale("default", name, count, &preconditions, nil, nil)

-// func TestDeploymentScale(t *testing.T) {
-// fake := &testclient.FakeExperimental{Fake: &testclient.Fake{}}
-// scaler := DeploymentScaler{fake}
-// preconditions := ScalePrecondition{-1, ""}
-// count := uint(3)
-// name := "foo"
-// scaler.Scale("default", name, count, &preconditions, nil, nil)
-
-// actions := fake.Actions()
-// if len(actions) != 2 {
-// t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
-// }
-// if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name {
-// t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name)
-// }
-// // TODO: The testclient needs to support subresources
-// if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "Deployment" || action.GetObject().(*extensions.Scale).Spec.Replicas != int(count) {
-// t.Errorf("unexpected action %v, expected update-deployment-scale with replicas = %d", actions[1], count)
-// }
-// }
-
-// func TestDeploymentScaleInvalid(t *testing.T) {
-// fake := &ErrorDeploymentClient{FakeExperimental: testclient.FakeExperimental{Fake: &testclient.Fake{}}, invalid: true}
-// scaler := DeploymentScaler{fake}
-// preconditions := ScalePrecondition{-1, ""}
-// count := uint(3)
-// name := "foo"
-// namespace := "default"
-
-// scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count)
-// pass, err := scaleFunc()
-// if pass {
-// t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
-// }
-// e, ok := err.(ScaleError)
-// if err == nil || !ok || e.FailureType != ScaleUpdateInvalidFailure {
-// t.Errorf("Expected error on invalid update failure, got %v", err)
-// }
-// }
-
-// func TestDeploymentScaleFailsPreconditions(t *testing.T) {
-// fake := testclient.NewSimpleFake(&extensions.Deployment{
-// Spec: extensions.DeploymentSpec{
-// Replicas: 10,
-// },
-// })
-// scaler := DeploymentScaler{&testclient.FakeExperimental{fake}}
-// preconditions := ScalePrecondition{2, ""}
-// count := uint(3)
-// name := "foo"
-// scaler.Scale("default", name, count, &preconditions, nil, nil)
-
-// actions := fake.Actions()
-// if len(actions) != 1 {
-// t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions)
-// }
-// if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name {
-// t.Errorf("unexpected action: %v, expected get-deployment %s", actions[0], name)
-// }
-// }
+    actions := fake.Actions()
+    if len(actions) != 1 {
+        t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions)
+    }
+    if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name {
+        t.Errorf("unexpected action: %v, expected get-deployment %s", actions[0], name)
+    }
+}

 func TestValidateDeployment(t *testing.T) {
     zero, ten, twenty := 0, 10, 20
@@ -78,6 +78,9 @@ func ReaperFor(kind unversioned.GroupKind, c client.Interface) (Reaper, error) {
     case extensions.Kind("Job"):
         return &JobReaper{c, Interval, Timeout}, nil

+    case extensions.Kind("Deployment"):
+        return &DeploymentReaper{c, Interval, Timeout}, nil
+
     }
     return nil, &NoSuchReaperError{kind}
 }
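Editorial note: this registration is what routes `kubectl delete deployment` through a graceful teardown instead of a bare API delete. A minimal caller sketch (hedged: `c` is assumed to be a configured `client.Interface`; the `Stop` signature is the one used throughout this file):

// Obtain the new deployment reaper and stop a deployment: scale it down,
// reap its ReplicaSets, then delete the Deployment object itself.
reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), c)
if err != nil {
    return err // a *NoSuchReaperError if the kind is not registered above
}
if err := reaper.Stop("default", "nginx-deployment", 1*time.Minute, api.NewDeleteOptions(0)); err != nil {
    return err
}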
@@ -102,6 +105,10 @@ type JobReaper struct {
     client.Interface
     pollInterval, timeout time.Duration
 }
+type DeploymentReaper struct {
+    client.Interface
+    pollInterval, timeout time.Duration
+}
 type PodReaper struct {
     client.Interface
 }
@@ -191,10 +198,7 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout
             return err
         }
     }
-    if err := rc.Delete(name); err != nil {
-        return err
-    }
-    return nil
+    return rc.Delete(name)
 }

 // TODO(madhusudancs): Implement it when controllerRef is implemented - https://github.com/kubernetes/kubernetes/issues/2210
@@ -303,10 +307,7 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio
         return err
     }

-    if err := reaper.Extensions().DaemonSets(namespace).Delete(name); err != nil {
-        return err
-    }
-    return nil
+    return reaper.Extensions().DaemonSets(namespace).Delete(name)
 }

 func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
@@ -352,10 +353,92 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gra
         return utilerrors.NewAggregate(errList)
     }
     // once we have all the pods removed we can safely remove the job itself
-    if err := jobs.Delete(name, gracePeriod); err != nil {
+    return jobs.Delete(name, gracePeriod)
+}
+
+func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
+    deployments := reaper.Extensions().Deployments(namespace)
+    replicaSets := reaper.Extensions().ReplicaSets(namespace)
+    rsReaper, _ := ReaperFor(extensions.Kind("ReplicaSet"), reaper)
+
+    deployment, err := deployments.Get(name)
+    if err != nil {
         return err
     }
-    return nil
+
+    // set deployment's history and scale to 0
+    // TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
+    zero := 0
+    deployment.Spec.RevisionHistoryLimit = &zero
+    deployment.Spec.Replicas = 0
+    // TODO: un-pausing should not be necessary, remove when this is fixed:
+    // https://github.com/kubernetes/kubernetes/issues/20966
+    // Instead deployment should be Paused at this point and not at next TODO.
+    deployment.Spec.Paused = false
+    deployment, err = deployments.Update(deployment)
+    if err != nil {
+        return err
+    }
+
+    // wait for total no of pods drop to 0
+    if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) {
+        curr, err := deployments.Get(name)
+        // if deployment was not found it must have been deleted, error out
+        if err != nil && errors.IsNotFound(err) {
+            return false, err
+        }
+        // if other errors happen, retry
+        if err != nil {
+            return false, nil
+        }
+        // check if deployment wasn't recreated with the same name
+        // TODO use generations when deployment will have them
+        if curr.UID != deployment.UID {
+            return false, errors.NewNotFound(extensions.Resource("Deployment"), name)
+        }
+        return curr.Status.Replicas == 0, nil
+    }); err != nil {
+        return err
+    }
+
+    // TODO: When deployments will allow running cleanup policy while being
+    // paused, move pausing to above update operation. Without it, we need to
+    // pause deployment before stopping RSs, to prevent creating new RSs.
+    // See https://github.com/kubernetes/kubernetes/issues/20966
+    deployment, err = deployments.Get(name)
+    if err != nil {
+        return err
+    }
+    deployment.Spec.Paused = true
+    deployment, err = deployments.Update(deployment)
+    if err != nil {
+        return err
+    }
+
+    // remove remaining RSs
+    selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
+    if err != nil {
+        return err
+    }
+    options := api.ListOptions{LabelSelector: selector}
+    rsList, err := replicaSets.List(options)
+    if err != nil {
+        return err
+    }
+    errList := []error{}
+    for _, rc := range rsList.Items {
+        if err := rsReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
+            if !errors.IsNotFound(err) {
+                errList = append(errList, err)
+            }
+        }
+    }
+    if len(errList) > 0 {
+        return utilerrors.NewAggregate(errList)
+    }
+
+    // and finally deployment
+    return deployments.Delete(name, gracePeriod)
 }

 func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
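Editorial note: the polling closure inside `Stop` does three distinct jobs at once, which is easy to miss inline: retry transient errors, abort if the object was recreated under the same name (UID changed), succeed when replicas reach zero. A self-contained toy version of the same control flow (hedged: toy types stand in for the client, and the IsNotFound special case is elided):

package main

import (
    "errors"
    "fmt"
    "time"
)

// A stand-in for the deployment the reaper polls; only the fields the
// closure above cares about.
type obj struct {
    uid      string
    replicas int
}

// pollScaledDown mirrors the shape of the wait.Poll closure in
// DeploymentReaper.Stop: transient lookup errors retry, a changed UID
// (same name, different object) aborts, replicas == 0 succeeds.
func pollScaledDown(get func() (*obj, error), uid string, interval, timeout time.Duration) error {
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        curr, err := get()
        switch {
        case err != nil:
            // transient error: fall through and retry
        case curr.uid != uid:
            return errors.New("deployment was recreated under the same name")
        case curr.replicas == 0:
            return nil
        }
        time.Sleep(interval)
    }
    return errors.New("timed out waiting for pods to terminate")
}

func main() {
    n := 3
    err := pollScaledDown(func() (*obj, error) {
        n-- // each observation sees one fewer pod
        return &obj{uid: "abc", replicas: n}, nil
    }, "abc", time.Millisecond, time.Second)
    fmt.Println(err) // <nil>
}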
@@ -364,11 +447,7 @@ func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gra
     if err != nil {
         return err
     }
-    if err := pods.Delete(name, gracePeriod); err != nil {
-        return err
-    }
-
-    return nil
+    return pods.Delete(name, gracePeriod)
 }

 func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
@@ -377,8 +456,5 @@ func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration,
     if err != nil {
         return err
     }
-    if err := services.Delete(name); err != nil {
-        return err
-    }
-    return nil
+    return services.Delete(name)
 }
@@ -29,6 +29,7 @@ import (
     client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/client/unversioned/testclient"
     "k8s.io/kubernetes/pkg/runtime"
+    deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
 )

 func TestReplicationControllerStop(t *testing.T) {
@@ -495,6 +496,107 @@ func TestJobStop(t *testing.T) {
     }
 }

+func TestDeploymentStop(t *testing.T) {
+    name := "foo"
+    ns := "default"
+    deployment := extensions.Deployment{
+        ObjectMeta: api.ObjectMeta{
+            Name:      name,
+            Namespace: ns,
+        },
+        Spec: extensions.DeploymentSpec{
+            Replicas: 0,
+            Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}},
+        },
+        Status: extensions.DeploymentStatus{
+            Replicas: 0,
+        },
+    }
+    template := deploymentutil.GetNewReplicaSetTemplate(deployment)
+    tests := []struct {
+        Name            string
+        Objs            []runtime.Object
+        StopError       error
+        ExpectedActions []string
+    }{
+        {
+            Name: "SimpleDeployment",
+            Objs: []runtime.Object{
+                &extensions.Deployment{ // GET
+                    ObjectMeta: api.ObjectMeta{
+                        Name:      name,
+                        Namespace: ns,
+                    },
+                    Spec: extensions.DeploymentSpec{
+                        Replicas: 0,
+                        Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}},
+                    },
+                    Status: extensions.DeploymentStatus{
+                        Replicas: 0,
+                    },
+                },
+            },
+            StopError: nil,
+            ExpectedActions: []string{"get:deployments", "update:deployments",
+                "get:deployments", "get:deployments", "update:deployments",
+                "list:replicasets", "delete:deployments"},
+        },
+        {
+            Name: "Deployment with single replicaset",
+            Objs: []runtime.Object{
+                &deployment, // GET
+                &extensions.ReplicaSetList{ // LIST
+                    Items: []extensions.ReplicaSet{
+                        {
+                            ObjectMeta: api.ObjectMeta{
+                                Name:      name,
+                                Namespace: ns,
+                            },
+                            Spec: extensions.ReplicaSetSpec{
+                                Template: &template,
+                            },
+                        },
+                    },
+                },
+            },
+            StopError: nil,
+            ExpectedActions: []string{"get:deployments", "update:deployments",
+                "get:deployments", "get:deployments", "update:deployments",
+                "list:replicasets", "get:replicasets", "get:replicasets",
+                "update:replicasets", "get:replicasets", "get:replicasets",
+                "delete:replicasets", "delete:deployments"},
+        },
+    }
+
+    for _, test := range tests {
+        fake := testclient.NewSimpleFake(test.Objs...)
+        reaper := DeploymentReaper{fake, time.Millisecond, time.Millisecond}
+        err := reaper.Stop(ns, name, 0, nil)
+        if !reflect.DeepEqual(err, test.StopError) {
+            t.Errorf("%s unexpected error: %v", test.Name, err)
+            continue
+        }
+
+        actions := fake.Actions()
+        if len(actions) != len(test.ExpectedActions) {
+            t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions))
+            continue
+        }
+        for i, expAction := range test.ExpectedActions {
+            action := strings.Split(expAction, ":")
+            if actions[i].GetVerb() != action[0] {
+                t.Errorf("%s unexpected verb: %+v, expected %s", test.Name, actions[i], expAction)
+            }
+            if actions[i].GetResource() != action[1] {
+                t.Errorf("%s unexpected resource: %+v, expected %s", test.Name, actions[i], expAction)
+            }
+            if len(action) == 3 && actions[i].GetSubresource() != action[2] {
+                t.Errorf("%s unexpected subresource: %+v, expected %s", test.Name, actions[i], expAction)
+            }
+        }
+    }
+}
+
 type noSuchPod struct {
     *testclient.FakePods
 }
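Editorial note: the expected-action strings in `TestDeploymentStop` use a compact `verb:resource[:subresource]` encoding that the assertion loop splits on `:`. A self-contained illustration of that matching scheme (hedged: the helper name is hypothetical; the test above inlines this logic):

package main

import (
    "fmt"
    "strings"
)

// matches mirrors the assertion loop in TestDeploymentStop: an expectation
// like "get:deployments" must match the recorded verb and resource, and an
// optional third field must match the subresource.
func matches(exp, verb, resource, subresource string) bool {
    parts := strings.Split(exp, ":")
    if parts[0] != verb || parts[1] != resource {
        return false
    }
    return len(parts) < 3 || parts[2] == subresource
}

func main() {
    fmt.Println(matches("get:deployments", "get", "deployments", ""))              // true
    fmt.Println(matches("update:deployments:status", "update", "deployments", "")) // false
}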
@@ -21,12 +21,15 @@ import (
     "time"

     "k8s.io/kubernetes/pkg/api"
+    "k8s.io/kubernetes/pkg/api/errors"
+    "k8s.io/kubernetes/pkg/api/unversioned"
     "k8s.io/kubernetes/pkg/apis/extensions"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     client "k8s.io/kubernetes/pkg/client/unversioned"
+    "k8s.io/kubernetes/pkg/kubectl"
     deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
     "k8s.io/kubernetes/pkg/util/intstr"
     "k8s.io/kubernetes/pkg/util/wait"

     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -65,6 +68,7 @@ var _ = Describe("Deployment [Feature:Deployment]", func() {
     })

 func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
+    zero := int64(0)
     return &extensions.ReplicaSet{
         ObjectMeta: api.ObjectMeta{
             Name: rsName,
@@ -77,6 +81,7 @@ func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName
                 Labels: rsPodLabels,
             },
             Spec: api.PodSpec{
+                TerminationGracePeriodSeconds: &zero,
                 Containers: []api.Container{
                     {
                         Name: imageName,
@@ -90,6 +95,7 @@ func newRS(rsName string, replicas int, rsPodLabels map[string]string, imageName
 }

 func newDeployment(deploymentName string, replicas int, podLabels map[string]string, imageName string, image string, strategyType extensions.DeploymentStrategyType, revisionHistoryLimit *int) *extensions.Deployment {
+    zero := int64(0)
     return &extensions.Deployment{
         ObjectMeta: api.ObjectMeta{
             Name: deploymentName,
@@ -106,6 +112,7 @@ func newDeployment(deploymentName string, replicas int, podLabels map[string]str
                 Labels: podLabels,
             },
             Spec: api.PodSpec{
+                TerminationGracePeriodSeconds: &zero,
                 Containers: []api.Container{
                     {
                         Name: imageName,
@@ -149,6 +156,43 @@ func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revisio
     return deployment, newRS
 }

+func stopDeployment(c *clientset.Clientset, oldC client.Interface, ns, deploymentName string) {
+    deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+    Expect(err).NotTo(HaveOccurred())
+
+    Logf("deleting deployment %s", deploymentName)
+    reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), oldC)
+    Expect(err).NotTo(HaveOccurred())
+    timeout := 1 * time.Minute
+    err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
+    Expect(err).NotTo(HaveOccurred())
+
+    Logf("ensuring deployment %s was deleted", deploymentName)
+    _, err = c.Extensions().Deployments(ns).Get(deployment.Name)
+    Expect(err).To(HaveOccurred())
+    Expect(errors.IsNotFound(err)).To(BeTrue())
+    Logf("ensuring deployment %s rcs were deleted", deploymentName)
+    selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
+    Expect(err).NotTo(HaveOccurred())
+    options := api.ListOptions{LabelSelector: selector}
+    rss, err := c.Extensions().ReplicaSets(ns).List(options)
+    Expect(err).NotTo(HaveOccurred())
+    Expect(rss.Items).Should(HaveLen(0))
+    Logf("ensuring deployment %s pods were deleted", deploymentName)
+    if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
+        pods, err := c.Core().Pods(ns).List(api.ListOptions{})
+        if err != nil {
+            return false, err
+        }
+        if len(pods.Items) == 0 {
+            return true, nil
+        }
+        return false, nil
+    }); err != nil {
+        Failf("Failed to remove deployment %s pods!", deploymentName)
+    }
+}
+
 func testNewDeployment(f *Framework) {
     ns := f.Namespace.Name
     // TODO: remove unversionedClient when the refactoring is done. Currently some
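Editorial note: each e2e test below replaces its hand-rolled cleanup closure with a single deferred call to this helper. Two clients are threaded through because the assertions use the new generated clientset while `kubectl.ReaperFor` still expects the old unversioned client (per the TODO in testNewDeployment). The typical call site, as the following hunks show:

// c is the generated *clientset.Clientset used for assertions; f.Client is
// the old unversioned client.Interface that kubectl.ReaperFor still expects.
defer stopDeployment(c, f.Client, ns, deploymentName)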
@@ -164,16 +208,8 @@ func testNewDeployment(f *Framework) {
     d.Annotations = map[string]string{"test": "should-copy-to-replica-set", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
     _, err := c.Extensions().Deployments(ns).Create(d)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)
+
     // Check that deployment is created fine.
     deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
     Expect(err).NotTo(HaveOccurred())
@@ -216,10 +252,6 @@ func testRollingUpdateDeployment(f *Framework) {
     replicas := 3
     _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        Logf("deleting replica set %s", rsName)
-        Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
-    }()
     // Verify that the required pods have come up.
     err = verifyPods(unversionedClient, ns, "sample-pod", false, 3)
     if err != nil {
@@ -232,16 +264,7 @@ func testRollingUpdateDeployment(f *Framework) {
     Logf("Creating deployment %s", deploymentName)
     _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)

     err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
     Expect(err).NotTo(HaveOccurred())
@@ -273,10 +296,6 @@ func testRollingUpdateDeploymentEvents(f *Framework) {

     _, err := c.Extensions().ReplicaSets(ns).Create(rs)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        Logf("deleting replica set %s", rsName)
-        Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
-    }()
     // Verify that the required pods have come up.
     err = verifyPods(unversionedClient, ns, "sample-pod-2", false, 1)
     if err != nil {
@@ -289,16 +308,7 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
     Logf("Creating deployment %s", deploymentName)
     _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)

     err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
     Expect(err).NotTo(HaveOccurred())
@@ -341,10 +351,6 @@ func testRecreateDeployment(f *Framework) {
     replicas := 3
     _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        Logf("deleting replica set %s", rsName)
-        Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
-    }()
     // Verify that the required pods have come up.
     err = verifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
     if err != nil {
@@ -357,16 +363,7 @@ func testRecreateDeployment(f *Framework) {
     Logf("Creating deployment %s", deploymentName)
     _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RecreateDeploymentStrategyType, nil))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)

     err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
     if err != nil {
@@ -426,16 +423,7 @@ func testDeploymentCleanUpPolicy(f *Framework) {
     Logf("Creating deployment %s", deploymentName)
     _, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)

     err = waitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit)
     Expect(err).NotTo(HaveOccurred())
@@ -460,10 +448,6 @@ func testRolloverDeployment(f *Framework) {
     rsReplicas := 4
     _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, "nginx", "nginx"))
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        Logf("deleting replica set %s", rsName)
-        Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
-    }()
     // Verify that the required pods have come up.
     err = verifyPods(unversionedClient, ns, podName, false, rsReplicas)
     if err != nil {
@@ -486,16 +470,8 @@ func testRolloverDeployment(f *Framework) {
     }
     _, err = c.Extensions().Deployments(ns).Create(newDeployment)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)
+
     // Verify that the pods were scaled up and down as expected. We use events to verify that.
     deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
     Expect(err).NotTo(HaveOccurred())
@@ -534,12 +510,7 @@ func testPausedDeployment(f *Framework) {
     Logf("Creating paused deployment %s", deploymentName)
     _, err := c.Extensions().Deployments(ns).Create(d)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        _, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)
     // Check that deployment is created fine.
     deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
     Expect(err).NotTo(HaveOccurred())
@@ -618,21 +589,8 @@ func testRollbackDeployment(f *Framework) {
     d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
     _, err := c.Extensions().Deployments(ns).Create(d)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-        oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        for _, oldRS := range oldRSs {
-            Expect(c.Extensions().ReplicaSets(ns).Delete(oldRS.Name, nil)).NotTo(HaveOccurred())
-        }
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)
+
     // Check that deployment is created fine.
     deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
     Expect(err).NotTo(HaveOccurred())
@@ -718,10 +676,6 @@ func testRollbackDeploymentRSNoRevision(f *Framework) {
     rs.Annotations["make"] = "difference"
     _, err := c.Extensions().ReplicaSets(ns).Create(rs)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        Logf("deleting replica set %s", rsName)
-        Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
-    }()

     // Create a deployment to create nginx pods, which have different template than the replica set created above.
     deploymentName, deploymentImageName := "nginx-deployment", "nginx"
@@ -732,21 +686,8 @@ func testRollbackDeploymentRSNoRevision(f *Framework) {
     d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
     _, err = c.Extensions().Deployments(ns).Create(d)
     Expect(err).NotTo(HaveOccurred())
-    defer func() {
-        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
-        Expect(err).NotTo(HaveOccurred())
-        Logf("deleting deployment %s", deploymentName)
-        Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
-        // TODO: remove this once we can delete replica sets with deployment
-        newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
-        oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
-        Expect(err).NotTo(HaveOccurred())
-        for _, oldRS := range oldRSs {
-            Expect(c.Extensions().ReplicaSets(ns).Delete(oldRS.Name, nil)).NotTo(HaveOccurred())
-        }
-    }()
+    defer stopDeployment(c, f.Client, ns, deploymentName)
+
     // Check that deployment is created fine.
     deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
     Expect(err).NotTo(HaveOccurred())
@@ -160,7 +160,7 @@ var _ = Describe("Job", func() {
         Expect(err).NotTo(HaveOccurred())
     })

-    It("should stop a job", func() {
+    It("should delete a job", func() {
         By("Creating a job")
         job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
         job, err := createJob(f.Client, f.Namespace.Name, job)
@@ -170,7 +170,7 @@ var _ = Describe("Job", func() {
         err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, parallelism)
         Expect(err).NotTo(HaveOccurred())

-        By("scale job down")
+        By("delete a job")
         reaper, err := kubectl.ReaperFor(extensions.Kind("Job"), f.Client)
         Expect(err).NotTo(HaveOccurred())
        timeout := 1 * time.Minute