Merge pull request #51186 from dixudx/fix_delete_uninitialized_resources
Automatic merge from submit-queue (batch tested with PRs 51186, 50350, 51751, 51645, 51837).

fix bug on kubectl deleting uninitialized resources

**What this PR does / why we need it**: fixes a bug where `kubectl delete` did not correctly handle uninitialized resources.

**Which issue this PR fixes**: fixes #51185

**Special notes for your reviewer**:
/assign @caesarxuchao @ahmetb

**Release note**:
```release-note
fix bug on kubectl deleting uninitialized resources
```
Commit: `ee4e4a5418`
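For context, a rough reproduction sketch of the symptom (not part of the PR). It assumes a cluster of this era with the alpha initializers feature enabled and an initializer configuration that leaves the fixture's `podimage.initializer.com` entry pending:

```sh
# Create a Deployment that carries a pending initializer; the short request timeout
# and "|| true" mirror the e2e test added below, since create blocks while the
# object remains uninitialized.
kubectl create --request-timeout=1 -f hack/testdata/deployment-with-initializer.yaml || true

# Before this fix, the kubectl reaper waited on controller status that an
# uninitialized Deployment never reports, so this delete did not complete cleanly.
kubectl delete deployment web
```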
```diff
@@ -2854,7 +2854,6 @@ run_deployment_tests() {
   kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
   kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
   kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
 
   # Clean up
   kubectl delete deployment nginx-deployment "${kube_flags[@]}"
 
```
```diff
@@ -2883,6 +2882,18 @@ run_deployment_tests() {
   kubectl delete configmap test-set-env-config "${kube_flags[@]}"
   kubectl delete secret test-set-env-secret "${kube_flags[@]}"
 
+  ### Delete a deployment with initializer
+  # Pre-condition: no deployment exists
+  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Create a deployment
+  kubectl create --request-timeout=1 -f hack/testdata/deployment-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true
+  kube::test::get_object_assert 'deployment web' "{{$id_field}}" 'web'
+  # Delete a deployment
+  kubectl delete deployment web "${kube_flags[@]}"
+  # Check Deployment web doesn't exist
+  output_message=$(! kubectl get deployment web 2>&1 "${kube_flags[@]}")
+  kube::test::if_has_string "${output_message}" '"web" not found'
+
   set +o nounset
   set +o errexit
 }
```
```diff
@@ -2995,6 +3006,18 @@ run_rs_tests() {
   # Post-condition: no replica set exists
   kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
 
+  ### Delete a rs with initializer
+  # Pre-condition: no rs exists
+  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Create a rs
+  kubectl create --request-timeout=1 -f hack/testdata/replicaset-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true
+  kube::test::get_object_assert 'rs nginx' "{{$id_field}}" 'nginx'
+  # Delete a rs
+  kubectl delete rs nginx "${kube_flags[@]}"
+  # check rs nginx doesn't exist
+  output_message=$(! kubectl get rs nginx 2>&1 "${kube_flags[@]}")
+  kube::test::if_has_string "${output_message}" '"nginx" not found'
+
   if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
     ### Auto scale replica set
     # Pre-condition: no replica set exists
```
hack/testdata/deployment-with-initializer.yaml (new file, 25 lines):

```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: web
  labels:
    run: web
  initializers:
    pending:
    - name: podimage.initializer.com
spec:
  replicas: 5
  selector:
    matchLabels:
      run: web
  template:
    metadata:
      labels:
        run: web
    spec:
      containers:
      - image: nginx:1.10
        name: web
        ports:
        - containerPort: 80
          protocol: TCP
```
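As a side note (not part of the PR), a quick way to confirm the fixture really is uninitialized is to read its pending initializers back; a sketch, assuming the object was created as in the test above and no initializer controller has cleared the entry:

```sh
# Should print the pending initializer name from the fixture, i.e. podimage.initializer.com
kubectl get deployment web -o jsonpath='{.metadata.initializers.pending[*].name}'
```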
hack/testdata/replicaset-with-initializer.yaml (new file, 23 lines):

```yaml
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  name: nginx
  initializers:
    pending:
    - name: podimage.initializer.com
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.10
        ports:
        - containerPort: 80
```
```diff
@@ -404,6 +404,11 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati
 	if err != nil {
 		return err
 	}
+	if deployment.Initializers != nil {
+		var falseVar = false
+		nonOrphanOption := metav1.DeleteOptions{OrphanDependents: &falseVar}
+		return deployments.Delete(name, &nonOrphanOption)
+	}
 
 	// Use observedGeneration to determine if the deployment controller noticed the pause.
 	if err := deploymentutil.WaitForObservedDeploymentInternal(func() (*extensions.Deployment, error) {
```
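To make the new early-exit path concrete, a rough illustration (not from the PR) of the API call it amounts to: a DELETE carrying a `DeleteOptions` body with `orphanDependents: false`, against the `extensions/v1beta1` Deployment endpoint of that era. It assumes `kubectl proxy` is already running on its default port 8001 and that the object lives in the `default` namespace:

```sh
# Non-orphaning delete issued directly against the API server via the local proxy.
curl -X DELETE \
  -H 'Content-Type: application/json' \
  -d '{"kind":"DeleteOptions","apiVersion":"v1","orphanDependents":false}' \
  http://localhost:8001/apis/extensions/v1beta1/namespaces/default/deployments/web
```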
```diff
@@ -305,6 +305,9 @@ func (scaler *ReplicaSetScaler) Scale(namespace, name string, newSize uint, prec
 		if err != nil {
 			return err
 		}
+		if rs.Initializers != nil {
+			return nil
+		}
 		err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.ReplicaSetHasDesiredReplicas(scaler.c, rs))
 
 		if err == wait.ErrWaitTimeout {
```
```diff
@@ -373,6 +376,9 @@ func (scaler *StatefulSetScaler) Scale(namespace, name string, newSize uint, pre
 		if err != nil {
 			return err
 		}
+		if job.Initializers != nil {
+			return nil
+		}
 		err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.StatefulSetHasDesiredReplicas(scaler.c, job))
 		if err == wait.ErrWaitTimeout {
 			return fmt.Errorf("timed out waiting for %q to be synced", name)
```