Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
Merge pull request #40576 from nikhiljindal/kubectlcascDel

Automatic merge from submit-queue (batch tested with PRs 41994, 41969, 41997, 40952, 40576)

Updating kubectl to send delete requests with orphanDependents=false if --cascade is true

Ref https://github.com/kubernetes/kubernetes/issues/40568 #38897

Updates kubectl to always set `DeleteOptions.orphanDependents=false` when deleting a resource with `--cascade=true`. This is primarily for federation, where we want to use server-side cascading deletion.

Impact on kubernetes: kubectl now issues another GET after sending the DELETE and waits until the resource is actually deleted. This matters if the resource has a finalizer: kubectl will wait until the finalizer is removed and the resource is gone, which is the right thing to do but a notable change in behavior.

cc @caesarxuchao @lavalamp @smarterclayton @kubernetes/sig-federation-pr-reviews @kubernetes/sig-cli-pr-reviews

Commit: 0bc16d8966
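In concrete terms, a kubectl built with this change attaches a `DeleteOptions` body with `OrphanDependents` explicitly set to false whenever `--cascade=true` (the default), and sends no body at all with `--cascade=false`. A minimal sketch of what that body serializes to, assuming the `k8s.io/apimachinery` types used throughout the diff:

package main

import (
    "encoding/json"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // --cascade=true: orphaning is explicitly disabled, so the server-side
    // garbage collector deletes dependents along with the owner.
    falseVar := false
    opts := &metav1.DeleteOptions{OrphanDependents: &falseVar}

    body, err := json.Marshal(opts)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(body)) // {"orphanDependents":false}

    // --cascade=false: no DeleteOptions body is sent at all; the tests
    // further down assert this as a nil request body.
}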
@@ -42,30 +42,60 @@ kube::test::get_caller() {

-# Force exact match of a returned result for a object query. Wrap this with || to support multiple
-# valid return types.
+# Asserts that the output of a given get query is as expected.
+# This runs `kubectl get` once and asserts that the result is as expected.
+# $1: Object on which get should be run
+# $2: The go-template to run on the result
+# $3: The expected output
+# $4: Additional args to be passed to kubectl
 kube::test::get_object_assert() {
-  local object=$1
-  local request=$2
-  local expected=$3
-  local args=${4:-}
-
-  res=$(eval kubectl get "${kube_flags[@]}" ${args} $object -o go-template=\"$request\")
-
-  if [[ "$res" =~ ^$expected$ ]]; then
-    echo -n ${green}
-    echo "$(kube::test::get_caller): Successful get $object $request: $res"
-    echo -n ${reset}
-    return 0
-  else
-    echo ${bold}${red}
-    echo "$(kube::test::get_caller): FAIL!"
-    echo "Get $object $request"
-    echo "  Expected: $expected"
-    echo "  Got:      $res"
-    echo ${reset}${red}
-    caller
-    echo ${reset}
-    return 1
-  fi
+  kube::test::object_assert 1 "$@"
+}
+
+# Asserts that the output of a given get query is as expected.
+# Runs the query multiple times before failing it.
+# $1: Object on which get should be run
+# $2: The go-template to run on the result
+# $3: The expected output
+# $4: Additional args to be passed to kubectl
+kube::test::wait_object_assert() {
+  kube::test::object_assert 10 "$@"
+}
+
+# Asserts that the output of a given get query is as expected.
+# Can run the query multiple times before failing it.
+# $1: Number of times the query should be run before failing it.
+# $2: Object on which get should be run
+# $3: The go-template to run on the result
+# $4: The expected output
+# $5: Additional args to be passed to kubectl
+kube::test::object_assert() {
+  local tries=$1
+  local object=$2
+  local request=$3
+  local expected=$4
+  local args=${5:-}
+
+  for j in $(seq 1 ${tries}); do
+    res=$(eval kubectl get "${kube_flags[@]}" ${args} $object -o go-template=\"$request\")
+    if [[ "$res" =~ ^$expected$ ]]; then
+      echo -n ${green}
+      echo "$(kube::test::get_caller 3): Successful get $object $request: $res"
+      echo -n ${reset}
+      return 0
+    fi
+    echo "Waiting for Get $object $request $args: expected: $expected, got: $res"
+    sleep $((${j}-1))
+  done
+
+  echo ${bold}${red}
+  echo "$(kube::test::get_caller 3): FAIL!"
+  echo "Get $object $request"
+  echo "  Expected: $expected"
+  echo "  Got:      $res"
+  echo ${reset}${red}
+  caller
+  echo ${reset}
+  return 1
 }

 kube::test::get_object_jsonpath_assert() {
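For readers skimming the refactor above: `object_assert` polls `kubectl get` up to N times, sleeping j-1 seconds after attempt j, so the first retry is immediate and later retries back off linearly. A rough Go sketch of the same pattern, with a hypothetical `get` closure standing in for the `kubectl get` invocation and regex match:

package main

import (
    "fmt"
    "time"
)

// objectAssert mirrors kube::test::object_assert: poll up to tries times,
// with a linearly growing sleep between attempts, then fail.
func objectAssert(tries int, get func() string, expected string) error {
    res := ""
    for j := 1; j <= tries; j++ {
        res = get()
        if res == expected {
            return nil
        }
        fmt.Printf("Waiting: expected: %s, got: %s\n", expected, res)
        time.Sleep(time.Duration(j-1) * time.Second) // sleep 0s, 1s, 2s, ...
    }
    return fmt.Errorf("FAIL! Expected: %s, Got: %s", expected, res)
}

func main() {
    n := 0
    get := func() string {
        n++
        if n < 3 {
            return ""
        }
        return "3"
    }
    fmt.Println(objectAssert(10, get, "3")) // <nil> once the third poll matches
}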
@@ -947,8 +947,8 @@ run_kubectl_apply_deployments_tests() {
   # need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
   kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
   # Post-Condition: no Deployments, ReplicaSets, Pods exist
-  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
-  kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
 }
@@ -2273,6 +2273,37 @@ run_deployment_tests() {
   # Clean up
   kubectl delete deployment test-nginx "${kube_flags[@]}"

+  ### Test cascading deletion
+  ## Test that rs is deleted when deployment is deleted.
+  # Pre-condition: no deployment exists
+  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Create deployment
+  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
+  # Wait for rs to come up.
+  kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
+  # Deleting the deployment should delete the rs.
+  kubectl delete deployment nginx-deployment "${kube_flags[@]}"
+  kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+
+  ## Test that rs is not deleted when deployment is deleted with cascade set to false.
+  # Pre-condition: no deployment and rs exist
+  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Create deployment
+  kubectl create deployment nginx-deployment --image=gcr.io/google-containers/nginx:test-cmd
+  # Wait for rs to come up.
+  kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
+  # Delete the deployment with cascade set to false.
+  kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
+  # Wait for the deployment to be deleted and then verify that rs is not
+  # deleted.
+  kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
+  # Cleanup
+  # Find the name of the rs to be deleted.
+  output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}})
+  kubectl delete rs ${output_message} "${kube_flags[@]}"
+
   ### Auto scale deployment
   # Pre-condition: no deployment exists
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -2385,6 +2416,21 @@ run_rs_tests() {
   # Post-condition: no pods from frontend replica set
   kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''

+  ### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
+  # Pre-condition: no replica set exists
+  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
+  kube::log::status "Deleting rs"
+  kubectl delete rs frontend "${kube_flags[@]}" --cascade=false
+  # Wait for the rs to be deleted.
+  kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Post-condition: All 3 pods still remain from frontend replica set
+  kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
+  # Cleanup
+  kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+
   ### Create replica set frontend from YAML
   # Pre-condition: no replica set exists
   kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -2662,7 +2708,7 @@ runTests() {
   i=0
   create_and_use_new_namespace() {
     i=$(($i+1))
-    kube::log::status "Creating namespace"
+    kube::log::status "Creating namespace namespace${i}"
     kubectl create namespace "namespace${i}"
     kubectl config set-context "${CONTEXT}" --namespace="namespace${i}"
   }
@@ -2690,6 +2736,7 @@ runTests() {
   second_port_field="(index .spec.ports 1).port"
   second_port_name="(index .spec.ports 1).name"
   image_field="(index .spec.containers 0).image"
+  pod_container_name_field="(index .spec.containers 0).name"
   container_name_field="(index .spec.template.spec.containers 0).name"
   hpa_min_field=".spec.minReplicas"
   hpa_max_field=".spec.maxReplicas"
@@ -253,7 +253,8 @@ func ReapResult(r *resource.Result, f cmdutil.Factory, out io.Writer, isDefaultD
     if err != nil {
         // If there is no reaper for this resources and the user didn't explicitly ask for stop.
         if kubectl.IsNoSuchReaperError(err) && isDefaultDelete {
-            return deleteResource(info, out, shortOutput, mapper)
+            // No client side reaper found. Let the server do cascading deletion.
+            return cascadingDeleteResource(info, out, shortOutput, mapper)
         }
         return cmdutil.AddSourceToErr("reaping", info.Source, err)
     }
@@ -293,7 +294,7 @@ func DeleteResult(r *resource.Result, out io.Writer, ignoreNotFound bool, shortO
             return err
         }
         found++
-        return deleteResource(info, out, shortOutput, mapper)
+        return deleteResource(info, out, shortOutput, mapper, nil)
     })
     if err != nil {
         return err
@@ -304,8 +305,14 @@ func DeleteResult(r *resource.Result, out io.Writer, ignoreNotFound bool, shortO
     return nil
 }

-func deleteResource(info *resource.Info, out io.Writer, shortOutput bool, mapper meta.RESTMapper) error {
-    if err := resource.NewHelper(info.Client, info.Mapping).Delete(info.Namespace, info.Name); err != nil {
+func cascadingDeleteResource(info *resource.Info, out io.Writer, shortOutput bool, mapper meta.RESTMapper) error {
+    falseVar := false
+    deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+    return deleteResource(info, out, shortOutput, mapper, deleteOptions)
+}
+
+func deleteResource(info *resource.Info, out io.Writer, shortOutput bool, mapper meta.RESTMapper, deleteOptions *metav1.DeleteOptions) error {
+    if err := resource.NewHelper(info.Client, info.Mapping).DeleteWithOptions(info.Namespace, info.Name, deleteOptions); err != nil {
         return cmdutil.AddSourceToErr("deleting", info.Source, err)
     }
     cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, false, "deleted")
@@ -18,6 +18,9 @@ package cmd

 import (
     "bytes"
+    "encoding/json"
+    "io"
+    "io/ioutil"
     "net/http"
     "strings"
     "testing"
@@ -88,6 +91,68 @@ func TestDeleteObjectByTuple(t *testing.T) {
     }
 }

+func hasExpectedOrphanDependents(body io.ReadCloser, expectedOrphanDependents *bool) bool {
+    if body == nil || expectedOrphanDependents == nil {
+        return body == nil && expectedOrphanDependents == nil
+    }
+    var parsedBody metav1.DeleteOptions
+    rawBody, _ := ioutil.ReadAll(body)
+    json.Unmarshal(rawBody, &parsedBody)
+    if parsedBody.OrphanDependents == nil {
+        return false
+    }
+    return *expectedOrphanDependents == *parsedBody.OrphanDependents
+}
+
+// Tests that DeleteOptions.OrphanDependents is appropriately set while deleting objects.
+func TestOrphanDependentsInDeleteObject(t *testing.T) {
+    initTestErrorHandler(t)
+    _, _, rc := testData()
+
+    f, tf, codec, _ := cmdtesting.NewAPIFactory()
+    tf.Printer = &testPrinter{}
+    var expectedOrphanDependents *bool
+    tf.UnstructuredClient = &fake.RESTClient{
+        APIRegistry:          api.Registry,
+        NegotiatedSerializer: unstructuredSerializer,
+        Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+            switch p, m, b := req.URL.Path, req.Method, req.Body; {
+            case p == "/namespaces/test/secrets/mysecret" && m == "DELETE" && hasExpectedOrphanDependents(b, expectedOrphanDependents):
+                return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil
+            default:
+                return nil, nil
+            }
+        }),
+    }
+    tf.Namespace = "test"
+
+    // DeleteOptions.OrphanDependents should be false, when cascade is true (default).
+    falseVar := false
+    expectedOrphanDependents = &falseVar
+    buf, errBuf := bytes.NewBuffer([]byte{}), bytes.NewBuffer([]byte{})
+    cmd := NewCmdDelete(f, buf, errBuf)
+    cmd.Flags().Set("namespace", "test")
+    cmd.Flags().Set("output", "name")
+    cmd.Run(cmd, []string{"secrets/mysecret"})
+    if buf.String() != "secret/mysecret\n" {
+        t.Errorf("unexpected output: %s", buf.String())
+    }
+
+    // Test that delete options should be nil when cascade is false.
+    expectedOrphanDependents = nil
+    buf, errBuf = bytes.NewBuffer([]byte{}), bytes.NewBuffer([]byte{})
+    cmd = NewCmdDelete(f, buf, errBuf)
+    cmd.Flags().Set("namespace", "test")
+    cmd.Flags().Set("cascade", "false")
+    cmd.Flags().Set("output", "name")
+    cmd.Run(cmd, []string{"secrets/mysecret"})
+    if buf.String() != "secret/mysecret\n" {
+        t.Errorf("unexpected output: %s", buf.String())
+    }
+}
+
 func TestDeleteNamedObject(t *testing.T) {
     initTestErrorHandler(t)
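The helper above treats the two cascade modes asymmetrically: a nil request body is acceptable only when no `OrphanDependents` value is expected (the `--cascade=false` case). A small standalone sketch exercising the same logic, assuming only the apimachinery `DeleteOptions` type:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Same shape as the test helper above: a nil body matches only a nil
// expectation; otherwise the decoded OrphanDependents must equal it.
func hasExpectedOrphanDependents(body io.ReadCloser, expected *bool) bool {
    if body == nil || expected == nil {
        return body == nil && expected == nil
    }
    var opts metav1.DeleteOptions
    raw, _ := ioutil.ReadAll(body)
    json.Unmarshal(raw, &opts)
    return opts.OrphanDependents != nil && *expected == *opts.OrphanDependents
}

func main() {
    falseVar := false
    cascadeBody := ioutil.NopCloser(bytes.NewBufferString(`{"orphanDependents":false}`))
    fmt.Println(hasExpectedOrphanDependents(cascadeBody, &falseVar)) // true: --cascade=true
    fmt.Println(hasExpectedOrphanDependents(nil, nil))               // true: --cascade=false sends no body
}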
@@ -20,6 +20,7 @@ import (
     "strconv"

     "k8s.io/apimachinery/pkg/api/meta"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime"
@@ -95,10 +96,15 @@ func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Int
 }

 func (m *Helper) Delete(namespace, name string) error {
+    return m.DeleteWithOptions(namespace, name, nil)
+}
+
+func (m *Helper) DeleteWithOptions(namespace, name string, options *metav1.DeleteOptions) error {
     return m.RESTClient.Delete().
         NamespaceIfScoped(namespace, m.NamespaceScoped).
         Resource(m.Resource).
         Name(name).
+        Body(options).
         Do().
         Error()
 }
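A hedged usage sketch for the new entry point: `Delete` keeps its old behavior by delegating with a nil body, while callers that want server-side cascading pass explicit options. The function name and the `client`/`mapping` parameters here are illustrative stand-ins for values normally produced by the resource builder, and the import path assumes this tree's `pkg/kubectl/resource`:

package example

import (
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/kubectl/resource"
)

// deleteWithoutOrphaning issues a DELETE whose body disables orphaning,
// mirroring what cascadingDeleteResource in delete.go now does.
func deleteWithoutOrphaning(client resource.RESTClient, mapping *meta.RESTMapping, namespace, name string) error {
    falseVar := false
    opts := &metav1.DeleteOptions{OrphanDependents: &falseVar}
    return resource.NewHelper(client, mapping).DeleteWithOptions(namespace, name, opts)
}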
@@ -322,7 +322,9 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio
         return err
     }

-    return reaper.client.DaemonSets(namespace).Delete(name, nil)
+    falseVar := false
+    deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+    return reaper.client.DaemonSets(namespace).Delete(name, deleteOptions)
 }

 func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
@@ -366,7 +368,9 @@ func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Durat

     // TODO: Cleanup volumes? We don't want to accidentally delete volumes from
     // stop, so just leave this up to the statefulset.
-    return statefulsets.Delete(name, nil)
+    falseVar := false
+    deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+    return statefulsets.Delete(name, deleteOptions)
 }

 func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
@@ -408,8 +412,10 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gra
     if len(errList) > 0 {
         return utilerrors.NewAggregate(errList)
     }
-    // once we have all the pods removed we can safely remove the job itself
-    return jobs.Delete(name, nil)
+    // once we have all the pods removed we can safely remove the job itself.
+    falseVar := false
+    deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+    return jobs.Delete(name, deleteOptions)
 }

 func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
@@ -509,5 +515,7 @@ func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration,
     if err != nil {
         return err
     }
-    return services.Delete(name, nil)
+    falseVar := false
+    deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+    return services.Delete(name, deleteOptions)
 }
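The same three lines now recur in the DaemonSet, StatefulSet, Job, and Service reapers. A hypothetical helper, not part of this commit, that captures the shared pattern:

package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// nonOrphaningDeleteOptions builds the options every reaper above now sends:
// OrphanDependents=false asks the server-side garbage collector to delete
// dependents instead of orphaning them.
func nonOrphaningDeleteOptions() *metav1.DeleteOptions {
    falseVar := false
    return &metav1.DeleteOptions{OrphanDependents: &falseVar}
}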