Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-24 20:24:09 +00:00
Fix kubectl drain integration tests
These tests were not performing evictions because:
- Test pods were not on the test node (fixed with spec.nodeName)
- The test pods are unmanaged and require the --force flag to be evicted (added the flag)
- The test node does not run pods, which prevents pod deletion from finalizing (worked around with --skip-wait-for-delete-timeout)
This commit is contained in:
parent ff9ada1b55
commit 8bae3b449a
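
For orientation, here is a rough, self-contained sketch of the pattern the fix relies on. It is illustrative only: the pod name drain-demo and the type=test-pod selector are stand-ins for the test's own pods, and test-image mirrors the placeholder image the tests use.

# A bare (unmanaged) pod pinned to the test node via spec.nodeName; without the
# pinning, the integration environment never schedules it and drain finds nothing to evict.
kubectl create -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: drain-demo
  labels:
    type: test-pod
spec:
  nodeName: 127.0.0.1
  containers:
  - name: c
    image: test-image
EOF

# Bare pods have no controller, so eviction requires --force; the unready test node
# never finalizes pod deletion, so --skip-wait-for-delete-timeout keeps drain from
# blocking on pods that are stuck terminating.
kubectl drain 127.0.0.1 --force --pod-selector 'type=test-pod' --skip-wait-for-delete-timeout=1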
@@ -18,15 +18,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-run_cluster_management_tests() {
-  set -o nounset
-  set -o errexit
-
-  create_and_use_new_namespace
-  kube::log::status "Testing cluster-management commands"
-
-  kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
-
+create_test_pods(){
   # create test pods we can work with
   kubectl create -f - "${kube_flags[@]:?}" << __EOF__
 {
@@ -35,10 +27,12 @@ run_cluster_management_tests() {
   "metadata": {
     "name": "test-pod-1",
     "labels": {
-      "e": "f"
+      "e": "f",
+      "type": "test-pod"
     }
   },
   "spec": {
+    "nodeName": "127.0.0.1",
     "containers": [
       {
         "name": "container-1",
@@ -57,10 +51,12 @@ __EOF__
   "metadata": {
     "name": "test-pod-2",
     "labels": {
-      "c": "d"
+      "c": "d",
+      "type": "test-pod"
     }
   },
   "spec": {
+    "nodeName": "127.0.0.1",
     "containers": [
       {
         "name": "container-1",
@@ -71,6 +67,24 @@ __EOF__
   }
 }
 __EOF__
+}
+
+delete_test_pods() {
+  # need to use --force because node is unready
+  kubectl delete pod/test-pod-1 --force --ignore-not-found
+  kubectl delete pod/test-pod-2 --force --ignore-not-found
+}
+
+run_cluster_management_tests() {
+  set -o nounset
+  set -o errexit
+
+  create_and_use_new_namespace
+  kube::log::status "Testing cluster-management commands"
+
+  kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
+
+  create_test_pods
 
   # taint/untaint
   # Pre-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
@@ -109,8 +123,8 @@ __EOF__
   ### kubectl drain update with --dry-run does not mark node unschedulable
   # Pre-condition: node is schedulable
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
-  kubectl drain "127.0.0.1" --dry-run=client
-  kubectl drain "127.0.0.1" --dry-run=server
+  kubectl drain "127.0.0.1" --dry-run=client --force
+  kubectl drain "127.0.0.1" --dry-run=server --force
   # Post-condition: node still exists, node is still schedulable
   kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
@@ -121,15 +135,17 @@ __EOF__
   # Pre-condition: test-pod-1 and test-pod-2 exist
   kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
   # dry-run command
-  kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=client
-  kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=server
+  kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=client --force
+  kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=server --force
   kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
-  # command
-  kubectl drain "127.0.0.1" --pod-selector 'e in (f)'
-  # only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist
-  kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2'
-  # delete pod no longer in use
-  kubectl delete pod/test-pod-2
+  # command - need --force because pod is unmanaged and --skip-wait-for-delete-timeout because node is unready
+  response=$(kubectl drain "127.0.0.1" --force --pod-selector 'e in (f)' --skip-wait-for-delete-timeout=1)
+  kube::test::if_has_string "${response}" "evicting pod .*/test-pod-1"
+  # only "test-pod-1" should have been matched and deleted - test-pod-2 should not have a deletion timestamp
+  kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.deletionTimestamp}}" '<no value>'
+  # Post-condition: recreate test pods -- they have deletionTimestamp set but will not go away because node is unready
+  delete_test_pods
+  create_test_pods
   # Post-condition: node is schedulable
   kubectl uncordon "127.0.0.1"
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
@@ -170,6 +186,9 @@ __EOF__
   # Post-condition: node "127.0.0.1" is cordoned
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
 
+  # Clean up test pods
+  delete_test_pods
+
   set +o nounset
   set +o errexit
 }
|