Merge pull request #59506 from juanvallejo/jvallejo/handle-watch-multiple-reqs
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

fix --watch on multiple requests

**Release note**:
```release-note
NONE
```

`kubectl get <resource> --watch` only supports watching a single resource kind at a time, and enforces this with a check that fails if more than one resource `Info` is returned by the builder. When dealing with large quantities of a single resource kind, or an amount that exceeds the value of `--chunk-size`, more than one request is made to the server, causing a resource `Info` to be created for each request and ultimately causing that check to fail even though we are dealing with a single resource kind.

This patch modifies the check to take into account the GVKs of all infos returned, failing only if at least one differs from the others.

cc @deads2k
This commit is contained in commit 198a098d9d.
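The heart of the patch is a guard that tolerates duplicate `Info` objects of a single kind while still rejecting genuinely mixed kinds. Below is a minimal standalone sketch of that idea, counting distinct GVKs with a set rather than the patch's comparison loop; the `uniqueGVKCount` helper is illustrative only, not part of the patch, and the import path assumes the current cli-runtime layout rather than the 2018-era `pkg/kubectl/resource` one.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/cli-runtime/pkg/resource"
)

// uniqueGVKCount counts the distinct GroupVersionKinds among the infos
// returned by the builder. Chunked list requests produce several infos
// that all share one GVK, so a watch should only be rejected when this
// count exceeds one.
func uniqueGVKCount(infos []*resource.Info) int {
	seen := map[schema.GroupVersionKind]bool{}
	for _, info := range infos {
		seen[info.Mapping.GroupVersionKind] = true
	}
	return len(seen)
}

func main() {
	// With no infos there is nothing to watch; with many infos of one
	// GVK (the chunked case) the count stays at 1 and the watch proceeds.
	fmt.Println(uniqueGVKCount(nil)) // prints 0
}
```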
@@ -1430,6 +1430,18 @@ run_kubectl_get_tests() {
   # Post-condition: Check if we get a limit and continue
   kube::test::if_has_string "${output_message}" "/clusterroles?limit=500 200 OK"
 
+  ### Test kubectl get chunk size does not result in a --watch error when resource list is served in multiple chunks
+  # Pre-condition: no ConfigMaps exist
+  kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Post-condition: Create three configmaps and ensure that we can --watch them with a --chunk-size of 1
+  kubectl create cm one "${kube_flags[@]}"
+  kubectl create cm two "${kube_flags[@]}"
+  kubectl create cm three "${kube_flags[@]}"
+  output_message=$(kubectl get configmap --chunk-size=1 --watch --request-timeout=1s 2>&1 "${kube_flags[@]}")
+  kube::test::if_has_not_string "${output_message}" "watch is only supported on individual resources"
+  output_message=$(kubectl get configmap --chunk-size=1 --watch-only --request-timeout=1s 2>&1 "${kube_flags[@]}")
+  kube::test::if_has_not_string "${output_message}" "watch is only supported on individual resources"
+
   ### Test --allow-missing-template-keys
   # Pre-condition: no POD exists
   create_and_use_new_namespace
@@ -463,8 +463,25 @@ func (options *GetOptions) watch(f cmdutil.Factory, cmd *cobra.Command, args []s
 	if err != nil {
 		return err
 	}
-	if len(infos) != 1 {
-		return i18n.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(infos))
+	if len(infos) > 1 {
+		gvk := infos[0].Mapping.GroupVersionKind
+		uniqueGVKs := 1
+
+		// If requesting a resource count greater than a request's --chunk-size,
+		// we will end up making multiple requests to the server, with each
+		// request producing its own "Info" object. Although overall we are
+		// dealing with a single resource type, we will end up with multiple
+		// infos returned by the builder. To handle this case, only fail if we
+		// have at least one info with a different GVK than the others.
+		for _, info := range infos {
+			if info.Mapping.GroupVersionKind != gvk {
+				uniqueGVKs++
+			}
+		}
+
+		if uniqueGVKs > 1 {
+			return i18n.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", uniqueGVKs)
+		}
 	}
 
 	filterOpts := cmdutil.ExtractCmdPrintOptions(cmd, options.AllNamespaces)
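For context on why the builder returns multiple infos at all: `--chunk-size` maps to the list API's `limit` and `continue` pagination parameters, with one request per chunk. A hedged sketch of that request loop against a recent client-go follows; the namespace and kubeconfig handling here are illustrative assumptions, not anything this PR touches.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// List ConfigMaps one at a time, the way `kubectl get configmap
	// --chunk-size=1` pages through the collection.
	continueToken := ""
	for {
		list, err := client.CoreV1().ConfigMaps("default").List(context.TODO(), metav1.ListOptions{
			Limit:    1, // analogous to --chunk-size=1
			Continue: continueToken,
		})
		if err != nil {
			panic(err)
		}
		for _, cm := range list.Items {
			fmt.Println(cm.Name)
		}
		// The server hands back a continue token until the collection is
		// exhausted; each iteration is a separate request, and kubectl's
		// builder turns each response into its own Info of the same GVK.
		continueToken = list.Continue
		if continueToken == "" {
			break
		}
	}
}
```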