Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 10:51:29 +00:00
Merge pull request #108777 from pjo256/recursive-rollout-status

feat(kubectl rollout): support multiple resources for rollout status

Commit 47bb8c6d0c

The change builds the rollout-status query with ContinueOnError() instead of SingleResourceType() and visits each resolved resource in turn, so kubectl rollout status can report on several resources at once (including directories passed with -f, optionally recursively with -R) instead of erroring when more than one resource is found.
@@ -168,73 +168,67 @@ func (o *RolloutStatusOptions) Run() error {
 		LabelSelectorParam(o.LabelSelector).
 		FilenameParam(o.EnforceNamespace, o.FilenameOptions).
 		ResourceTypeOrNameArgs(true, o.BuilderArgs...).
-		SingleResourceType().
+		ContinueOnError().
 		Latest().
 		Do()
 
 	err := r.Err()
 	if err != nil {
 		return err
 	}
 
-	infos, err := r.Infos()
-	if err != nil {
-		return err
-	}
-	if len(infos) != 1 {
-		return fmt.Errorf("rollout status is only supported on individual resources and resource collections - %d resources were found", len(infos))
-	}
-	info := infos[0]
-	mapping := info.ResourceMapping()
-
-	statusViewer, err := o.StatusViewerFn(mapping)
-	if err != nil {
-		return err
-	}
-
-	fieldSelector := fields.OneTermEqualSelector("metadata.name", info.Name).String()
-	lw := &cache.ListWatch{
-		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-			options.FieldSelector = fieldSelector
-			return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(context.TODO(), options)
-		},
-		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-			options.FieldSelector = fieldSelector
-			return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(context.TODO(), options)
-		},
-	}
-
-	// if the rollout isn't done yet, keep watching deployment status
-	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), o.Timeout)
-	intr := interrupt.New(nil, cancel)
-	return intr.Run(func() error {
-		_, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
-			switch t := e.Type; t {
-			case watch.Added, watch.Modified:
-				status, done, err := statusViewer.Status(e.Object.(runtime.Unstructured), o.Revision)
-				if err != nil {
-					return false, err
-				}
-				fmt.Fprintf(o.Out, "%s", status)
-				// Quit waiting if the rollout is done
-				if done {
-					return true, nil
-				}
-
-				shouldWatch := o.Watch
-				if !shouldWatch {
-					return true, nil
-				}
-
-				return false, nil
-
-			case watch.Deleted:
-				// We need to abort to avoid cases of recreation and not to silently watch the wrong (new) object
-				return true, fmt.Errorf("object has been deleted")
-
-			default:
-				return true, fmt.Errorf("internal error: unexpected event %#v", e)
-			}
-		})
-		return err
-	})
+	return r.Visit(func(info *resource.Info, err error) error {
+		mapping := info.ResourceMapping()
+		statusViewer, err := o.StatusViewerFn(mapping)
+		if err != nil {
+			return err
+		}
+
+		fieldSelector := fields.OneTermEqualSelector("metadata.name", info.Name).String()
+		lw := &cache.ListWatch{
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				options.FieldSelector = fieldSelector
+				return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(context.TODO(), options)
+			},
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				options.FieldSelector = fieldSelector
+				return o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(context.TODO(), options)
+			},
+		}
+
+		// if the rollout isn't done yet, keep watching deployment status
+		ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), o.Timeout)
+		intr := interrupt.New(nil, cancel)
+		return intr.Run(func() error {
+			_, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
+				switch t := e.Type; t {
+				case watch.Added, watch.Modified:
+					status, done, err := statusViewer.Status(e.Object.(runtime.Unstructured), o.Revision)
+					if err != nil {
+						return false, err
+					}
+					fmt.Fprintf(o.Out, "%s", status)
+					// Quit waiting if the rollout is done
+					if done {
+						return true, nil
+					}
+
+					shouldWatch := o.Watch
+					if !shouldWatch {
+						return true, nil
+					}
+
+					return false, nil
+
+				case watch.Deleted:
+					// We need to abort to avoid cases of recreation and not to silently watch the wrong (new) object
+					return true, fmt.Errorf("object has been deleted")
+
+				default:
+					return true, fmt.Errorf("internal error: unexpected event %#v", e)
+				}
+			})
+			return err
+		})
+	})
 }
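The hunk above boils down to two things: ContinueOnError() instead of SingleResourceType() when building the query, and r.Visit() instead of the old exactly-one-Info check when consuming the result. The following is a minimal, self-contained sketch of that builder/visitor pattern, not the PR's code; it assumes the k8s.io/cli-runtime module, a reachable kubeconfig, and placeholder arguments such as "deployment/nginx0 deployment/nginx1".

package main

import (
	"fmt"
	"os"

	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/cli-runtime/pkg/resource"
)

func main() {
	// ConfigFlags satisfies the RESTClientGetter the builder needs; by default
	// it reads the usual kubeconfig (path and context are assumptions here).
	configFlags := genericclioptions.NewConfigFlags(true)

	r := resource.NewBuilder(configFlags).
		Unstructured().
		NamespaceParam("default").DefaultNamespace(). // illustrative namespace
		ResourceTypeOrNameArgs(true, os.Args[1:]...). // e.g. deployment/nginx0 deployment/nginx1
		ContinueOnError().                            // keep going if one argument fails to resolve
		Latest().
		Flatten().
		Do()

	if err := r.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Visit runs the callback once per resolved resource.Info, which is what
	// replaces the old "exactly one resource" restriction in Run().
	err := r.Visit(func(info *resource.Info, err error) error {
		if err != nil {
			return err
		}
		fmt.Printf("would watch rollout of %s %s/%s\n",
			info.Mapping.GroupVersionKind.Kind, info.Namespace, info.Name)
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

Because the builder continues on error, a malformed manifest (like the nginx2 fixture in the tests below) is reported but does not prevent the other resources from being visited.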
@@ -383,6 +383,17 @@ run_recursive_resources_tests() {
   # Post-condition: nginx0 & nginx1 should both have paused set to nothing, and since nginx2 is malformed, it should error
   kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "<no value>:<no value>:"
   kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+  ## Fetch rollout status for multiple resources
+  output_message=$(! kubectl rollout status -f hack/testdata/recursive/deployment/deployment --timeout=1s 2>&1 "${kube_flags[@]:?}")
+  # Post-condition: nginx1 should both exist and nginx2 should error
+  kube::test::if_has_string "${output_message}" "Waiting for deployment \"nginx1-deployment\" rollout to finish"
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+  ## Fetch rollout status for deployments recursively
+  output_message=$(! kubectl rollout status -f hack/testdata/recursive/deployment -R --timeout=1s 2>&1 "${kube_flags[@]:?}")
+  # Post-condition: nginx0 & nginx1 should both exist, nginx2 should error
+  kube::test::if_has_string "${output_message}" "Waiting for deployment \"nginx0-deployment\" rollout to finish"
+  kube::test::if_has_string "${output_message}" "Waiting for deployment \"nginx1-deployment\" rollout to finish"
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
   ## Retrieve the rollout history of the deployments recursively
   output_message=$(! kubectl rollout history -f hack/testdata/recursive/deployment --recursive 2>&1 "${kube_flags[@]}")
   # Post-condition: nginx0 & nginx1 should both have a history, and since nginx2 is malformed, it should error
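For orientation, and likewise not part of the commit: a standalone sketch of the per-resource list/watch loop that the Visit callback sets up for each object the tests above create. A field selector on metadata.name pins the ListWatch to one object, and watchtools.UntilWithSync blocks until the condition function reports done or the timeout expires. The kubeconfig path, namespace, deployment name, 30-second timeout, and the readyReplicas condition are all assumptions made for the sake of a runnable example.

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	watchtools "k8s.io/client-go/tools/watch"
)

func main() {
	// Assumed kubeconfig location; the PR's code gets its client from the
	// command's configured DynamicClient instead.
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	namespace, name := "default", "nginx1-deployment" // illustrative values (name borrowed from the tests above)

	// Restrict both List and Watch to the single object being tracked.
	fieldSelector := fields.OneTermEqualSelector("metadata.name", name).String()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector
			return client.Resource(gvr).Namespace(namespace).List(context.TODO(), options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector
			return client.Resource(gvr).Namespace(namespace).Watch(context.TODO(), options)
		},
	}

	// 30s stands in for o.Timeout; zero would mean "no timeout".
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Block until the condition function reports done, the object disappears,
	// or the context times out. A naive readyReplicas check stands in for the
	// statusViewer.Status call used by kubectl.
	_, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
		switch e.Type {
		case watch.Added, watch.Modified:
			obj, ok := e.Object.(*unstructured.Unstructured)
			if !ok {
				return true, fmt.Errorf("unexpected object type %T", e.Object)
			}
			want, found, _ := unstructured.NestedInt64(obj.Object, "spec", "replicas")
			if !found {
				want = 1 // Deployments default to one replica when spec.replicas is unset
			}
			ready, _, _ := unstructured.NestedInt64(obj.Object, "status", "readyReplicas")
			fmt.Printf("%s: %d of %d replicas ready\n", name, ready, want)
			return ready == want, nil
		case watch.Deleted:
			// Abort instead of silently re-attaching to a recreated object.
			return true, fmt.Errorf("object has been deleted")
		default:
			return true, fmt.Errorf("unexpected event %#v", e)
		}
	})
	if err != nil {
		panic(err)
	}
}

Scoping the watch to a single name keeps each informer cheap, which is what lets Run() start one such loop per visited resource instead of listing whole collections.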