Merge pull request #114252 from ardaguclu/scale-piped-input

kubectl scale: Use visitor only once
This commit is contained in:
Kubernetes Prow Robot 2022-12-10 07:55:19 -08:00 committed by GitHub
commit 9758911fd5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 59 additions and 26 deletions

View File

@@ -144,16 +144,18 @@ func (o *ScaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st
if err != nil {
return err
}
o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd)
if err != nil {
return err
}
cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy)
printer, err := o.PrintFlags.ToPrinter()
if err != nil {
return err
}
o.PrintObj = printer.PrintObj
o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd)
if err != nil {
return err
}
dynamicClient, err := f.DynamicClient()
if err != nil {
return err
@@ -209,13 +211,11 @@ func (o *ScaleOptions) RunScale() error {
return err
}
infos := []*resource.Info{}
r.Visit(func(info *resource.Info, err error) error {
if err == nil {
infos = append(infos, info)
}
return nil
})
// We don't immediately return infoErr if it is not nil.
// Because we want to proceed for other valid resources and
// at the end of the function, we'll return this
// to show invalid resources to the user.
infos, infoErr := r.Infos()
if len(o.ResourceVersion) != 0 && len(infos) > 1 {
return fmt.Errorf("cannot use --resource-version with multiple resources")
@@ -234,17 +234,19 @@ func (o *ScaleOptions) RunScale() error {
waitForReplicas = scale.NewRetryParams(1*time.Second, o.Timeout)
}
counter := 0
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
counter++
if len(infos) == 0 {
return fmt.Errorf("no objects passed to scale")
}
for _, info := range infos {
mapping := info.ResourceMapping()
if o.dryRunStrategy == cmdutil.DryRunClient {
return o.PrintObj(info.Object, o.Out)
if err := o.PrintObj(info.Object, o.Out); err != nil {
return err
}
continue
}
if err := o.scaler.Scale(info.Namespace, info.Name, uint(o.Replicas), precondition, retry, waitForReplicas, mapping.Resource, o.dryRunStrategy == cmdutil.DryRunServer); err != nil {
return err
}
@@ -263,15 +265,13 @@ func (o *ScaleOptions) RunScale() error {
}
}
return o.PrintObj(info.Object, o.Out)
})
if err != nil {
return err
err := o.PrintObj(info.Object, o.Out)
if err != nil {
return err
}
}
if counter == 0 {
return fmt.Errorf("no objects passed to scale")
}
return nil
return infoErr
}
func scaler(f cmdutil.Factory) (scale.Scaler, error) {

View File

@@ -1272,6 +1272,20 @@ run_rc_tests() {
### Scale multiple replication controllers
kubectl create -f test/e2e/testing-manifests/guestbook/legacy/redis-master-controller.yaml "${kube_flags[@]}"
kubectl create -f test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
# Command dry-run client
output_message=$(kubectl scale rc/redis-master rc/redis-slave --replicas=4 --dry-run=client "${kube_flags[@]}")
# Post-condition dry-run client: replicas unchanged (1 for redis-master, 2 for redis-slave)
kube::test::if_has_string "${output_message}" 'replicationcontroller/redis-master scaled (dry run)'
kube::test::if_has_string "${output_message}" 'replicationcontroller/redis-slave scaled (dry run)'
kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '2'
# Command dry-run server
output_message=$(kubectl scale rc/redis-master rc/redis-slave --replicas=4 --dry-run=server "${kube_flags[@]}")
# Post-condition dry-run server: replicas unchanged (1 for redis-master, 2 for redis-slave)
kube::test::if_has_string "${output_message}" 'replicationcontroller/redis-master scaled (server dry run)'
kube::test::if_has_string "${output_message}" 'replicationcontroller/redis-slave scaled (server dry run)'
kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '2'
# Command
kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
# Post-condition: 4 replicas each
@@ -1282,6 +1296,16 @@ run_rc_tests() {
### Scale a deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Command dry-run client
output_message=$(kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment --dry-run=client)
# Post-condition: still 3 replicas for nginx-deployment after dry-run client
kube::test::if_has_string "${output_message}" 'nginx-deployment scaled (dry run)'
kube::test::get_object_assert 'deployment nginx-deployment' "{{${deployment_replicas:?}}}" '3'
# Command dry-run server
output_message=$(kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment --dry-run=server)
# Post-condition: still 3 replicas for nginx-deployment after dry-run server
kube::test::if_has_string "${output_message}" 'nginx-deployment scaled (server dry run)'
kube::test::get_object_assert 'deployment nginx-deployment' "{{${deployment_replicas:?}}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
# Post-condition: 1 replica for nginx-deployment
@@ -1289,6 +1313,15 @@ run_rc_tests() {
# Clean-up
kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
### Scale a deployment with piped input
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Command
kubectl get deployment/nginx-deployment -o json | kubectl scale --replicas=2 -f -
# Post-condition: 2 replicas for nginx-deployment
kube::test::get_object_assert 'deployment nginx-deployment' "{{${deployment_replicas:?}}}" '2'
# Clean-up
kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
### Expose deployments by creating a service
# Uses deployment selectors for created service
output_message=$(kubectl expose -f test/fixtures/pkg/kubectl/cmd/expose/appsv1deployment.yaml --port 80 2>&1 "${kube_flags[@]}")