Merge pull request #23673 from metral/multiple-resources

Automatic merge from submit-queue

allow kubectl subcmds to process multiple resources

~~autoscale, expose & patch~~ Many kubectl subcommands were limited to processing one resource at a time.

This PR allows those subcommands to process multiple resources.

This PR follows up on https://github.com/kubernetes/kubernetes/pull/23116#issuecomment-202360784 by @deads2k.
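
The pattern is the same in every subcommand this PR touches: instead of materializing the builder's results with `r.Infos()` and rejecting anything beyond a single resource, each command now walks the results with `r.Visit(...)`, runs its per-object logic inside the callback, and counts successes so it can still fail when nothing was processed. A minimal, self-contained sketch of the idiom (the `Info` type and `visit` helper are stand-ins for kubectl's `resource.Info` and `resource.Result.Visit`, not the real API):

```go
package main

import "fmt"

// Info stands in for kubectl's resource.Info.
type Info struct{ Name string }

// visit stands in for resource.Result.Visit: each decoded object (plus any
// error attached to it) is handed to a callback in turn, instead of being
// collected up front via r.Infos().
func visit(items []Info, fn func(*Info, error) error) error {
	for i := range items {
		if err := fn(&items[i], nil); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	count := 0
	err := visit([]Info{{"busybox0"}, {"busybox1"}}, func(info *Info, err error) error {
		if err != nil {
			return err // e.g. "Object 'Kind' is missing" for a malformed manifest
		}
		// per-object work (generate an HPA, expose, patch, scale, ...) goes here
		count++
		fmt.Printf("%s processed\n", info.Name)
		return nil
	})
	if err != nil {
		fmt.Println(err) // the real commands return err here
		return
	}
	if count == 0 {
		fmt.Println("no objects passed to the command")
	}
}
```

Note that the sketch stops at the first error only to stay short; in the real builder chain, `ContinueOnError()` lets `Visit` keep going past a bad object and aggregate the errors, which is how a malformed manifest can surface an error without blocking the valid ones.
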
k8s-merge-robot 2016-05-02 07:09:44 -07:00
commit f500194d92
19 changed files with 676 additions and 231 deletions

@@ -876,6 +876,166 @@ __EOF__
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
#####################################
# Recursive Resources via directory #
#####################################
### Create multiple busybox PODs recursively from directory of YAML files
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl create -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are created, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Replace multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl replace -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are replaced, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Describe multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl describe -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are described, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "app=busybox0"
kube::test::if_has_string "${output_message}" "app=busybox1"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Annotate multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl annotate -f hack/testdata/recursive/pod annotatekey='annotatevalue' --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are annotated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${annotations_field}.annotatekey}}:{{end}}" 'annotatevalue:annotatevalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Apply multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl apply -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are updated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Convert multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl convert -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are converted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Get multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message1=$(kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}" -o go-template="{{range.items}}{{$id_field}}:{{end}}")
output_message2=$(! kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are retrieved, but because busybox2 is malformed, it should not show up
kube::test::if_has_string "${output_message1}" "busybox0:busybox1:"
kube::test::if_has_string "${output_message2}" "Object 'Kind' is missing"
## Label multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl label -f hack/testdata/recursive/pod mylabel='myvalue' --recursive 2>&1 "${kube_flags[@]}")
echo $output_message
# Post-condition: busybox0 & busybox1 PODs are labeled, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.mylabel}}:{{end}}" 'myvalue:myvalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Patch multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl patch -f hack/testdata/recursive/pod -p='{"spec":{"containers":[{"name":"busybox","image":"prom/busybox"}]}}' --recursive 2>&1 "${kube_flags[@]}")
echo $output_message
# Post-condition: busybox0 & busybox1 PODs are patched, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/pod --recursive --grace-period=0 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Create multiple replication controllers recursively from directory of YAML files
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
# Post-condition: busybox0 & busybox1 replication controllers are created, and since busybox2 is malformed, it should error
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
### Autoscale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1 replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl autoscale --min=1 --max=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are autoscaled
# with min. of 1 replica & max of 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'hpa busybox0' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::get_object_assert 'hpa busybox1' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kubectl delete hpa busybox0 "${kube_flags[@]}"
kubectl delete hpa busybox1 "${kube_flags[@]}"
### Expose multiple replication controllers as service recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1 replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl expose -f hack/testdata/recursive/rc --recursive --port=80 2>&1 "${kube_flags[@]}")
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service busybox0' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::get_object_assert 'service busybox1' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Scale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1 replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl scale --current-replicas=1 --replicas=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are scaled to 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '2'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '2'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/rc --recursive --grace-period=0 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
##############
# Namespaces #
##############
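
A note on the idiom used throughout these cases: `output_message=$(! kubectl ... 2>&1 "${kube_flags[@]}")` inverts kubectl's exit status with `!`, so the assignment succeeds exactly when kubectl fails (as it must here, given the deliberately malformed busybox2 fixture), while `2>&1` folds stderr into the captured output that the `kube::test::if_has_string` assertions inspect.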

@@ -0,0 +1,18 @@
apiVersion: extensions/v1beta1
ind: Deployment
metadata:
name: nginx2-deployment
labels:
app: nginx2-deployment
spec:
replicas: 2
template:
metadata:
labels:
app: nginx2
spec:
containers:
- name: nginx
image: gcr.io/google-containers/nginx:1.7.9
ports:
- containerPort: 80

@@ -0,0 +1,18 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx1-deployment
labels:
app: nginx1-deployment
spec:
replicas: 2
template:
metadata:
labels:
app: nginx1
spec:
containers:
- name: nginx
image: gcr.io/google-containers/nginx:1.7.9
ports:
- containerPort: 80

@@ -0,0 +1,18 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx0-deployment
labels:
app: nginx0-deployment
spec:
replicas: 2
template:
metadata:
labels:
app: nginx0
spec:
containers:
- name: nginx
image: gcr.io/google-containers/nginx:1.7.9
ports:
- containerPort: 80

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
name: busybox0
labels:
app: busybox0
status: replaced
spec:
containers:
- image: busybox
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
name: busybox
restartPolicy: Always

@@ -0,0 +1,16 @@
apiVersion: v1
ind: Pod
metadata:
name: busybox2
labels:
app: busybox2
status: replaced
spec:
containers:
- image: busybox
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
name: busybox
restartPolicy: Always

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
name: busybox1
labels:
app: busybox1
status: replaced
spec:
containers:
- image: busybox
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
name: busybox
restartPolicy: Always

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: busybox0
labels:
app: busybox0
spec:
containers:
- image: busybox
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
name: busybox
restartPolicy: Always

@@ -0,0 +1,15 @@
apiVersion: v1
ind: Pod
metadata:
name: busybox2
labels:
app: busybox2
spec:
containers:
- image: busybox
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
name: busybox
restartPolicy: Always

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: busybox1
labels:
app: busybox1
spec:
containers:
- image: busybox
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
name: busybox
restartPolicy: Always

hack/testdata/recursive/rc/busybox.yaml

@@ -0,0 +1,24 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: busybox0
labels:
app: busybox0
spec:
replicas: 1
selector:
app: busybox0
template:
metadata:
name: busybox0
labels:
app: busybox0
spec:
containers:
- image: busybox
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
name: busybox
restartPolicy: Always

@@ -0,0 +1,24 @@
apiVersion: v1
ind: ReplicationController
metadata:
name: busybox2
labels:
app: busybox2
spec:
replicas: 1
selector:
app: busybox2
template:
metadata:
name: busybox2
labels:
app: busybox2
spec:
containers:
- image: busybox
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
name: busybox
restartPolicy: Always

@@ -0,0 +1,24 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: busybox1
labels:
app: busybox1
spec:
replicas: 1
selector:
app: busybox1
template:
metadata:
name: busybox1
labels:
app: busybox1
spec:
containers:
- image: busybox
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
name: busybox
restartPolicy: Always
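
The `ind:` key in each busybox2 (and nginx2) manifest above is deliberate, not a typo to fix: dropping the `k` from `kind:` leaves the object without a kind, which is exactly what makes the decoder fail with the "Object 'Kind' is missing" error the tests assert for.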

@@ -23,7 +23,7 @@ import (
"k8s.io/kubernetes/pkg/kubectl"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/util/errors"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"github.com/spf13/cobra"
)
@@ -97,18 +97,10 @@ func RunAutoscale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []
ResourceTypeOrNameArgs(false, args...).
Flatten().
Do()
infos, err := r.Infos()
err = r.Err()
if err != nil {
return err
}
if len(infos) > 1 {
return fmt.Errorf("multiple resources provided: %v", args)
}
info := infos[0]
mapping := info.ResourceMapping()
if err := f.CanBeAutoscaled(mapping.GroupVersionKind.GroupKind()); err != nil {
return err
}
// Get the generator, setup and validate all required parameters
generatorName := cmdutil.GetFlagString(cmd, "generator")
@@ -118,62 +110,84 @@ func RunAutoscale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []
return cmdutil.UsageError(cmd, fmt.Sprintf("generator %q not found.", generatorName))
}
names := generator.ParamNames()
params := kubectl.MakeParams(cmd, names)
name := info.Name
params["default-name"] = name
params["scaleRef-kind"] = mapping.GroupVersionKind.Kind
params["scaleRef-name"] = name
params["scaleRef-apiVersion"] = mapping.GroupVersionKind.GroupVersion().String()
if err = kubectl.ValidateParams(names, params); err != nil {
return err
}
// Check for invalid flags used against the present generator.
if err := kubectl.EnsureFlagsValid(cmd, generators, generatorName); err != nil {
return err
}
// Generate new object
object, err := generator.Generate(params)
if err != nil {
return err
}
resourceMapper := &resource.Mapper{
ObjectTyper: typer,
RESTMapper: mapper,
ClientMapper: resource.ClientMapperFunc(f.ClientForMapping),
Decoder: f.Decoder(true),
}
hpa, err := resourceMapper.InfoForObject(object, nil)
if err != nil {
return err
}
if cmdutil.ShouldRecord(cmd, hpa) {
if err := cmdutil.RecordChangeCause(hpa.Object, f.Command()); err != nil {
count := 0
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
object = hpa.Object
}
// TODO: extract this flag to a central location, when such a location exists.
if cmdutil.GetFlagBool(cmd, "dry-run") {
return f.PrintObject(cmd, mapper, object, out)
}
if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), hpa, f.JSONEncoder()); err != nil {
return err
}
mapping := info.ResourceMapping()
if err := f.CanBeAutoscaled(mapping.GroupVersionKind.GroupKind()); err != nil {
return err
}
object, err = resource.NewHelper(hpa.Client, hpa.Mapping).Create(namespace, false, object)
name := info.Name
params := kubectl.MakeParams(cmd, names)
params["default-name"] = name
params["scaleRef-kind"] = mapping.GroupVersionKind.Kind
params["scaleRef-name"] = name
params["scaleRef-apiVersion"] = mapping.GroupVersionKind.GroupVersion().String()
if err = kubectl.ValidateParams(names, params); err != nil {
return err
}
// Check for invalid flags used against the present generator.
if err := kubectl.EnsureFlagsValid(cmd, generators, generatorName); err != nil {
return err
}
// Generate new object
object, err := generator.Generate(params)
if err != nil {
return err
}
resourceMapper := &resource.Mapper{
ObjectTyper: typer,
RESTMapper: mapper,
ClientMapper: resource.ClientMapperFunc(f.ClientForMapping),
Decoder: f.Decoder(true),
}
hpa, err := resourceMapper.InfoForObject(object, nil)
if err != nil {
return err
}
if cmdutil.ShouldRecord(cmd, hpa) {
if err := cmdutil.RecordChangeCause(hpa.Object, f.Command()); err != nil {
return err
}
object = hpa.Object
}
// TODO: extract this flag to a central location, when such a location exists.
if cmdutil.GetFlagBool(cmd, "dry-run") {
return f.PrintObject(cmd, mapper, object, out)
}
if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), hpa, f.JSONEncoder()); err != nil {
return err
}
object, err = resource.NewHelper(hpa.Client, hpa.Mapping).Create(namespace, false, object)
if err != nil {
return err
}
count++
if len(cmdutil.GetFlagString(cmd, "output")) > 0 {
return f.PrintObject(cmd, mapper, object, out)
}
cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "autoscaled")
return nil
})
if err != nil {
return err
}
if len(cmdutil.GetFlagString(cmd, "output")) > 0 {
return f.PrintObject(cmd, mapper, object, out)
if count == 0 {
return fmt.Errorf("no objects passed to autoscale")
}
cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "autoscaled")
return nil
}
@@ -186,5 +200,5 @@ func validateFlags(cmd *cobra.Command) error {
if cpu > 100 {
errs = append(errs, fmt.Errorf("CPU utilization (%%) cannot exceed 100"))
}
return errors.NewAggregate(errs)
return utilerrors.NewAggregate(errs)
}

@@ -154,15 +154,32 @@ func (o *ConvertOptions) Complete(f *cmdutil.Factory, out io.Writer, cmd *cobra.
// RunConvert implements the generic Convert command
func (o *ConvertOptions) RunConvert() error {
infos, err := o.builder.Do().Infos()
r := o.builder.Do()
err := r.Err()
if err != nil {
return err
}
objects, err := resource.AsVersionedObject(infos, false, o.outputVersion.String(), o.encoder)
count := 0
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
infos := []*resource.Info{info}
objects, err := resource.AsVersionedObject(infos, false, o.outputVersion.String(), o.encoder)
if err != nil {
return err
}
count++
return o.printer.PrintObj(objects, o.out)
})
if err != nil {
return err
}
return o.printer.PrintObj(objects, o.out)
if count == 0 {
return fmt.Errorf("no objects passed to convert")
}
return nil
}
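
Because `resource.AsVersionedObject` takes a slice of infos, the per-object rewrite wraps each visited info in a one-element slice (`infos := []*resource.Info{info}`); conversion and printing are otherwise unchanged, and the `count` guard keeps `convert` failing when no object could be converted.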

@@ -140,20 +140,7 @@ func RunExpose(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str
ResourceTypeOrNameArgs(false, args...).
Flatten().
Do()
infos, err := r.Infos()
if err != nil {
return err
}
if len(infos) > 1 {
return fmt.Errorf("multiple resources provided: %v", args)
}
info := infos[0]
mapping := info.ResourceMapping()
if err := f.CanBeExposed(mapping.GroupVersionKind.GroupKind()); err != nil {
return err
}
// Get the input object
inputObject, err := r.Object()
err = r.Err()
if err != nil {
return err
}
@@ -166,101 +153,118 @@ func RunExpose(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str
return cmdutil.UsageError(cmd, fmt.Sprintf("generator %q not found.", generatorName))
}
names := generator.ParamNames()
params := kubectl.MakeParams(cmd, names)
name := info.Name
if len(name) > validation.DNS952LabelMaxLength {
name = name[:validation.DNS952LabelMaxLength]
}
params["default-name"] = name
// For objects that need a pod selector, derive it from the exposed object in case a user
// didn't explicitly specify one via --selector
if s, found := params["selector"]; found && kubectl.IsZero(s) {
s, err := f.MapBasedSelectorForObject(inputObject)
if err != nil {
return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't retrieve selectors via --selector flag or introspection: %s", err))
}
params["selector"] = s
}
// For objects that need a port, derive it from the exposed object in case a user
// didn't explicitly specify one via --port
if port, found := params["port"]; found && kubectl.IsZero(port) {
ports, err := f.PortsForObject(inputObject)
if err != nil {
return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't find port via --port flag or introspection: %s", err))
}
switch len(ports) {
case 0:
return cmdutil.UsageError(cmd, "couldn't find port via --port flag or introspection")
case 1:
params["port"] = ports[0]
default:
params["ports"] = strings.Join(ports, ",")
}
}
if kubectl.IsZero(params["labels"]) {
labels, err := f.LabelsForObject(inputObject)
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
params["labels"] = kubectl.MakeLabels(labels)
}
if err = kubectl.ValidateParams(names, params); err != nil {
return err
}
// Check for invalid flags used against the present generator.
if err := kubectl.EnsureFlagsValid(cmd, generators, generatorName); err != nil {
return err
}
// Generate new object
object, err := generator.Generate(params)
if err != nil {
return err
}
mapping := info.ResourceMapping()
if err := f.CanBeExposed(mapping.GroupVersionKind.GroupKind()); err != nil {
return err
}
if inline := cmdutil.GetFlagString(cmd, "overrides"); len(inline) > 0 {
codec := runtime.NewCodec(f.JSONEncoder(), f.Decoder(true))
object, err = cmdutil.Merge(codec, object, inline, mapping.GroupVersionKind.Kind)
params := kubectl.MakeParams(cmd, names)
name := info.Name
if len(name) > validation.DNS952LabelMaxLength {
name = name[:validation.DNS952LabelMaxLength]
}
params["default-name"] = name
// For objects that need a pod selector, derive it from the exposed object in case a user
// didn't explicitly specify one via --selector
if s, found := params["selector"]; found && kubectl.IsZero(s) {
s, err := f.MapBasedSelectorForObject(info.Object)
if err != nil {
return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't retrieve selectors via --selector flag or introspection: %s", err))
}
params["selector"] = s
}
// For objects that need a port, derive it from the exposed object in case a user
// didn't explicitly specify one via --port
if port, found := params["port"]; found && kubectl.IsZero(port) {
ports, err := f.PortsForObject(info.Object)
if err != nil {
return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't find port via --port flag or introspection: %s", err))
}
switch len(ports) {
case 0:
return cmdutil.UsageError(cmd, "couldn't find port via --port flag or introspection")
case 1:
params["port"] = ports[0]
default:
params["ports"] = strings.Join(ports, ",")
}
}
if kubectl.IsZero(params["labels"]) {
labels, err := f.LabelsForObject(info.Object)
if err != nil {
return err
}
params["labels"] = kubectl.MakeLabels(labels)
}
if err = kubectl.ValidateParams(names, params); err != nil {
return err
}
// Check for invalid flags used against the present generator.
if err := kubectl.EnsureFlagsValid(cmd, generators, generatorName); err != nil {
return err
}
// Generate new object
object, err := generator.Generate(params)
if err != nil {
return err
}
}
resourceMapper := &resource.Mapper{
ObjectTyper: typer,
RESTMapper: mapper,
ClientMapper: resource.ClientMapperFunc(f.ClientForMapping),
Decoder: f.Decoder(true),
}
info, err = resourceMapper.InfoForObject(object, nil)
if err != nil {
return err
}
if cmdutil.ShouldRecord(cmd, info) {
if err := cmdutil.RecordChangeCause(object, f.Command()); err != nil {
if inline := cmdutil.GetFlagString(cmd, "overrides"); len(inline) > 0 {
codec := runtime.NewCodec(f.JSONEncoder(), f.Decoder(true))
object, err = cmdutil.Merge(codec, object, inline, mapping.GroupVersionKind.Kind)
if err != nil {
return err
}
}
resourceMapper := &resource.Mapper{
ObjectTyper: typer,
RESTMapper: mapper,
ClientMapper: resource.ClientMapperFunc(f.ClientForMapping),
Decoder: f.Decoder(true),
}
info, err = resourceMapper.InfoForObject(object, nil)
if err != nil {
return err
}
if cmdutil.ShouldRecord(cmd, info) {
if err := cmdutil.RecordChangeCause(object, f.Command()); err != nil {
return err
}
}
info.Refresh(object, true)
// TODO: extract this flag to a central location, when such a location exists.
if cmdutil.GetFlagBool(cmd, "dry-run") {
return f.PrintObject(cmd, mapper, object, out)
}
if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info, f.JSONEncoder()); err != nil {
return err
}
}
info.Refresh(object, true)
// TODO: extract this flag to a central location, when such a location exists.
if cmdutil.GetFlagBool(cmd, "dry-run") {
return f.PrintObject(cmd, mapper, object, out)
}
if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info, f.JSONEncoder()); err != nil {
return err
}
// Serialize the object with the annotation applied.
object, err = resource.NewHelper(info.Client, info.Mapping).Create(namespace, false, object)
// Serialize the object with the annotation applied.
object, err = resource.NewHelper(info.Client, info.Mapping).Create(namespace, false, object)
if err != nil {
return err
}
if len(cmdutil.GetFlagString(cmd, "output")) > 0 {
return f.PrintObject(cmd, mapper, object, out)
}
cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "exposed")
return nil
})
if err != nil {
return err
}
if len(cmdutil.GetFlagString(cmd, "output")) > 0 {
return f.PrintObject(cmd, mapper, object, out)
}
cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "exposed")
return nil
}
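
One detail worth noting in the expose path: the default service name is truncated to `validation.DNS952LabelMaxLength` (24 characters) because generated service names must be valid DNS-952 labels; everything else, from selector and port inference through `--overrides` merging, simply moved inside the visit callback so it runs once per exposed object.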

@@ -26,6 +26,7 @@ import (
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/runtime"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/watch"
)
@@ -197,19 +198,39 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string
return nil
}
b := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces).
FilenameParam(enforceNamespace, options.Recursive, options.Filenames...).
SelectorParam(selector).
ExportParam(export).
ResourceTypeOrNameArgs(true, args...).
ContinueOnError().
Latest()
Latest().
Flatten().
Do()
err = r.Err()
if err != nil {
return err
}
printer, generic, err := cmdutil.PrinterForCommand(cmd)
if err != nil {
return err
}
infos := []*resource.Info{}
allErrs := []error{}
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
infos = append(infos, info)
return nil
})
if err != nil {
allErrs = append(allErrs, err)
}
if generic {
clientConfig, err := f.ClientConfig()
if err != nil {
@@ -217,11 +238,7 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string
}
singular := false
r := b.Flatten().Do()
infos, err := r.IntoSingular(&singular).Infos()
if err != nil {
return err
}
r.IntoSingular(&singular)
// the outermost object will be converted to the output-version, but inner
// objects can use their mappings
@@ -237,10 +254,6 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string
return printer.PrintObj(obj, out)
}
infos, err := b.Flatten().Do().Infos()
if err != nil {
return err
}
objs := make([]runtime.Object, len(infos))
for ix := range infos {
objs[ix] = infos[ix].Object
@@ -262,7 +275,8 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string
for ix := range infos {
objs[ix], err = infos[ix].Mapping.ConvertToVersion(infos[ix].Object, version.String())
if err != nil {
return err
allErrs = append(allErrs, err)
continue
}
}
@@ -291,19 +305,21 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string
if printer == nil || lastMapping == nil || mapping == nil || mapping.Resource != lastMapping.Resource {
printer, err = f.PrinterForMapping(cmd, mapping, allNamespaces)
if err != nil {
return err
allErrs = append(allErrs, err)
continue
}
lastMapping = mapping
}
if _, found := printer.(*kubectl.HumanReadablePrinter); found {
if err := printer.PrintObj(original, w); err != nil {
return err
allErrs = append(allErrs, err)
}
continue
}
if err := printer.PrintObj(original, w); err != nil {
return err
allErrs = append(allErrs, err)
continue
}
}
return nil
return utilerrors.NewAggregate(allErrs)
}
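
`RunGet` now accumulates per-object failures in `allErrs` and finishes with `utilerrors.NewAggregate`, so one unprintable or unconvertible object no longer aborts the rest of the listing. A small sketch of the aggregate behavior the success path relies on, assuming this tree's `k8s.io/kubernetes/pkg/util/errors` package is importable:

```go
package main

import (
	"fmt"

	utilerrors "k8s.io/kubernetes/pkg/util/errors"
)

func main() {
	var allErrs []error

	// Aggregating an empty slice yields nil, so an error-free RunGet
	// still returns a nil error.
	fmt.Println(utilerrors.NewAggregate(allErrs) == nil) // true

	// Per-object failures are reported together at the end instead of
	// failing fast on the first bad object.
	allErrs = append(allErrs,
		fmt.Errorf("Object 'Kind' is missing"),
		fmt.Errorf("error validating data: kind not set"))
	fmt.Println(utilerrors.NewAggregate(allErrs))
}
```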

@@ -137,35 +137,41 @@ func RunPatch(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri
return err
}
infos, err := r.Infos()
if err != nil {
return err
}
if len(infos) > 1 {
return fmt.Errorf("multiple resources provided")
}
info := infos[0]
name, namespace := info.Name, info.Namespace
mapping := info.ResourceMapping()
client, err := f.ClientForMapping(mapping)
if err != nil {
return err
}
helper := resource.NewHelper(client, mapping)
patchedObject, err := helper.Patch(namespace, name, patchType, patchBytes)
if err != nil {
return err
}
if cmdutil.ShouldRecord(cmd, info) {
if err := cmdutil.RecordChangeCause(patchedObject, f.Command()); err == nil {
// don't return an error on failure. The patch itself succeeded, its only the hint for that change that failed
// don't bother checking for failures of this replace, because a failure to indicate the hint doesn't fail the command
// also, don't force the replacement. If the replacement fails on a resourceVersion conflict, then it means this
// record hint is likely to be invalid anyway, so avoid the bad hint
resource.NewHelper(client, mapping).Replace(namespace, name, false, patchedObject)
count := 0
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
name, namespace := info.Name, info.Namespace
mapping := info.ResourceMapping()
client, err := f.ClientForMapping(mapping)
if err != nil {
return err
}
helper := resource.NewHelper(client, mapping)
patchedObject, err := helper.Patch(namespace, name, patchType, patchBytes)
if err != nil {
return err
}
if cmdutil.ShouldRecord(cmd, info) {
if err := cmdutil.RecordChangeCause(patchedObject, f.Command()); err == nil {
// don't return an error on failure. The patch itself succeeded, its only the hint for that change that failed
// don't bother checking for failures of this replace, because a failure to indicate the hint doesn't fail the command
// also, don't force the replacement. If the replacement fails on a resourceVersion conflict, then it means this
// record hint is likely to be invalid anyway, so avoid the bad hint
resource.NewHelper(client, mapping).Replace(namespace, name, false, patchedObject)
}
}
count++
cmdutil.PrintSuccess(mapper, shortOutput, out, "", name, "patched")
return nil
})
if err != nil {
return err
}
if count == 0 {
return fmt.Errorf("no objects passed to patch")
}
cmdutil.PrintSuccess(mapper, shortOutput, out, "", name, "patched")
return nil
}

@@ -27,7 +27,6 @@ import (
"k8s.io/kubernetes/pkg/kubectl"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
)
// ScaleOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of
@@ -122,43 +121,47 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri
return err
}
infos, err := r.Infos()
if err != nil {
return err
}
info := infos[0]
mapping := info.ResourceMapping()
scaler, err := f.Scaler(mapping)
if err != nil {
return err
}
infos := []*resource.Info{}
err = r.Visit(func(info *resource.Info, err error) error {
if err == nil {
infos = append(infos, info)
}
return nil
})
resourceVersion := cmdutil.GetFlagString(cmd, "resource-version")
if len(resourceVersion) != 0 && len(infos) > 1 {
return fmt.Errorf("cannot use --resource-version with multiple resources")
}
currentSize := cmdutil.GetFlagInt(cmd, "current-replicas")
if currentSize != -1 && len(infos) > 1 {
return fmt.Errorf("cannot use --current-replicas with multiple resources")
}
precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion}
retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
var waitForReplicas *kubectl.RetryParams
if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 {
waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout)
}
errs := []error{}
for _, info := range infos {
counter := 0
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
mapping := info.ResourceMapping()
scaler, err := f.Scaler(mapping)
if err != nil {
return err
}
currentSize := cmdutil.GetFlagInt(cmd, "current-replicas")
precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion}
retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
var waitForReplicas *kubectl.RetryParams
if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 {
waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout)
}
if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil {
errs = append(errs, err)
continue
return err
}
if cmdutil.ShouldRecord(cmd, info) {
patchBytes, err := cmdutil.ChangeResourcePatch(info, f.Command())
if err != nil {
errs = append(errs, err)
continue
return err
}
mapping := info.ResourceMapping()
client, err := f.ClientForMapping(mapping)
@@ -168,12 +171,18 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri
helper := resource.NewHelper(client, mapping)
_, err = helper.Patch(info.Namespace, info.Name, api.StrategicMergePatchType, patchBytes)
if err != nil {
errs = append(errs, err)
continue
return err
}
}
counter++
cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "scaled")
return nil
})
if err != nil {
return err
}
return utilerrors.NewAggregate(errs)
if counter == 0 {
return fmt.Errorf("no objects passed to scale")
}
return nil
}
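
The asymmetry in the scale hunk is intentional: `--resource-version` stays restricted to a single resource, since a resourceVersion precondition can only describe one object's state, while the `--current-replicas` check moves inside the visit callback and is evaluated per object; that is what lets the test script above scale busybox0 and busybox1 with `--current-replicas=1` in a single invocation.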