Merge pull request #36174 from JacobTanenbaum/v2resource_fixes
Automatic merge from submit-queue

V2resource fixes

When using kubectl set resources, it resets all resource fields that are not being set. For example,

$ kubectl set resources deployments nginx --limits=cpu=100m

followed by

$ kubectl set resources deployments nginx --limits=memory=256Mi

would result in the nginx deployment only limiting memory at 256Mi, with the previous limit placed on the cpu being wiped out. This behavior is corrected so that each invocation only modifies the fields set in that command, and the testing is changed so that the desired behavior is checked.

Also fixed a typo in an error message: "you must specify an update to requests or limits or (in the form of --requests/--limits)" is corrected to "you must specify an update to requests or limits (in the form of --requests/--limits)".

Implemented both the dry-run and local flags, and added test cases to show that both flags operate as intended. Removed the print statement "running in local mode", as in PR #35112.

The original PR associated with these fixes was reverted because it caused a flake in hack/make-rules/test-cmd.sh. I gave the 'kubectl set resources' tests their own deployment, set terminationGracePeriodSeconds to 0, and have run test-cmd.sh for hours without hitting the flake.
This commit is contained in: commit fac05d9c81
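Before the diffs, a minimal Go sketch of the corrected merge behavior described above. This is not the kubectl implementation: plain string maps stand in for the API's ResourceList, and mergeInto is a hypothetical helper used only for illustration.

package main

import "fmt"

// mergeInto copies each key/value from update into existing, creating the map
// if needed. This mirrors the corrected behavior: successive invocations add
// to the resource lists instead of replacing them wholesale.
func mergeInto(existing, update map[string]string) map[string]string {
	if existing == nil {
		existing = make(map[string]string)
	}
	for k, v := range update {
		existing[k] = v
	}
	return existing
}

func main() {
	var limits map[string]string
	// First invocation: --limits=cpu=100m
	limits = mergeInto(limits, map[string]string{"cpu": "100m"})
	// Second invocation: --limits=memory=256Mi
	limits = mergeInto(limits, map[string]string{"memory": "256Mi"})
	// With the fix both keys survive: map[cpu:100m memory:256Mi]
	fmt.Println(limits)
}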
@@ -2213,33 +2213,37 @@ __EOF__
   ## Set resource limits/request of a deployment
   # Pre-condition: no deployment exists
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Set resources of a local file without talking to the server
+  kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml "${kube_flags[@]}"
+  ! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}"
   # Create a deployment
-  kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
+  kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml "${kube_flags[@]}"
+  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
   kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
   kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
   # Set the deployment's cpu limits
-  kubectl set resources deployment nginx-deployment --limits=cpu=100m "${kube_flags[@]}"
+  kubectl set resources deployment nginx-deployment-resources --limits=cpu=100m "${kube_flags[@]}"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
   # Set a non-existing container should fail
-  ! kubectl set resources deployment nginx-deployment -c=redis --limits=cpu=100m
+  ! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m
   # Set the limit of a specific container in deployment
-  kubectl set resources deployment nginx-deployment -c=nginx --limits=cpu=200m "${kube_flags[@]}"
+  kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m "${kube_flags[@]}"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
   # Set limits/requests of a deployment specified by a file
-  kubectl set resources -f hack/testdata/deployment-multicontainer.yaml -c=perl --limits=cpu=300m --requests=cpu=300m "${kube_flags[@]}"
+  kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m "${kube_flags[@]}"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
-  # Set limits on a local file without talking to the server
-  kubectl set resources deployment -f hack/testdata/deployment-multicontainer.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}"
+  # Show dry-run works on running deployments
+  kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run -o yaml "${kube_flags[@]}"
+  ! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
   # Clean up
-  kubectl delete deployment nginx-deployment "${kube_flags[@]}"
+  kubectl delete deployment nginx-deployment-resources "${kube_flags[@]}"


   ######################
hack/testdata/deployment-multicontainer-resources.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: nginx-deployment-resources
+  labels:
+    name: nginx-deployment-resources
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      name: nginx
+  template:
+    metadata:
+      labels:
+        name: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: gcr.io/google-containers/nginx:test-cmd
+        ports:
+        - containerPort: 80
+      - name: perl
+        image: gcr.io/google-containers/perl
+      terminationGracePeriodSeconds: 0
@@ -52,7 +52,7 @@ var (
 	kubectl set resources deployment nginx --limits=cpu=0,memory=0 --requests=cpu=0,memory=0

 	# Print the result (in yaml format) of updating nginx container limits from a local, without hitting the server
-	kubectl set resources -f path/to/file.yaml --limits=cpu=200m,memory=512Mi --dry-run -o yaml`)
+	kubectl set resources -f path/to/file.yaml --limits=cpu=200m,memory=512Mi --local -o yaml`)
 )

 // ResourcesOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of
@@ -73,6 +73,7 @@ type ResourcesOptions struct {
 	All         bool
 	Record      bool
 	ChangeCause string
+	Local       bool
 	Cmd         *cobra.Command

 	Limits      string
@@ -115,6 +116,7 @@ func NewCmdResources(f cmdutil.Factory, out io.Writer, errOut io.Writer) *cobra.
 	cmd.Flags().BoolVar(&options.All, "all", false, "select all resources in the namespace of the specified resource types")
 	cmd.Flags().StringVarP(&options.Selector, "selector", "l", "", "Selector (label query) to filter on")
 	cmd.Flags().StringVarP(&options.ContainerSelector, "containers", "c", "*", "The names of containers in the selected pod templates to change, all containers are selected by default - may use wildcards")
+	cmd.Flags().BoolVar(&options.Local, "local", false, "If true, set resources will NOT contact api-server but run locally.")
 	cmdutil.AddDryRunFlag(cmd)
 	cmdutil.AddRecordFlag(cmd)
 	cmd.Flags().StringVar(&options.Limits, "limits", options.Limits, "The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'. Note that server side components may assign requests depending on the server configuration, such as limit ranges.")
@@ -144,7 +146,7 @@ func (o *ResourcesOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args
 		//FilenameParam(enforceNamespace, o.Filenames...).
 		FilenameParam(enforceNamespace, &o.FilenameOptions).
 		Flatten()
-	if !cmdutil.GetDryRunFlag(cmd) {
+	if !o.Local {
 		builder = builder.
 			SelectorParam(o.Selector).
 			ResourceTypeOrNameArgs(o.All, args...).
@@ -161,7 +163,7 @@ func (o *ResourcesOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args
 func (o *ResourcesOptions) Validate() error {
 	var err error
 	if len(o.Limits) == 0 && len(o.Requests) == 0 {
-		return fmt.Errorf("you must specify an update to requests or limits or (in the form of --requests/--limits)")
+		return fmt.Errorf("you must specify an update to requests or limits (in the form of --requests/--limits)")
 	}

 	o.ResourceRequirements, err = kubectl.HandleResourceRequirements(map[string]string{"limits": o.Limits, "requests": o.Requests})
@@ -180,7 +182,19 @@ func (o *ResourcesOptions) Run() error {
 		containers, _ := selectContainers(spec.Containers, o.ContainerSelector)
 		if len(containers) != 0 {
 			for i := range containers {
-				containers[i].Resources = o.ResourceRequirements
+				if len(o.Limits) != 0 && len(containers[i].Resources.Limits) == 0 {
+					containers[i].Resources.Limits = make(api.ResourceList)
+				}
+				for key, value := range o.ResourceRequirements.Limits {
+					containers[i].Resources.Limits[key] = value
+				}
+
+				if len(o.Requests) != 0 && len(containers[i].Resources.Requests) == 0 {
+					containers[i].Resources.Requests = make(api.ResourceList)
+				}
+				for key, value := range o.ResourceRequirements.Requests {
+					containers[i].Resources.Requests[key] = value
+				}
 				transformed = true
 			}
 		} else {
@@ -204,8 +218,7 @@ func (o *ResourcesOptions) Run() error {
 			continue
 		}

-		if cmdutil.GetDryRunFlag(o.Cmd) {
-			fmt.Fprintln(o.Err, "info: running in local mode...")
+		if o.Local || cmdutil.GetDryRunFlag(o.Cmd) {
			return o.PrintObject(o.Cmd, o.Mapper, info.Object, o.Out)
 		}

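As a rough illustration of the control flow this last hunk establishes: either flag causes the command to print the locally modified object and return before any request reaches the API server. The sketch below is a hypothetical stand-in, not the actual kubectl code.

package main

import "fmt"

// printOrPatch mimics the short-circuit added above: with --local or --dry-run
// the modified object is printed and no server call is made; otherwise the
// change would be patched to the API server.
func printOrPatch(local, dryRun bool, obj string) {
	if local || dryRun {
		fmt.Println("would print updated object:", obj)
		return
	}
	fmt.Println("would PATCH the API server with:", obj)
}

func main() {
	printOrPatch(true, false, "deployment/nginx-deployment-resources")  // --local
	printOrPatch(false, true, "deployment/nginx-deployment-resources")  // --dry-run
	printOrPatch(false, false, "deployment/nginx-deployment-resources") // normal update
}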