Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-09-07 12:11:43 +00:00)
Merge pull request #51021 from zjj2wry/scale-selector-all
Automatic merge from submit-queue (batch tested with PRs 51021, 53225, 53094, 53219). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

"fix issue (#49965): kubectl scale also says that it can work based on a label selector or all"

**What this PR does / why we need it**: Fixes #49965 #44800

**Which issue this PR fixes**: fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
@@ -2967,8 +2967,27 @@ run_rs_tests() {
```sh
  kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
  # Post-condition: 2 replicas
  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'

  # Set up three deployments; two of them share the same label
  kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]}"
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
  # Test kubectl scale --selector
  kubectl scale deploy --replicas=2 -l run=hello
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '2'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '2'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
  # Test kubectl scale --all
  kubectl scale deploy --replicas=3 --all
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '3'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
  # Clean-up
  kubectl delete rs frontend "${kube_flags[@]}"
  kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]}"

  ### Expose replica set as service
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
```
hack/testdata/scale-deploy-1.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    run: hello
  name: scale-1
spec:
  replicas: 1
  selector:
    matchLabels:
      run: hello
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        run: hello
    spec:
      containers:
      - image: aronchick/hello-node:2.0
        imagePullPolicy: IfNotPresent
        name: hello
```
hack/testdata/scale-deploy-2.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    run: hello
  name: scale-2
spec:
  replicas: 1
  selector:
    matchLabels:
      run: hello
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        run: hello
    spec:
      containers:
      - image: aronchick/hello-node:2.0
        imagePullPolicy: IfNotPresent
        name: hello
```
hack/testdata/scale-deploy-3.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    foo: boo
  name: scale-3
spec:
  replicas: 1
  selector:
    matchLabels:
      run: hello
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        run: hello
    spec:
      containers:
      - image: aronchick/hello-node:2.0
        imagePullPolicy: IfNotPresent
        name: hello
```
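The only difference between the three fixtures is the labels on the Deployment objects themselves: scale-1 and scale-2 carry `run: hello`, while scale-3 carries `foo: boo`, which is why `-l run=hello` in the test above scales the first two and leaves scale-3 at one replica. Below is a toy, dependency-free Go sketch of that equality-based matching; it is not the kubectl implementation, and names such as `deployment` and `matches` are made up for illustration.

```go
package main

import "fmt"

// deployment models only the fields the selector test cares about:
// a name and the labels on the Deployment object's metadata.
type deployment struct {
	name   string
	labels map[string]string
}

// matches reports whether every key=value pair in the selector is
// present in the object's labels (equality-based selection only).
func matches(selector, objLabels map[string]string) bool {
	for k, v := range selector {
		if objLabels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	deploys := []deployment{
		{"scale-1", map[string]string{"run": "hello"}},
		{"scale-2", map[string]string{"run": "hello"}},
		{"scale-3", map[string]string{"foo": "boo"}},
	}
	selector := map[string]string{"run": "hello"} // mirrors: -l run=hello

	for _, d := range deploys {
		if matches(selector, d.labels) {
			fmt.Printf("%s: would be scaled\n", d.name) // scale-1, scale-2
		} else {
			fmt.Printf("%s: left untouched\n", d.name) // scale-3
		}
	}
}
```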
```diff
@@ -77,6 +77,8 @@ func NewCmdScale(f cmdutil.Factory, out io.Writer) *cobra.Command {
 		ValidArgs:  validArgs,
 		ArgAliases: argAliases,
 	}
+	cmd.Flags().StringP("selector", "l", "", "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
+	cmd.Flags().Bool("all", false, "Select all resources in the namespace of the specified resource types")
 	cmd.Flags().String("resource-version", "", i18n.T("Precondition for resource version. Requires that the current resource version match this value in order to scale."))
 	cmd.Flags().Int("current-replicas", -1, "Precondition for current size. Requires that the current size of the resource match this value in order to scale.")
 	cmd.Flags().Int("replicas", -1, "The new desired number of replicas. Required.")
```
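For readers unfamiliar with the flag plumbing, here is a minimal standalone cobra sketch showing the same pattern: a string flag registered with `StringP` so it gets the `-l` shorthand, a plain `Bool` flag, and both read back when the command runs. It is not kubectl's `NewCmdScale` (which reads flags through `cmdutil` helpers); the command name and output are invented for the example.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "scale-demo",
		Run: func(cmd *cobra.Command, args []string) {
			// Read the flags back, as the real command does via cmdutil helpers.
			selector, _ := cmd.Flags().GetString("selector")
			all, _ := cmd.Flags().GetBool("all")
			fmt.Printf("selector=%q all=%v args=%v\n", selector, all, args)
		},
	}
	// Same shape as the diff: a -l/--selector string flag and an --all bool flag.
	cmd.Flags().StringP("selector", "l", "", "Selector (label query) to filter on")
	cmd.Flags().Bool("all", false, "Select all resources in the namespace")

	// Simulate: scale-demo deploy -l run=hello
	cmd.SetArgs([]string{"deploy", "-l", "run=hello"})
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```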
```diff
@@ -98,13 +100,17 @@ func RunScale(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string
 		return err
 	}
 
+	selector := cmdutil.GetFlagString(cmd, "selector")
+	all := cmdutil.GetFlagBool(cmd, "all")
+
 	mapper, _ := f.Object()
 	r := f.NewBuilder().
 		ContinueOnError().
 		NamespaceParam(cmdNamespace).DefaultNamespace().
 		FilenameParam(enforceNamespace, options).
-		ResourceTypeOrNameArgs(false, args...).
+		ResourceTypeOrNameArgs(all, args...).
 		Flatten().
+		SelectorParam(selector).
 		Do()
 	err = r.Err()
 	if resource.IsUsageError(err) {
```
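The chain above is the usual fluent-builder pattern: each call records a parameter and returns the builder so the next call can follow, and only `Do()` resolves anything. The sketch below is a toy builder with that shape, written only to make the chaining visible; it is not k8s.io's `resource.Builder` and performs no API calls.

```go
package main

import "fmt"

// builder is a made-up, in-memory stand-in for the real resource builder.
// Every setter stores a parameter and returns the builder itself, which is
// what lets RunScale string NamespaceParam / ResourceTypeOrNameArgs /
// SelectorParam / Do together in one expression.
type builder struct {
	namespace string
	all       bool
	args      []string
	selector  string
}

func newBuilder() *builder { return &builder{} }

func (b *builder) NamespaceParam(ns string) *builder { b.namespace = ns; return b }

func (b *builder) ResourceTypeOrNameArgs(all bool, args ...string) *builder {
	b.all = all
	b.args = args
	return b
}

func (b *builder) SelectorParam(s string) *builder { b.selector = s; return b }

// Do is where the real builder would visit the API server; here it just
// reports what was accumulated so the chaining is visible.
func (b *builder) Do() string {
	return fmt.Sprintf("ns=%s all=%v args=%v selector=%q",
		b.namespace, b.all, b.args, b.selector)
}

func main() {
	// Mirrors: kubectl scale deploy --replicas=2 -l run=hello
	out := newBuilder().
		NamespaceParam("default").
		ResourceTypeOrNameArgs(false, "deploy").
		SelectorParam("run=hello").
		Do()
	fmt.Println(out)
}
```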