mirror of https://github.com/k3s-io/kubernetes.git
commit 3b00b4a86f
parent 8438cbe669

    removes job scaler
@@ -40,12 +40,10 @@ go_test(
         "//pkg/api/legacyscheme:go_default_library",
         "//pkg/api/testapi:go_default_library",
         "//pkg/api/testing:go_default_library",
-        "//pkg/apis/batch:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/extensions:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
-        "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library",
         "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library",
         "//pkg/kubectl/util:go_default_library",
         "//pkg/util/pointer:go_default_library",
@@ -60,7 +60,7 @@ var (
 func NewCmdScale(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command {
     options := &resource.FilenameOptions{}
 
-    validArgs := []string{"deployment", "replicaset", "replicationcontroller", "job", "statefulset"}
+    validArgs := []string{"deployment", "replicaset", "replicationcontroller", "statefulset"}
     argAliases := kubectl.ResourceAliases(validArgs)
 
     cmd := &cobra.Command{
@@ -145,12 +145,7 @@ func RunScale(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args
         return err
     }
 
-    mapping := info.ResourceMapping()
-    if mapping.Resource == "jobs" {
-        fmt.Fprintf(errOut, "%s scale job is DEPRECATED and will be removed in a future version.\n", cmd.Parent().Name())
-    }
-
-    scaler, err := f.Scaler(mapping)
+    scaler, err := f.Scaler()
     if err != nil {
         return err
     }
@@ -164,7 +159,9 @@ func RunScale(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args
         waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout)
     }
 
-    if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil {
+    mapping := info.ResourceMapping()
+    gvk := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource)
+    if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas, gvk.GroupResource()); err != nil {
         return err
     }
     if cmdutil.ShouldRecord(cmd, info) {
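The two hunks above move the GroupResource decision to the call site: RunScale derives it from the resource mapping and hands it to Scale on every invocation. A self-contained sketch of just that derivation, using only apimachinery types (the concrete kind/resource values below are illustrative, not from this commit):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// As in RunScale above: take the kind from the resource mapping,
	// derive a GroupVersionResource, and pass its GroupResource per call.
	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
	gvr := gvk.GroupVersion().WithResource("deployments")
	fmt.Println(gvr.GroupResource()) // prints "deployments.apps"
}
```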
@@ -211,7 +211,7 @@ type BuilderFactory interface {
     // PluginRunner provides the implementation to be used to run cli plugins.
     PluginRunner() plugins.PluginRunner
     // Returns a Scaler for changing the size of the specified RESTMapping type or an error
-    Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error)
+    Scaler() (kubectl.Scaler, error)
     // ScaleClient gives you back scale getter
     ScaleClient() (scaleclient.ScalesGetter, error)
     // Returns a Reaper for gracefully shutting down resources.
@@ -103,19 +103,13 @@ func (f *ring2Factory) ScaleClient() (scaleclient.ScalesGetter, error) {
     return scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil
 }
 
-func (f *ring2Factory) Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
-    clientset, err := f.clientAccessFactory.ClientSet()
-    if err != nil {
-        return nil, err
-    }
-
+func (f *ring2Factory) Scaler() (kubectl.Scaler, error) {
     scalesGetter, err := f.ScaleClient()
     if err != nil {
         return nil, err
     }
-    gvk := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource)
 
-    return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset.Batch(), scalesGetter, gvk.GroupResource()), nil
+    return kubectl.NewScaler(scalesGetter), nil
 }
 
 func (f *ring2Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
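With the job special case gone, the factory method needs neither a RESTMapping nor an internal clientset; it only wires the scale client into the generic scaler. A toy sketch of that constructor shape (all types here are local stand-ins, not the real kubectl/client-go ones):

```go
package main

import "fmt"

// Cut-down stand-ins: the real ScalesGetter and Scaler live in
// k8s.io/client-go/scale and pkg/kubectl respectively.
type scalesGetter struct{ name string }
type genericScaler struct{ sg scalesGetter }

// newScaler mirrors the post-commit kubectl.NewScaler: the only input is the
// scale client, and no per-resource state is captured at construction time.
func newScaler(sg scalesGetter) genericScaler { return genericScaler{sg} }

func main() {
	s := newScaler(scalesGetter{"scales-via-discovery"})
	fmt.Printf("%+v\n", s)
}
```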
@@ -30,12 +30,10 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     scaleclient "k8s.io/client-go/scale"
     "k8s.io/kubernetes/pkg/apis/apps"
-    "k8s.io/kubernetes/pkg/apis/batch"
     api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion"
-    batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion"
     coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
     extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
@@ -82,9 +80,6 @@ func ReaperFor(kind schema.GroupKind, c internalclientset.Interface, sc scalecli
     case api.Kind("Pod"):
         return &PodReaper{c.Core()}, nil
 
-    case batch.Kind("Job"):
-        return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil
-
     case apps.Kind("StatefulSet"):
         return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout, sc}, nil
 
@@ -114,11 +109,6 @@ type DaemonSetReaper struct {
     client                extensionsclient.DaemonSetsGetter
     pollInterval, timeout time.Duration
 }
-type JobReaper struct {
-    client                batchclient.JobsGetter
-    podClient             coreclient.PodsGetter
-    pollInterval, timeout time.Duration
-}
 type DeploymentReaper struct {
     dClient  extensionsclient.DeploymentsGetter
     rsClient extensionsclient.ReplicaSetsGetter
@@ -155,7 +145,7 @@ func getOverlappingControllers(rcClient coreclient.ReplicationControllerInterfac
 
 func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
     rc := reaper.client.ReplicationControllers(namespace)
-    scaler := NewScaler(reaper.scaleClient, schema.GroupResource{Resource: "replicationcontrollers"})
+    scaler := NewScaler(reaper.scaleClient)
     ctrl, err := rc.Get(name, metav1.GetOptions{})
     if err != nil {
         return err
@@ -206,7 +196,7 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout
         // No overlapping controllers.
         retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
         waitForReplicas := NewRetryParams(reaper.pollInterval, timeout)
-        if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil && !errors.IsNotFound(err) {
+        if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas, schema.GroupResource{Resource: "replicationcontrollers"}); err != nil && !errors.IsNotFound(err) {
             return err
         }
     }
@@ -224,7 +214,7 @@ func getOverlappingReplicaSets(c extensionsclient.ReplicaSetInterface, rs *exten
 
 func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
     rsc := reaper.client.ReplicaSets(namespace)
-    scaler := NewScaler(reaper.scaleClient, reaper.gr)
+    scaler := NewScaler(reaper.scaleClient)
     rs, err := rsc.Get(name, metav1.GetOptions{})
     if err != nil {
         return err
@@ -276,7 +266,7 @@ func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Durati
         // No overlapping ReplicaSets.
         retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
         waitForReplicas := NewRetryParams(reaper.pollInterval, timeout)
-        if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil && !errors.IsNotFound(err) {
+        if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas, reaper.gr); err != nil && !errors.IsNotFound(err) {
             return err
         }
     }
@@ -325,7 +315,7 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio
 
 func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
     statefulsets := reaper.client.StatefulSets(namespace)
-    scaler := NewScaler(reaper.scaleClient, apps.Resource("statefulsets"))
+    scaler := NewScaler(reaper.scaleClient)
     ss, err := statefulsets.Get(name, metav1.GetOptions{})
     if err != nil {
         return err
@@ -340,7 +330,7 @@ func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Durat
 
     retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
     waitForStatefulSet := NewRetryParams(reaper.pollInterval, timeout)
-    if err = scaler.Scale(namespace, name, 0, nil, retry, waitForStatefulSet); err != nil && !errors.IsNotFound(err) {
+    if err = scaler.Scale(namespace, name, 0, nil, retry, waitForStatefulSet, apps.Resource("statefulsets")); err != nil && !errors.IsNotFound(err) {
         return err
     }
 
@@ -351,51 +341,6 @@ func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Durat
     return statefulsets.Delete(name, deleteOptions)
 }
 
-func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
-    jobs := reaper.client.Jobs(namespace)
-    pods := reaper.podClient.Pods(namespace)
-    scaler := ScalerFor(schema.GroupKind{Group: batch.GroupName, Kind: "Job"}, reaper.client, nil, schema.GroupResource{})
-    job, err := jobs.Get(name, metav1.GetOptions{})
-    if err != nil {
-        return err
-    }
-    if timeout == 0 {
-        // we will never have more active pods than job.Spec.Parallelism
-        parallelism := *job.Spec.Parallelism
-        timeout = Timeout + time.Duration(10*parallelism)*time.Second
-    }
-
-    // TODO: handle overlapping jobs
-    retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
-    waitForJobs := NewRetryParams(reaper.pollInterval, timeout)
-    if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil && !errors.IsNotFound(err) {
-        return err
-    }
-    // at this point only dead pods are left, that should be removed
-    selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector)
-    options := metav1.ListOptions{LabelSelector: selector.String()}
-    podList, err := pods.List(options)
-    if err != nil {
-        return err
-    }
-    errList := []error{}
-    for _, pod := range podList.Items {
-        if err := pods.Delete(pod.Name, gracePeriod); err != nil {
-            // ignores the error when the pod isn't found
-            if !errors.IsNotFound(err) {
-                errList = append(errList, err)
-            }
-        }
-    }
-    if len(errList) > 0 {
-        return utilerrors.NewAggregate(errList)
-    }
-    // once we have all the pods removed we can safely remove the job itself.
-    falseVar := false
-    deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
-    return jobs.Delete(name, deleteOptions)
-}
-
 func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
     deployments := reaper.dClient.Deployments(namespace)
     rsReaper := &ReplicaSetReaper{reaper.rsClient, reaper.pollInterval, reaper.timeout, reaper.scaleClient, schema.GroupResource{Group: reaper.gr.Group, Resource: "replicasets"}}
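The deleted JobReaper.Stop encoded a fixed teardown order: scale parallelism to zero, sweep the pods matched by the job's selector, then delete the job object itself. A self-contained, in-memory sketch of that order (stand-in types, no API calls):

```go
package main

import "fmt"

type pod struct {
	name   string
	labels map[string]string
}

func matches(labels, selector map[string]string) bool {
	for k, v := range selector {
		if labels[k] != v {
			return false
		}
	}
	return true
}

// stopJob mirrors the shape of the deleted reaper: scale to zero, sweep the
// job's pods, then (step elided here) delete the job object itself.
func stopJob(parallelism *int32, pods []pod, selector map[string]string) []pod {
	*parallelism = 0 // scale the job down so no replacement pods appear
	remaining := make([]pod, 0, len(pods))
	for _, p := range pods {
		if !matches(p.labels, selector) {
			remaining = append(remaining, p) // keep pods the job does not own
		}
	}
	return remaining
}

func main() {
	par := int32(2)
	pods := []pod{
		{"job-pod-1", map[string]string{"k1": "v1"}},
		{"unrelated", map[string]string{"app": "web"}},
	}
	left := stopJob(&par, pods, map[string]string{"k1": "v1"})
	fmt.Println(par, len(left)) // 0 1
}
```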
@@ -33,7 +33,6 @@ import (
     "k8s.io/apimachinery/pkg/watch"
     fakescale "k8s.io/client-go/scale/fake"
     testcore "k8s.io/client-go/testing"
-    "k8s.io/kubernetes/pkg/apis/batch"
     api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
@@ -382,103 +381,6 @@ func TestReplicaSetStop(t *testing.T) {
     }
 }
 
-func TestJobStop(t *testing.T) {
-    name := "foo"
-    ns := "default"
-    zero := int32(0)
-    tests := []struct {
-        Name            string
-        Objs            []runtime.Object
-        StopError       error
-        ExpectedActions []string
-    }{
-        {
-            Name: "OnlyOneJob",
-            Objs: []runtime.Object{
-                &batch.JobList{ // LIST
-                    Items: []batch.Job{
-                        {
-                            ObjectMeta: metav1.ObjectMeta{
-                                Name:      name,
-                                Namespace: ns,
-                            },
-                            Spec: batch.JobSpec{
-                                Parallelism: &zero,
-                                Selector: &metav1.LabelSelector{
-                                    MatchLabels: map[string]string{"k1": "v1"},
-                                },
-                            },
-                        },
-                    },
-                },
-            },
-            StopError: nil,
-            ExpectedActions: []string{"get:jobs", "get:jobs", "update:jobs",
-                "get:jobs", "get:jobs", "list:pods", "delete:jobs"},
-        },
-        {
-            Name: "JobWithDeadPods",
-            Objs: []runtime.Object{
-                &batch.JobList{ // LIST
-                    Items: []batch.Job{
-                        {
-                            ObjectMeta: metav1.ObjectMeta{
-                                Name:      name,
-                                Namespace: ns,
-                            },
-                            Spec: batch.JobSpec{
-                                Parallelism: &zero,
-                                Selector: &metav1.LabelSelector{
-                                    MatchLabels: map[string]string{"k1": "v1"},
-                                },
-                            },
-                        },
-                    },
-                },
-                &api.PodList{ // LIST
-                    Items: []api.Pod{
-                        {
-                            ObjectMeta: metav1.ObjectMeta{
-                                Name:      "pod1",
-                                Namespace: ns,
-                                Labels:    map[string]string{"k1": "v1"},
-                            },
-                        },
-                    },
-                },
-            },
-            StopError: nil,
-            ExpectedActions: []string{"get:jobs", "get:jobs", "update:jobs",
-                "get:jobs", "get:jobs", "list:pods", "delete:pods", "delete:jobs"},
-        },
-    }
-
-    for _, test := range tests {
-        fake := fake.NewSimpleClientset(test.Objs...)
-        reaper := JobReaper{fake.Batch(), fake.Core(), time.Millisecond, time.Millisecond}
-        err := reaper.Stop(ns, name, 0, nil)
-        if !reflect.DeepEqual(err, test.StopError) {
-            t.Errorf("%s unexpected error: %v", test.Name, err)
-            continue
-        }
-
-        actions := fake.Actions()
-        if len(actions) != len(test.ExpectedActions) {
-            t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions))
-            continue
-        }
-        for i, expAction := range test.ExpectedActions {
-            action := strings.Split(expAction, ":")
-            if actions[i].GetVerb() != action[0] {
-                t.Errorf("%s unexpected verb: %+v, expected %s", test.Name, actions[i], expAction)
-            }
-            if actions[i].GetResource().Resource != action[1] {
-                t.Errorf("%s unexpected resource: %+v, expected %s", test.Name, actions[i], expAction)
-            }
-        }
-    }
-}
-
 func TestDeploymentStop(t *testing.T) {
     name := "foo"
     ns := "default"
@@ -400,8 +400,8 @@ func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desi
 
 // scalerScaleAndWait scales a controller using a Scaler and a real client.
 func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) {
-    scaler := NewScaler(r.scaleClient, schema.GroupResource{Resource: "replicationcontrollers"})
-    if err := scaler.Scale(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ScalePrecondition{-1, ""}, retry, wait); err != nil {
+    scaler := NewScaler(r.scaleClient)
+    if err := scaler.Scale(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ScalePrecondition{-1, ""}, retry, wait, schema.GroupResource{Resource: "replicationcontrollers"}); err != nil {
         return nil, err
     }
     return r.rcClient.ReplicationControllers(rc.Namespace).Get(rc.Name, metav1.GetOptions{})
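scaleAndWaitWithScaler keeps the same two-knob retry scheme as before: one RetryParams bounds the optimistic-concurrency retry loop around the scale update, the other bounds the wait for observed replicas. A minimal sketch (RetryParams re-declared locally for illustration):

```go
package main

import (
	"fmt"
	"time"
)

// Local copy of the RetryParams shape used throughout this file.
type RetryParams struct {
	Interval, Timeout time.Duration
}

func NewRetryParams(interval, timeout time.Duration) *RetryParams {
	return &RetryParams{interval, timeout}
}

func main() {
	// A millisecond interval/timeout means "try once, immediately" --
	// the same trick genericScaler.Scale uses when retry == nil.
	retryOnce := NewRetryParams(time.Millisecond, time.Millisecond)
	waitForReplicas := NewRetryParams(500*time.Millisecond, 30*time.Second)
	fmt.Println(retryOnce.Timeout, waitForReplicas.Timeout)
}
```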
@@ -23,13 +23,11 @@ import (
 
     autoscalingapi "k8s.io/api/autoscaling/v1"
     "k8s.io/apimachinery/pkg/api/errors"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/pkg/apis/batch"
 
     scaleclient "k8s.io/client-go/scale"
-    batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion"
 )
 
 // TODO: Figure out if we should be waiting on initializers in the Scale() functions below.
@@ -40,30 +38,15 @@ type Scaler interface {
     // retries in the event of resource version mismatch (if retry is not nil),
     // and optionally waits until the status of the resource matches newSize (if wait is not nil)
     // TODO: Make the implementation of this watch-based (#56075) once #31345 is fixed.
-    Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams) error
+    Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams, gr schema.GroupResource) error
     // ScaleSimple does a simple one-shot attempt at scaling - not useful on its own, but
     // a necessary building block for Scale
-    ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (updatedResourceVersion string, err error)
-}
-
-// ScalerFor gets a scaler for a given resource
-func ScalerFor(kind schema.GroupKind, jobsClient batchclient.JobsGetter, scalesGetter scaleclient.ScalesGetter, gr schema.GroupResource) Scaler {
-    // it seems like jobs dont't follow "normal" scale semantics.
-    // For example it is not clear whether HPA could make use of it or not.
-    // For more details see: https://github.com/kubernetes/kubernetes/pull/58468
-    switch kind {
-    case batch.Kind("Job"):
-        return &jobScaler{jobsClient} // Either kind of job can be scaled with Batch interface.
-    default:
-        return NewScaler(scalesGetter, gr)
-    }
+    ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gr schema.GroupResource) (updatedResourceVersion string, err error)
 }
 
 // NewScaler get a scaler for a given resource
-// Note that if you are trying to crate create a scaler for "job" then stop and use ScalerFor instead.
-// When scaling jobs is dead, we'll remove ScalerFor method.
-func NewScaler(scalesGetter scaleclient.ScalesGetter, gr schema.GroupResource) Scaler {
-    return &genericScaler{scalesGetter, gr}
+func NewScaler(scalesGetter scaleclient.ScalesGetter) Scaler {
+    return &genericScaler{scalesGetter}
 }
 
 // ScalePrecondition describes a condition that must be true for the scale to take place
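The net effect of the interface change above: one generic scaler instance can serve every scalable resource, because the GroupResource is now an argument rather than a field, and the job-specific ScalerFor dispatch disappears. A pared-down sketch of that shape (this is not the real kubectl.Scaler, which also takes preconditions and retry params):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// A cut-down Scaler mirroring the new interface shape: the GroupResource is
// passed on every call rather than stored on the implementation.
type Scaler interface {
	ScaleSimple(namespace, name string, newSize uint, gr schema.GroupResource) (string, error)
}

type genericScaler struct{}

func (genericScaler) ScaleSimple(namespace, name string, newSize uint, gr schema.GroupResource) (string, error) {
	// The real version GETs and UPDATEs the /scale subresource through a
	// ScalesGetter; here we only demonstrate the call shape.
	return fmt.Sprintf("scaled %s %s/%s to %d", gr, namespace, name, newSize), nil
}

func main() {
	var s Scaler = genericScaler{}
	msg, _ := s.ScaleSimple("default", "foo", 3, schema.GroupResource{Resource: "replicationcontrollers"})
	fmt.Println(msg)

	// The same scaler instance now serves any scalable resource:
	msg, _ = s.ScaleSimple("default", "bar", 5, schema.GroupResource{Group: "apps", Resource: "statefulsets"})
	fmt.Println(msg)
}
```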
@@ -97,9 +80,9 @@ func NewRetryParams(interval, timeout time.Duration) *RetryParams {
 }
 
 // ScaleCondition is a closure around Scale that facilitates retries via util.wait
-func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string) wait.ConditionFunc {
+func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string, gr schema.GroupResource) wait.ConditionFunc {
     return func() (bool, error) {
-        rv, err := r.ScaleSimple(namespace, name, precondition, count)
+        rv, err := r.ScaleSimple(namespace, name, precondition, count, gr)
         if updatedResourceVersion != nil {
             *updatedResourceVersion = rv
         }
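ScaleCondition adapts one ScaleSimple attempt into a wait.ConditionFunc, so the retry policy lives entirely in wait.PollImmediate. A runnable toy condition that fails twice before succeeding, assuming only the apimachinery wait package:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// A condition in the spirit of ScaleCondition: retryable failures return
// (false, nil) so PollImmediate tries again; this one fails twice on purpose.
func main() {
	attempts := 0
	cond := func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, nil // e.g. a resource-version conflict: retry
		}
		return true, nil
	}
	if err := wait.PollImmediate(time.Millisecond, time.Second, cond); err != nil {
		fmt.Println("scale failed:", err)
		return
	}
	fmt.Println("scaled after", attempts, "attempts")
}
```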
@@ -128,60 +111,6 @@ func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error {
     return nil
 }
 
-type jobScaler struct {
-    c batchclient.JobsGetter
-}
-
-// ScaleSimple is responsible for updating job's parallelism. It returns the
-// resourceVersion of the job if the update is successful.
-func (scaler *jobScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) {
-    job, err := scaler.c.Jobs(namespace).Get(name, metav1.GetOptions{})
-    if err != nil {
-        return "", err
-    }
-    if preconditions != nil {
-        if err := preconditions.ValidateJob(job); err != nil {
-            return "", err
-        }
-    }
-    parallelism := int32(newSize)
-    job.Spec.Parallelism = &parallelism
-    updatedJob, err := scaler.c.Jobs(namespace).Update(job)
-    if err != nil {
-        return "", err
-    }
-    return updatedJob.ObjectMeta.ResourceVersion, nil
-}
-
-// Scale updates a Job to a new size, with optional precondition check (if preconditions is not nil),
-// optional retries (if retry is not nil), and then optionally waits for parallelism to reach desired
-// number, which can be less than requested based on job's current progress.
-func (scaler *jobScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
-    if preconditions == nil {
-        preconditions = &ScalePrecondition{-1, ""}
-    }
-    if retry == nil {
-        // Make it try only once, immediately
-        retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
-    }
-    cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil)
-    if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
-        return err
-    }
-    if waitForReplicas != nil {
-        job, err := scaler.c.Jobs(namespace).Get(name, metav1.GetOptions{})
-        if err != nil {
-            return err
-        }
-        err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, JobHasDesiredParallelism(scaler.c, job))
-        if err == wait.ErrWaitTimeout {
-            return fmt.Errorf("timed out waiting for %q to be synced", name)
-        }
-        return err
-    }
-    return nil
-}
-
 // validateGeneric ensures that the preconditions match. Returns nil if they are valid, otherwise an error
 func (precondition *ScalePrecondition) validate(scale *autoscalingapi.Scale) error {
     if precondition.Size != -1 && int(scale.Spec.Replicas) != precondition.Size {
@@ -196,14 +125,13 @@ func (precondition *ScalePrecondition) validate(scale *autoscalingapi.Scale) err
 // genericScaler can update scales for resources in a particular namespace
 type genericScaler struct {
     scaleNamespacer scaleclient.ScalesGetter
-    targetGR        schema.GroupResource
 }
 
 var _ Scaler = &genericScaler{}
 
 // ScaleSimple updates a scale of a given resource. It returns the resourceVersion of the scale if the update was successful.
-func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (updatedResourceVersion string, err error) {
-    scale, err := s.scaleNamespacer.Scales(namespace).Get(s.targetGR, name)
+func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gr schema.GroupResource) (updatedResourceVersion string, err error) {
+    scale, err := s.scaleNamespacer.Scales(namespace).Get(gr, name)
     if err != nil {
         return "", err
     }
@@ -214,7 +142,7 @@ func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *Scale
     }
 
     scale.Spec.Replicas = int32(newSize)
-    updatedScale, err := s.scaleNamespacer.Scales(namespace).Update(s.targetGR, scale)
+    updatedScale, err := s.scaleNamespacer.Scales(namespace).Update(gr, scale)
     if err != nil {
         return "", err
     }
@@ -223,7 +151,7 @@ func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *Scale
 
 // Scale updates a scale of a given resource to a new size, with optional precondition check (if preconditions is not nil),
 // optional retries (if retry is not nil), and then optionally waits for the status to reach desired count.
-func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
+func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams, gr schema.GroupResource) error {
     if preconditions == nil {
         preconditions = &ScalePrecondition{-1, ""}
     }
@@ -231,7 +159,7 @@ func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, prec
         // make it try only once, immediately
         retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
     }
-    cond := ScaleCondition(s, preconditions, namespace, resourceName, newSize, nil)
+    cond := ScaleCondition(s, preconditions, namespace, resourceName, newSize, nil, gr)
     if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
         return err
     }
@@ -239,7 +167,7 @@ func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, prec
         err := wait.PollImmediate(
             waitForReplicas.Interval,
             waitForReplicas.Timeout,
-            scaleHasDesiredReplicas(s.scaleNamespacer, s.targetGR, resourceName, namespace, int32(newSize)))
+            scaleHasDesiredReplicas(s.scaleNamespacer, gr, resourceName, namespace, int32(newSize)))
         if err == wait.ErrWaitTimeout {
             return fmt.Errorf("timed out waiting for %q to be synced", resourceName)
         }
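genericScaler.ScaleSimple is a plain GET/modify/UPDATE of the /scale subresource, now keyed by the per-call gr. An in-memory stand-in for that round trip (the map below replaces the real ScalesGetter):

```go
package main

import (
	"fmt"

	autoscalingapi "k8s.io/api/autoscaling/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// scales is a toy store standing in for the ScalesGetter: GET the scale for
// (gr, namespace, name), set spec.replicas, "UPDATE" it back in place.
type scales map[string]*autoscalingapi.Scale

func key(gr schema.GroupResource, namespace, name string) string {
	return gr.String() + "/" + namespace + "/" + name
}

func scaleSimple(s scales, namespace, name string, newSize uint, gr schema.GroupResource) (string, error) {
	scale, ok := s[key(gr, namespace, name)]
	if !ok {
		return "", fmt.Errorf("scales %q not found", name)
	}
	scale.Spec.Replicas = int32(newSize)
	return scale.ResourceVersion, nil
}

func main() {
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
	s := scales{key(gr, "default", "foo"): {Spec: autoscalingapi.ScaleSpec{Replicas: 2}}}
	if _, err := scaleSimple(s, "default", "foo", 3, gr); err != nil {
		panic(err)
	}
	fmt.Println(s[key(gr, "default", "foo")].Spec.Replicas) // 3
}
```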
@@ -17,7 +17,6 @@ limitations under the License.
 package kubectl
 
 import (
-    "errors"
     "fmt"
     "testing"
     "time"
@@ -26,11 +25,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/client-go/scale"
-    testcore "k8s.io/client-go/testing"
-    "k8s.io/kubernetes/pkg/apis/batch"
     api "k8s.io/kubernetes/pkg/apis/core"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
-    batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion"
 )
 
 func TestReplicationControllerScaleRetry(t *testing.T) {
@@ -39,13 +34,13 @@ func TestReplicationControllerScaleRetry(t *testing.T) {
     }
     scaleClientExpectedAction := []string{"get", "update", "get"}
     scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 2, verbsOnError)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo-v1"
     namespace := metav1.NamespaceDefault
 
-    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil)
+    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "", Resource: "replicationcontrollers"})
     pass, err := scaleFunc()
     if pass {
         t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
@@ -54,7 +49,7 @@ func TestReplicationControllerScaleRetry(t *testing.T) {
         t.Errorf("Did not expect an error on update conflict failure, got %v", err)
     }
     preconditions = ScalePrecondition{3, ""}
-    scaleFunc = ScaleCondition(scaler, &preconditions, namespace, name, count, nil)
+    scaleFunc = ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "", Resource: "replicationcontrollers"})
     pass, err = scaleFunc()
     if err == nil {
         t.Errorf("Expected error on precondition failure")
@@ -76,13 +71,13 @@ func TestReplicationControllerScaleInvalid(t *testing.T) {
     }
     scaleClientExpectedAction := []string{"get", "update"}
     scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 1, verbsOnError)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo-v1"
     namespace := "default"
 
-    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil)
+    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "", Resource: "replicationcontrollers"})
     pass, err := scaleFunc()
     if pass {
         t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
@@ -104,11 +99,11 @@ func TestReplicationControllerScaleInvalid(t *testing.T) {
 func TestReplicationControllerScale(t *testing.T) {
     scaleClientExpectedAction := []string{"get", "update"}
     scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 2, nil)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo-v1"
-    err := scaler.Scale("default", name, count, &preconditions, nil, nil)
+    err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "", Resource: "replicationcontrollers"})
 
     if err != nil {
         t.Fatalf("unexpected error occurred = %v while scaling the resource", err)
@@ -127,11 +122,11 @@ func TestReplicationControllerScale(t *testing.T) {
 func TestReplicationControllerScaleFailsPreconditions(t *testing.T) {
     scaleClientExpectedAction := []string{"get"}
     scaleClient := createFakeScaleClient("replicationcontrollers", "foo", 10, nil)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{2, ""}
     count := uint(3)
     name := "foo"
-    err := scaler.Scale("default", name, count, &preconditions, nil, nil)
+    err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "", Resource: "replicationcontrollers"})
     if err == nil {
         t.Fatal("expected to get an error but none was returned")
     }
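The precondition rule these *FailsPreconditions tests rely on is small enough to restate: -1 and "" mean "unchecked"; anything else must match the current scale. A local re-implementation for illustration (the real one lives in validate() above):

```go
package main

import "fmt"

// ScalePrecondition mirrors the struct used by the tests: Size == -1 and
// ResourceVersion == "" both mean "don't check this field".
type ScalePrecondition struct {
	Size            int
	ResourceVersion string
}

func (p ScalePrecondition) validate(replicas int, resourceVersion string) error {
	if p.Size != -1 && replicas != p.Size {
		return fmt.Errorf("expected replicas %d, got %d", p.Size, replicas)
	}
	if p.ResourceVersion != "" && resourceVersion != p.ResourceVersion {
		return fmt.Errorf("expected resource version %q, got %q", p.ResourceVersion, resourceVersion)
	}
	return nil
}

func main() {
	fmt.Println(ScalePrecondition{-1, ""}.validate(10, "abc")) // <nil>: nothing pinned
	fmt.Println(ScalePrecondition{2, ""}.validate(10, "abc"))  // error: size mismatch
}
```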
@@ -146,281 +141,19 @@ func TestReplicationControllerScaleFailsPreconditions(t *testing.T) {
     }
 }
 
-type errorJobs struct {
-    batchclient.JobInterface
-    conflict bool
-    invalid  bool
-}
-
-func (c *errorJobs) Update(job *batch.Job) (*batch.Job, error) {
-    switch {
-    case c.invalid:
-        return nil, kerrors.NewInvalid(api.Kind(job.Kind), job.Name, nil)
-    case c.conflict:
-        return nil, kerrors.NewConflict(api.Resource(job.Kind), job.Name, nil)
-    }
-    return nil, errors.New("Job update failure")
-}
-
-func (c *errorJobs) Get(name string, options metav1.GetOptions) (*batch.Job, error) {
-    zero := int32(0)
-    return &batch.Job{
-        Spec: batch.JobSpec{
-            Parallelism: &zero,
-        },
-    }, nil
-}
-
-type errorJobClient struct {
-    batchclient.JobsGetter
-    conflict bool
-    invalid  bool
-}
-
-func (c *errorJobClient) Jobs(namespace string) batchclient.JobInterface {
-    return &errorJobs{
-        JobInterface: c.JobsGetter.Jobs(namespace),
-        conflict:     c.conflict,
-        invalid:      c.invalid,
-    }
-}
-
-func TestJobScaleRetry(t *testing.T) {
-    fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), conflict: true}
-    scaler := ScalerFor(schema.GroupKind{Group: batch.GroupName, Kind: "Job"}, fake, nil, schema.GroupResource{})
-    preconditions := ScalePrecondition{-1, ""}
-    count := uint(3)
-    name := "foo"
-    namespace := "default"
-
-    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil)
-    pass, err := scaleFunc()
-    if pass != false {
-        t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
-    }
-    if err != nil {
-        t.Errorf("Did not expect an error on update failure, got %v", err)
-    }
-    preconditions = ScalePrecondition{3, ""}
-    scaleFunc = ScaleCondition(scaler, &preconditions, namespace, name, count, nil)
-    pass, err = scaleFunc()
-    if err == nil {
-        t.Error("Expected error on precondition failure")
-    }
-}
-
-func job() *batch.Job {
-    return &batch.Job{
-        ObjectMeta: metav1.ObjectMeta{
-            Namespace: metav1.NamespaceDefault,
-            Name:      "foo",
-        },
-    }
-}
-
-func TestJobScale(t *testing.T) {
-    fakeClientset := fake.NewSimpleClientset(job())
-    scaler := ScalerFor(schema.GroupKind{Group: batch.GroupName, Kind: "Job"}, fakeClientset.Batch(), nil, schema.GroupResource{})
-    preconditions := ScalePrecondition{-1, ""}
-    count := uint(3)
-    name := "foo"
-    scaler.Scale("default", name, count, &preconditions, nil, nil)
-
-    actions := fakeClientset.Actions()
-    if len(actions) != 2 {
-        t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
-    }
-    if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name {
-        t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name)
-    }
-    if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || *action.GetObject().(*batch.Job).Spec.Parallelism != int32(count) {
-        t.Errorf("unexpected action %v, expected update-job with parallelism = %d", actions[1], count)
-    }
-}
-
-func TestJobScaleInvalid(t *testing.T) {
-    fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), invalid: true}
-    scaler := ScalerFor(schema.GroupKind{Group: batch.GroupName, Kind: "Job"}, fake, nil, schema.GroupResource{})
-    preconditions := ScalePrecondition{-1, ""}
-    count := uint(3)
-    name := "foo"
-    namespace := "default"
-
-    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil)
-    pass, err := scaleFunc()
-    if pass {
-        t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
-    }
-    if err == nil {
-        t.Errorf("Expected error on invalid update failure, got %v", err)
-    }
-}
-
-func TestJobScaleFailsPreconditions(t *testing.T) {
-    ten := int32(10)
-    fake := fake.NewSimpleClientset(&batch.Job{
-        ObjectMeta: metav1.ObjectMeta{
-            Namespace: metav1.NamespaceDefault,
-            Name:      "foo",
-        },
-        Spec: batch.JobSpec{
-            Parallelism: &ten,
-        },
-    })
-    scaler := ScalerFor(schema.GroupKind{Group: batch.GroupName, Kind: "Job"}, fake.Batch(), nil, schema.GroupResource{})
-    preconditions := ScalePrecondition{2, ""}
-    count := uint(3)
-    name := "foo"
-    scaler.Scale("default", name, count, &preconditions, nil, nil)
-
-    actions := fake.Actions()
-    if len(actions) != 1 {
-        t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions)
-    }
-    if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name {
-        t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name)
-    }
-}
-
-func TestValidateJob(t *testing.T) {
-    zero, ten, twenty := int32(0), int32(10), int32(20)
-    tests := []struct {
-        preconditions ScalePrecondition
-        job           batch.Job
-        expectError   bool
-        test          string
-    }{
-        {
-            preconditions: ScalePrecondition{-1, ""},
-            expectError:   false,
-            test:          "defaults",
-        },
-        {
-            preconditions: ScalePrecondition{-1, ""},
-            job: batch.Job{
-                ObjectMeta: metav1.ObjectMeta{
-                    ResourceVersion: "foo",
-                },
-                Spec: batch.JobSpec{
-                    Parallelism: &ten,
-                },
-            },
-            expectError: false,
-            test:        "defaults 2",
-        },
-        {
-            preconditions: ScalePrecondition{0, ""},
-            job: batch.Job{
-                ObjectMeta: metav1.ObjectMeta{
-                    ResourceVersion: "foo",
-                },
-                Spec: batch.JobSpec{
-                    Parallelism: &zero,
-                },
-            },
-            expectError: false,
-            test:        "size matches",
-        },
-        {
-            preconditions: ScalePrecondition{-1, "foo"},
-            job: batch.Job{
-                ObjectMeta: metav1.ObjectMeta{
-                    ResourceVersion: "foo",
-                },
-                Spec: batch.JobSpec{
-                    Parallelism: &ten,
-                },
-            },
-            expectError: false,
-            test:        "resource version matches",
-        },
-        {
-            preconditions: ScalePrecondition{10, "foo"},
-            job: batch.Job{
-                ObjectMeta: metav1.ObjectMeta{
-                    ResourceVersion: "foo",
-                },
-                Spec: batch.JobSpec{
-                    Parallelism: &ten,
-                },
-            },
-            expectError: false,
-            test:        "both match",
-        },
-        {
-            preconditions: ScalePrecondition{10, "foo"},
-            job: batch.Job{
-                ObjectMeta: metav1.ObjectMeta{
-                    ResourceVersion: "foo",
-                },
-                Spec: batch.JobSpec{
-                    Parallelism: &twenty,
-                },
-            },
-            expectError: true,
-            test:        "size different",
-        },
-        {
-            preconditions: ScalePrecondition{10, "foo"},
-            job: batch.Job{
-                ObjectMeta: metav1.ObjectMeta{
-                    ResourceVersion: "foo",
-                },
-            },
-            expectError: true,
-            test:        "parallelism nil",
-        },
-        {
-            preconditions: ScalePrecondition{10, "foo"},
-            job: batch.Job{
-                ObjectMeta: metav1.ObjectMeta{
-                    ResourceVersion: "bar",
-                },
-                Spec: batch.JobSpec{
-                    Parallelism: &ten,
-                },
-            },
-            expectError: true,
-            test:        "version different",
-        },
-        {
-            preconditions: ScalePrecondition{10, "foo"},
-            job: batch.Job{
-                ObjectMeta: metav1.ObjectMeta{
-                    ResourceVersion: "bar",
-                },
-                Spec: batch.JobSpec{
-                    Parallelism: &twenty,
-                },
-            },
-            expectError: true,
-            test:        "both different",
-        },
-    }
-    for _, test := range tests {
-        err := test.preconditions.ValidateJob(&test.job)
-        if err != nil && !test.expectError {
-            t.Errorf("unexpected error: %v (%s)", err, test.test)
-        }
-        if err == nil && test.expectError {
-            t.Errorf("expected an error: %v (%s)", err, test.test)
-        }
-    }
-}
-
 func TestDeploymentScaleRetry(t *testing.T) {
     verbsOnError := map[string]*kerrors.StatusError{
         "update": kerrors.NewConflict(api.Resource("Status"), "foo", nil),
     }
     scaleClientExpectedAction := []string{"get", "update", "get"}
     scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"})
+    scaler := NewScaler(scaleClient)
     preconditions := &ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo"
     namespace := "default"
 
-    scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil)
+    scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "deployments"})
     pass, err := scaleFunc()
     if pass != false {
         t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
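The verbsOnError table above drives error injection: createFakeScaleClient (a helper in this package, not shown in the diff) returns the mapped StatusError whenever the fake client sees that verb. A toy model of the lookup, assuming only apimachinery's errors package:

```go
package main

import (
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// react models the verb->error table these tests feed to the fake scale
// client: known verbs fail with the configured StatusError, others succeed.
func react(verbsOnError map[string]*kerrors.StatusError, verb string) error {
	if err, ok := verbsOnError[verb]; ok {
		return err
	}
	return nil
}

func main() {
	verbsOnError := map[string]*kerrors.StatusError{
		"update": kerrors.NewConflict(schema.GroupResource{Resource: "Status"}, "foo", nil),
	}
	fmt.Println(react(verbsOnError, "get"))                        // <nil>
	fmt.Println(kerrors.IsConflict(react(verbsOnError, "update"))) // true
}
```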
@@ -429,7 +162,7 @@ func TestDeploymentScaleRetry(t *testing.T) {
         t.Errorf("Did not expect an error on update failure, got %v", err)
     }
     preconditions = &ScalePrecondition{3, ""}
-    scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil)
+    scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "deployments"})
     pass, err = scaleFunc()
     if err == nil {
         t.Error("Expected error on precondition failure")
@@ -448,11 +181,11 @@ func TestDeploymentScaleRetry(t *testing.T) {
 func TestDeploymentScale(t *testing.T) {
     scaleClientExpectedAction := []string{"get", "update"}
     scaleClient := createFakeScaleClient("deployments", "foo", 2, nil)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo"
-    err := scaler.Scale("default", name, count, &preconditions, nil, nil)
+    err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "apps", Resource: "deployments"})
     if err != nil {
         t.Fatal(err)
     }
@@ -473,13 +206,13 @@ func TestDeploymentScaleInvalid(t *testing.T) {
         "update": kerrors.NewInvalid(api.Kind("Status"), "foo", nil),
     }
     scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo"
     namespace := "default"
 
-    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil)
+    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "deployments"})
     pass, err := scaleFunc()
     if pass {
         t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
@@ -501,11 +234,11 @@ func TestDeploymentScaleInvalid(t *testing.T) {
 func TestDeploymentScaleFailsPreconditions(t *testing.T) {
     scaleClientExpectedAction := []string{"get"}
     scaleClient := createFakeScaleClient("deployments", "foo", 10, nil)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{2, ""}
     count := uint(3)
     name := "foo"
-    err := scaler.Scale("default", name, count, &preconditions, nil, nil)
+    err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "apps", Resource: "deployments"})
     if err == nil {
         t.Fatal("expected to get an error but none was returned")
     }
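A note on the precondition convention these tests rely on: a size of -1 (or an empty resource version) disables that half of the check, which is why {-1, ""} means "no precondition" while {2, ""} demands exactly 2 replicas before scaling. A standalone sketch of that comparison, with hypothetical names:

```go
import "fmt"

// checkScalePrecondition mirrors the {-1, ""} convention used above:
// a negative expected size or an empty expected version means "skip".
func checkScalePrecondition(wantSize int, wantVersion string, gotSize int32, gotVersion string) error {
    if wantSize >= 0 && int32(wantSize) != gotSize {
        return fmt.Errorf("expected %d replicas, found %d", wantSize, gotSize)
    }
    if wantVersion != "" && wantVersion != gotVersion {
        return fmt.Errorf("expected resource version %q, found %q", wantVersion, gotVersion)
    }
    return nil
}
```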
@@ -523,11 +256,11 @@ func TestDeploymentScaleFailsPreconditions(t *testing.T) {
 func TestStatefulSetScale(t *testing.T) {
     scaleClientExpectedAction := []string{"get", "update"}
     scaleClient := createFakeScaleClient("statefulsets", "foo", 2, nil)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefullset"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo"
-    err := scaler.Scale("default", name, count, &preconditions, nil, nil)
+    err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "apps", Resource: "statefullset"})
     if err != nil {
         t.Fatal(err)
     }
@@ -548,13 +281,13 @@ func TestStatefulSetScaleRetry(t *testing.T) {
         "update": kerrors.NewConflict(api.Resource("Status"), "foo", nil),
     }
     scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefulsets"})
+    scaler := NewScaler(scaleClient)
     preconditions := &ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo"
     namespace := "default"

-    scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil)
+    scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "statefulsets"})
     pass, err := scaleFunc()
     if pass != false {
         t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
@@ -563,7 +296,7 @@ func TestStatefulSetScaleRetry(t *testing.T) {
         t.Errorf("Did not expect an error on update failure, got %v", err)
     }
     preconditions = &ScalePrecondition{3, ""}
-    scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil)
+    scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "statefulsets"})
     pass, err = scaleFunc()
     if err == nil {
         t.Error("Expected error on precondition failure")
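The retry behavior exercised here comes from ScaleCondition returning a condition function: a retryable conflict reports (false, nil) so a poll loop simply tries again, while a precondition failure surfaces as an error. A hedged sketch of a caller driving it, with illustrative poll intervals:

```go
import (
    "time"

    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/wait"
)

// scaleStatefulSetWithRetry polls one get/update cycle at a time until
// it succeeds, times out, or hits a non-retryable error.
func scaleStatefulSetWithRetry(scaler Scaler, ns, name string, count uint) error {
    gr := schema.GroupResource{Group: "apps", Resource: "statefulsets"}
    // nil preconditions: retry on conflict without any size/version check.
    cond := ScaleCondition(scaler, nil, ns, name, count, nil, gr)
    return wait.PollImmediate(100*time.Millisecond, 30*time.Second, cond)
}
```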
@@ -585,13 +318,13 @@ func TestStatefulSetScaleInvalid(t *testing.T) {
         "update": kerrors.NewInvalid(api.Kind("Status"), "foo", nil),
     }
     scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefulsets"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo"
     namespace := "default"

-    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil)
+    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "statefulsets"})
     pass, err := scaleFunc()
     if pass {
         t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
@@ -613,11 +346,11 @@ func TestStatefulSetScaleInvalid(t *testing.T) {
 func TestStatefulSetScaleFailsPreconditions(t *testing.T) {
     scaleClientExpectedAction := []string{"get"}
     scaleClient := createFakeScaleClient("statefulsets", "foo", 10, nil)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefulsets"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{2, ""}
     count := uint(3)
     name := "foo"
-    err := scaler.Scale("default", name, count, &preconditions, nil, nil)
+    err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "apps", Resource: "statefulsets"})
     if err == nil {
         t.Fatal("expected to get an error but none was returned")
     }
@@ -635,11 +368,11 @@ func TestStatefulSetScaleFailsPreconditions(t *testing.T) {
 func TestReplicaSetScale(t *testing.T) {
     scaleClientExpectedAction := []string{"get", "update"}
     scaleClient := createFakeScaleClient("replicasets", "foo", 10, nil)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo"
-    err := scaler.Scale("default", name, count, &preconditions, nil, nil)
+    err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "extensions", Resource: "replicasets"})
     if err != nil {
         t.Fatal(err)
     }
@@ -660,13 +393,13 @@ func TestReplicaSetScaleRetry(t *testing.T) {
     }
     scaleClientExpectedAction := []string{"get", "update", "get"}
     scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"})
+    scaler := NewScaler(scaleClient)
     preconditions := &ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo"
     namespace := "default"

-    scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil)
+    scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "extensions", Resource: "replicasets"})
     pass, err := scaleFunc()
     if pass != false {
         t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
@@ -675,7 +408,7 @@ func TestReplicaSetScaleRetry(t *testing.T) {
         t.Errorf("Did not expect an error on update failure, got %v", err)
     }
     preconditions = &ScalePrecondition{3, ""}
-    scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil)
+    scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "extensions", Resource: "replicasets"})
     pass, err = scaleFunc()
     if err == nil {
         t.Error("Expected error on precondition failure")
@@ -697,13 +430,13 @@ func TestReplicaSetScaleInvalid(t *testing.T) {
     }
     scaleClientExpectedAction := []string{"get", "update"}
     scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{-1, ""}
     count := uint(3)
     name := "foo"
     namespace := "default"

-    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil)
+    scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "extensions", Resource: "replicasets"})
     pass, err := scaleFunc()
     if pass {
         t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
@@ -725,11 +458,11 @@ func TestReplicaSetScaleInvalid(t *testing.T) {
 func TestReplicaSetsGetterFailsPreconditions(t *testing.T) {
     scaleClientExpectedAction := []string{"get"}
     scaleClient := createFakeScaleClient("replicasets", "foo", 10, nil)
-    scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"})
+    scaler := NewScaler(scaleClient)
     preconditions := ScalePrecondition{2, ""}
     count := uint(3)
     name := "foo"
-    err := scaler.Scale("default", name, count, &preconditions, nil, nil)
+    err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "extensions", Resource: "replicasets"})
     if err == nil {
         t.Fatal("expected to get an error but none was returned")
     }
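All of these tests lean on a createFakeScaleClient helper that the diff does not show. One plausible shape for it, assuming the fake scale client and reactor machinery from k8s.io/client-go; per-verb error injection (the verbsOnError maps above) is omitted, and all names here are illustrative rather than the project's exact helper:

```go
import (
    autoscalingv1 "k8s.io/api/autoscaling/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    fakescale "k8s.io/client-go/scale/fake"
    testcore "k8s.io/client-go/testing"
)

// newFakeScaleClient answers every "get" for the named resource with a
// Scale object reporting the given replica count.
func newFakeScaleClient(resource, name string, replicas int32) *fakescale.FakeScaleClient {
    c := &fakescale.FakeScaleClient{}
    c.AddReactor("get", resource, func(action testcore.Action) (bool, runtime.Object, error) {
        obj := &autoscalingv1.Scale{
            ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: metav1.NamespaceDefault},
            Spec:       autoscalingv1.ScaleSpec{Replicas: replicas},
        }
        return true, obj, nil
    })
    return c
}
```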
@@ -812,9 +545,9 @@ func TestGenericScaleSimple(t *testing.T) {
     // act
     for index, scenario := range scenarios {
         t.Run(fmt.Sprintf("running scenario %d: %s", index+1, scenario.name), func(t *testing.T) {
-            target := NewScaler(scenario.scaleGetter, scenario.targetGR)
+            target := NewScaler(scenario.scaleGetter)

-            resVersion, err := target.ScaleSimple("default", scenario.resName, &scenario.precondition, uint(scenario.newSize))
+            resVersion, err := target.ScaleSimple("default", scenario.resName, &scenario.precondition, uint(scenario.newSize), scenario.targetGR)

             if scenario.expectError && err == nil {
                 t.Fatal("expected an error but was not returned")
@@ -880,9 +613,9 @@ func TestGenericScale(t *testing.T) {
     // act
     for _, scenario := range scenarios {
         t.Run(scenario.name, func(t *testing.T) {
-            target := NewScaler(scenario.scaleGetter, scenario.targetGR)
+            target := NewScaler(scenario.scaleGetter)

-            err := target.Scale("default", scenario.resName, uint(scenario.newSize), &scenario.precondition, nil, scenario.waitForReplicas)
+            err := target.Scale("default", scenario.resName, uint(scenario.newSize), &scenario.precondition, nil, scenario.waitForReplicas, scenario.targetGR)

             if scenario.expectError && err == nil {
                 t.Fatal("expected an error but was not returned")
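The scenario fields referenced in these two generic tests (scaleGetter, targetGR, resName, precondition, newSize, waitForReplicas, expectError) imply a table-driven layout roughly like the following; the struct is inferred from the call sites, not copied from the test file:

```go
// Inferred shape of the table-driven scenarios; field names come from
// the call sites above, the rest is illustrative.
type scaleScenario struct {
    name            string
    precondition    ScalePrecondition
    newSize         int
    targetGR        schema.GroupResource
    resName         string
    scaleGetter     scaleclient.ScalesGetter
    waitForReplicas *RetryParams
    expectError     bool
}
```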
@@ -257,7 +257,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
        // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
        // to the same size achieves this, because the scale operation advances the RC's sequence number
        // and awaits it to be observed and reported back in the RC's status.
-       framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods, true)
+       framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)

        // Only check the keys, the pods can be different if the kubelet updated it.
        // TODO: Can it really?
@@ -288,9 +288,9 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
        restarter.kill()
        // This is best effort to try and create pods while the scheduler is down,
        // since we don't know exactly when it is restarted after the kill signal.
-       framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, false))
+       framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
        restarter.waitUp()
-       framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, true))
+       framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
    })

    It("Kubelet should not restart containers across restart", func() {
@@ -521,7 +521,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
            Expect(err).NotTo(HaveOccurred())

            By("scaling rethinkdb")
-           framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "rethinkdb-rc", 2, true)
+           framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, "rethinkdb-rc", 2, true)
            checkDbInstances()

            By("starting admin")
@@ -564,7 +564,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
            Expect(err).NotTo(HaveOccurred())

            By("scaling hazelcast")
-           framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "hazelcast", 2, true)
+           framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, "hazelcast", 2, true)
            forEachPod("name", "hazelcast", func(pod v1.Pod) {
                _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
                Expect(err).NotTo(HaveOccurred())
@@ -31,7 +31,6 @@ import (
    clientset "k8s.io/client-go/kubernetes"
    scaleclient "k8s.io/client-go/scale"
    extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
-   "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
    testutils "k8s.io/kubernetes/test/utils"
 )
@@ -179,8 +178,8 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
    return err
 }

-func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
-   return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments"))
+func ScaleDeployment(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
+   return ScaleResource(clientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments"))
 }

 func RunDeployment(config testutils.DeploymentConfig) error {
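For the e2e call sites updated throughout the rest of this diff, the migration is mechanical: drop the internal clientset argument, which was only needed for the removed job scaler. Before and after, using the deployment helper as the example (ns and name are illustrative):

```go
// Before: the internal clientset was threaded through for the job scaler.
//   err := framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, name, 3, true)
// After: the scales getter alone is enough.
err := framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, ns, name, 3, true)
framework.ExpectNoError(err)
```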
@@ -85,9 +85,7 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str

 // ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
 // none are running, otherwise it does what a synchronous scale operation would do.
-//TODO(p0lyn0mial): remove internalClientset.
-//TODO(p0lyn0mial): update the callers.
-func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error {
+func ScaleRCByLabels(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error {
    listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
    rcs, err := clientset.CoreV1().ReplicationControllers(ns).List(listOpts)
    if err != nil {
@@ -99,7 +97,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl
    Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
    for _, labelRC := range rcs.Items {
        name := labelRC.Name
-       if err := ScaleRC(clientset, internalClientset, scalesGetter, ns, name, replicas, false); err != nil {
+       if err := ScaleRC(clientset, scalesGetter, ns, name, replicas, false); err != nil {
            return err
        }
        rc, err := clientset.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
@@ -159,8 +157,8 @@ func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalcl
    return DeleteResourceAndPods(clientset, internalClientset, scaleClient, api.Kind("ReplicationController"), ns, name)
 }

-func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
-   return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers"))
+func ScaleRC(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
+   return ScaleResource(clientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers"))
 }

 func RunRC(config testutils.RCConfig) error {
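Given the documented behavior of ScaleRCByLabels above (replicas == 0 waits until none are running), a typical call under the new signature might look like this; the label set is illustrative:

```go
// Scale every RC labelled app=web down to zero and wait for the pods
// to drain, per the replicas == 0 behavior documented above.
if err := framework.ScaleRCByLabels(f.ClientSet, f.ScalesGetter, ns, map[string]string{"app": "web"}, 0); err != nil {
    framework.Failf("scaling down RCs: %v", err)
}
```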
@@ -2799,7 +2799,6 @@ func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) {

 func ScaleResource(
    clientset clientset.Interface,
-   internalClientset internalclientset.Interface,
    scalesGetter scaleclient.ScalesGetter,
    ns, name string,
    size uint,
@@ -2808,8 +2807,8 @@ func ScaleResource(
    gr schema.GroupResource,
 ) error {
    By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
-   scaler := kubectl.ScalerFor(kind, internalClientset.Batch(), scalesGetter, gr)
-   if err := testutils.ScaleResourceWithRetries(scaler, ns, name, size); err != nil {
+   scaler := kubectl.NewScaler(scalesGetter)
+   if err := testutils.ScaleResourceWithRetries(scaler, ns, name, size, gr); err != nil {
        return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
    }
    if !wait {
@@ -1291,7 +1291,7 @@ var _ = SIGDescribe("Services", func() {
        }

        By("Scaling down replication controller to zero")
-       framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false)
+       framework.ScaleRC(f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false)

        By("Update service to not tolerate unready services")
        _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) {
@@ -649,7 +649,6 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling
    newSize := uint(rand.Intn(config.GetReplicas()) + config.GetReplicas()/2)
    framework.ExpectNoError(framework.ScaleResource(
        config.GetClient(),
-       config.GetInternalClient(),
        config.GetScalesGetter(),
        config.GetNamespace(),
        config.GetName(),
@@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
        By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
        // use scale to create another equivalent pod and wait for failure event
        WaitForSchedulerAfterAction(f, func() error {
-           err := framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false)
+           err := framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false)
            return err
        }, ns, affinityRCName, false)
        // and this new pod should be rejected since node label has been updated
@@ -194,7 +194,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {

        By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1))

-       framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true)
+       framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true)
        testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{
            LabelSelector: "name=scheduler-priority-avoid-pod",
        })
@@ -68,8 +68,8 @@ var _ = SIGDescribe("Rescheduler [Serial]", func() {
        deployment := deployments.Items[0]
        replicas := uint(*(deployment.Spec.Replicas))

-       err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true)
-       defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true))
+       err = framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true)
+       defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true))
        framework.ExpectNoError(err)

    })
@@ -80,7 +80,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
    replicas := millicores / 100

    reserveCpu(f, id, 1, 100)
-   framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false))
+   framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false))

    for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
        pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels)
@@ -20,6 +20,7 @@ import (
    "fmt"
    "time"

+   "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/kubernetes/pkg/kubectl"
 )

@@ -32,10 +33,10 @@ const (
    waitRetryTImeout = 5 * time.Minute
 )

-func ScaleResourceWithRetries(scaler kubectl.Scaler, namespace, name string, size uint) error {
+func ScaleResourceWithRetries(scaler kubectl.Scaler, namespace, name string, size uint, gr schema.GroupResource) error {
    waitForScale := kubectl.NewRetryParams(updateRetryInterval, updateRetryTimeout)
    waitForReplicas := kubectl.NewRetryParams(waitRetryInterval, waitRetryTImeout)
-   if err := scaler.Scale(namespace, name, size, nil, waitForScale, waitForReplicas); err != nil {
+   if err := scaler.Scale(namespace, name, size, nil, waitForScale, waitForReplicas, gr); err != nil {
        return fmt.Errorf("Error while scaling %s to %d replicas: %v", name, size, err)
    }
    return nil
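Putting the pieces together, a test-utility caller now builds one generic scaler and threads the GroupResource through the retry helper. A sketch with illustrative namespace and name values:

```go
import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    scaleclient "k8s.io/client-go/scale"
    "k8s.io/kubernetes/pkg/kubectl"
    testutils "k8s.io/kubernetes/test/utils"
)

// scaleRCWithRetries drives the updated helper end to end: the target
// resource is named only at the call site, not when the scaler is built.
func scaleRCWithRetries(scales scaleclient.ScalesGetter) error {
    scaler := kubectl.NewScaler(scales)
    gr := schema.GroupResource{Resource: "replicationcontrollers"}
    return testutils.ScaleResourceWithRetries(scaler, "default", "my-rc", 5, gr)
}
```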