Merge pull request #44525 from irfanurrehman/fed-common-preferences

Automatic merge from submit-queue (batch tested with PRs 45860, 45119, 44525, 45625, 44403)

[Federation] Move annotations and related parsing code as common code

This PR moves some code, which was duplicate, around as common code.
Changes the names of structures used for annotations to common names.
s/FederatedReplicaSetPreferences/ReplicaAllocationPreferences/
s/ClusterReplicaSetPreferences/ClusterPreferences/
This can be reused in job controller and hpa controller code.

**Special notes for your reviewer**:
@kubernetes/sig-federation-misc 

**Release note**:

```NONE
```
This commit is contained in:
Kubernetes Submit Queue 2017-05-16 16:14:54 -07:00 committed by GitHub
commit 31cb269d4c
16 changed files with 331 additions and 229 deletions

View File

@ -122,36 +122,40 @@ type ClusterList struct {
Items []Cluster
}
// Temporary/alpha structures to support custom replica assignments within FederatedReplicaSet.
// Temporary/alpha structures to support custom replica assignments within Federated workloads.
// A set of preferences that can be added to federated version of ReplicaSet as a json-serialized annotation.
// The preferences allow the user to express in which clusters he wants to put his replicas within the
// mentioned FederatedReplicaSet.
type FederatedReplicaSetPreferences struct {
// If set to true then already scheduled and running replicas may be moved to other clusters to
// in order to bring cluster replicasets towards a desired state. Otherwise, if set to false,
// A set of preferences that can be added to federated version of workloads (deployments, replicasets, ..)
// as a json-serialized annotation. The preferences allow the users to express in which clusters they
// want to put their replicas within the mentioned workload objects.
type ReplicaAllocationPreferences struct {
// If set to true then already scheduled and running replicas may be moved to other clusters
// in order to match current state to the specified preferences. Otherwise, if set to false,
// up and running replicas will not be moved.
// +optional
Rebalance bool
// A mapping between cluster names and preferences regarding local ReplicaSet in these clusters.
// "*" (if provided) applies to all clusters if an explicit mapping is not provided. If there is no
// "*" that clusters without explicit preferences should not have any replicas scheduled.
// A mapping between cluster names and preferences regarding a local workload object (dep, rs, .. ) in
// these clusters.
// "*" (if provided) applies to all clusters if an explicit mapping is not provided.
// If omitted, clusters without explicit preferences should not have any replicas scheduled.
// +optional
Clusters map[string]ClusterReplicaSetPreferences
Clusters map[string]ClusterPreferences
}
// Preferences regarding number of replicas assigned to a cluster replicaset within a federated replicaset.
type ClusterReplicaSetPreferences struct {
// Minimum number of replicas that should be assigned to this Local ReplicaSet. 0 by default.
// Preferences regarding number of replicas assigned to a cluster workload object (dep, rs, ..) within
// a federated workload object.
type ClusterPreferences struct {
// Minimum number of replicas that should be assigned to this cluster workload object. 0 by default.
// +optional
MinReplicas int64
// Maximum number of replicas that should be assigned to this Local ReplicaSet. Unbounded if no value provided (default).
// Maximum number of replicas that should be assigned to this cluster workload object.
// Unbounded if no value provided (default).
// +optional
MaxReplicas *int64
// A number expressing the preference to put an additional replica to this LocalReplicaSet. 0 by default.
// A number expressing the preference to put an additional replica to this cluster workload object.
// 0 by default.
Weight int64
}

View File

@ -40,12 +40,12 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_Cluster, InType: reflect.TypeOf(&Cluster{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterCondition, InType: reflect.TypeOf(&ClusterCondition{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterList, InType: reflect.TypeOf(&ClusterList{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterReplicaSetPreferences, InType: reflect.TypeOf(&ClusterReplicaSetPreferences{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterPreferences, InType: reflect.TypeOf(&ClusterPreferences{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterServiceIngress, InType: reflect.TypeOf(&ClusterServiceIngress{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterSpec, InType: reflect.TypeOf(&ClusterSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterStatus, InType: reflect.TypeOf(&ClusterStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_FederatedReplicaSetPreferences, InType: reflect.TypeOf(&FederatedReplicaSetPreferences{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_FederatedServiceIngress, InType: reflect.TypeOf(&FederatedServiceIngress{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ReplicaAllocationPreferences, InType: reflect.TypeOf(&ReplicaAllocationPreferences{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ServerAddressByClientCIDR, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})},
)
}
@ -99,10 +99,10 @@ func DeepCopy_federation_ClusterList(in interface{}, out interface{}, c *convers
}
}
func DeepCopy_federation_ClusterReplicaSetPreferences(in interface{}, out interface{}, c *conversion.Cloner) error {
func DeepCopy_federation_ClusterPreferences(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*ClusterReplicaSetPreferences)
out := out.(*ClusterReplicaSetPreferences)
in := in.(*ClusterPreferences)
out := out.(*ClusterPreferences)
*out = *in
if in.MaxReplicas != nil {
in, out := &in.MaxReplicas, &out.MaxReplicas
@ -169,26 +169,6 @@ func DeepCopy_federation_ClusterStatus(in interface{}, out interface{}, c *conve
}
}
func DeepCopy_federation_FederatedReplicaSetPreferences(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*FederatedReplicaSetPreferences)
out := out.(*FederatedReplicaSetPreferences)
*out = *in
if in.Clusters != nil {
in, out := &in.Clusters, &out.Clusters
*out = make(map[string]ClusterReplicaSetPreferences)
for key, val := range *in {
newVal := new(ClusterReplicaSetPreferences)
if err := DeepCopy_federation_ClusterReplicaSetPreferences(&val, newVal, c); err != nil {
return err
}
(*out)[key] = *newVal
}
}
return nil
}
}
func DeepCopy_federation_FederatedServiceIngress(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*FederatedServiceIngress)
@ -207,6 +187,26 @@ func DeepCopy_federation_FederatedServiceIngress(in interface{}, out interface{}
}
}
// DeepCopy_federation_ReplicaAllocationPreferences deep-copies a
// ReplicaAllocationPreferences value from in to out, cloning the Clusters
// map entry by entry so the copy shares no mutable state with the source.
func DeepCopy_federation_ReplicaAllocationPreferences(in interface{}, out interface{}, c *conversion.Cloner) error {
	src := in.(*ReplicaAllocationPreferences)
	dst := out.(*ReplicaAllocationPreferences)
	*dst = *src
	if src.Clusters == nil {
		return nil
	}
	// The shallow copy above aliased the map; replace it with a fresh map
	// and deep-copy every value.
	dst.Clusters = make(map[string]ClusterPreferences)
	for key, val := range src.Clusters {
		copied := new(ClusterPreferences)
		if err := DeepCopy_federation_ClusterPreferences(&val, copied, c); err != nil {
			return err
		}
		dst.Clusters[key] = *copied
	}
	return nil
}
func DeepCopy_federation_ServerAddressByClientCIDR(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*ServerAddressByClientCIDR)

View File

@ -21,6 +21,7 @@ go_library(
"//federation/pkg/federation-controller/util/eventsink:go_default_library",
"//federation/pkg/federation-controller/util/planner:go_default_library",
"//federation/pkg/federation-controller/util/podanalyzer:go_default_library",
"//federation/pkg/federation-controller/util/replicapreferences:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
@ -55,7 +56,6 @@ go_test(
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",

View File

@ -18,7 +18,6 @@ package deployment
import (
"bytes"
"encoding/json"
"fmt"
"sort"
"time"
@ -44,6 +43,7 @@ import (
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/planner"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/podanalyzer"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/replicapreferences"
"k8s.io/kubernetes/pkg/api"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
@ -67,21 +67,6 @@ var (
updateTimeout = 30 * time.Second
)
// parseFederationDeploymentPreference decodes the scheduling preferences
// stored in the deployment's FedDeploymentPreferencesAnnotation.
// A nil preference with a nil error means no annotation was present; a
// non-nil error means the annotation held invalid JSON.
func parseFederationDeploymentPreference(fd *extensionsv1.Deployment) (*fed.FederatedReplicaSetPreferences, error) {
	annotations := fd.Annotations
	if annotations == nil {
		return nil, nil
	}
	prefJSON, ok := annotations[FedDeploymentPreferencesAnnotation]
	if !ok {
		return nil, nil
	}
	pref := &fed.FederatedReplicaSetPreferences{}
	if err := json.Unmarshal([]byte(prefJSON), pref); err != nil {
		return nil, err
	}
	return pref, nil
}
type DeploymentController struct {
fedClient fedclientset.Interface
@ -116,8 +101,8 @@ func NewDeploymentController(federationClient fedclientset.Interface) *Deploymen
clusterDeliverer: fedutil.NewDelayingDeliverer(),
deploymentWorkQueue: workqueue.New(),
deploymentBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
defaultPlanner: planner.NewPlanner(&fed.FederatedReplicaSetPreferences{
Clusters: map[string]fed.ClusterReplicaSetPreferences{
defaultPlanner: planner.NewPlanner(&fed.ReplicaAllocationPreferences{
Clusters: map[string]fed.ClusterPreferences{
"*": {Weight: 1},
},
}),
@ -372,7 +357,7 @@ func (fdc *DeploymentController) schedule(fd *extensionsv1.Deployment, clusters
// TODO: integrate real scheduler
plannerToBeUsed := fdc.defaultPlanner
fdPref, err := parseFederationDeploymentPreference(fd)
fdPref, err := replicapreferences.GetAllocationPreferences(fd, FedDeploymentPreferencesAnnotation)
if err != nil {
glog.Info("Invalid Deployment specific preference, use default. deployment: %v, err: %v", fd.Name, err)
}

View File

@ -22,7 +22,6 @@ import (
"testing"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@ -43,39 +42,6 @@ const (
pods = "pods"
)
// TestParseFederationDeploymentPreference verifies that the annotation parser
// accepts well-formed JSON preferences and returns an error for malformed JSON.
func TestParseFederationDeploymentPreference(t *testing.T) {
// Annotation payloads that must parse successfully.
successPrefs := []string{
`{"rebalance": true,
"clusters": {
"k8s-1": {"minReplicas": 10, "maxReplicas": 20, "weight": 2},
"*": {"weight": 1}
}}`,
}
// Annotation payloads that must fail to parse.
failedPrefes := []string{
`{`, // bad json
}
rs := newDeploymentWithReplicas("d-1", 100)
accessor, _ := meta.Accessor(rs)
// anno aliases the deployment's annotation map, so the writes in the loops
// below are visible to parseFederationDeploymentPreference through rs.
anno := accessor.GetAnnotations()
if anno == nil {
anno = make(map[string]string)
accessor.SetAnnotations(anno)
}
for _, prefString := range successPrefs {
anno[FedDeploymentPreferencesAnnotation] = prefString
pref, err := parseFederationDeploymentPreference(rs)
assert.NotNil(t, pref)
assert.Nil(t, err)
}
for _, prefString := range failedPrefes {
anno[FedDeploymentPreferencesAnnotation] = prefString
pref, err := parseFederationDeploymentPreference(rs)
assert.Nil(t, pref)
assert.NotNil(t, err)
}
}
func TestDeploymentController(t *testing.T) {
flag.Set("logtostderr", "true")
flag.Set("v", "5")

View File

@ -21,6 +21,7 @@ go_library(
"//federation/pkg/federation-controller/util/eventsink:go_default_library",
"//federation/pkg/federation-controller/util/planner:go_default_library",
"//federation/pkg/federation-controller/util/podanalyzer:go_default_library",
"//federation/pkg/federation-controller/util/replicapreferences:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
@ -55,7 +56,6 @@ go_test(
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",

View File

@ -18,7 +18,6 @@ package replicaset
import (
"bytes"
"encoding/json"
"fmt"
"sort"
"time"
@ -44,6 +43,7 @@ import (
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/planner"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/podanalyzer"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/replicapreferences"
"k8s.io/kubernetes/pkg/api"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
@ -67,21 +67,6 @@ var (
updateTimeout = 30 * time.Second
)
// parseFederationReplicaSetReference decodes the scheduling preferences
// stored in the replica set's FedReplicaSetPreferencesAnnotation.
// A nil preference with a nil error means no annotation was present; a
// non-nil error means the annotation held invalid JSON.
func parseFederationReplicaSetReference(frs *extensionsv1.ReplicaSet) (*fed.FederatedReplicaSetPreferences, error) {
	annotations := frs.Annotations
	if annotations == nil {
		return nil, nil
	}
	prefJSON, ok := annotations[FedReplicaSetPreferencesAnnotation]
	if !ok {
		return nil, nil
	}
	pref := &fed.FederatedReplicaSetPreferences{}
	if err := json.Unmarshal([]byte(prefJSON), pref); err != nil {
		return nil, err
	}
	return pref, nil
}
type ReplicaSetController struct {
fedClient fedclientset.Interface
@ -118,8 +103,8 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe
clusterDeliverer: fedutil.NewDelayingDeliverer(),
replicasetWorkQueue: workqueue.New(),
replicaSetBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
defaultPlanner: planner.NewPlanner(&fed.FederatedReplicaSetPreferences{
Clusters: map[string]fed.ClusterReplicaSetPreferences{
defaultPlanner: planner.NewPlanner(&fed.ReplicaAllocationPreferences{
Clusters: map[string]fed.ClusterPreferences{
"*": {Weight: 1},
},
}),
@ -352,7 +337,7 @@ func (frsc *ReplicaSetController) schedule(frs *extensionsv1.ReplicaSet, cluster
// TODO: integrate real scheduler
plnr := frsc.defaultPlanner
frsPref, err := parseFederationReplicaSetReference(frs)
frsPref, err := replicapreferences.GetAllocationPreferences(frs, FedReplicaSetPreferencesAnnotation)
if err != nil {
glog.Info("Invalid ReplicaSet specific preference, use default. rs: %v, err: %v", frs, err)
}

View File

@ -22,7 +22,6 @@ import (
"testing"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
core "k8s.io/client-go/testing"
@ -44,39 +43,6 @@ const (
k8s2 = "k8s-2"
)
// TestParseFederationReplicaSetReference verifies that the annotation parser
// accepts well-formed JSON preferences and returns an error for malformed JSON.
func TestParseFederationReplicaSetReference(t *testing.T) {
// Annotation payloads that must parse successfully.
successPrefs := []string{
`{"rebalance": true,
"clusters": {
"k8s-1": {"minReplicas": 10, "maxReplicas": 20, "weight": 2},
"*": {"weight": 1}
}}`,
}
// Annotation payloads that must fail to parse.
failedPrefes := []string{
`{`, // bad json
}
rs := newReplicaSetWithReplicas("rs-1", 100)
accessor, _ := meta.Accessor(rs)
// anno aliases the replica set's annotation map, so the writes in the loops
// below are visible to parseFederationReplicaSetReference through rs.
anno := accessor.GetAnnotations()
if anno == nil {
anno = make(map[string]string)
accessor.SetAnnotations(anno)
}
for _, prefString := range successPrefs {
anno[FedReplicaSetPreferencesAnnotation] = prefString
pref, err := parseFederationReplicaSetReference(rs)
assert.NotNil(t, pref)
assert.Nil(t, err)
}
for _, prefString := range failedPrefes {
anno[FedReplicaSetPreferencesAnnotation] = prefString
pref, err := parseFederationReplicaSetReference(rs)
assert.Nil(t, pref)
assert.NotNil(t, err)
}
}
func TestReplicaSetController(t *testing.T) {
flag.Set("logtostderr", "true")
flag.Set("v", "5")

View File

@ -93,6 +93,7 @@ filegroup(
"//federation/pkg/federation-controller/util/finalizers:all-srcs",
"//federation/pkg/federation-controller/util/planner:all-srcs",
"//federation/pkg/federation-controller/util/podanalyzer:all-srcs",
"//federation/pkg/federation-controller/util/replicapreferences:all-srcs",
"//federation/pkg/federation-controller/util/test:all-srcs",
],
tags = ["automanaged"],

View File

@ -26,16 +26,16 @@ import (
// Planner decides how many out of the given replicas should be placed in each of the
// federated clusters.
type Planner struct {
preferences *fedapi.FederatedReplicaSetPreferences
preferences *fedapi.ReplicaAllocationPreferences
}
type namedClusterReplicaSetPreferences struct {
type namedClusterPreferences struct {
clusterName string
hash uint32
fedapi.ClusterReplicaSetPreferences
fedapi.ClusterPreferences
}
type byWeight []*namedClusterReplicaSetPreferences
type byWeight []*namedClusterPreferences
// Len reports the number of entries, as required by sort.Interface.
func (a byWeight) Len() int {
	return len(a)
}
// Swap exchanges the entries at positions i and j, as required by sort.Interface.
func (a byWeight) Swap(i, j int) {
	a[j], a[i] = a[i], a[j]
}
@ -46,7 +46,7 @@ func (a byWeight) Less(i, j int) bool {
return (a[i].Weight > a[j].Weight) || (a[i].Weight == a[j].Weight && a[i].hash < a[j].hash)
}
func NewPlanner(preferences *fedapi.FederatedReplicaSetPreferences) *Planner {
func NewPlanner(preferences *fedapi.ReplicaAllocationPreferences) *Planner {
return &Planner{
preferences: preferences,
}
@ -67,20 +67,20 @@ func NewPlanner(preferences *fedapi.FederatedReplicaSetPreferences) *Planner {
func (p *Planner) Plan(replicasToDistribute int64, availableClusters []string, currentReplicaCount map[string]int64,
estimatedCapacity map[string]int64, replicaSetKey string) (map[string]int64, map[string]int64) {
preferences := make([]*namedClusterReplicaSetPreferences, 0, len(availableClusters))
preferences := make([]*namedClusterPreferences, 0, len(availableClusters))
plan := make(map[string]int64, len(preferences))
overflow := make(map[string]int64, len(preferences))
named := func(name string, pref fedapi.ClusterReplicaSetPreferences) *namedClusterReplicaSetPreferences {
named := func(name string, pref fedapi.ClusterPreferences) *namedClusterPreferences {
// Seems to work better than Adler-32 for our case.
hasher := fnv.New32()
hasher.Write([]byte(name))
hasher.Write([]byte(replicaSetKey))
return &namedClusterReplicaSetPreferences{
clusterName: name,
hash: hasher.Sum32(),
ClusterReplicaSetPreferences: pref,
return &namedClusterPreferences{
clusterName: name,
hash: hasher.Sum32(),
ClusterPreferences: pref,
}
}
@ -158,7 +158,7 @@ func (p *Planner) Plan(replicasToDistribute int64, availableClusters []string, c
for _, preference := range preferences {
weightSum += preference.Weight
}
newPreferences := make([]*namedClusterReplicaSetPreferences, 0, len(preferences))
newPreferences := make([]*namedClusterPreferences, 0, len(preferences))
distributeInThisLoop := remainingReplicas

View File

@ -24,8 +24,8 @@ import (
"github.com/stretchr/testify/assert"
)
func doCheck(t *testing.T, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string, expected map[string]int64) {
planer := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
func doCheck(t *testing.T, pref map[string]fedapi.ClusterPreferences, replicas int64, clusters []string, expected map[string]int64) {
planer := NewPlanner(&fedapi.ReplicaAllocationPreferences{
Clusters: pref,
})
plan, overflow := planer.Plan(replicas, clusters, map[string]int64{}, map[string]int64{}, "")
@ -33,9 +33,9 @@ func doCheck(t *testing.T, pref map[string]fedapi.ClusterReplicaSetPreferences,
assert.Equal(t, 0, len(overflow))
}
func doCheckWithExisting(t *testing.T, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string,
func doCheckWithExisting(t *testing.T, pref map[string]fedapi.ClusterPreferences, replicas int64, clusters []string,
existing map[string]int64, expected map[string]int64) {
planer := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
planer := NewPlanner(&fedapi.ReplicaAllocationPreferences{
Clusters: pref,
})
plan, overflow := planer.Plan(replicas, clusters, existing, map[string]int64{}, "")
@ -43,12 +43,12 @@ func doCheckWithExisting(t *testing.T, pref map[string]fedapi.ClusterReplicaSetP
assert.EqualValues(t, expected, plan)
}
func doCheckWithExistingAndCapacity(t *testing.T, rebalance bool, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string,
func doCheckWithExistingAndCapacity(t *testing.T, rebalance bool, pref map[string]fedapi.ClusterPreferences, replicas int64, clusters []string,
existing map[string]int64,
capacity map[string]int64,
expected map[string]int64,
expectedOverflow map[string]int64) {
planer := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
planer := NewPlanner(&fedapi.ReplicaAllocationPreferences{
Rebalance: rebalance,
Clusters: pref,
})
@ -62,102 +62,102 @@ func pint(val int64) *int64 {
}
func TestEqual(t *testing.T) {
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
50, []string{"A", "B", "C"},
// hash dependent
map[string]int64{"A": 16, "B": 17, "C": 17})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 25, "B": 25})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
1, []string{"A", "B"},
// hash dependent
map[string]int64{"A": 0, "B": 1})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
1, []string{"A", "B", "C", "D"},
// hash dependent
map[string]int64{"A": 0, "B": 0, "C": 0, "D": 1})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
1, []string{"A"},
map[string]int64{"A": 1})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
1, []string{},
map[string]int64{})
}
func TestEqualWithExisting(t *testing.T) {
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"C": 30},
map[string]int64{"A": 10, "B": 10, "C": 30})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 30},
map[string]int64{"A": 30, "B": 20})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 0, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 1, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 4, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 5, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 6, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 7, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
500000, []string{"A", "B"},
map[string]int64{"A": 300000},
map[string]int64{"A": 300000, "B": 200000})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 10},
map[string]int64{"A": 25, "B": 25})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 10, "B": 70},
@ -165,13 +165,13 @@ func TestEqualWithExisting(t *testing.T) {
// TODO: Should be 10:40, update algorithm. Issue: #31816
map[string]int64{"A": 0, "B": 50})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
1, []string{"A", "B"},
map[string]int64{"A": 30},
map[string]int64{"A": 1, "B": 0})
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 10, "B": 20},
@ -180,7 +180,7 @@ func TestEqualWithExisting(t *testing.T) {
func TestWithExistingAndCapacity(t *testing.T) {
// desired without capacity: map[string]int64{"A": 17, "B": 17, "C": 16})
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{},
@ -189,7 +189,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"C": 7})
// desired B:50 C:0
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterPreferences{
"A": {Weight: 10000},
"B": {Weight: 1}},
50, []string{"B", "C"},
@ -200,7 +200,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
)
// desired A:20 B:40
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterPreferences{
"A": {Weight: 1},
"B": {Weight: 2}},
60, []string{"A", "B", "C"},
@ -210,7 +210,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"B": 30})
// map[string]int64{"A": 10, "B": 30, "C": 21, "D": 10})
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)},
@ -223,7 +223,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
)
// desired A:20 B:20
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterPreferences{
"A": {Weight: 1},
"B": {Weight: 1}},
60, []string{"A", "B", "C"},
@ -233,7 +233,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"A": 20, "B": 20})
// desired A:10 B:50 although A:50 B:10 is fully acceptable because rebalance = false
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterPreferences{
"A": {Weight: 1},
"B": {Weight: 5}},
60, []string{"A", "B", "C"},
@ -242,7 +242,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"A": 50, "B": 10, "C": 0},
map[string]int64{})
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterPreferences{
"*": {MinReplicas: 20, Weight: 0}},
50, []string{"A", "B", "C"},
map[string]int64{},
@ -251,7 +251,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{})
// Actually we would like to have extra 20 in B but 15 is also good.
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterPreferences{
"*": {MinReplicas: 20, Weight: 1}},
60, []string{"A", "B"},
map[string]int64{},
@ -261,75 +261,75 @@ func TestWithExistingAndCapacity(t *testing.T) {
}
func TestMin(t *testing.T) {
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {MinReplicas: 2, Weight: 0}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 2, "B": 2, "C": 2})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {MinReplicas: 20, Weight: 0}},
50, []string{"A", "B", "C"},
// hash dependent.
map[string]int64{"A": 10, "B": 20, "C": 20})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {MinReplicas: 20, Weight: 0},
"A": {MinReplicas: 100, Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 50, "B": 0, "C": 0})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {MinReplicas: 10, Weight: 1, MaxReplicas: pint(12)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 12, "B": 12, "C": 12})
}
func TestMax(t *testing.T) {
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 1, MaxReplicas: pint(2)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 2, "B": 2, "C": 2})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"*": {Weight: 0, MaxReplicas: pint(2)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 0, "B": 0, "C": 0})
}
func TestWeight(t *testing.T) {
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"A": {Weight: 1},
"B": {Weight: 2}},
60, []string{"A", "B", "C"},
map[string]int64{"A": 20, "B": 40, "C": 0})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"A": {Weight: 10000},
"B": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 50, "B": 0, "C": 0})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"A": {Weight: 10000},
"B": {Weight: 1}},
50, []string{"B", "C"},
map[string]int64{"B": 50, "C": 0})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 10, "B": 20, "C": 20})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(10)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 10, "B": 30, "C": 10})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)},
@ -337,7 +337,7 @@ func TestWeight(t *testing.T) {
71, []string{"A", "B", "C", "D"},
map[string]int64{"A": 10, "B": 30, "C": 21, "D": 10})
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)},

View File

@ -0,0 +1,47 @@
# Bazel build rules for the federation replicapreferences util package.
# NOTE(review): tags = ["automanaged"] suggests these rules are kept in sync
# by a generator (gazelle/update-bazel); hand edits may be overwritten — confirm
# before relying on manual changes here.
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)
# Library with the shared replica-allocation-preferences parsing code
# (preferences.go); depends on the federation API types and apimachinery
# meta/runtime helpers it uses.
go_library(
    name = "go_default_library",
    srcs = ["preferences.go"],
    tags = ["automanaged"],
    deps = [
        "//federation/apis/federation:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
    ],
)
# Standard per-package source filegroups consumed by the repo-wide
# source aggregation targets.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
# Unit tests for the preferences parsing (preferences_test.go), built
# against the library above.
go_test(
    name = "go_default_test",
    srcs = ["preferences_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/apis/extensions/v1beta1:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
    ],
)

View File

@ -0,0 +1,55 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package replicapreferences
import (
"encoding/json"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
fed "k8s.io/kubernetes/federation/apis/federation"
)
// GetAllocationPreferences extracts the replica-allocation preferences that
// were stored as a json-serialized annotation under the given key on obj.
// It returns (nil, nil) when obj is nil or when no annotation with that key
// exists, and a non-nil error when the object has no metadata accessor or
// the annotation value is not valid json.
func GetAllocationPreferences(obj runtime.Object, key string) (*fed.ReplicaAllocationPreferences, error) {
	if obj == nil {
		return nil, nil
	}
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return nil, err
	}
	// A lookup on a nil map simply misses, so no separate nil check is needed.
	serialized, ok := accessor.GetAnnotations()[key]
	if !ok {
		return nil, nil
	}
	prefs := &fed.ReplicaAllocationPreferences{}
	if err := json.Unmarshal([]byte(serialized), prefs); err != nil {
		return nil, err
	}
	return prefs, nil
}

View File

@ -0,0 +1,92 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package replicapreferences
import (
"testing"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/runtime"
)
const (
TestPreferencesAnnotationKey = "federation.kubernetes.io/test-preferences"
)
// TestGetAllocationPreferences verifies that replica-allocation preferences
// stored as a json-serialized annotation are parsed successfully, and that
// malformed json in the annotation yields an error.
func TestGetAllocationPreferences(t *testing.T) {
	testCases := []struct {
		testname      string
		prefs         string
		obj           runtime.Object
		errorExpected bool
	}{
		{
			testname: "good preferences",
			prefs: `{"rebalance": true,
				"clusters": {
					"k8s-1": {"minReplicas": 10, "maxReplicas": 20, "weight": 2},
					"*": {"weight": 1}
				}}`,
			obj: &extensionsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-obj",
					Namespace: metav1.NamespaceDefault,
					SelfLink:  "/api/v1/namespaces/default/obj/test-obj",
				},
			},
			errorExpected: false,
		},
		{
			testname: "failed preferences",
			prefs:    `{`, // bad json
			obj: &extensionsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-obj",
					Namespace: metav1.NamespaceDefault,
					SelfLink:  "/api/v1/namespaces/default/obj/test-obj",
				},
			},
			errorExpected: true,
		},
	}
	// Attach each test case's preference string as an annotation on its object.
	for _, tc := range testCases {
		accessor, err := meta.Accessor(tc.obj)
		if err != nil {
			// Previously discarded with `_`; a failing accessor would make
			// the setup (and the assertions below) silently meaningless.
			t.Fatalf("%s: failed to get accessor for object: %v", tc.testname, err)
		}
		anno := accessor.GetAnnotations()
		if anno == nil {
			anno = make(map[string]string)
		}
		anno[TestPreferencesAnnotationKey] = tc.prefs
		// Set unconditionally so the mutation sticks even if GetAnnotations
		// ever returns a copy rather than the live map.
		accessor.SetAnnotations(anno)
	}
	// Exercise GetAllocationPreferences against each prepared object.
	for _, tc := range testCases {
		pref, err := GetAllocationPreferences(tc.obj, TestPreferencesAnnotationKey)
		if tc.errorExpected {
			assert.NotNil(t, err, tc.testname)
		} else {
			assert.Nil(t, err, tc.testname)
			assert.NotNil(t, pref, tc.testname)
		}
	}
}

View File

@ -48,6 +48,7 @@ federation/cmd/federation-apiserver
federation/cmd/federation-controller-manager
federation/cmd/genfeddocs
federation/cmd/kubefed
federation/pkg/federation-controller/util/replicapreferences
hack
hack/boilerplate/test
hack/cmd/teststale

View File

@ -220,7 +220,7 @@ func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName
return rs
}
func createAndUpdateFedRSWithPref(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterSlice, pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) *v1beta1.ReplicaSet {
func createAndUpdateFedRSWithPref(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterSlice, pref *federation.ReplicaAllocationPreferences, replicas int32, expect map[string]int32) *v1beta1.ReplicaSet {
framework.Logf("Replicas: %d, Preference: %#v", replicas, pref)
rs := newReplicaSet(nsName, FederationReplicaSetPrefix, replicas, pref)
rs = createReplicaSetOrFail(clientset, rs)
@ -274,18 +274,18 @@ func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clu
}
}
func generateFedRSPrefsWithWeight(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
func generateFedRSPrefsWithWeight(clusters fedframework.ClusterSlice) (pref *federation.ReplicaAllocationPreferences, replicas int32, expect map[string]int32) {
By("Generating replicaset preferences with weights")
clusterNames := extractClusterNames(clusters)
pref = &federation.FederatedReplicaSetPreferences{
Clusters: map[string]federation.ClusterReplicaSetPreferences{},
pref = &federation.ReplicaAllocationPreferences{
Clusters: map[string]federation.ClusterPreferences{},
}
replicas = 0
expect = map[string]int32{}
for i, clusterName := range clusterNames {
if i != 0 { // do not set weight for cluster[0] thus it should have no replicas scheduled
pref.Clusters[clusterName] = federation.ClusterReplicaSetPreferences{
pref.Clusters[clusterName] = federation.ClusterPreferences{
Weight: int64(i),
}
replicas += int32(i)
@ -295,11 +295,11 @@ func generateFedRSPrefsWithWeight(clusters fedframework.ClusterSlice) (pref *fed
return
}
func generateFedRSPrefsWithMin(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
func generateFedRSPrefsWithMin(clusters fedframework.ClusterSlice) (pref *federation.ReplicaAllocationPreferences, replicas int32, expect map[string]int32) {
By("Generating replicaset preferences with min replicas")
clusterNames := extractClusterNames(clusters)
pref = &federation.FederatedReplicaSetPreferences{
Clusters: map[string]federation.ClusterReplicaSetPreferences{
pref = &federation.ReplicaAllocationPreferences{
Clusters: map[string]federation.ClusterPreferences{
clusterNames[0]: {Weight: 100},
},
}
@ -308,7 +308,7 @@ func generateFedRSPrefsWithMin(clusters fedframework.ClusterSlice) (pref *federa
for i, clusterName := range clusterNames {
if i != 0 { // do not set weight and minReplicas for cluster[0] thus it should have no replicas scheduled
pref.Clusters[clusterName] = federation.ClusterReplicaSetPreferences{
pref.Clusters[clusterName] = federation.ClusterPreferences{
Weight: int64(1),
MinReplicas: int64(i + 2),
}
@ -322,11 +322,11 @@ func generateFedRSPrefsWithMin(clusters fedframework.ClusterSlice) (pref *federa
return
}
func generateFedRSPrefsWithMax(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
func generateFedRSPrefsWithMax(clusters fedframework.ClusterSlice) (pref *federation.ReplicaAllocationPreferences, replicas int32, expect map[string]int32) {
By("Generating replicaset preferences with max replicas")
clusterNames := extractClusterNames(clusters)
pref = &federation.FederatedReplicaSetPreferences{
Clusters: map[string]federation.ClusterReplicaSetPreferences{
pref = &federation.ReplicaAllocationPreferences{
Clusters: map[string]federation.ClusterPreferences{
clusterNames[0]: {Weight: 1},
},
}
@ -336,7 +336,7 @@ func generateFedRSPrefsWithMax(clusters fedframework.ClusterSlice) (pref *federa
for i, clusterName := range clusterNames {
if i != 0 { // do not set maxReplicas for cluster[0] thus replicas exceeds the total maxReplicas turned to cluster[0]
maxReplicas := int64(i)
pref.Clusters[clusterName] = federation.ClusterReplicaSetPreferences{
pref.Clusters[clusterName] = federation.ClusterPreferences{
Weight: int64(100),
MaxReplicas: &maxReplicas,
}
@ -350,18 +350,18 @@ func generateFedRSPrefsWithMax(clusters fedframework.ClusterSlice) (pref *federa
return
}
func updateFedRSPrefsRebalance(pref *federation.FederatedReplicaSetPreferences, rebalance bool) *federation.FederatedReplicaSetPreferences {
func updateFedRSPrefsRebalance(pref *federation.ReplicaAllocationPreferences, rebalance bool) *federation.ReplicaAllocationPreferences {
pref.Rebalance = rebalance
return pref
}
func generateFedRSPrefsForRebalancing(clusters fedframework.ClusterSlice) (pref1, pref2 *federation.FederatedReplicaSetPreferences, replicas int32, expect1, expect2 map[string]int32) {
func generateFedRSPrefsForRebalancing(clusters fedframework.ClusterSlice) (pref1, pref2 *federation.ReplicaAllocationPreferences, replicas int32, expect1, expect2 map[string]int32) {
By("Generating replicaset for rebalancing")
clusterNames := extractClusterNames(clusters)
replicas = 3
pref1 = &federation.FederatedReplicaSetPreferences{
Clusters: map[string]federation.ClusterReplicaSetPreferences{
pref1 = &federation.ReplicaAllocationPreferences{
Clusters: map[string]federation.ClusterPreferences{
clusterNames[0]: {Weight: 1},
clusterNames[1]: {Weight: 2},
},
@ -370,8 +370,8 @@ func generateFedRSPrefsForRebalancing(clusters fedframework.ClusterSlice) (pref1
clusterNames[0]: 1,
clusterNames[1]: 2,
}
pref2 = &federation.FederatedReplicaSetPreferences{
Clusters: map[string]federation.ClusterReplicaSetPreferences{
pref2 = &federation.ReplicaAllocationPreferences{
Clusters: map[string]federation.ClusterPreferences{
clusterNames[0]: {Weight: 2},
clusterNames[1]: {Weight: 1},
},
@ -478,7 +478,7 @@ func updateReplicaSetOrFail(clientset *fedclientset.Clientset, replicaset *v1bet
return newRS
}
func newReplicaSetObj(namespace string, replicas int32, pref *federation.FederatedReplicaSetPreferences) *v1beta1.ReplicaSet {
func newReplicaSetObj(namespace string, replicas int32, pref *federation.ReplicaAllocationPreferences) *v1beta1.ReplicaSet {
// When the tests are run in parallel, replicasets from different tests can
// collide with each other. Prevent that by creating a unique label and
// label selector for each created replica set.
@ -519,13 +519,13 @@ func newReplicaSetObj(namespace string, replicas int32, pref *federation.Federat
}
func newReplicaSet(namespace string, prefix string, replicas int32, pref *federation.FederatedReplicaSetPreferences) *v1beta1.ReplicaSet {
func newReplicaSet(namespace string, prefix string, replicas int32, pref *federation.ReplicaAllocationPreferences) *v1beta1.ReplicaSet {
rs := newReplicaSetObj(namespace, replicas, pref)
rs.GenerateName = prefix
return rs
}
func newReplicaSetWithName(namespace string, name string, replicas int32, pref *federation.FederatedReplicaSetPreferences) *v1beta1.ReplicaSet {
func newReplicaSetWithName(namespace string, name string, replicas int32, pref *federation.ReplicaAllocationPreferences) *v1beta1.ReplicaSet {
rs := newReplicaSetObj(namespace, replicas, pref)
rs.Name = name
return rs