mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-31 07:20:13 +00:00
Delete overlapping controllers, add clarity to docs
This commit is contained in:
parent
0b14613361
commit
f4fe40e2d1
@ -73,7 +73,7 @@ Pods created by a replication controller are intended to be fungible and semanti
|
||||
|
||||
The population of pods that a replication controller is monitoring is defined with a [label selector](labels.md#label-selectors), which creates a loosely coupled relationship between the controller and the pods controlled, in contrast to pods, which are more tightly coupled to their definition. We deliberately chose not to represent the set of pods controlled using a fixed-length array of pod specifications, because our experience is that approach increases complexity of management operations, for both clients and the system.
|
||||
|
||||
The replication controller should verify that the pods created from the specified template have labels that match its label selector. Though it isn't verified yet, you should also ensure that only one replication controller controls any given pod, by ensuring that the label selectors of replication controllers do not target overlapping sets.
|
||||
The replication controller should verify that the pods created from the specified template have labels that match its label selector. Though it isn't verified yet, you should also ensure that only one replication controller controls any given pod, by ensuring that the label selectors of replication controllers do not target overlapping sets. If you do end up with multiple controllers that have overlapping selectors, you will have to manage the deletion yourself with --cascade=false until there are no controllers with an overlapping superset of selectors.
|
||||
|
||||
Note that replication controllers may themselves have labels and would generally carry the labels their corresponding pods have in common, but these labels do not affect the behavior of the replication controllers.
|
||||
|
||||
|
@ -35,7 +35,7 @@ const (
|
||||
UpdateControllerAction = "update-replicationController"
|
||||
WatchControllerAction = "watch-replicationController"
|
||||
DeleteControllerAction = "delete-replicationController"
|
||||
ListControllerAction = "list-replicationControllers"
|
||||
ListControllerAction = "list-replicationController"
|
||||
CreateControllerAction = "create-replicationController"
|
||||
)
|
||||
|
||||
|
@ -209,6 +209,11 @@ func (rm *ReplicationManager) getPodControllers(pod *api.Pod) *api.ReplicationCo
|
||||
glog.V(4).Infof("No controllers found for pod %v, replication manager will avoid syncing", pod.Name)
|
||||
return nil
|
||||
}
|
||||
// In theory, overlapping controllers is user error. This sorting will not prevent
|
||||
// oscillation of replicas in all cases, eg:
|
||||
// rc1 (older rc): [(k1:v1)], replicas=1 rc2: [(k2:v2), (k1:v1)], replicas=2
|
||||
// pod: [(k1:v1)] will wake both rc1 and rc2, and we will sync rc1.
|
||||
// pod: [(k2:v2), (k1:v1)] will wake rc2 which creates a new replica.
|
||||
sort.Sort(overlappingControllers(controllers))
|
||||
return &controllers[0]
|
||||
}
|
||||
@ -281,6 +286,12 @@ func (rm *ReplicationManager) enqueueController(obj interface{}) {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: Handle overlapping controllers better. Either disallow them at admission time or
|
||||
// deterministically avoid syncing controllers that fight over pods. Currently, we only
|
||||
// ensure that the same controller is synced for a given pod. When we periodically relist
|
||||
// all controllers there will still be some replica instability. One way to handle this is
|
||||
// by querying the store for all controllers that this rc overlaps, as well as all
|
||||
// controllers that overlap this rc, and sorting them.
|
||||
rm.queue.Add(key)
|
||||
}
|
||||
|
||||
|
@ -18,11 +18,13 @@ package kubectl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
|
||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
|
||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
|
||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -82,23 +84,81 @@ type objInterface interface {
|
||||
Get(name string) (meta.Interface, error)
|
||||
}
|
||||
|
||||
// getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller.
|
||||
func getOverlappingControllers(c client.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) {
|
||||
rcs, err := c.List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting replication controllers: %v", err)
|
||||
}
|
||||
var matchingRCs []api.ReplicationController
|
||||
rcLabels := labels.Set(rc.Spec.Selector)
|
||||
for _, controller := range rcs.Items {
|
||||
newRCLabels := labels.Set(controller.Spec.Selector)
|
||||
if labels.SelectorFromSet(newRCLabels).Matches(rcLabels) || labels.SelectorFromSet(rcLabels).Matches(newRCLabels) {
|
||||
matchingRCs = append(matchingRCs, controller)
|
||||
}
|
||||
}
|
||||
return matchingRCs, nil
|
||||
}
|
||||
|
||||
func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) {
|
||||
rc := reaper.ReplicationControllers(namespace)
|
||||
scaler, err := ScalerFor("ReplicationController", NewScalerClient(*reaper))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
ctrl, err := rc.Get(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if timeout == 0 {
|
||||
rc, err := rc.Get(name)
|
||||
if err != nil {
|
||||
timeout = Timeout + time.Duration(10*ctrl.Spec.Replicas)*time.Second
|
||||
}
|
||||
|
||||
// The rc manager will try and detect all matching rcs for a pod's labels,
|
||||
// and only sync the oldest one. This means if we have a pod with labels
|
||||
// [(k1, v1)] and rcs with selectors [(k1, v2)] and [(k1, v1), (k2, v2)],
|
||||
// the rc manager will sync the older of the two rcs.
|
||||
//
|
||||
// If there are rcs with a superset of labels, eg:
|
||||
// deleting: (k1:v1), superset: (k2:v2, k1:v1)
|
||||
// - It isn't safe to delete the rc because there could be a pod with labels
|
||||
// (k1:v1) that isn't managed by the superset rc. We can't scale it down
|
||||
// either, because there could be a pod (k2:v2, k1:v1) that it deletes
|
||||
// causing a fight with the superset rc.
|
||||
// If there are rcs with a subset of labels, eg:
|
||||
// deleting: (k2:v2, k1:v1), subset: (k1: v1), superset: (k2:v2, k1:v1, k3:v3)
|
||||
// - It's safe to delete this rc without a scale down because all its pods
|
||||
// are being controlled by the subset rc.
|
||||
// In theory, creating overlapping controllers is user error, so the loop below
|
||||
// tries to account for this logic only in the common case, where we end up
|
||||
// with multiple rcs that have an exact match on selectors.
|
||||
|
||||
overlappingCtrls, err := getOverlappingControllers(rc, ctrl)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error getting replication controllers: %v", err)
|
||||
}
|
||||
exactMatchRCs := []api.ReplicationController{}
|
||||
overlapRCs := []string{}
|
||||
for _, overlappingRC := range overlappingCtrls {
|
||||
if len(overlappingRC.Spec.Selector) == len(ctrl.Spec.Selector) {
|
||||
exactMatchRCs = append(exactMatchRCs, overlappingRC)
|
||||
} else {
|
||||
overlapRCs = append(overlapRCs, overlappingRC.Name)
|
||||
}
|
||||
}
|
||||
if len(overlapRCs) > 0 {
|
||||
return "", fmt.Errorf(
|
||||
"Detected overlapping controllers for rc %v: %v, please manage deletion individually with --cascade=false.",
|
||||
ctrl.Name, strings.Join(overlapRCs, ","))
|
||||
}
|
||||
if len(exactMatchRCs) == 1 {
|
||||
// No overlapping controllers.
|
||||
retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
|
||||
waitForReplicas := NewRetryParams(reaper.pollInterval, timeout)
|
||||
if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil {
|
||||
return "", err
|
||||
}
|
||||
timeout = Timeout + time.Duration(10*rc.Spec.Replicas)*time.Second
|
||||
}
|
||||
retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
|
||||
waitForReplicas := NewRetryParams(reaper.pollInterval, timeout)
|
||||
if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := rc.Delete(name); err != nil {
|
||||
return "", err
|
||||
|
@ -27,14 +27,19 @@ import (
|
||||
)
|
||||
|
||||
func TestReplicationControllerStop(t *testing.T) {
|
||||
name := "foo"
|
||||
ns := "default"
|
||||
fake := testclient.NewSimpleFake(&api.ReplicationController{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: api.ReplicationControllerSpec{
|
||||
Replicas: 0,
|
||||
},
|
||||
})
|
||||
reaper := ReplicationControllerReaper{fake, time.Millisecond, time.Millisecond}
|
||||
name := "foo"
|
||||
s, err := reaper.Stop("default", name, 0, nil)
|
||||
s, err := reaper.Stop(ns, name, 0, nil)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
@ -42,12 +47,12 @@ func TestReplicationControllerStop(t *testing.T) {
|
||||
if s != expected {
|
||||
t.Errorf("expected %s, got %s", expected, s)
|
||||
}
|
||||
if len(fake.Actions) != 6 {
|
||||
t.Errorf("unexpected actions: %v, expected 6 actions (get, get, update, get, get, delete)", fake.Actions)
|
||||
if len(fake.Actions) != 7 {
|
||||
t.Errorf("unexpected actions: %v, expected 7 actions (get, list, get, update, get, get, delete)", fake.Actions)
|
||||
}
|
||||
for i, action := range []string{"get", "get", "update", "get", "get", "delete"} {
|
||||
for i, action := range []string{"get", "list", "get", "update", "get", "get", "delete"} {
|
||||
if fake.Actions[i].Action != action+"-replicationController" {
|
||||
t.Errorf("unexpected action: %v, expected %s-replicationController", fake.Actions[i], action)
|
||||
t.Errorf("unexpected action: %+v, expected %s-replicationController", fake.Actions[i], action)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user