Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 11:21:47 +00:00)
Merge pull request #23223 from mikedanese/dont-sync
Auto commit by PR queue bot
This commit is contained in: b94b3942f1
@@ -65,8 +65,9 @@ const (
 // DaemonSetsController is responsible for synchronizing DaemonSet objects stored
 // in the system with actual running pods.
 type DaemonSetsController struct {
-	kubeClient clientset.Interface
-	podControl controller.PodControlInterface
+	kubeClient    clientset.Interface
+	eventRecorder record.EventRecorder
+	podControl    controller.PodControlInterface
 
 	// An dsc is temporarily suspended after creating/deleting these many replicas.
 	// It resumes normal action after observing the watch events for them.
@@ -105,7 +106,8 @@ func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod contro
 	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")})
 
 	dsc := &DaemonSetsController{
-		kubeClient: kubeClient,
+		kubeClient:    kubeClient,
+		eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemonset-controller"}),
 		podControl: controller.RealPodControl{
 			KubeClient: kubeClient,
 			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "daemon-set"}),
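The two hunks above give the controller its own eventRecorder field, wired up in NewDaemonSetsController, so code paths beyond podControl (notably the new empty-selector guard in syncDaemonSet below) can emit events. A minimal runnable sketch of that wiring, using hypothetical stand-in types (recorder, logRecorder, controller) rather than the real record.EventRecorder API:

// Sketch only: simplified stand-ins, not the Kubernetes client-go types.
package main

import "fmt"

// recorder is a hypothetical stand-in for record.EventRecorder.
type recorder interface {
	Eventf(object, eventType, reason, messageFmt string, args ...interface{})
}

// logRecorder just prints events; the real recorder sends them to an event sink.
type logRecorder struct{ component string }

func (r logRecorder) Eventf(object, eventType, reason, messageFmt string, args ...interface{}) {
	fmt.Printf("[%s] %s %s %s: %s\n", r.component, object, eventType, reason, fmt.Sprintf(messageFmt, args...))
}

// controller mirrors the shape of DaemonSetsController after this change:
// the recorder is a first-class field, injected by the constructor.
type controller struct {
	eventRecorder recorder
}

func newController() *controller {
	return &controller{eventRecorder: logRecorder{component: "daemonset-controller"}}
}

func (c *controller) sync(name string) {
	// With the recorder on the struct, sync can report problems directly.
	c.eventRecorder.Eventf(name, "Warning", "SelectingAll", "This controller is selecting all pods. Skipping sync.")
}

func main() {
	newController().sync("daemonset/foo")
}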
@@ -131,7 +133,7 @@ func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod contro
 		AddFunc: func(obj interface{}) {
 			ds := obj.(*extensions.DaemonSet)
 			glog.V(4).Infof("Adding daemon set %s", ds.Name)
-			dsc.enqueueDaemonSet(obj)
+			dsc.enqueueDaemonSet(ds)
 		},
 		UpdateFunc: func(old, cur interface{}) {
 			oldDS := old.(*extensions.DaemonSet)
@@ -152,12 +154,12 @@ func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod contro
 			}
 
 			glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
-			dsc.enqueueDaemonSet(cur)
+			dsc.enqueueDaemonSet(curDS)
 		},
 		DeleteFunc: func(obj interface{}) {
 			ds := obj.(*extensions.DaemonSet)
 			glog.V(4).Infof("Deleting daemon set %s", ds.Name)
-			dsc.enqueueDaemonSet(obj)
+			dsc.enqueueDaemonSet(ds)
 		},
 	},
 )
@@ -246,10 +248,10 @@ func (dsc *DaemonSetsController) enqueueAllDaemonSets() {
 	}
 }
 
-func (dsc *DaemonSetsController) enqueueDaemonSet(obj interface{}) {
-	key, err := controller.KeyFunc(obj)
+func (dsc *DaemonSetsController) enqueueDaemonSet(ds *extensions.DaemonSet) {
+	key, err := controller.KeyFunc(ds)
 	if err != nil {
-		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
+		glog.Errorf("Couldn't get key for object %+v: %v", ds, err)
 		return
 	}
 
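The three hunks above narrow enqueueDaemonSet from interface{} to *extensions.DaemonSet. The informer callbacks already type-assert the object they receive, so they now pass the asserted value, and handing the function anything else becomes a compile-time error rather than a convention. A standalone sketch of the idea, with a hypothetical daemonSet type in place of the real one:

// Sketch only: simplified stand-in types, not the Kubernetes API.
package main

import "fmt"

type daemonSet struct{ Name string }

// Before: accepts anything, so a caller could pass the raw interface{} it
// received from an informer callback and mistakes surfaced only at runtime.
func enqueueLoose(obj interface{}) {
	fmt.Printf("enqueued %v\n", obj)
}

// After: the compiler rejects any argument that is not a *daemonSet.
func enqueueTyped(ds *daemonSet) {
	fmt.Printf("enqueued %s\n", ds.Name)
}

func main() {
	var obj interface{} = &daemonSet{Name: "foo"} // what an informer hands over
	ds := obj.(*daemonSet)                        // the type assertion callbacks do

	enqueueLoose(obj) // compiles even if obj were the wrong type
	enqueueTyped(ds)  // enqueueTyped(obj) would not compile
}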
@@ -623,6 +625,12 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 	}
 	ds := obj.(*extensions.DaemonSet)
 
+	everything := unversioned.LabelSelector{}
+	if reflect.DeepEqual(ds.Spec.Selector, &everything) {
+		dsc.eventRecorder.Eventf(ds, api.EventTypeWarning, "SelectingAll", "This controller is selecting all pods. Skipping sync.")
+		return nil
+	}
+
 	// Don't process a daemon set until all its creations and deletions have been processed.
 	// For example if daemon set foo asked for 3 new daemon pods in the previous call to manage,
 	// then we do not want to call manage on foo until the daemon pods have been created.
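This hunk is the heart of the change: syncDaemonSet compares the DaemonSet's selector against a zero-value unversioned.LabelSelector with reflect.DeepEqual and skips the sync entirely when they match, because an empty selector selects all pods and a sync could then delete pods the DaemonSet never created. A minimal sketch of the guard, with a hypothetical labelSelector type standing in for the real one:

// Sketch only: a stand-in labelSelector, not unversioned.LabelSelector.
package main

import (
	"fmt"
	"reflect"
)

type labelSelector struct {
	MatchLabels map[string]string
}

// shouldSync mirrors the guard: a selector deeply equal to the zero value
// selects everything, which is treated as a misconfiguration.
func shouldSync(selector *labelSelector) bool {
	everything := labelSelector{}
	if reflect.DeepEqual(selector, &everything) {
		// Deleting "excess" pods under an all-matching selector would
		// delete pods this controller never created, so refuse to sync.
		return false
	}
	return true
}

func main() {
	fmt.Println(shouldSync(&labelSelector{}))                                         // false
	fmt.Println(shouldSync(&labelSelector{MatchLabels: map[string]string{"a": "b"}})) // true
}

reflect.DeepEqual is used rather than == because LabelSelector contains maps and slices, which are not comparable with the equality operator.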
@@ -167,7 +167,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
 }
 
-// DaemonSets without node selectors should launch pods on every node.
+// DaemonSets should do nothing if there aren't any nodes
 func TestNoNodesDoesNothing(t *testing.T) {
 	manager, podControl := newTestController()
 	ds := newDaemonSet("foo")
@@ -175,7 +175,8 @@ func TestNoNodesDoesNothing(t *testing.T) {
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
 }
 
-// DaemonSets without node selectors should launch pods on every node.
+// DaemonSets without node selectors should launch on a single node in a
+// single node cluster.
 func TestOneNodeDaemonLaunchesPod(t *testing.T) {
 	manager, podControl := newTestController()
 	manager.nodeStore.Add(newNode("only-node", nil))
@@ -350,6 +351,41 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
 }
 
+// DaemonSetController should not sync DaemonSets with empty pod selectors.
+//
+// issue https://github.com/kubernetes/kubernetes/pull/23223
+func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
+	manager, podControl := newTestController()
+	manager.nodeStore.Store.Add(newNode("node1", nil))
+	// Create a pod not controlled by a daemonset.
+	manager.podStore.Add(&api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			Labels:    map[string]string{"bang": "boom"},
+			Namespace: api.NamespaceDefault,
+		},
+		Spec: api.PodSpec{
+			NodeName: "node1",
+		},
+	})
+
+	// Create a misconfigured DaemonSet. An empty pod selector is invalid but could happen
+	// if we upgrade and make a backwards incompatible change.
+	//
+	// The node selector matches no nodes which mimics the behavior of kubectl delete.
+	//
+	// The DaemonSet should not schedule pods and should not delete scheduled pods in
+	// this case even though its empty pod selector matches all pods. The DaemonSetController
+	// should detect this misconfiguration and choose not to sync the DaemonSet. We should
+	// not observe a deletion of the pod on node1.
+	ds := newDaemonSet("foo")
+	ls := unversioned.LabelSelector{}
+	ds.Spec.Selector = &ls
+	ds.Spec.Template.Spec.NodeSelector = map[string]string{"foo": "bar"}
+	manager.dsStore.Add(ds)
+
+	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+}
+
 // Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
 func TestDealsWithExistingPods(t *testing.T) {
 	manager, podControl := newTestController()
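The new test pins the behavior down: an unmanaged pod on node1 plus a DaemonSet with an empty selector must produce zero creations and zero deletions. A self-contained sketch of the same test shape, with hypothetical stand-in types (labelSelector, fakePodControl) in place of the real fixtures like newTestController and syncAndValidateDaemonSets:

// Sketch only: stand-in types in a hypothetical _test.go file, not the
// actual daemon controller test fixtures.
package main

import (
	"reflect"
	"testing"
)

type labelSelector struct{ MatchLabels map[string]string }

// fakePodControl records deletions so the test can assert on them,
// in the spirit of the real fake pod control.
type fakePodControl struct{ deleted []string }

func (f *fakePodControl) deletePod(name string) { f.deleted = append(f.deleted, name) }

// syncDaemonSet mimics the fixed controller: an empty selector aborts the sync
// before any pods are managed.
func syncDaemonSet(selector *labelSelector, pods []string, pc *fakePodControl) {
	if reflect.DeepEqual(selector, &labelSelector{}) {
		return // misconfigured: selecting all pods, skip sync
	}
	for _, p := range pods {
		pc.deletePod(p) // simplified stand-in for the "manage" step
	}
}

func TestEmptySelectorDeletesNothing(t *testing.T) {
	pc := &fakePodControl{}
	syncDaemonSet(&labelSelector{}, []string{"unrelated-pod-on-node1"}, pc)
	if len(pc.deleted) != 0 {
		t.Fatalf("expected 0 deletions, got %d", len(pc.deleted))
	}
}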