DaemonSet: Use ControllerRefManager to adopt/orphan.
commit 421e0bbd83
parent 2217363845
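What the change amounts to: getNodesToDaemonPods now lists every pod in the DaemonSet's namespace, runs the list through a PodControllerRefManager so the controller adopts matching orphans and releases pods that no longer match its selector, and the resulting node-to-pods map is computed once in syncDaemonSet and threaded into manage, rollingUpdate and updateDaemonSetStatus. The standalone sketch below illustrates only the claim decision and the grouping by node; the types and the claimPods helper are simplified stand-ins for illustration, not the real v1.Pod / metav1.OwnerReference / controller.PodControllerRefManager API.

package main

import "fmt"

// Simplified stand-ins for the real API types used by the controller.
type OwnerRef struct {
    UID        string
    Controller bool
}

type Pod struct {
    Name     string
    NodeName string
    Labels   map[string]string
    Owner    *OwnerRef // controller reference, if any
}

// claimPods mirrors the ClaimPods idea: keep pods whose ControllerRef already
// points at us, adopt matching orphans, and release pods we own that no longer
// match the selector. Adoption/release here just mutates the stub in place.
func claimPods(ownerUID string, selector map[string]string, pods []*Pod) []*Pod {
    matches := func(p *Pod) bool {
        for k, v := range selector {
            if p.Labels[k] != v {
                return false
            }
        }
        return true
    }
    var claimed []*Pod
    for _, p := range pods {
        switch {
        case p.Owner != nil && p.Owner.UID == ownerUID:
            if matches(p) {
                claimed = append(claimed, p) // already ours, keep it
            } else {
                p.Owner = nil // release: selector no longer matches
            }
        case p.Owner == nil && matches(p):
            p.Owner = &OwnerRef{UID: ownerUID, Controller: true} // adopt orphan
            claimed = append(claimed, p)
        }
    }
    return claimed
}

func main() {
    pods := []*Pod{
        {Name: "owned", NodeName: "node-0", Labels: map[string]string{"app": "ds"}, Owner: &OwnerRef{UID: "ds-uid", Controller: true}},
        {Name: "orphan", NodeName: "node-1", Labels: map[string]string{"app": "ds"}},
        {Name: "other", NodeName: "node-1", Labels: map[string]string{"app": "other"}},
    }
    // Group claimed pods by node, as getNodesToDaemonPods does after claiming.
    byNode := map[string][]*Pod{}
    for _, p := range claimPods("ds-uid", map[string]string{"app": "ds"}, pods) {
        byNode[p.NodeName] = append(byNode[p.NodeName], p)
    }
    fmt.Println(len(byNode["node-0"]), len(byNode["node-1"])) // 1 1
}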
@@ -73,6 +73,7 @@ go_test(
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/runtime",
         "//vendor:k8s.io/apimachinery/pkg/util/intstr",
+        "//vendor:k8s.io/apimachinery/pkg/util/uuid",
         "//vendor:k8s.io/apiserver/pkg/storage/names",
         "//vendor:k8s.io/apiserver/pkg/util/feature",
         "//vendor:k8s.io/client-go/testing",
@@ -465,32 +465,37 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 }
 
 // getNodesToDaemonSetPods returns a map from nodes to daemon pods (corresponding to ds) running on the nodes.
+// This also reconciles ControllerRef by adopting/orphaning.
+// Note that returned Pods are pointers to objects in the cache.
+// If you want to modify one, you need to deep-copy it first.
 func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*v1.Pod, error) {
-    nodeToDaemonPods := make(map[string][]*v1.Pod)
     selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
     if err != nil {
         return nil, err
     }
-    daemonPods, err := dsc.podLister.Pods(ds.Namespace).List(selector)
+    // List all pods to include those that don't match the selector anymore but
+    // have a ControllerRef pointing to this controller.
+    pods, err := dsc.podLister.Pods(ds.Namespace).List(labels.Everything())
     if err != nil {
-        return nodeToDaemonPods, err
+        return nil, err
     }
-    for i := range daemonPods {
-        // TODO: Do we need to copy here?
-        daemonPod := &(*daemonPods[i])
-        nodeName := daemonPod.Spec.NodeName
-        nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], daemonPod)
+    // Use ControllerRefManager to adopt/orphan as needed.
+    cm := controller.NewPodControllerRefManager(dsc.podControl, ds, selector, ControllerKind)
+    claimedPods, err := cm.ClaimPods(pods)
+    if err != nil {
+        return nil, err
+    }
+    // Group Pods by Node name.
+    nodeToDaemonPods := make(map[string][]*v1.Pod)
+    for _, pod := range claimedPods {
+        nodeName := pod.Spec.NodeName
+        nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], pod)
     }
     return nodeToDaemonPods, nil
 }
 
-func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
-    // Find out which nodes are running the daemon pods selected by ds.
-    nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
-    if err != nil {
-        return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
-    }
+func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) error {
 
     // For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
     // pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
     nodeList, err := dsc.nodeLister.List(labels.Everything())
@@ -589,15 +594,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelet
     for i := 0; i < createDiff; i++ {
         go func(ix int) {
             defer createWait.Done()
-            isController := true
-            controllerRef := &metav1.OwnerReference{
-                APIVersion: controllerKind.GroupVersion().String(),
-                Kind:       controllerKind.Kind,
-                Name:       ds.Name,
-                UID:        ds.UID,
-                Controller: &isController,
-            }
-            if err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, &template, ds, controllerRef); err != nil {
+            if err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, &template, ds, newControllerRef(ds)); err != nil {
                 glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
                 dsc.expectations.CreationObserved(dsKey)
                 errCh <- err
@@ -676,12 +673,8 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds
     return updateErr
 }
 
-func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet) error {
+func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) error {
     glog.V(4).Infof("Updating daemon set status")
-    nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
-    if err != nil {
-        return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
-    }
 
     nodeList, err := dsc.nodeLister.List(labels.Everything())
     if err != nil {
@@ -758,6 +751,12 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
         return nil
     }
 
+    // Find out which nodes are running the daemon pods controlled by ds.
+    nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
+    if err != nil {
+        return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
+    }
+
     // Don't process a daemon set until all its creations and deletions have been processed.
     // For example if daemon set foo asked for 3 new daemon pods in the previous call to manage,
     // then we do not want to call manage on foo until the daemon pods have been created.
@@ -767,7 +766,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
     }
     dsNeedsSync := dsc.expectations.SatisfiedExpectations(dsKey)
     if dsNeedsSync && ds.DeletionTimestamp == nil {
-        if err := dsc.manage(ds); err != nil {
+        if err := dsc.manage(ds, nodeToDaemonPods); err != nil {
             return err
         }
     }
@@ -776,14 +775,14 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
     if dsNeedsSync && ds.DeletionTimestamp == nil {
         switch ds.Spec.UpdateStrategy.Type {
         case extensions.RollingUpdateDaemonSetStrategyType:
-            err = dsc.rollingUpdate(ds)
+            err = dsc.rollingUpdate(ds, nodeToDaemonPods)
         }
         if err != nil {
             return err
         }
     }
 
-    return dsc.updateDaemonSetStatus(ds)
+    return dsc.updateDaemonSetStatus(ds, nodeToDaemonPods)
 }
 
 // nodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a
@@ -972,6 +971,18 @@ func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorit
     return len(predicateFails) == 0, predicateFails, nil
 }
 
+// newControllerRef creates a ControllerRef pointing to the given DaemonSet.
+func newControllerRef(ds *extensions.DaemonSet) *metav1.OwnerReference {
+    isController := true
+    return &metav1.OwnerReference{
+        APIVersion: ControllerKind.GroupVersion().String(),
+        Kind:       ControllerKind.Kind,
+        Name:       ds.Name,
+        UID:        ds.UID,
+        Controller: &isController,
+    }
+}
+
 // byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
 type byCreationTimestamp []*extensions.DaemonSet
 
@@ -24,6 +24,7 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/apiserver/pkg/storage/names"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     core "k8s.io/client-go/testing"
@@ -80,6 +81,7 @@ func newDaemonSet(name string) *extensions.DaemonSet {
     return &extensions.DaemonSet{
         TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
         ObjectMeta: metav1.ObjectMeta{
+            UID:       uuid.NewUUID(),
             Name:      name,
             Namespace: metav1.NamespaceDefault,
         },
@@ -130,7 +132,7 @@ func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]
     }
 }
 
-func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
+func newPod(podName string, nodeName string, label map[string]string, ds *extensions.DaemonSet) *v1.Pod {
     pod := &v1.Pod{
         TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
         ObjectMeta: metav1.ObjectMeta{
@@ -152,18 +154,21 @@ func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
         },
     }
     pod.Name = names.SimpleNameGenerator.GenerateName(podName)
+    if ds != nil {
+        pod.OwnerReferences = []metav1.OwnerReference{*newControllerRef(ds)}
+    }
     return pod
 }
 
-func addPods(podStore cache.Store, nodeName string, label map[string]string, number int) {
+func addPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) {
     for i := 0; i < number; i++ {
-        podStore.Add(newPod(fmt.Sprintf("%s-", nodeName), nodeName, label))
+        podStore.Add(newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds))
     }
 }
 
-func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, number int) {
+func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) {
     for i := 0; i < number; i++ {
-        pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label)
+        pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds)
         pod.Status = v1.PodStatus{Phase: v1.PodFailed}
         podStore.Add(pod)
     }
@@ -618,13 +623,13 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 // Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
 func TestDealsWithExistingPods(t *testing.T) {
     manager, podControl, _ := newTestController()
-    addNodes(manager.nodeStore, 0, 5, nil)
-    addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 1)
-    addPods(manager.podStore, "node-2", simpleDaemonSetLabel, 2)
-    addPods(manager.podStore, "node-3", simpleDaemonSetLabel, 5)
-    addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, 2)
     ds := newDaemonSet("foo")
     manager.dsStore.Add(ds)
+    addNodes(manager.nodeStore, 0, 5, nil)
+    addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1)
+    addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 2)
+    addPods(manager.podStore, "node-3", simpleDaemonSetLabel, ds, 5)
+    addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2)
     syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5)
 }
 
@@ -642,34 +647,34 @@ func TestSelectorDaemonLaunchesPods(t *testing.T) {
 // Daemon with node selector should delete pods from nodes that do not satisfy selector.
 func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
     manager, podControl, _ := newTestController()
+    ds := newDaemonSet("foo")
+    ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
+    manager.dsStore.Add(ds)
     addNodes(manager.nodeStore, 0, 5, nil)
     addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
-    addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, 2)
-    addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 3)
-    addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, 1)
-    addPods(manager.podStore, "node-4", simpleDaemonSetLabel, 1)
-    daemon := newDaemonSet("foo")
-    daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
-    manager.dsStore.Add(daemon)
-    syncAndValidateDaemonSets(t, manager, daemon, podControl, 5, 4)
+    addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, ds, 2)
+    addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3)
+    addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 1)
+    addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1)
+    syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 4)
 }
 
 // DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
 func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
     manager, podControl, _ := newTestController()
-    addNodes(manager.nodeStore, 0, 5, nil)
-    addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
-    addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
-    addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 3)
-    addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, 2)
-    addPods(manager.podStore, "node-2", simpleDaemonSetLabel, 4)
-    addPods(manager.podStore, "node-6", simpleDaemonSetLabel, 13)
-    addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, 4)
-    addPods(manager.podStore, "node-9", simpleDaemonSetLabel, 1)
-    addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, 1)
     ds := newDaemonSet("foo")
     ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
     manager.dsStore.Add(ds)
+    addNodes(manager.nodeStore, 0, 5, nil)
+    addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
+    addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
+    addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3)
+    addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 2)
+    addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 4)
+    addPods(manager.podStore, "node-6", simpleDaemonSetLabel, ds, 13)
+    addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, ds, 4)
+    addPods(manager.podStore, "node-9", simpleDaemonSetLabel, ds, 1)
+    addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1)
     syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20)
 }
 
@@ -756,7 +761,7 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
 }
 
 func TestNumberReadyStatus(t *testing.T) {
-    daemon := newDaemonSet("foo")
+    ds := newDaemonSet("foo")
     manager, podControl, clientset := newTestController()
     var updated *extensions.DaemonSet
     clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -769,31 +774,31 @@ func TestNumberReadyStatus(t *testing.T) {
         return false, nil, nil
     })
     addNodes(manager.nodeStore, 0, 2, simpleNodeLabel)
-    addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
-    addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 1)
-    manager.dsStore.Add(daemon)
+    addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
+    addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1)
+    manager.dsStore.Add(ds)
 
-    syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
+    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
     if updated.Status.NumberReady != 0 {
         t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
     }
 
-    selector, _ := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
-    daemonPods, _ := manager.podLister.Pods(daemon.Namespace).List(selector)
+    selector, _ := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
+    daemonPods, _ := manager.podLister.Pods(ds.Namespace).List(selector)
     for _, pod := range daemonPods {
         condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
         pod.Status.Conditions = append(pod.Status.Conditions, condition)
     }
 
-    syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
+    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
     if updated.Status.NumberReady != 2 {
         t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
     }
 }
 
 func TestObservedGeneration(t *testing.T) {
-    daemon := newDaemonSet("foo")
-    daemon.Generation = 1
+    ds := newDaemonSet("foo")
+    ds.Generation = 1
     manager, podControl, clientset := newTestController()
     var updated *extensions.DaemonSet
     clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -807,12 +812,12 @@ func TestObservedGeneration(t *testing.T) {
     })
 
     addNodes(manager.nodeStore, 0, 1, simpleNodeLabel)
-    addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
-    manager.dsStore.Add(daemon)
+    addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
+    manager.dsStore.Add(ds)
 
-    syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
-    if updated.Status.ObservedGeneration != daemon.Generation {
-        t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, daemon.Generation, updated.Status.ObservedGeneration)
+    syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+    if updated.Status.ObservedGeneration != ds.Generation {
+        t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration)
     }
 }
 
@@ -832,11 +837,11 @@ func TestDaemonKillFailedPods(t *testing.T) {
     for _, test := range tests {
         t.Logf("test case: %s\n", test.test)
         manager, podControl, _ := newTestController()
-        addNodes(manager.nodeStore, 0, 1, nil)
-        addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, test.numFailedPods)
-        addPods(manager.podStore, "node-0", simpleDaemonSetLabel, test.numNormalPods)
         ds := newDaemonSet("foo")
         manager.dsStore.Add(ds)
+        addNodes(manager.nodeStore, 0, 1, nil)
+        addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numFailedPods)
+        addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numNormalPods)
         syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes)
     }
 }
@@ -1183,3 +1188,59 @@ func TestUpdateNode(t *testing.T) {
         }
     }
 }
+
+func TestGetNodesToDaemonPods(t *testing.T) {
+    manager, _, _ := newTestController()
+    ds := newDaemonSet("foo")
+    ds2 := newDaemonSet("foo2")
+    manager.dsStore.Add(ds)
+    manager.dsStore.Add(ds2)
+    addNodes(manager.nodeStore, 0, 2, nil)
+
+    // These pods should be returned.
+    wantedPods := []*v1.Pod{
+        newPod("matching-owned-0-", "node-0", simpleDaemonSetLabel, ds),
+        newPod("matching-orphan-0-", "node-0", simpleDaemonSetLabel, nil),
+        newPod("matching-owned-1-", "node-1", simpleDaemonSetLabel, ds),
+        newPod("matching-orphan-1-", "node-1", simpleDaemonSetLabel, nil),
+    }
+    failedPod := newPod("matching-owned-failed-pod-1-", "node-1", simpleDaemonSetLabel, ds)
+    failedPod.Status = v1.PodStatus{Phase: v1.PodFailed}
+    wantedPods = append(wantedPods, failedPod)
+    for _, pod := range wantedPods {
+        manager.podStore.Add(pod)
+    }
+
+    // These pods should be ignored.
+    ignoredPods := []*v1.Pod{
+        newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds),
+        newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil),
+        newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2),
+    }
+    for _, pod := range ignoredPods {
+        manager.podStore.Add(pod)
+    }
+
+    nodesToDaemonPods, err := manager.getNodesToDaemonPods(ds)
+    if err != nil {
+        t.Fatalf("getNodesToDaemonPods() error: %v", err)
+    }
+    gotPods := map[string]bool{}
+    for node, pods := range nodesToDaemonPods {
+        for _, pod := range pods {
+            if pod.Spec.NodeName != node {
+                t.Errorf("pod %v grouped into %v but belongs in %v", pod.Name, node, pod.Spec.NodeName)
+            }
+            gotPods[pod.Name] = true
+        }
+    }
+    for _, pod := range wantedPods {
+        if !gotPods[pod.Name] {
+            t.Errorf("expected pod %v but didn't get it", pod.Name)
+        }
+        delete(gotPods, pod.Name)
+    }
+    for podName := range gotPods {
+        t.Errorf("unexpected pod %v was returned", podName)
+    }
+}
@@ -31,11 +31,9 @@ import (
 
 // rollingUpdate deletes old daemon set pods making sure that no more than
 // ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable
-func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet) error {
-    newPods, oldPods, err := dsc.getAllDaemonSetPods(ds)
-    allPods := append(oldPods, newPods...)
-
-    maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, allPods)
+func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) error {
+    _, oldPods, err := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods)
+    maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeToDaemonPods)
     if err != nil {
         return fmt.Errorf("Couldn't get unavailable numbers: %v", err)
     }
@@ -67,29 +65,23 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet) error {
     return utilerrors.NewAggregate(errors)
 }
 
-func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet) ([]*v1.Pod, []*v1.Pod, error) {
+func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) ([]*v1.Pod, []*v1.Pod, error) {
     var newPods []*v1.Pod
     var oldPods []*v1.Pod
 
-    selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
-    if err != nil {
-        return newPods, oldPods, err
-    }
-    daemonPods, err := dsc.podLister.Pods(ds.Namespace).List(selector)
-    if err != nil {
-        return newPods, oldPods, fmt.Errorf("Couldn't get list of pods for daemon set %#v: %v", ds, err)
-    }
-    for _, pod := range daemonPods {
-        if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod) {
-            newPods = append(newPods, pod)
-        } else {
-            oldPods = append(oldPods, pod)
+    for _, pods := range nodeToDaemonPods {
+        for _, pod := range pods {
+            if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod) {
+                newPods = append(newPods, pod)
+            } else {
+                oldPods = append(oldPods, pod)
+            }
         }
     }
     return newPods, oldPods, nil
 }
 
-func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, allPods []*v1.Pod) (int, int, error) {
+func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
     glog.V(4).Infof("Getting unavailable numbers")
     // TODO: get nodeList once in syncDaemonSet and pass it to other functions
     nodeList, err := dsc.nodeLister.List(labels.Everything())
@@ -97,11 +89,6 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet,
         return -1, -1, fmt.Errorf("couldn't get list of nodes during rolling update of daemon set %#v: %v", ds, err)
     }
 
-    nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
-    if err != nil {
-        return -1, -1, fmt.Errorf("couldn't get node to daemon pods mapping for daemon set %#v: %v", ds, err)
-    }
-
     var numUnavailable, desiredNumberScheduled int
     for i := range nodeList {
         node := nodeList[i]