Mirror of https://github.com/k3s-io/kubernetes.git
daemonset: Remove unnecessary error returns in strategy code
The nodeShouldRunDaemonPod method does not need to return an error because there is no scenario under which it can fail. Remove the error return path from its direct callers as well.
parent 9f296c133d
commit 8d8884a907
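In outline: nodeShouldRunDaemonPod shrinks from (bool, bool, error) to (bool, bool), and every caller drops a dead err branch. A minimal, self-contained sketch of the pattern with hypothetical names (not the controller code; the real call sites follow in the diff):

package main

import "fmt"

type daemonSet struct {
	name   string
	wanted bool
}

// shouldRun is the trimmed form of the check: two booleans and no error
// return, because there is no scenario under which the check can fail.
func shouldRun(ds daemonSet) (run, keepRunning bool) {
	return ds.wanted, true
}

func main() {
	for _, ds := range []daemonSet{{"ds-a", true}, {"ds-b", false}} {
		// Go's if-with-initializer keeps run scoped to a single
		// statement, mirroring the new call site in addNode below.
		if run, _ := shouldRun(ds); run {
			fmt.Println("enqueue", ds.name)
		}
	}
}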
@@ -642,11 +642,7 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
 	}
 	node := obj.(*v1.Node)
 	for _, ds := range dsList {
-		shouldRun, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
-		if err != nil {
-			continue
-		}
-		if shouldRun {
+		if shouldRun, _ := dsc.nodeShouldRunDaemonPod(node, ds); shouldRun {
 			dsc.enqueueDaemonSet(ds)
 		}
 	}
@@ -704,14 +700,8 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 	}
 	// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
 	for _, ds := range dsList {
-		oldShouldRun, oldShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(oldNode, ds)
-		if err != nil {
-			continue
-		}
-		currentShouldRun, currentShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(curNode, ds)
-		if err != nil {
-			continue
-		}
+		oldShouldRun, oldShouldContinueRunning := dsc.nodeShouldRunDaemonPod(oldNode, ds)
+		currentShouldRun, currentShouldContinueRunning := dsc.nodeShouldRunDaemonPod(curNode, ds)
 		if (oldShouldRun != currentShouldRun) || (oldShouldContinueRunning != currentShouldContinueRunning) {
 			dsc.enqueueDaemonSet(ds)
 		}
@@ -807,13 +797,9 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
 	nodeToDaemonPods map[string][]*v1.Pod,
 	ds *apps.DaemonSet,
 	hash string,
-) (nodesNeedingDaemonPods, podsToDelete []string, err error) {
-
-	shouldRun, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
-	if err != nil {
-		return
-	}
+) (nodesNeedingDaemonPods, podsToDelete []string) {
+
+	shouldRun, shouldContinueRunning := dsc.nodeShouldRunDaemonPod(node, ds)
 	daemonPods, exists := nodeToDaemonPods[node.Name]
 
 	switch {
@@ -918,7 +904,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
 		}
 	}
 
-	return nodesNeedingDaemonPods, podsToDelete, nil
+	return nodesNeedingDaemonPods, podsToDelete
 }
 
 // manage manages the scheduling and running of Pods of ds on nodes.
@@ -936,14 +922,9 @@ func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, nodeList []*v1.Node,
 	// pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
 	var nodesNeedingDaemonPods, podsToDelete []string
 	for _, node := range nodeList {
-		nodesNeedingDaemonPodsOnNode, podsToDeleteOnNode, err := dsc.podsShouldBeOnNode(
+		nodesNeedingDaemonPodsOnNode, podsToDeleteOnNode := dsc.podsShouldBeOnNode(
 			node, nodeToDaemonPods, ds, hash)
 
-		if err != nil {
-			klog.V(0).Infof("DEBUG: sync of node %s for ds %s failed: %v", node.Name, ds.Name, err)
-			continue
-		}
-
 		nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, nodesNeedingDaemonPodsOnNode...)
 		podsToDelete = append(podsToDelete, podsToDeleteOnNode...)
 	}
@@ -1124,11 +1105,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, nodeL
 	var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable int
 	now := dsc.failedPodsBackoff.Clock.Now()
 	for _, node := range nodeList {
-		shouldRun, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
-		if err != nil {
-			return err
-		}
-
+		shouldRun, _ := dsc.nodeShouldRunDaemonPod(node, ds)
 		scheduled := len(nodeToDaemonPods[node.Name]) > 0
 
 		if shouldRun {
@@ -1272,39 +1249,39 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 // * shouldContinueRunning:
 //     Returns true when a daemonset should continue running on a node if a daemonset pod is already
 //     running on that node.
-func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.DaemonSet) (bool, bool, error) {
+func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.DaemonSet) (bool, bool) {
 	pod := NewPod(ds, node.Name)
 
 	// If the daemon set specifies a node name, check that it matches with node.Name.
 	if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) {
-		return false, false, nil
+		return false, false
 	}
 
 	taints := node.Spec.Taints
 	fitsNodeName, fitsNodeAffinity, fitsTaints := Predicates(pod, node, taints)
 	if !fitsNodeName || !fitsNodeAffinity {
-		return false, false, nil
+		return false, false
 	}
 
 	if !fitsTaints {
 		// Scheduled daemon pods should continue running if they tolerate NoExecute taint.
-		_, untolerated := v1helper.FindMatchingUntoleratedTaint(taints, pod.Spec.Tolerations, func(t *v1.Taint) bool {
+		_, hasUntoleratedTaint := v1helper.FindMatchingUntoleratedTaint(taints, pod.Spec.Tolerations, func(t *v1.Taint) bool {
 			return t.Effect == v1.TaintEffectNoExecute
 		})
-		return false, !untolerated, nil
+		return false, !hasUntoleratedTaint
 	}
 
-	return true, true, nil
+	return true, true
 }
 
 // Predicates checks if a DaemonSet's pod can run on a node.
 func Predicates(pod *v1.Pod, node *v1.Node, taints []v1.Taint) (fitsNodeName, fitsNodeAffinity, fitsTaints bool) {
 	fitsNodeName = len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == node.Name
 	fitsNodeAffinity = pluginhelper.PodMatchesNodeSelectorAndAffinityTerms(pod, node)
-	_, untolerated := v1helper.FindMatchingUntoleratedTaint(taints, pod.Spec.Tolerations, func(t *v1.Taint) bool {
+	_, hasUntoleratedTaint := v1helper.FindMatchingUntoleratedTaint(taints, pod.Spec.Tolerations, func(t *v1.Taint) bool {
 		return t.Effect == v1.TaintEffectNoExecute || t.Effect == v1.TaintEffectNoSchedule
 	})
-	fitsTaints = !untolerated
+	fitsTaints = !hasUntoleratedTaint
 	return
 }
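The subtle case in nodeShouldRunDaemonPod above is the !fitsTaints branch: an untolerated NoSchedule taint blocks new daemon pods but lets an existing one keep running, while an untolerated NoExecute taint also evicts it. A self-contained sketch of that decision, assuming only the k8s.io/api module; the loop is a simplified stand-in for v1helper.FindMatchingUntoleratedTaint, which lives in the kubernetes repo:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// untoleratedNoExecute reports whether any NoExecute taint lacks a matching
// toleration, mirroring the FindMatchingUntoleratedTaint call above that
// filters on v1.TaintEffectNoExecute.
func untoleratedNoExecute(taints []v1.Taint, tolerations []v1.Toleration) bool {
	for i := range taints {
		if taints[i].Effect != v1.TaintEffectNoExecute {
			continue
		}
		tolerated := false
		for j := range tolerations {
			if tolerations[j].ToleratesTaint(&taints[i]) {
				tolerated = true
				break
			}
		}
		if !tolerated {
			return true
		}
	}
	return false
}

func main() {
	noSchedule := []v1.Taint{{Key: "maintenance", Effect: v1.TaintEffectNoSchedule}}
	noExecute := []v1.Taint{{Key: "maintenance", Effect: v1.TaintEffectNoExecute}}

	// In the !fitsTaints branch, shouldRun is always false;
	// shouldContinueRunning is false only for an untolerated NoExecute taint.
	fmt.Println(false, !untoleratedNoExecute(noSchedule, nil)) // false true
	fmt.Println(false, !untoleratedNoExecute(noExecute, nil))  // false false
}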
@@ -1983,7 +1983,6 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		nodeUnschedulable                bool
 		ds                               *apps.DaemonSet
 		shouldRun, shouldContinueRunning bool
-		err                              error
 	}{
 		{
 			predicateName: "ShouldRunDaemonPod",
@@ -2262,7 +2261,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 				manager.podNodeIndex.Add(p)
 			}
 			c.ds.Spec.UpdateStrategy = *strategy
-			shouldRun, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds)
+			shouldRun, shouldContinueRunning := manager.nodeShouldRunDaemonPod(node, c.ds)
 
 			if shouldRun != c.shouldRun {
 				t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldRun, shouldRun)
@@ -2270,9 +2269,6 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			if shouldContinueRunning != c.shouldContinueRunning {
 				t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldContinueRunning: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldContinueRunning, shouldContinueRunning)
 			}
-			if err != c.err {
-				t.Errorf("[%v] strategy: %v, predicateName: %v expected err: %v, got: %v", i, c.predicateName, c.ds.Spec.UpdateStrategy.Type, c.err, err)
-			}
 		}
 	}
 }
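With the error return gone, the test table above loses its err field and the loop asserts only the two booleans. A minimal standalone sketch of that test shape (hypothetical nodeShouldRun predicate, not the kubernetes test file):

package main

import "testing"

// nodeShouldRun stands in for manager.nodeShouldRunDaemonPod in the real test.
func nodeShouldRun(fits bool) (shouldRun, shouldContinueRunning bool) {
	return fits, true
}

func TestNodeShouldRun(t *testing.T) {
	cases := []struct {
		predicateName                    string
		fits                             bool
		shouldRun, shouldContinueRunning bool
		// no err field: the method can no longer fail
	}{
		{"ShouldRunDaemonPod", true, true, true},
		{"DoesNotFit", false, false, true},
	}
	for i, c := range cases {
		shouldRun, shouldContinueRunning := nodeShouldRun(c.fits)
		if shouldRun != c.shouldRun {
			t.Errorf("[%v] %v: expected shouldRun: %v, got: %v", i, c.predicateName, c.shouldRun, shouldRun)
		}
		if shouldContinueRunning != c.shouldContinueRunning {
			t.Errorf("[%v] %v: expected shouldContinueRunning: %v, got: %v", i, c.predicateName, c.shouldContinueRunning, shouldContinueRunning)
		}
	}
}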
@@ -521,10 +521,7 @@ func (dsc *DaemonSetsController) updatedDesiredNodeCounts(ds *apps.DaemonSet, no
 	var desiredNumberScheduled int
 	for i := range nodeList {
 		node := nodeList[i]
-		wantToRun, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
-		if err != nil {
-			return -1, -1, err
-		}
+		wantToRun, _ := dsc.nodeShouldRunDaemonPod(node, ds)
 		if !wantToRun {
 			continue
 		}