Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 03:41:45 +00:00
addressed comments

parent 7d604c318c
commit 041e5b5560
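
This commit renames the DaemonSet controller's scheduling booleans for consistency: shouldSchedule, wantToRun, and the test expectation field shouldCreate all become shouldRun, and the exported helper PodFitsNode becomes Predicates. Judging by the function names in the hunks, the changes span the controller (pkg/controller/daemon/daemon_controller.go), its unit tests (daemon_controller_test.go), and the DaemonSet e2e helper canScheduleOnNode (test/e2e/apps/daemon_set.go).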
pkg/controller/daemon/daemon_controller.go

@@ -623,11 +623,11 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
 	}
 	node := obj.(*v1.Node)
 	for _, ds := range dsList {
-		shouldSchedule, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
+		shouldRun, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
 		if err != nil {
 			continue
 		}
-		if shouldSchedule {
+		if shouldRun {
 			dsc.enqueueDaemonSet(ds)
 		}
 	}
@@ -685,15 +685,15 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 	}
 	// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
 	for _, ds := range dsList {
-		oldShouldSchedule, oldShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(oldNode, ds)
+		oldShouldRun, oldShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(oldNode, ds)
 		if err != nil {
 			continue
 		}
-		currentShouldSchedule, currentShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(curNode, ds)
+		currentShouldRun, currentShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(curNode, ds)
 		if err != nil {
 			continue
 		}
-		if (oldShouldSchedule != currentShouldSchedule) || (oldShouldContinueRunning != currentShouldContinueRunning) {
+		if (oldShouldRun != currentShouldRun) || (oldShouldContinueRunning != currentShouldContinueRunning) {
 			dsc.enqueueDaemonSet(ds)
 		}
 	}
@@ -789,7 +789,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
 	ds *apps.DaemonSet,
 ) (nodesNeedingDaemonPods, podsToDelete []string, err error) {

-	shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
+	shouldRun, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
 	if err != nil {
 		return
 	}
@@ -797,7 +797,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
 	daemonPods, exists := nodeToDaemonPods[node.Name]

 	switch {
-	case shouldSchedule && !exists:
+	case shouldRun && !exists:
 		// If daemon pod is supposed to be running on node, but isn't, create daemon pod.
 		nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, node.Name)
 	case shouldContinueRunning:
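
Aside, not part of the commit: a minimal, self-contained sketch of how the (shouldRun, shouldContinueRunning) pair drives the switch above. The decide helper and the podExists flag are hypothetical stand-ins for the controller's real bookkeeping.

package main

import "fmt"

// decide maps the two booleans from nodeShouldRunDaemonPod, plus whether a
// daemon pod already exists on the node, onto a reconciliation action.
func decide(shouldRun, shouldContinueRunning, podExists bool) string {
	switch {
	case shouldRun && !podExists:
		// Pod should run on the node but is missing: create it.
		return "create daemon pod"
	case shouldContinueRunning:
		// An existing pod may keep running; with none present this branch
		// is a no-op (failed-pod handling elided).
		return "keep daemon pod"
	case podExists:
		// Pod is no longer wanted on this node: delete it.
		return "delete daemon pod"
	default:
		return "nothing to do"
	}
}

func main() {
	fmt.Println(decide(true, true, false))  // create daemon pod
	fmt.Println(decide(false, true, true))  // keep daemon pod
	fmt.Println(decide(false, false, true)) // delete daemon pod
}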
@@ -1054,14 +1054,14 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, nodeL

 	var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable int
 	for _, node := range nodeList {
-		wantToRun, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
+		shouldRun, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
 		if err != nil {
 			return err
 		}

 		scheduled := len(nodeToDaemonPods[node.Name]) > 0

-		if wantToRun {
+		if shouldRun {
 			desiredNumberScheduled++
 			if scheduled {
 				currentNumberScheduled++
@@ -1195,8 +1195,8 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {

 // nodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a
 // summary. Returned booleans are:
-// * shouldSchedule:
-// Returns true when a daemonset should be scheduled to a node if a daemonset pod is not already
+// * shouldRun:
+// Returns true when a daemonset should run on the node if a daemonset pod is not already
 // running on that node.
 // * shouldContinueRunning:
 // Returns true when a daemonset should continue running on a node if a daemonset pod is already
@@ -1217,24 +1217,24 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
 		return false, false, err
 	}

-	fitsNodeName, fitsNodeAffinity, fitsTaints := PodFitsNode(pod, node, taints)
+	fitsNodeName, fitsNodeAffinity, fitsTaints := Predicates(pod, node, taints)
 	if !fitsNodeName || !fitsNodeAffinity {
 		return false, false, nil
 	}

 	if !fitsTaints {
-		fitsNoExecuteTaint := v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, func(t *v1.Taint) bool {
+		// Scheduled daemon pods should continue running if they tolerate NoExecute taint.
+		shouldContinueRunning := v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, func(t *v1.Taint) bool {
 			return t.Effect == v1.TaintEffectNoExecute
 		})
-
-		return false, fitsNoExecuteTaint, nil
+		return false, shouldContinueRunning, nil
 	}

 	return true, true, nil
 }

-// PodFitsNode Checks if a DaemonSet's pod can be scheduled on a node.
-func PodFitsNode(pod *v1.Pod, node *v1.Node, taints []v1.Taint) (fitsNodeName, fitsNodeAffinity, fitsTaints bool) {
+// Predicates checks if a DaemonSet's pod can run on a node.
+func Predicates(pod *v1.Pod, node *v1.Node, taints []v1.Taint) (fitsNodeName, fitsNodeAffinity, fitsTaints bool) {
 	fitsNodeName = len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == node.Name
 	fitsNodeAffinity = pluginhelper.PodMatchesNodeSelectorAndAffinityTerms(pod, node)
 	fitsTaints = v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, func(t *v1.Taint) bool {
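
Aside, not part of the commit: the taint handling above separates scheduling-time fit from run-time eviction. A sketch of that combination logic, taking the three booleans from Predicates plus a precomputed toleratesNoExecute flag as inputs; names are simplified, and the real function computes toleratesNoExecute inline via v1helper.TolerationsTolerateTaintsWithFilter.

package main

import "fmt"

// summarize mirrors the tail of nodeShouldRunDaemonPod after this commit.
func summarize(fitsNodeName, fitsNodeAffinity, fitsTaints, toleratesNoExecute bool) (shouldRun, shouldContinueRunning bool) {
	if !fitsNodeName || !fitsNodeAffinity {
		// Wrong node entirely: neither start a pod here nor keep one running.
		return false, false
	}
	if !fitsTaints {
		// An untolerated taint blocks new pods, but only NoExecute taints
		// evict running ones, so an existing pod may continue.
		return false, toleratesNoExecute
	}
	return true, true
}

func main() {
	// Blocked by a NoSchedule-only taint, yet survives eviction checks.
	fmt.Println(summarize(true, true, false, true)) // false true
}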
pkg/controller/daemon/daemon_controller_test.go

@@ -1539,17 +1539,16 @@ func setDaemonSetCritical(ds *apps.DaemonSet) {
 }

 func TestNodeShouldRunDaemonPod(t *testing.T) {
-	var shouldCreate, shouldContinueRunning bool
-	shouldCreate = true
-	shouldContinueRunning = true
+	shouldRun := true
+	shouldContinueRunning := true
 	cases := []struct {
 		predicateName     string
 		podsOnNode        []*v1.Pod
 		nodeCondition     []v1.NodeCondition
 		nodeUnschedulable bool
 		ds                *apps.DaemonSet
-		wantToRun, shouldCreate, shouldContinueRunning bool
+		shouldRun, shouldContinueRunning bool
 		err               error
 	}{
 		{
 			predicateName: "ShouldRunDaemonPod",
@@ -1564,7 +1563,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			shouldCreate:          true,
+			shouldRun:             true,
 			shouldContinueRunning: true,
 		},
 		{
@@ -1580,7 +1579,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			shouldCreate:          shouldCreate,
+			shouldRun:             shouldRun,
 			shouldContinueRunning: true,
 		},
 		{
@@ -1596,7 +1595,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			shouldCreate:          false,
+			shouldRun:             false,
 			shouldContinueRunning: false,
 		},
 		{
@@ -1629,7 +1628,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			shouldCreate:          shouldCreate,
+			shouldRun:             shouldRun,
 			shouldContinueRunning: shouldContinueRunning,
 		},
 		{
@@ -1657,7 +1656,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			shouldCreate:          shouldCreate, // This is because we don't care about the resource constraints any more and let default scheduler handle it.
+			shouldRun:             shouldRun, // This is because we don't care about the resource constraints any more and let default scheduler handle it.
 			shouldContinueRunning: true,
 		},
 		{
@@ -1685,7 +1684,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			shouldCreate:          true,
+			shouldRun:             true,
 			shouldContinueRunning: true,
 		},
 		{
@@ -1703,7 +1702,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			shouldCreate:          false,
+			shouldRun:             false,
 			shouldContinueRunning: false,
 		},
 		{
@@ -1721,7 +1720,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			shouldCreate:          true,
+			shouldRun:             true,
 			shouldContinueRunning: true,
 		},
 		{
@@ -1755,7 +1754,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			shouldCreate:          false,
+			shouldRun:             false,
 			shouldContinueRunning: false,
 		},
 		{
@@ -1789,7 +1788,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			shouldCreate:          true,
+			shouldRun:             true,
 			shouldContinueRunning: true,
 		},
 		{
@@ -1806,7 +1805,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 				},
 			},
 			nodeUnschedulable:     true,
-			shouldCreate:          true,
+			shouldRun:             true,
 			shouldContinueRunning: true,
 		},
 	}
@@ -1830,8 +1829,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			c.ds.Spec.UpdateStrategy = *strategy
 			shouldRun, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds)

-			if shouldRun != c.shouldCreate {
-				t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldCreate, shouldRun)
+			if shouldRun != c.shouldRun {
+				t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldRun, shouldRun)
 			}
 			if shouldContinueRunning != c.shouldContinueRunning {
 				t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldContinueRunning: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldContinueRunning, shouldContinueRunning)
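
Aside, not part of the commit: the test above is table-driven, with each case carrying its expected (shouldRun, shouldContinueRunning) pair. A toy, self-contained version of the same pattern follows; toyNodeShouldRun is a hypothetical stand-in for nodeShouldRunDaemonPod, condensed to two inputs.

package daemon

import "testing"

// toyNodeShouldRun condenses the controller's decision to two inputs: whether
// the pod fits the node's taints, and whether it tolerates NoExecute taints.
func toyNodeShouldRun(fitsTaints, toleratesNoExecute bool) (shouldRun, shouldContinueRunning bool) {
	if !fitsTaints {
		return false, toleratesNoExecute
	}
	return true, true
}

func TestToyNodeShouldRun(t *testing.T) {
	cases := []struct {
		name                             string
		fitsTaints, toleratesNoExecute   bool
		shouldRun, shouldContinueRunning bool
	}{
		{"fits", true, false, true, true},
		{"tainted but tolerates NoExecute", false, true, false, true},
		{"tainted, no tolerations", false, false, false, false},
	}
	for i, c := range cases {
		shouldRun, shouldContinueRunning := toyNodeShouldRun(c.fitsTaints, c.toleratesNoExecute)
		if shouldRun != c.shouldRun {
			t.Errorf("[%d] %s: expected shouldRun: %v, got: %v", i, c.name, c.shouldRun, shouldRun)
		}
		if shouldContinueRunning != c.shouldContinueRunning {
			t.Errorf("[%d] %s: expected shouldContinueRunning: %v, got: %v", i, c.name, c.shouldContinueRunning, shouldContinueRunning)
		}
	}
}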
test/e2e/apps/daemon_set.go

@@ -693,7 +693,7 @@ func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool {
 		framework.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
 		return false
 	}
-	fitsNodeName, fitsNodeAffinity, fitsTaints := daemon.PodFitsNode(newPod, &node, taints)
+	fitsNodeName, fitsNodeAffinity, fitsTaints := daemon.Predicates(newPod, &node, taints)
 	return fitsNodeName && fitsNodeAffinity && fitsTaints
 }

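
Aside, not part of the commit: a sketch of caller-side use of the renamed daemon.Predicates, in the spirit of canScheduleOnNode above. The package name and import paths are assumptions inferred from the identifiers in the diff.

package e2e

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/controller/daemon" // assumed import path
)

// fits reports whether a DaemonSet pod can run on the node: all three
// predicate results must hold, as in the e2e helper above.
func fits(pod *v1.Pod, node *v1.Node, taints []v1.Taint) bool {
	fitsNodeName, fitsNodeAffinity, fitsTaints := daemon.Predicates(pod, node, taints)
	return fitsNodeName && fitsNodeAffinity && fitsTaints
}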