Mirror of https://github.com/k3s-io/kubernetes.git
Do not fire InsufficientResourceError when there are intentional reasons.
Parent: cc294cfb7e
Commit: 2b311fefba
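
For orientation, here is a minimal, self-contained sketch of the short-circuit this change introduces. The types and reason strings below are simplified stand-ins, not the controller's real API; only the name hasIntentionalPredicatesReasons and the list of "intentional" predicate failures (node selector, host name, node labels, host ports, taints/tolerations) come from the diff that follows.

package main

import "fmt"

// Simplified stand-ins for the scheduler's predicate failure types; the real
// controller works with algorithm.PredicateFailureReason values.
type PredicateFailureReason interface {
	GetReason() string
}

type PredicateFailureError struct{ Reason string }

func (e *PredicateFailureError) GetReason() string { return e.Reason }

type InsufficientResourceError struct{ Resource string }

func (e *InsufficientResourceError) GetReason() string { return "insufficient " + e.Resource }

// intentionalReasons mirrors the cases listed in the diff: failures the operator
// caused deliberately through placement constraints.
var intentionalReasons = map[string]bool{
	"NodeSelectorNotMatch":      true,
	"PodNotMatchHostName":       true,
	"NodeLabelPresenceViolated": true,
	"PodNotFitsHostPorts":       true,
	"TaintsTolerationsNotMatch": true,
}

// hasIntentionalPredicatesReasons reports whether any failure reason is intentional,
// in which case the controller should return early and stay quiet.
func hasIntentionalPredicatesReasons(reasons []PredicateFailureReason) bool {
	for _, r := range reasons {
		if pf, ok := r.(*PredicateFailureError); ok && intentionalReasons[pf.Reason] {
			return true
		}
	}
	return false
}

func main() {
	reasons := []PredicateFailureReason{
		&InsufficientResourceError{Resource: "memory"},
		&PredicateFailureError{Reason: "NodeSelectorNotMatch"},
	}
	// Because the node-selector mismatch is intentional, the controller bails out
	// here and never reports the memory shortfall as a FailedPlacement event.
	if hasIntentionalPredicatesReasons(reasons) {
		fmt.Println("placement skipped intentionally; no event fired")
		return
	}
	fmt.Println("would walk the remaining reasons and emit events")
}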
@@ -1016,6 +1016,30 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 	return dsc.updateDaemonSetStatus(ds)
 }
 
+// hasIntentionalPredicatesReasons checks if any of the given predicate failure reasons
+// is intentional.
+func hasIntentionalPredicatesReasons(reasons []algorithm.PredicateFailureReason) bool {
+	for _, r := range reasons {
+		switch reason := r.(type) {
+		case *predicates.PredicateFailureError:
+			switch reason {
+			// intentional
+			case
+				predicates.ErrNodeSelectorNotMatch,
+				predicates.ErrPodNotMatchHostName,
+				predicates.ErrNodeLabelPresenceViolated,
+				// this one is probably intentional since it's a workaround for not having
+				// pod hard anti affinity.
+				predicates.ErrPodNotFitsHostPorts,
+				// DaemonSet is expected to respect taints and tolerations
+				predicates.ErrTaintsTolerationsNotMatch:
+				return true
+			}
+		}
+	}
+	return false
+}
+
 // nodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a
 // summary. Returned booleans are:
 // * wantToRun:
@@ -1105,6 +1129,12 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 		return false, false, false, err
 	}
 
+	// Return directly if there is any intentional predicate failure reason, so that daemonset controller skips
+	// checking other predicate failures, such as InsufficientResourceError and unintentional errors.
+	if hasIntentionalPredicatesReasons(reasons) {
+		return false, false, false, nil
+	}
+
 	for _, r := range reasons {
 		glog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
 		switch reason := r.(type) {
@@ -1113,20 +1143,8 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 			shouldSchedule = false
 		case *predicates.PredicateFailureError:
 			var emitEvent bool
-			// we try to partition predicates into two partitions here: intentional on the part of the operator and not.
 			switch reason {
-			// intentional
-			case
-				predicates.ErrNodeSelectorNotMatch,
-				predicates.ErrPodNotMatchHostName,
-				predicates.ErrNodeLabelPresenceViolated,
-				// this one is probably intentional since it's a workaround for not having
-				// pod hard anti affinity.
-				predicates.ErrPodNotFitsHostPorts,
-				// DaemonSet is expected to respect taints and tolerations
-				predicates.ErrTaintsTolerationsNotMatch:
-				wantToRun, shouldSchedule, shouldContinueRunning = false, false, false
-			// unintentional
+			// unintentional predicates reasons need to be fired out to event.
 			case
 				predicates.ErrDiskConflict,
 				predicates.ErrVolumeZoneConflict,
@@ -273,9 +273,10 @@ func (f *fakePodControl) DeletePod(namespace string, podID string, object runtim
 type daemonSetsController struct {
 	*DaemonSetsController
 
-	dsStore   cache.Store
-	podStore  cache.Store
-	nodeStore cache.Store
+	dsStore      cache.Store
+	podStore     cache.Store
+	nodeStore    cache.Store
+	fakeRecorder *record.FakeRecorder
 }
 
 func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, *fakePodControl, *fake.Clientset) {
@@ -289,7 +290,9 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
 		informerFactory.Core().V1().Nodes(),
 		clientset,
 	)
-	manager.eventRecorder = record.NewFakeRecorder(100)
+
+	fakeRecorder := record.NewFakeRecorder(100)
+	manager.eventRecorder = fakeRecorder
 
 	manager.podStoreSynced = alwaysReady
 	manager.nodeStoreSynced = alwaysReady
@@ -303,6 +306,7 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
 		informerFactory.Extensions().V1beta1().DaemonSets().Informer().GetStore(),
 		informerFactory.Core().V1().Pods().Informer().GetStore(),
 		informerFactory.Core().V1().Nodes().Informer().GetStore(),
+		fakeRecorder,
 	}, podControl, clientset
 }
 
@@ -486,6 +490,16 @@ func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
 	}
 }
 
+func resourcePodSpecWithoutNodeName(memory, cpu string) v1.PodSpec {
+	return v1.PodSpec{
+		Containers: []v1.Container{{
+			Resources: v1.ResourceRequirements{
+				Requests: allocatableResources(memory, cpu),
+			},
+		}},
+	}
+}
+
 func allocatableResources(memory, cpu string) v1.ResourceList {
 	return v1.ResourceList{
 		v1.ResourceMemory: resource.MustParse(memory),
@@ -533,6 +547,27 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
 	}
 }
 
+// DaemonSets should only place onto nodes with sufficient free resource and matched node selector
+func TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod(t *testing.T) {
+	podSpec := resourcePodSpecWithoutNodeName("50M", "75m")
+	ds := newDaemonSet("foo")
+	ds.Spec.Template.Spec = podSpec
+	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
+	manager, podControl, _ := newTestController(ds)
+	node1 := newNode("not-enough-resource", nil)
+	node1.Status.Allocatable = allocatableResources("10M", "20m")
+	node2 := newNode("enough-resource", simpleNodeLabel)
+	node2.Status.Allocatable = allocatableResources("100M", "200m")
+	manager.nodeStore.Add(node1)
+	manager.nodeStore.Add(node2)
+	manager.dsStore.Add(ds)
+	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	// we do not expect any event for insufficient free resource
+	if len(manager.fakeRecorder.Events) != 0 {
+		t.Fatalf("unexpected events, got %v, expected %v: %+v", len(manager.fakeRecorder.Events), 0, manager.fakeRecorder.Events)
+	}
+}
+
 func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
 	for _, strategy := range updateStrategies() {
 		podSpec := resourcePodSpec("too-much-mem", "75M", "75m")