mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-09-06 11:42:14 +00:00
Allow admin user to explicitly unschedule the node
Setting Unschedulable on the node will not touch any existing pods on the node but will block scheduling of new pods on the node.
This commit is contained in:
@@ -301,7 +301,7 @@ func (s *NodeController) PopulateAddresses(nodes *api.NodeList) (*api.NodeList,
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// UpdateNodesStatus performs health checking for given list of nodes.
|
||||
// UpdateNodesStatus performs various condition checks for given list of nodes.
|
||||
func (s *NodeController) UpdateNodesStatus(nodes *api.NodeList) *api.NodeList {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(nodes.Items))
|
||||
@@ -330,21 +330,14 @@ func (s *NodeController) updateNodeInfo(node *api.Node) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DoCheck performs health checking for given node.
|
||||
// DoCheck performs various condition checks for given node.
|
||||
func (s *NodeController) DoCheck(node *api.Node) []api.NodeCondition {
|
||||
var conditions []api.NodeCondition
|
||||
|
||||
// Check Condition: NodeReady. TODO: More node conditions.
|
||||
oldReadyCondition := s.getCondition(node, api.NodeReady)
|
||||
newReadyCondition := s.checkNodeReady(node)
|
||||
if oldReadyCondition != nil && oldReadyCondition.Status == newReadyCondition.Status {
|
||||
// If node status doesn't change, transition time is same as last time.
|
||||
newReadyCondition.LastTransitionTime = oldReadyCondition.LastTransitionTime
|
||||
} else {
|
||||
// Set transition time to Now() if node status changes or `oldReadyCondition` is nil, which
|
||||
// happens only when the node is checked for the first time.
|
||||
newReadyCondition.LastTransitionTime = util.Now()
|
||||
}
|
||||
s.updateLastTransitionTime(oldReadyCondition, newReadyCondition)
|
||||
|
||||
if newReadyCondition.Status != api.ConditionFull {
|
||||
// Node is not ready for this probe, we need to check if pods need to be deleted.
|
||||
@@ -355,12 +348,48 @@ func (s *NodeController) DoCheck(node *api.Node) []api.NodeCondition {
|
||||
s.deletePods(node.Name)
|
||||
}
|
||||
}
|
||||
|
||||
conditions = append(conditions, *newReadyCondition)
|
||||
|
||||
// Check Condition: NodeSchedulable
|
||||
oldSchedulableCondition := s.getCondition(node, api.NodeSchedulable)
|
||||
newSchedulableCondition := s.checkNodeSchedulable(node)
|
||||
s.updateLastTransitionTime(oldSchedulableCondition, newSchedulableCondition)
|
||||
conditions = append(conditions, *newSchedulableCondition)
|
||||
|
||||
return conditions
|
||||
}
|
||||
|
||||
// updateLastTransitionTime updates LastTransitionTime for the newCondition based on oldCondition.
|
||||
func (s *NodeController) updateLastTransitionTime(oldCondition, newCondition *api.NodeCondition) {
|
||||
if oldCondition != nil && oldCondition.Status == newCondition.Status {
|
||||
// If node status doesn't change, transition time is same as last time.
|
||||
newCondition.LastTransitionTime = oldCondition.LastTransitionTime
|
||||
} else {
|
||||
// Set transition time to Now() if node status changes or `oldCondition` is nil, which
|
||||
// happens only when the node is checked for the first time.
|
||||
newCondition.LastTransitionTime = util.Now()
|
||||
}
|
||||
}
|
||||
|
||||
// checkNodeSchedulable checks node schedulable condition, without transition timestamp set.
|
||||
func (s *NodeController) checkNodeSchedulable(node *api.Node) *api.NodeCondition {
|
||||
if node.Spec.Unschedulable {
|
||||
return &api.NodeCondition{
|
||||
Type: api.NodeSchedulable,
|
||||
Status: api.ConditionNone,
|
||||
Reason: "User marked unschedulable during node create/update",
|
||||
LastProbeTime: util.Now(),
|
||||
}
|
||||
} else {
|
||||
return &api.NodeCondition{
|
||||
Type: api.NodeSchedulable,
|
||||
Status: api.ConditionFull,
|
||||
Reason: "Node is schedulable by default",
|
||||
LastProbeTime: util.Now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkNodeReady checks raw node ready condition, without transition timestamp set.
|
||||
func (s *NodeController) checkNodeReady(node *api.Node) *api.NodeCondition {
|
||||
switch status, err := s.kubeletClient.HealthCheck(node.Name); {
|
||||
|
@@ -550,13 +550,15 @@ func TestSyncCloudDeletePods(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestHealthCheckNode(t *testing.T) {
|
||||
func TestNodeConditionsCheck(t *testing.T) {
|
||||
table := []struct {
|
||||
node *api.Node
|
||||
fakeKubeletClient *FakeKubeletClient
|
||||
expectedConditions []api.NodeCondition
|
||||
}{
|
||||
{
|
||||
// Node with default spec and kubelet /healthz probe returns success.
|
||||
// Expected node condition to be ready and marked schedulable.
|
||||
node: newNode("node0"),
|
||||
fakeKubeletClient: &FakeKubeletClient{
|
||||
Status: probe.Success,
|
||||
@@ -568,10 +570,17 @@ func TestHealthCheckNode(t *testing.T) {
|
||||
Status: api.ConditionFull,
|
||||
Reason: "Node health check succeeded: kubelet /healthz endpoint returns ok",
|
||||
},
|
||||
{
|
||||
Type: api.NodeSchedulable,
|
||||
Status: api.ConditionFull,
|
||||
Reason: "Node is schedulable by default",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
node: newNode("node0"),
|
||||
// User specified node as schedulable and kubelet /healthz probe returns failure with no error.
|
||||
// Expected node condition to be not ready and marked schedulable.
|
||||
node: &api.Node{ObjectMeta: api.ObjectMeta{Name: "node0"}, Spec: api.NodeSpec{Unschedulable: false}},
|
||||
fakeKubeletClient: &FakeKubeletClient{
|
||||
Status: probe.Failure,
|
||||
Err: nil,
|
||||
@@ -582,10 +591,17 @@ func TestHealthCheckNode(t *testing.T) {
|
||||
Status: api.ConditionNone,
|
||||
Reason: "Node health check failed: kubelet /healthz endpoint returns not ok",
|
||||
},
|
||||
{
|
||||
Type: api.NodeSchedulable,
|
||||
Status: api.ConditionFull,
|
||||
Reason: "Node is schedulable by default",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
node: newNode("node0"),
|
||||
// User specified node as unschedulable and kubelet /healthz probe returns failure with some error.
|
||||
// Expected node condition to be not ready and marked unschedulable.
|
||||
node: &api.Node{ObjectMeta: api.ObjectMeta{Name: "node0"}, Spec: api.NodeSpec{Unschedulable: true}},
|
||||
fakeKubeletClient: &FakeKubeletClient{
|
||||
Status: probe.Failure,
|
||||
Err: errors.New("Error"),
|
||||
@@ -596,6 +612,11 @@ func TestHealthCheckNode(t *testing.T) {
|
||||
Status: api.ConditionUnknown,
|
||||
Reason: "Node health check error: Error",
|
||||
},
|
||||
{
|
||||
Type: api.NodeSchedulable,
|
||||
Status: api.ConditionNone,
|
||||
Reason: "User marked unschedulable during node create/update",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -663,11 +684,13 @@ func TestSyncNodeStatusTransitionTime(t *testing.T) {
|
||||
expectedTransitionTimeChange bool
|
||||
}{
|
||||
{
|
||||
// Existing node is healthy, current probe is healthy too.
|
||||
// Existing node is healthy, current probe is healthy too.
|
||||
// Existing node is schedulable, again explicitly mark node as schedulable.
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
Existing: []*api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node0"},
|
||||
Spec: api.NodeSpec{Unschedulable: false},
|
||||
Status: api.NodeStatus{
|
||||
Conditions: []api.NodeCondition{
|
||||
{
|
||||
@@ -676,6 +699,12 @@ func TestSyncNodeStatusTransitionTime(t *testing.T) {
|
||||
Reason: "Node health check succeeded: kubelet /healthz endpoint returns ok",
|
||||
LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
{
|
||||
Type: api.NodeSchedulable,
|
||||
Status: api.ConditionFull,
|
||||
Reason: "Node is schedulable by default",
|
||||
LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -689,11 +718,13 @@ func TestSyncNodeStatusTransitionTime(t *testing.T) {
|
||||
expectedTransitionTimeChange: false,
|
||||
},
|
||||
{
|
||||
// Existing node is healthy, current probe is unhealthy.
|
||||
// Existing node is healthy, current probe is unhealthy.
|
||||
// Existing node is schedulable, mark node as unschedulable.
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
Existing: []*api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node0"},
|
||||
Spec: api.NodeSpec{Unschedulable: true},
|
||||
Status: api.NodeStatus{
|
||||
Conditions: []api.NodeCondition{
|
||||
{
|
||||
@@ -702,6 +733,12 @@ func TestSyncNodeStatusTransitionTime(t *testing.T) {
|
||||
Reason: "Node health check succeeded: kubelet /healthz endpoint returns ok",
|
||||
LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
{
|
||||
Type: api.NodeSchedulable,
|
||||
Status: api.ConditionFull,
|
||||
Reason: "Node is schedulable by default",
|
||||
LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -865,7 +902,7 @@ func TestSyncNodeStatusDeletePods(t *testing.T) {
|
||||
expectedActions []client.FakeAction
|
||||
}{
|
||||
{
|
||||
// Existing node is healthy, current probe is healthy too.
|
||||
// Existing node is healthy, current probe is healthy too.
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
Existing: []*api.Node{
|
||||
{
|
||||
@@ -894,7 +931,7 @@ func TestSyncNodeStatusDeletePods(t *testing.T) {
|
||||
expectedActions: nil,
|
||||
},
|
||||
{
|
||||
// Existing node is healthy, current probe is unhealthy, i.e. node just becomes unhealthy.
|
||||
// Existing node is healthy, current probe is unhealthy, i.e. node just becomes unhealthy.
|
||||
// Do not delete pods.
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
Existing: []*api.Node{
|
||||
@@ -924,7 +961,7 @@ func TestSyncNodeStatusDeletePods(t *testing.T) {
|
||||
expectedActions: nil,
|
||||
},
|
||||
{
|
||||
// Existing node unhealthy, current probe is unhealthy. Node is still within grace period.
|
||||
// Existing node unhealthy, current probe is unhealthy. Node is still within grace period.
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
Existing: []*api.Node{
|
||||
{
|
||||
@@ -956,7 +993,7 @@ func TestSyncNodeStatusDeletePods(t *testing.T) {
|
||||
expectedActions: nil,
|
||||
},
|
||||
{
|
||||
// Existing node unhealthy, current probe is unhealthy. Node exceeds grace period.
|
||||
// Existing node unhealthy, current probe is unhealthy. Node exceeds grace period.
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
Existing: []*api.Node{
|
||||
{
|
||||
@@ -1036,6 +1073,11 @@ func TestSyncNodeStatus(t *testing.T) {
|
||||
Status: api.ConditionFull,
|
||||
Reason: "Node health check succeeded: kubelet /healthz endpoint returns ok",
|
||||
},
|
||||
{
|
||||
Type: api.NodeSchedulable,
|
||||
Status: api.ConditionFull,
|
||||
Reason: "Node is schedulable by default",
|
||||
},
|
||||
},
|
||||
Addresses: []api.NodeAddress{
|
||||
{Type: api.NodeLegacyHostIP, Address: "1.2.3.4"},
|
||||
@@ -1051,6 +1093,11 @@ func TestSyncNodeStatus(t *testing.T) {
|
||||
Status: api.ConditionFull,
|
||||
Reason: "Node health check succeeded: kubelet /healthz endpoint returns ok",
|
||||
},
|
||||
{
|
||||
Type: api.NodeSchedulable,
|
||||
Status: api.ConditionFull,
|
||||
Reason: "Node is schedulable by default",
|
||||
},
|
||||
},
|
||||
Addresses: []api.NodeAddress{
|
||||
{Type: api.NodeLegacyHostIP, Address: "1.2.3.4"},
|
||||
|
Reference in New Issue
Block a user