Merge pull request #4585 from pravisankar/deactivate-node

Allow admin user to explicitly unschedule the node

commit ae162fded4
@@ -1,7 +1,7 @@
 {
   "swaggerVersion": "1.2",
   "apiVersion": "",
-  "basePath": "https://127.0.0.1:6443",
+  "basePath": "127.0.0.1:6443",
   "resourcePath": "/api",
   "apis": [
     {
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
 {
   "swaggerVersion": "1.2",
   "apiVersion": "",
-  "basePath": "https://127.0.0.1:6443",
+  "basePath": "127.0.0.1:6443",
   "resourcePath": "/version",
   "apis": [
     {
docs/node.md (29 lines changed)
@@ -38,13 +38,16 @@ must have appropriate conditions, see below.
 
 ### Node Condition
-Node Condition describes the conditions of `Running` nodes. Current valid
-conditions are `NodeReachable` and `NodeReady`. In the future, we plan to
-add more. `NodeReachable` means the node can be reached within the cluster.
-`NodeReady` means the kubelet returns StatusOK for HTTP health check. Different
-condition provides different level of understanding for node health. Kubernetes
-will make a comprehensive scheduling decision based on the information. Node
-condition is represented as a json object. For example, the following conditions
-mean the node is reachable from its cluster, but not ready to accept pods:
+Node Condition describes the conditions of `Running` nodes. Current valid
+conditions are `NodeReachable`, `NodeReady` and `NodeSchedulable`. In the
+future, we plan to add more. `NodeReachable` means the node can be reached
+within the cluster. `NodeReady` means the kubelet returns StatusOK for HTTP
+health check. `NodeSchedulable` means the node is allowed to accept new pods,
+and is controlled by the `unschedulable` field in the node spec. Different
+conditions provide different levels of understanding of node health. Kubernetes
+will make a comprehensive scheduling decision based on this information. Node
+condition is represented as a JSON object. For example, the following conditions
+mean the node is reachable from its cluster and in a sane state, but not
+allowed to accept new pods:
 ```json
 "conditions": [
   {
@@ -53,8 +56,12 @@ mean the node is reachable from its cluster, but not ready to accept pods:
   },
   {
     "kind": "Ready",
    "status": "Full",
   },
+  {
+    "kind": "Schedulable",
+    "status": "None",
+  }
 },
 ]
 ```
@@ -125,3 +132,9 @@ A Kubernetes administrator typically uses `kubectl` to manage `Node`. Similar
 to Node Controller, `kubectl` command only creates/deletes node representation.
 Note if Kubernetes is running on cloud provider, `kubectl create` a node will
 be refused if Node Controller has already synchronized nodes from cloud provider.
+An admin can choose to make a node unschedulable using `kubectl`. Marking a node
+unschedulable will not affect any existing pods on the node, but it will disable
+the creation of any new pods on the node. Node unschedulable example:
+```
+kubectl update nodes 10.1.2.3 --patch='{"apiVersion": "v1beta1", "unschedulable": true}'
+```
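As an editor's aside: the patch above flips a single boolean on the node spec. A minimal illustrative sketch of the same object constructed in Go (not part of this commit; it assumes the `api.Node`, `api.ObjectMeta`, and `api.NodeSpec` types introduced below and the repository's import path of the time):

```go
package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
)

func main() {
	// Equivalent of the kubectl patch above: a node whose spec explicitly
	// disables scheduling of new pods. Existing pods are unaffected; the
	// node controller surfaces this as a "Schedulable" condition with
	// status "None".
	node := api.Node{
		ObjectMeta: api.ObjectMeta{Name: "10.1.2.3"},
		Spec:       api.NodeSpec{Unschedulable: true},
	}
	fmt.Println(node.Spec.Unschedulable) // true
}
```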
@@ -782,13 +782,18 @@ type EndpointsList struct {
 
 // NodeSpec describes the attributes that a node is created with.
 type NodeSpec struct {
-	// Capacity represents the available resources of a node
+	// Capacity represents the available resources of a node.
 	Capacity ResourceList `json:"capacity,omitempty"`
 
 	// PodCIDR represents the pod IP range assigned to the node
 	// Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs.
 	PodCIDR string `json:"podCIDR,omitempty"`
 
 	// External ID of the node assigned by some machine database (e.g. a cloud provider)
 	ExternalID string `json:"externalID,omitempty"`
+
+	// Unschedulable controls node schedulability of new pods. By default node is schedulable.
+	Unschedulable bool `json:"unschedulable,omitempty"`
 }
@@ -842,6 +847,8 @@ const (
 	NodeReachable NodeConditionType = "Reachable"
 	// NodeReady means the node returns StatusOK for HTTP health check.
 	NodeReady NodeConditionType = "Ready"
+	// NodeSchedulable means the node is ready to accept new pods.
+	NodeSchedulable NodeConditionType = "Schedulable"
 )
 
 type NodeCondition struct {
@@ -711,6 +711,7 @@ func init() {
 		}
 		out.PodCIDR = in.Spec.PodCIDR
 		out.ExternalID = in.Spec.ExternalID
+		out.Unschedulable = in.Spec.Unschedulable
 		return s.Convert(&in.Spec.Capacity, &out.NodeResources.Capacity, 0)
 	},
 	func(in *Minion, out *newer.Node, s conversion.Scope) error {
@@ -742,6 +743,7 @@ func init() {
 		}
 		out.Spec.PodCIDR = in.PodCIDR
 		out.Spec.ExternalID = in.ExternalID
+		out.Spec.Unschedulable = in.Unschedulable
 		return s.Convert(&in.NodeResources.Capacity, &out.Spec.Capacity, 0)
 	},
@@ -677,6 +677,8 @@ const (
 	NodeReachable NodeConditionKind = "Reachable"
 	// NodeReady means the node returns StatusOK for HTTP health check.
 	NodeReady NodeConditionKind = "Ready"
+	// NodeSchedulable means the node is ready to accept new pods.
+	NodeSchedulable NodeConditionKind = "Schedulable"
 )
 
 type NodeCondition struct {
@@ -731,6 +733,8 @@ type Minion struct {
 	NodeResources NodeResources `json:"resources,omitempty" description:"characterization of node resources"`
 	// Pod IP range assigned to the node
 	PodCIDR string `json:"podCIDR,omitempty" description:"IP range assigned to the node"`
+	// Unschedulable controls node schedulability of new pods. By default node is schedulable.
+	Unschedulable bool `json:"unschedulable,omitempty" description:"disable pod scheduling on the node"`
 	// Status describes the current status of a node
 	Status NodeStatus `json:"status,omitempty" description:"current status of node"`
 	// Labels for the node
@@ -631,6 +631,7 @@ func init() {
 		}
 		out.PodCIDR = in.Spec.PodCIDR
 		out.ExternalID = in.Spec.ExternalID
+		out.Unschedulable = in.Spec.Unschedulable
 		return s.Convert(&in.Spec.Capacity, &out.NodeResources.Capacity, 0)
 	},
 	func(in *Minion, out *newer.Node, s conversion.Scope) error {
@@ -662,6 +663,7 @@ func init() {
 		}
 		out.Spec.PodCIDR = in.PodCIDR
 		out.Spec.ExternalID = in.ExternalID
+		out.Spec.Unschedulable = in.Unschedulable
 		return s.Convert(&in.NodeResources.Capacity, &out.Spec.Capacity, 0)
 	},
@@ -690,6 +690,8 @@ const (
 	NodeReachable NodeConditionKind = "Reachable"
 	// NodeReady means the node returns StatusOK for HTTP health check.
 	NodeReady NodeConditionKind = "Ready"
+	// NodeSchedulable means the node is ready to accept new pods.
+	NodeSchedulable NodeConditionKind = "Schedulable"
 )
 
 // Described the conditions of a running node.
@@ -750,6 +752,8 @@ type Minion struct {
 	NodeResources NodeResources `json:"resources,omitempty" description:"characterization of node resources"`
 	// Pod IP range assigned to the node
 	PodCIDR string `json:"podCIDR,omitempty" description:"IP range assigned to the node"`
+	// Unschedulable controls node schedulability of new pods. By default node is schedulable.
+	Unschedulable bool `json:"unschedulable,omitempty" description:"disable pod scheduling on the node"`
 	// Status describes the current status of a node
 	Status NodeStatus `json:"status,omitempty" description:"current status of node"`
 	// Labels for the node
@@ -813,13 +813,15 @@ type EndpointsList struct {
 
 // NodeSpec describes the attributes that a node is created with.
 type NodeSpec struct {
-	// Capacity represents the available resources of a node
+	// Capacity represents the available resources of a node.
 	// see https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/resources.md for more details.
 	Capacity ResourceList `json:"capacity,omitempty" description:"compute resource capacity of the node; https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/resources.md"`
 	// PodCIDR represents the pod IP range assigned to the node
 	PodCIDR string `json:"podCIDR,omitempty" description:"pod IP range assigned to the node"`
 	// External ID of the node assigned by some machine database (e.g. a cloud provider)
 	ExternalID string `json:"externalID,omitempty" description:"external ID assigned to the node by some machine database (e.g. a cloud provider)"`
+	// Unschedulable controls node schedulability of new pods. By default node is schedulable.
+	Unschedulable bool `json:"unschedulable,omitempty" description:"disable pod scheduling on the node"`
 }
@@ -873,6 +875,8 @@ const (
 	NodeReachable NodeConditionType = "Reachable"
 	// NodeReady means the node returns StatusOK for HTTP health check.
 	NodeReady NodeConditionType = "Ready"
+	// NodeSchedulable means the node is ready to accept new pods.
+	NodeSchedulable NodeConditionType = "Schedulable"
 )
 
 type NodeCondition struct {
@@ -862,6 +862,8 @@ func ValidateMinionUpdate(oldMinion *api.Node, minion *api.Node) errs.Validation
 	oldMinion.ObjectMeta = minion.ObjectMeta
 	// Allow users to update capacity
 	oldMinion.Spec.Capacity = minion.Spec.Capacity
+	// Allow users to unschedule node
+	oldMinion.Spec.Unschedulable = minion.Spec.Unschedulable
 	// Clear status
 	oldMinion.Status = minion.Status
@@ -2106,6 +2106,21 @@ func TestValidateMinionUpdate(t *testing.T) {
 				Labels: map[string]string{"Foo": "baz"},
 			},
 		}, true},
+		{api.Node{
+			ObjectMeta: api.ObjectMeta{
+				Name: "foo",
+			},
+			Spec: api.NodeSpec{
+				Unschedulable: false,
+			},
+		}, api.Node{
+			ObjectMeta: api.ObjectMeta{
+				Name: "foo",
+			},
+			Spec: api.NodeSpec{
+				Unschedulable: true,
+			},
+		}, true},
 	}
 	for i, test := range tests {
 		errs := ValidateMinionUpdate(&test.oldMinion, &test.minion)
@@ -749,6 +749,7 @@ func TestCreateMinion(t *testing.T) {
 				api.ResourceCPU:    resource.MustParse("1000m"),
 				api.ResourceMemory: resource.MustParse("1Mi"),
 			},
+			Unschedulable: false,
 		},
 	}
 	c := &testClient{
@@ -782,6 +783,7 @@ func TestUpdateMinion(t *testing.T) {
 				api.ResourceCPU:    resource.MustParse("1000m"),
 				api.ResourceMemory: resource.MustParse("1Mi"),
 			},
+			Unschedulable: true,
 		},
 	}
 	c := &testClient{
@@ -301,7 +301,7 @@ func (s *NodeController) PopulateAddresses(nodes *api.NodeList) (*api.NodeList,
 	return nodes, nil
 }
 
-// UpdateNodesStatus performs health checking for given list of nodes.
+// UpdateNodesStatus performs various condition checks for given list of nodes.
 func (s *NodeController) UpdateNodesStatus(nodes *api.NodeList) *api.NodeList {
 	var wg sync.WaitGroup
 	wg.Add(len(nodes.Items))
@@ -330,21 +330,14 @@ func (s *NodeController) updateNodeInfo(node *api.Node) error {
 	return nil
 }
 
-// DoCheck performs health checking for given node.
+// DoCheck performs various condition checks for given node.
 func (s *NodeController) DoCheck(node *api.Node) []api.NodeCondition {
 	var conditions []api.NodeCondition
 
 	// Check Condition: NodeReady. TODO: More node conditions.
 	oldReadyCondition := s.getCondition(node, api.NodeReady)
 	newReadyCondition := s.checkNodeReady(node)
-	if oldReadyCondition != nil && oldReadyCondition.Status == newReadyCondition.Status {
-		// If node status doesn't change, transition time is same as last time.
-		newReadyCondition.LastTransitionTime = oldReadyCondition.LastTransitionTime
-	} else {
-		// Set transition time to Now() if node status changes or `oldReadyCondition` is nil, which
-		// happens only when the node is checked for the first time.
-		newReadyCondition.LastTransitionTime = util.Now()
-	}
+	s.updateLastTransitionTime(oldReadyCondition, newReadyCondition)
 
 	if newReadyCondition.Status != api.ConditionFull {
 		// Node is not ready for this probe, we need to check if pods need to be deleted.
@@ -355,12 +348,48 @@ func (s *NodeController) DoCheck(node *api.Node) []api.NodeCondition {
 			s.deletePods(node.Name)
 		}
 	}
 
 	conditions = append(conditions, *newReadyCondition)
 
+	// Check Condition: NodeSchedulable
+	oldSchedulableCondition := s.getCondition(node, api.NodeSchedulable)
+	newSchedulableCondition := s.checkNodeSchedulable(node)
+	s.updateLastTransitionTime(oldSchedulableCondition, newSchedulableCondition)
+	conditions = append(conditions, *newSchedulableCondition)
+
 	return conditions
 }
 
+// updateLastTransitionTime updates LastTransitionTime for the newCondition based on oldCondition.
+func (s *NodeController) updateLastTransitionTime(oldCondition, newCondition *api.NodeCondition) {
+	if oldCondition != nil && oldCondition.Status == newCondition.Status {
+		// If node status doesn't change, transition time is same as last time.
+		newCondition.LastTransitionTime = oldCondition.LastTransitionTime
+	} else {
+		// Set transition time to Now() if node status changes or `oldCondition` is nil, which
+		// happens only when the node is checked for the first time.
+		newCondition.LastTransitionTime = util.Now()
+	}
+}
+
+// checkNodeSchedulable checks node schedulable condition, without transition timestamp set.
+func (s *NodeController) checkNodeSchedulable(node *api.Node) *api.NodeCondition {
+	if node.Spec.Unschedulable {
+		return &api.NodeCondition{
+			Type:          api.NodeSchedulable,
+			Status:        api.ConditionNone,
+			Reason:        "User marked unschedulable during node create/update",
+			LastProbeTime: util.Now(),
+		}
+	} else {
+		return &api.NodeCondition{
+			Type:          api.NodeSchedulable,
+			Status:        api.ConditionFull,
+			Reason:        "Node is schedulable by default",
+			LastProbeTime: util.Now(),
+		}
+	}
}
+
 // checkNodeReady checks raw node ready condition, without transition timestamp set.
 func (s *NodeController) checkNodeReady(node *api.Node) *api.NodeCondition {
 	switch status, err := s.kubeletClient.HealthCheck(node.Name); {
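Editor's note: the extracted `updateLastTransitionTime` helper encodes one rule, "keep the old transition time while the status is unchanged, otherwise stamp now". A self-contained sketch of that rule follows (illustrative only, using a simplified stand-in for `api.NodeCondition`; this is not code from the commit):

```go
package main

import (
	"fmt"
	"time"
)

// condition is a simplified stand-in for api.NodeCondition, keeping only
// the fields the transition-time rule touches.
type condition struct {
	Status             string
	LastTransitionTime time.Time
}

// updateLastTransitionTime mirrors the helper above: preserve the old
// transition time while the status is unchanged; otherwise stamp `now`.
// A nil old condition (first check of a node) also stamps `now`.
func updateLastTransitionTime(oldC, newC *condition, now time.Time) {
	if oldC != nil && oldC.Status == newC.Status {
		newC.LastTransitionTime = oldC.LastTransitionTime
	} else {
		newC.LastTransitionTime = now
	}
}

func main() {
	t0 := time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)
	oldC := &condition{Status: "Full", LastTransitionTime: t0}

	unchanged := &condition{Status: "Full"}
	updateLastTransitionTime(oldC, unchanged, time.Now())
	fmt.Println(unchanged.LastTransitionTime.Equal(t0)) // true: status did not change

	flipped := &condition{Status: "None"}
	updateLastTransitionTime(oldC, flipped, time.Now())
	fmt.Println(flipped.LastTransitionTime.Equal(t0)) // false: stamped with now
}
```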
@@ -550,13 +550,15 @@ func TestSyncCloudDeletePods(t *testing.T) {
 	}
 }
 
-func TestHealthCheckNode(t *testing.T) {
+func TestNodeConditionsCheck(t *testing.T) {
 	table := []struct {
 		node               *api.Node
 		fakeKubeletClient  *FakeKubeletClient
 		expectedConditions []api.NodeCondition
 	}{
 		{
+			// Node with default spec and kubelet /healthz probe returns success.
+			// Expected node condition to be ready and marked schedulable.
 			node: newNode("node0"),
 			fakeKubeletClient: &FakeKubeletClient{
 				Status: probe.Success,
@@ -568,10 +570,17 @@ func TestHealthCheckNode(t *testing.T) {
 					Status: api.ConditionFull,
 					Reason: "Node health check succeeded: kubelet /healthz endpoint returns ok",
 				},
+				{
+					Type:   api.NodeSchedulable,
+					Status: api.ConditionFull,
+					Reason: "Node is schedulable by default",
+				},
 			},
 		},
 		{
-			node: newNode("node0"),
+			// User specified node as schedulable and kubelet /healthz probe returns failure with no error.
+			// Expected node condition to be not ready and marked schedulable.
+			node: &api.Node{ObjectMeta: api.ObjectMeta{Name: "node0"}, Spec: api.NodeSpec{Unschedulable: false}},
 			fakeKubeletClient: &FakeKubeletClient{
 				Status: probe.Failure,
 				Err:    nil,
@@ -582,10 +591,17 @@ func TestHealthCheckNode(t *testing.T) {
 					Status: api.ConditionNone,
 					Reason: "Node health check failed: kubelet /healthz endpoint returns not ok",
 				},
+				{
+					Type:   api.NodeSchedulable,
+					Status: api.ConditionFull,
+					Reason: "Node is schedulable by default",
+				},
 			},
 		},
 		{
-			node: newNode("node0"),
+			// User specified node as unschedulable and kubelet /healthz probe returns failure with some error.
+			// Expected node condition to be not ready and marked unschedulable.
+			node: &api.Node{ObjectMeta: api.ObjectMeta{Name: "node0"}, Spec: api.NodeSpec{Unschedulable: true}},
 			fakeKubeletClient: &FakeKubeletClient{
 				Status: probe.Failure,
 				Err:    errors.New("Error"),
@@ -596,6 +612,11 @@ func TestHealthCheckNode(t *testing.T) {
 					Status: api.ConditionUnknown,
 					Reason: "Node health check error: Error",
 				},
+				{
+					Type:   api.NodeSchedulable,
+					Status: api.ConditionNone,
+					Reason: "User marked unschedulable during node create/update",
+				},
 			},
 		},
 	}
@@ -663,11 +684,13 @@ func TestSyncNodeStatusTransitionTime(t *testing.T) {
 		expectedTransitionTimeChange bool
 	}{
 		{
-			// Existing node is healthy, current porbe is healthy too.
+			// Existing node is healthy, current probe is healthy too.
+			// Existing node is schedulable, again explicitly mark node as schedulable.
 			fakeNodeHandler: &FakeNodeHandler{
 				Existing: []*api.Node{
 					{
 						ObjectMeta: api.ObjectMeta{Name: "node0"},
+						Spec:       api.NodeSpec{Unschedulable: false},
 						Status: api.NodeStatus{
 							Conditions: []api.NodeCondition{
 								{
@@ -676,6 +699,12 @@ func TestSyncNodeStatusTransitionTime(t *testing.T) {
 									Reason:             "Node health check succeeded: kubelet /healthz endpoint returns ok",
 									LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 								},
+								{
+									Type:               api.NodeSchedulable,
+									Status:             api.ConditionFull,
+									Reason:             "Node is schedulable by default",
+									LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+								},
 							},
 						},
 					},
@@ -689,11 +718,13 @@ func TestSyncNodeStatusTransitionTime(t *testing.T) {
 			expectedTransitionTimeChange: false,
 		},
 		{
-			// Existing node is healthy, current porbe is unhealthy.
+			// Existing node is healthy, current probe is unhealthy.
+			// Existing node is schedulable, mark node as unschedulable.
 			fakeNodeHandler: &FakeNodeHandler{
 				Existing: []*api.Node{
 					{
 						ObjectMeta: api.ObjectMeta{Name: "node0"},
+						Spec:       api.NodeSpec{Unschedulable: true},
 						Status: api.NodeStatus{
 							Conditions: []api.NodeCondition{
 								{
@@ -702,6 +733,12 @@ func TestSyncNodeStatusTransitionTime(t *testing.T) {
 									Reason:             "Node health check succeeded: kubelet /healthz endpoint returns ok",
 									LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 								},
+								{
+									Type:               api.NodeSchedulable,
+									Status:             api.ConditionFull,
+									Reason:             "Node is schedulable by default",
+									LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+								},
 							},
 						},
 					},
@@ -865,7 +902,7 @@ func TestSyncNodeStatusDeletePods(t *testing.T) {
 		expectedActions []client.FakeAction
 	}{
 		{
-			// Existing node is healthy, current porbe is healthy too.
+			// Existing node is healthy, current probe is healthy too.
 			fakeNodeHandler: &FakeNodeHandler{
 				Existing: []*api.Node{
 					{
@@ -894,7 +931,7 @@ func TestSyncNodeStatusDeletePods(t *testing.T) {
 			expectedActions: nil,
 		},
 		{
-			// Existing node is healthy, current porbe is unhealthy, i.e. node just becomes unhealthy.
+			// Existing node is healthy, current probe is unhealthy, i.e. node just becomes unhealthy.
 			// Do not delete pods.
 			fakeNodeHandler: &FakeNodeHandler{
 				Existing: []*api.Node{
@@ -924,7 +961,7 @@ func TestSyncNodeStatusDeletePods(t *testing.T) {
 			expectedActions: nil,
 		},
 		{
-			// Existing node unhealthy, current porbe is unhealthy. Node is still within grace peroid.
+			// Existing node unhealthy, current probe is unhealthy. Node is still within grace period.
 			fakeNodeHandler: &FakeNodeHandler{
 				Existing: []*api.Node{
 					{
@@ -956,7 +993,7 @@ func TestSyncNodeStatusDeletePods(t *testing.T) {
 			expectedActions: nil,
 		},
 		{
-			// Existing node unhealthy, current porbe is unhealthy. Node exceeds grace peroid.
+			// Existing node unhealthy, current probe is unhealthy. Node exceeds grace period.
 			fakeNodeHandler: &FakeNodeHandler{
 				Existing: []*api.Node{
 					{
@@ -1036,6 +1073,11 @@ func TestSyncNodeStatus(t *testing.T) {
 						Status: api.ConditionFull,
 						Reason: "Node health check succeeded: kubelet /healthz endpoint returns ok",
 					},
+					{
+						Type:   api.NodeSchedulable,
+						Status: api.ConditionFull,
+						Reason: "Node is schedulable by default",
+					},
 				},
 				Addresses: []api.NodeAddress{
 					{Type: api.NodeLegacyHostIP, Address: "1.2.3.4"},
@@ -1051,6 +1093,11 @@ func TestSyncNodeStatus(t *testing.T) {
 						Status: api.ConditionFull,
 						Reason: "Node health check succeeded: kubelet /healthz endpoint returns ok",
 					},
+					{
+						Type:   api.NodeSchedulable,
+						Status: api.ConditionFull,
+						Reason: "Node is schedulable by default",
+					},
 				},
 				Addresses: []api.NodeAddress{
 					{Type: api.NodeLegacyHostIP, Address: "1.2.3.4"},
@@ -432,7 +432,7 @@ func printSecretList(list *api.SecretList, w io.Writer) error {
 
 func printNode(node *api.Node, w io.Writer) error {
 	conditionMap := make(map[api.NodeConditionType]*api.NodeCondition)
-	NodeAllConditions := []api.NodeConditionType{api.NodeReady, api.NodeReachable}
+	NodeAllConditions := []api.NodeConditionType{api.NodeSchedulable, api.NodeReady, api.NodeReachable}
 	for i := range node.Status.Conditions {
 		cond := node.Status.Conditions[i]
 		conditionMap[cond.Type] = &cond
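Editor's note: the tests below pin down how the condition list is rendered: statuses appear in `NodeAllConditions` order, with a `Not` prefix when a condition is not Full. A self-contained sketch of that rendering rule (illustrative only, with simplified stand-in types; the real `printNode` works from the `api` types in the truncated loop above, and skipping absent conditions is an assumption of this sketch):

```go
package main

import (
	"fmt"
	"strings"
)

// conditionStatus is a simplified stand-in: condition type -> status.
type conditionStatus map[string]string

// nodeStatusString renders known conditions in a fixed order, prefixing
// "Not" when a condition's status is not Full, matching the expected
// strings in the tests below.
func nodeStatusString(conditions conditionStatus) string {
	order := []string{"Schedulable", "Ready", "Reachable"}
	var parts []string
	for _, t := range order {
		status, ok := conditions[t]
		if !ok {
			continue // absent condition: skipped in this sketch
		}
		if status == "Full" {
			parts = append(parts, t)
		} else {
			parts = append(parts, "Not"+t)
		}
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(nodeStatusString(conditionStatus{
		"Schedulable": "Full", "Ready": "Full", "Reachable": "Full",
	})) // Schedulable,Ready,Reachable
	fmt.Println(nodeStatusString(conditionStatus{
		"Schedulable": "None", "Ready": "None", "Reachable": "Full",
	})) // NotSchedulable,NotReady,Reachable
}
```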
@@ -567,6 +567,26 @@ func TestPrintMinionStatus(t *testing.T) {
 			},
 			status: "Unknown",
 		},
+		{
+			minion: api.Node{
+				ObjectMeta: api.ObjectMeta{Name: "foo7"},
+				Status: api.NodeStatus{Conditions: []api.NodeCondition{
+					{Type: api.NodeSchedulable, Status: api.ConditionFull},
+					{Type: api.NodeReady, Status: api.ConditionFull},
+					{Type: api.NodeReachable, Status: api.ConditionFull}}},
+			},
+			status: "Schedulable,Ready,Reachable",
+		},
+		{
+			minion: api.Node{
+				ObjectMeta: api.ObjectMeta{Name: "foo8"},
+				Status: api.NodeStatus{Conditions: []api.NodeCondition{
+					{Type: api.NodeSchedulable, Status: api.ConditionNone},
+					{Type: api.NodeReady, Status: api.ConditionNone},
+					{Type: api.NodeReachable, Status: api.ConditionFull}}},
+			},
+			status: "NotSchedulable,NotReady,Reachable",
+		},
 	}
 
 	for _, test := range table {
@@ -46,7 +46,6 @@ func NewREST(m Registry) *REST {
 }
 
 var ErrDoesNotExist = errors.New("The requested resource does not exist.")
-var ErrNotHealty = errors.New("The requested minion is not healthy.")
 
 // Create satisfies the RESTStorage interface.
 func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
@@ -207,6 +207,11 @@ func (factory *ConfigFactory) pollMinions() (cache.Enumerator, error) {
 			cond := node.Status.Conditions[i]
 			conditionMap[cond.Type] = &cond
 		}
+		if condition, ok := conditionMap[api.NodeSchedulable]; ok {
+			if condition.Status != api.ConditionFull {
+				continue
+			}
+		}
 		if condition, ok := conditionMap[api.NodeReady]; ok {
 			if condition.Status == api.ConditionFull {
 				nodes.Items = append(nodes.Items, node)
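Editor's note: the net effect of the added guard, together with the existing Ready check, is a small eligibility predicate. A self-contained sketch of that predicate follows (illustrative only, with simplified stand-in types; the handling of absent conditions is an assumption matching the test fixtures below, where a node carrying only `Schedulable: Full` is counted as eligible):

```go
package main

import "fmt"

// nodeCondition and node are simplified stand-ins for the api types used
// by pollMinions above.
type nodeCondition struct {
	Type   string // e.g. "Schedulable", "Ready", "Reachable"
	Status string // e.g. "Full", "None"
}

type node struct {
	Name       string
	Conditions []nodeCondition
}

// eligible mirrors the filtering added to pollMinions: a node is skipped
// outright when an explicit Schedulable condition is not Full; otherwise a
// Ready condition, when present, must be Full. Absent conditions leave the
// node eligible (an assumption based on this commit's test fixtures).
func eligible(n node) bool {
	conditionMap := map[string]string{}
	for _, c := range n.Conditions {
		conditionMap[c.Type] = c.Status
	}
	if status, ok := conditionMap["Schedulable"]; ok && status != "Full" {
		return false
	}
	if status, ok := conditionMap["Ready"]; ok {
		return status == "Full"
	}
	return true
}

func main() {
	fmt.Println(eligible(node{Name: "fiz", Conditions: []nodeCondition{
		{Type: "Schedulable", Status: "Full"},
	}})) // true
	fmt.Println(eligible(node{Name: "buz", Conditions: []nodeCondition{
		{Type: "Schedulable", Status: "None"},
		{Type: "Ready", Status: "Full"},
	}})) // false: explicitly marked unschedulable
}
```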
@@ -156,7 +156,15 @@ func TestPollMinions(t *testing.T) {
 				},
 			},
 			{
-				ObjectMeta: api.ObjectMeta{Name: "baz"},
+				ObjectMeta: api.ObjectMeta{Name: "fiz"},
+				Status: api.NodeStatus{
+					Conditions: []api.NodeCondition{
+						{Type: api.NodeSchedulable, Status: api.ConditionFull},
+					},
+				},
+			},
+			{
+				ObjectMeta: api.ObjectMeta{Name: "biz"},
 				Status: api.NodeStatus{
 					Conditions: []api.NodeCondition{
 						{Type: api.NodeReady, Status: api.ConditionFull},
@@ -173,8 +181,47 @@ func TestPollMinions(t *testing.T) {
 					},
 				},
 			},
+			{
+				ObjectMeta: api.ObjectMeta{Name: "fuz"},
+				Status: api.NodeStatus{
+					Conditions: []api.NodeCondition{
+						{Type: api.NodeSchedulable, Status: api.ConditionFull},
+						{Type: api.NodeReady, Status: api.ConditionFull},
+						{Type: api.NodeReachable, Status: api.ConditionFull},
+					},
+				},
+			},
+			{
+				ObjectMeta: api.ObjectMeta{Name: "buz"},
+				Status: api.NodeStatus{
+					Conditions: []api.NodeCondition{
+						{Type: api.NodeSchedulable, Status: api.ConditionNone},
+						{Type: api.NodeReady, Status: api.ConditionFull},
+						{Type: api.NodeReachable, Status: api.ConditionFull},
+					},
+				},
+			},
+			{
+				ObjectMeta: api.ObjectMeta{Name: "foobar"},
+				Status: api.NodeStatus{
+					Conditions: []api.NodeCondition{
+						{Type: api.NodeSchedulable, Status: api.ConditionFull},
+						{Type: api.NodeReady, Status: api.ConditionNone},
+						{Type: api.NodeReachable, Status: api.ConditionFull},
+					},
+				},
+			},
+			{
+				ObjectMeta: api.ObjectMeta{Name: "fizbiz"},
+				Status: api.NodeStatus{
+					Conditions: []api.NodeCondition{
+						{Type: api.NodeSchedulable, Status: api.ConditionFull},
+						{Type: api.NodeReachable, Status: api.ConditionNone},
+					},
+				},
+			},
 		},
-		expectedCount: 4,
+		expectedCount: 6,
 	},
 	{
 		minions: []api.Node{
@@ -197,6 +244,27 @@ func TestPollMinions(t *testing.T) {
 			},
 			expectedCount: 1,
 		},
+		{
+			minions: []api.Node{
+				{
+					ObjectMeta: api.ObjectMeta{Name: "foo"},
+					Status: api.NodeStatus{
+						Conditions: []api.NodeCondition{
+							{Type: api.NodeSchedulable, Status: api.ConditionFull},
+						},
+					},
+				},
+				{
+					ObjectMeta: api.ObjectMeta{Name: "bar"},
+					Status: api.NodeStatus{
+						Conditions: []api.NodeCondition{
+							{Type: api.NodeSchedulable, Status: api.ConditionNone},
+						},
+					},
+				},
+			},
+			expectedCount: 1,
+		},
 		{
 			minions: []api.Node{
 				{