clean up redundant condition type OutOfDisk

Pingan2017 2018-12-29 15:31:56 +08:00
parent a33840e023
commit e94d7b3802
13 changed files with 12 additions and 82 deletions

@@ -3737,9 +3737,6 @@ type NodeConditionType string
const (
// NodeReady means kubelet is healthy and ready to accept pods.
NodeReady NodeConditionType = "Ready"
// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
// space on the node.
NodeOutOfDisk NodeConditionType = "OutOfDisk"
// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
NodeMemoryPressure NodeConditionType = "MemoryPressure"
// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
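
With the OutOfDisk constant gone, low disk on a node is surfaced through the DiskPressure condition (and eviction) instead. A minimal sketch of reading that condition off a node using the public k8s.io/api/core/v1 types; the hasDiskPressure helper is illustrative and not part of this commit:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// hasDiskPressure reports whether the node's DiskPressure condition is True.
// Illustrative helper; not part of this commit.
func hasDiskPressure(node *v1.Node) bool {
	for _, cond := range node.Status.Conditions {
		if cond.Type == v1.NodeDiskPressure {
			return cond.Status == v1.ConditionTrue
		}
	}
	return false
}

func main() {
	node := &v1.Node{
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue},
			},
		},
	}
	fmt.Println("disk pressure:", hasDiskPressure(node)) // prints: disk pressure: true
}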

@@ -146,7 +146,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
}
// When node has memory pressure and TaintNodesByCondition is enabled, check BestEffort Pod's toleration:
// admit it if tolerates memory pressure taint, fail for other tolerations, e.g. OutOfDisk.
// admit it if tolerates memory pressure taint, fail for other tolerations, e.g. DiskPressure.
if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) &&
v1helper.TolerationsTolerateTaint(attrs.Pod.Spec.Tolerations, &v1.Taint{
Key: schedulerapi.TaintNodeMemoryPressure,
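
The admission path above only asks whether the pod's tolerations cover the memory-pressure taint. A rough, self-contained illustration of that taint/toleration match using the public core/v1 types; the taint key literal is written out here in place of the scheduler API constant, and the surrounding main function is purely illustrative:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Memory-pressure taint as applied under TaintNodesByCondition
	// ("node.kubernetes.io/memory-pressure" in this release line).
	taint := v1.Taint{
		Key:    "node.kubernetes.io/memory-pressure",
		Effect: v1.TaintEffectNoSchedule,
	}

	// A BestEffort pod that explicitly tolerates memory pressure.
	tolerations := []v1.Toleration{
		{
			Key:      "node.kubernetes.io/memory-pressure",
			Operator: v1.TolerationOpExists,
			Effect:   v1.TaintEffectNoSchedule,
		},
	}

	tolerated := false
	for i := range tolerations {
		// ToleratesTaint is defined on the core/v1 Toleration type.
		if tolerations[i].ToleratesTaint(&taint) {
			tolerated = true
			break
		}
	}
	fmt.Println("memory-pressure taint tolerated:", tolerated)
}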

@@ -550,7 +550,6 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent),
nodestatus.ReadyCondition(kl.clock.Now, kl.runtimeState.runtimeErrors, kl.runtimeState.networkErrors, kl.runtimeState.storageErrors, validateHostFunc, kl.containerManager.Status, kl.recordNodeStatusEvent),
nodestatus.VolumesInUse(kl.volumeManager.ReconcilerStatesHasBeenSynced, kl.volumeManager.GetVolumesInUse),
nodestatus.RemoveOutOfDiskCondition(),
// TODO(mtaufen): I decided not to move this setter for now, since all it does is send an event
// and record state back to the Kubelet runtime object. In the future, I'd like to isolate
// these side-effects by decoupling the decisions to send events and partial status recording
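
Each entry in the list above follows the nodestatus Setter shape: a func(*v1.Node) error that fills in part of the node's status. A stripped-down sketch of applying such a list in order; illustrative only, since the kubelet's own loop logs setter errors and keeps going rather than stopping at the first failure:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// Setter mirrors the shape of the nodestatus setters listed above.
type Setter func(node *v1.Node) error

// applySetters runs each setter in order and returns the first error.
// Illustrative; the kubelet handles setter errors per entry instead.
func applySetters(node *v1.Node, setters []Setter) error {
	for _, set := range setters {
		if err := set(node); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	setReady := func(node *v1.Node) error {
		node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
			Type:   v1.NodeReady,
			Status: v1.ConditionTrue,
		})
		return nil
	}
	node := &v1.Node{}
	if err := applySetters(node, []Setter{setReady}); err != nil {
		fmt.Println("error:", err)
	}
	fmt.Println("conditions:", len(node.Status.Conditions))
}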

@@ -747,18 +747,3 @@ func VolumeLimits(volumePluginListFunc func() []volume.VolumePluginWithAttachLimits
return nil
}
}
// RemoveOutOfDiskCondition removes stale OutOfDisk condition
// OutOfDisk condition has been removed from kubelet in 1.12
func RemoveOutOfDiskCondition() Setter {
return func(node *v1.Node) error {
var conditions []v1.NodeCondition
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type != v1.NodeOutOfDisk {
conditions = append(conditions, node.Status.Conditions[i])
}
}
node.Status.Conditions = conditions
return nil
}
}
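
The deleted setter is a one-off instance of a general "filter conditions by type" pattern. The same logic, generalized to any condition type; an illustrative helper, not something this commit adds:

package nodeutil // illustrative package name

import v1 "k8s.io/api/core/v1"

// RemoveConditionByType drops every condition of the given type from the
// node's status, keeping the rest in their original order. Illustrative
// generalization of the setter removed above.
func RemoveConditionByType(node *v1.Node, condType v1.NodeConditionType) {
	kept := node.Status.Conditions[:0]
	for _, cond := range node.Status.Conditions {
		if cond.Type != condType {
			kept = append(kept, cond)
		}
	}
	node.Status.Conditions = kept
}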

@@ -1518,54 +1518,6 @@ func TestVolumeLimits(t *testing.T) {
}
}
func TestRemoveOutOfDiskCondition(t *testing.T) {
now := time.Now()
var cases = []struct {
desc string
inputNode *v1.Node
expectNode *v1.Node
}{
{
desc: "should remove stale OutOfDiskCondition from node status",
inputNode: &v1.Node{
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
*makeMemoryPressureCondition(false, now, now),
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionFalse,
},
*makeDiskPressureCondition(false, now, now),
},
},
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
*makeMemoryPressureCondition(false, now, now),
*makeDiskPressureCondition(false, now, now),
},
},
},
},
}
for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
// construct setter
setter := RemoveOutOfDiskCondition()
// call setter on node
if err := setter(tc.inputNode); err != nil {
t.Fatalf("unexpected error: %v", err)
}
// check expected node
assert.True(t, apiequality.Semantic.DeepEqual(tc.expectNode, tc.inputNode),
"Diff: %s", diff.ObjectDiff(tc.expectNode, tc.inputNode))
})
}
}
// Test Helpers:
// sortableNodeAddress is a type for sorting []v1.NodeAddress

@@ -227,14 +227,14 @@ func TestNodeConditionsChanged(t *testing.T) {
{
Name: "no condition changed",
Changed: false,
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
NewConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
OldConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}},
NewConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}},
},
{
Name: "only LastHeartbeatTime changed",
Changed: false,
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(1, 0)}},
NewConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(2, 0)}},
OldConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(1, 0)}},
NewConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(2, 0)}},
},
{
Name: "new node has more healthy conditions",
@@ -245,7 +245,7 @@ func TestNodeConditionsChanged(t *testing.T) {
{
Name: "new node has less unhealthy conditions",
Changed: true,
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
OldConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}},
NewConditions: []v1.NodeCondition{},
},
{
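
The fixtures above exercise a comparison that ignores heartbeat-only updates when deciding whether node conditions changed. A rough sketch of that idea, built on the same apiequality.Semantic comparison used in the deleted test earlier; this is not the scheduler's actual implementation:

package nodecompare // illustrative package name

import (
	v1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// conditionsSemanticallyEqual treats two condition lists as equal when they
// differ only in LastHeartbeatTime, mirroring the "only LastHeartbeatTime
// changed" fixture above.
func conditionsSemanticallyEqual(oldConds, newConds []v1.NodeCondition) bool {
	strip := func(conds []v1.NodeCondition) []v1.NodeCondition {
		out := make([]v1.NodeCondition, 0, len(conds))
		for _, c := range conds {
			c.LastHeartbeatTime = metav1.Time{} // drop heartbeat-only churn
			out = append(out, c)
		}
		return out
	}
	return apiequality.Semantic.DeepEqual(strip(oldConds), strip(newConds))
}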

@@ -4205,9 +4205,6 @@ type NodeConditionType string
const (
// NodeReady means kubelet is healthy and ready to accept pods.
NodeReady NodeConditionType = "Ready"
// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
// space on the node.
NodeOutOfDisk NodeConditionType = "OutOfDisk"
// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
NodeMemoryPressure NodeConditionType = "MemoryPressure"
// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.

@@ -55,7 +55,7 @@ type CarpStatus struct {
// A human readable message indicating details about why the carp is in this state.
// +optional
Message string
// A brief CamelCase message indicating details about why the carp is in this state. e.g. 'OutOfDisk'
// A brief CamelCase message indicating details about why the carp is in this state. e.g. 'DiskPressure'
// +optional
Reason string

@@ -190,7 +190,7 @@ message CarpStatus {
optional string message = 3;
// A brief CamelCase message indicating details about why the carp is in this state.
// e.g. 'OutOfDisk'
// e.g. 'DiskPressure'
// +optional
optional string reason = 4;

@@ -66,7 +66,7 @@ type CarpStatus struct {
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
// A brief CamelCase message indicating details about why the carp is in this state.
// e.g. 'OutOfDisk'
// e.g. 'DiskPressure'
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`

@@ -55,7 +55,7 @@ type PodStatus struct {
// A human readable message indicating details about why the pod is in this state.
// +optional
Message string
// A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'
// A brief CamelCase message indicating details about why the pod is in this state. e.g. 'DiskPressure'
// +optional
Reason string

@@ -190,7 +190,7 @@ message PodStatus {
optional string message = 3;
// A brief CamelCase message indicating details about why the pod is in this state.
// e.g. 'OutOfDisk'
// e.g. 'DiskPressure'
// +optional
optional string reason = 4;

@@ -66,7 +66,7 @@ type PodStatus struct {
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
// A brief CamelCase message indicating details about why the pod is in this state.
// e.g. 'OutOfDisk'
// e.g. 'DiskPressure'
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
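
The Reason field touched in these comment-only hunks is a short machine-readable CamelCase token alongside the free-form Message. A small illustration of how the pair is typically populated on a pod status; the values here are made up for the example:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Reason carries the brief CamelCase token, Message the human-readable detail.
	status := v1.PodStatus{
		Phase:   v1.PodFailed,
		Reason:  "Evicted",
		Message: "The node was under disk pressure.",
	}
	fmt.Printf("%s: %s\n", status.Reason, status.Message)
}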