Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-24 04:06:03 +00:00.
Merge pull request #52856 from yastij/nodecontroller-clean-backwards-tests
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

cleaning tests from deprecated use cases due to unsupported versions

**What this PR does / why we need it**: this is part of the #52356 effort

**Which issue this PR fixes**:

**Special notes for your reviewer**:

**Release note**:

```release-note
None
```
This commit is contained in commit 46c2bfe47d.
@@ -1771,62 +1771,6 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
 			},
 			expectedPodStatusUpdate: true,
 		},
-		// Node created long time ago, with outdated kubelet version 1.1.0 and status
-		// updated by kubelet exceeds grace period. Expect no action from node controller.
-		{
-			fakeNodeHandler: &testutil.FakeNodeHandler{
-				Existing: []*v1.Node{
-					{
-						ObjectMeta: metav1.ObjectMeta{
-							Name:              "node0",
-							CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
-						},
-						Status: v1.NodeStatus{
-							NodeInfo: v1.NodeSystemInfo{
-								KubeletVersion: "v1.1.0",
-							},
-							Conditions: []v1.NodeCondition{
-								{
-									Type:   v1.NodeReady,
-									Status: v1.ConditionTrue,
-									// Node status hasn't been updated for 1hr.
-									LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
-									LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
-								},
-							},
-							Capacity: v1.ResourceList{
-								v1.ResourceName(v1.ResourceCPU):    resource.MustParse("10"),
-								v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
-							},
-						},
-						Spec: v1.NodeSpec{
-							ExternalID: "node0",
-						},
-					},
-				},
-				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
-			},
-			timeToPass: 1 * time.Minute,
-			newNodeStatus: v1.NodeStatus{
-				NodeInfo: v1.NodeSystemInfo{
-					KubeletVersion: "v1.1.0",
-				},
-				Conditions: []v1.NodeCondition{
-					{
-						Type:   v1.NodeReady,
-						Status: v1.ConditionTrue,
-						// Node status hasn't been updated for 1hr.
-						LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
-						LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
-					},
-				},
-				Capacity: v1.ResourceList{
-					v1.ResourceName(v1.ResourceCPU):    resource.MustParse("10"),
-					v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
-				},
-			},
-			expectedPodStatusUpdate: false,
-		},
 	}
 
 	for i, item := range table {
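The removed case turns on the controller's staleness check: the node's `LastHeartbeatTime` is an hour behind the simulated clock, well past the monitor grace period, yet no pod status update is expected because the kubelet is an outdated v1.1.0. A minimal sketch of the staleness predicate itself, with an illustrative name and signature rather than the controller's actual internals:

```go
package sketch

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// heartbeatStale reports whether a node's last heartbeat is older than the
// node-monitor grace period; this is what makes the controller treat the
// node status in the scenario above as out of date.
func heartbeatStale(lastHeartbeat metav1.Time, now time.Time, gracePeriod time.Duration) bool {
	return now.Sub(lastHeartbeat.Time) > gracePeriod
}
```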
@@ -2247,307 +2191,3 @@ func TestNodeEventGeneration(t *testing.T) {
 		}
 	}
 }
-
-func TestCheckPod(t *testing.T) {
-	tcs := []struct {
-		pod   v1.Pod
-		prune bool
-	}{
-
-		{
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
-				Spec:       v1.PodSpec{NodeName: "new"},
-			},
-			prune: false,
-		},
-		{
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
-				Spec:       v1.PodSpec{NodeName: "old"},
-			},
-			prune: false,
-		},
-		{
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
-				Spec:       v1.PodSpec{NodeName: ""},
-			},
-			prune: false,
-		},
-		{
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
-				Spec:       v1.PodSpec{NodeName: "nonexistant"},
-			},
-			prune: false,
-		},
-		{
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
-				Spec:       v1.PodSpec{NodeName: "new"},
-			},
-			prune: false,
-		},
-		{
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
-				Spec:       v1.PodSpec{NodeName: "old"},
-			},
-			prune: true,
-		},
-		{
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
-				Spec:       v1.PodSpec{NodeName: "older"},
-			},
-			prune: true,
-		},
-		{
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
-				Spec:       v1.PodSpec{NodeName: "oldest"},
-			},
-			prune: true,
-		},
-		{
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
-				Spec:       v1.PodSpec{NodeName: ""},
-			},
-			prune: false,
-		},
-		{
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
-				Spec:       v1.PodSpec{NodeName: "nonexistant"},
-			},
-			prune: false,
-		},
-	}
-
-	nc, _ := newNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false, false)
-	nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "new",
-		},
-		Status: v1.NodeStatus{
-			NodeInfo: v1.NodeSystemInfo{
-				KubeletVersion: "v1.1.0",
-			},
-		},
-	})
-	nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "old",
-		},
-		Status: v1.NodeStatus{
-			NodeInfo: v1.NodeSystemInfo{
-				KubeletVersion: "v1.0.0",
-			},
-		},
-	})
-	nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "older",
-		},
-		Status: v1.NodeStatus{
-			NodeInfo: v1.NodeSystemInfo{
-				KubeletVersion: "v0.21.4",
-			},
-		},
-	})
-	nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "oldest",
-		},
-		Status: v1.NodeStatus{
-			NodeInfo: v1.NodeSystemInfo{
-				KubeletVersion: "v0.19.3",
-			},
-		},
-	})
-
-	for i, tc := range tcs {
-		var deleteCalls int
-		nc.forcefullyDeletePod = func(_ *v1.Pod) error {
-			deleteCalls++
-			return nil
-		}
-
-		nc.maybeDeleteTerminatingPod(&tc.pod)
-
-		if tc.prune && deleteCalls != 1 {
-			t.Errorf("[%v] expected number of delete calls to be 1 but got %v", i, deleteCalls)
-		}
-		if !tc.prune && deleteCalls != 0 {
-			t.Errorf("[%v] expected number of delete calls to be 0 but got %v", i, deleteCalls)
-		}
-	}
-}
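Taken together, the ten cases above pin down a small decision procedure for `maybeDeleteTerminatingPod`. The sketch below is an illustrative reconstruction, not the controller's code: the helper names are invented, and the v1.1.0 graceful-deletion cutoff is inferred from the table, where terminating pods on the v1.1.0 node ("new") are kept while those on the v1.0.0 node ("old") are pruned.

```go
package sketch

import (
	v1 "k8s.io/api/core/v1"
)

// shouldForceDelete mirrors the expectations encoded in the tcs table:
// only a terminating pod, bound to a node we know about, whose kubelet
// predates graceful deletion, gets forcefully deleted.
func shouldForceDelete(
	pod *v1.Pod,
	findNode func(name string) (*v1.Node, bool), // e.g. a lookup in the informer store
	kubeletTooOld func(version string) bool, // assumed cutoff: true below v1.1.0
) bool {
	if pod.DeletionTimestamp == nil {
		return false // pod is not terminating
	}
	if pod.Spec.NodeName == "" {
		return false // pod was never scheduled anywhere
	}
	node, ok := findNode(pod.Spec.NodeName)
	if !ok {
		return false // node unknown (the "nonexistant" cases): do nothing
	}
	return kubeletTooOld(node.Status.NodeInfo.KubeletVersion)
}
```

Each branch corresponds to a row group in the table: rows without a `DeletionTimestamp` never prune, rows with an empty or unknown `NodeName` never prune, and of the remaining rows only those on old-kubelet nodes prune.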
-
-func TestCheckNodeKubeletVersionParsing(t *testing.T) {
-	tests := []struct {
-		version  string
-		outdated bool
-	}{
-		{
-			version:  "",
-			outdated: true,
-		},
-		{
-			version:  "v0.21.4",
-			outdated: true,
-		},
-		{
-			version:  "v1.0.0",
-			outdated: true,
-		},
-		{
-			version:  "v1.1.0",
-			outdated: true,
-		},
-		{
-			version:  "v1.1.0-alpha.2.961+9d4c6846fc03b9-dirty",
-			outdated: true,
-		},
-		{
-			version:  "v1.2.0",
-			outdated: false,
-		},
-		{
-			version:  "v1.3.3",
-			outdated: false,
-		},
-		{
-			version:  "v1.4.0-alpha.2.961+9d4c6846fc03b9-dirty",
-			outdated: false,
-		},
-		{
-			version:  "v2.0.0",
-			outdated: false,
-		},
-	}
-
-	for _, ov := range tests {
-		n := &v1.Node{
-			Status: v1.NodeStatus{
-				NodeInfo: v1.NodeSystemInfo{
-					KubeletVersion: ov.version,
-				},
-			},
-		}
-		isOutdated := util.NodeRunningOutdatedKubelet(n)
-		if ov.outdated != isOutdated {
-			t.Errorf("Version %v doesn't match test expectation. Expected outdated %v got %v", n.Status.NodeInfo.KubeletVersion, ov.outdated, isOutdated)
-		} else {
-			t.Logf("Version %v outdated %v", ov.version, isOutdated)
-		}
-	}
-}
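The version table fixes the cutoff precisely: anything below v1.2.0, plus anything that fails to parse (including the empty string), counts as outdated, while pre-release and build suffixes such as `-alpha.2.961+9d4c6846fc03b9-dirty` do not affect the comparison. A hand-rolled sketch of that check; the real helper is `util.NodeRunningOutdatedKubelet`, so treat this reimplementation as illustrative only:

```go
package sketch

import (
	"strconv"
	"strings"
)

// kubeletOutdated reproduces the expectations in the tests table above:
// unparseable versions and everything below v1.2.0 are outdated.
func kubeletOutdated(version string) bool {
	v := strings.TrimPrefix(version, "v")
	// Drop pre-release ("-...") and build metadata ("+...") before splitting.
	if i := strings.IndexAny(v, "-+"); i >= 0 {
		v = v[:i]
	}
	parts := strings.Split(v, ".")
	if len(parts) < 2 {
		return true // unparseable, including the empty string
	}
	major, errMajor := strconv.Atoi(parts[0])
	minor, errMinor := strconv.Atoi(parts[1])
	if errMajor != nil || errMinor != nil {
		return true
	}
	// Cutoff taken from the table: v1.2.0 is the first non-outdated release.
	return major < 1 || (major == 1 && minor < 2)
}
```

Run against the nine table entries, this returns exactly the expected `outdated` values.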
-
-func TestFixDeprecatedTaintKey(t *testing.T) {
-	fakeNow := metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC)
-	evictionTimeout := 10 * time.Minute
-
-	fakeNodeHandler := &testutil.FakeNodeHandler{
-		Existing: []*v1.Node{
-			{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:              "node0",
-					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
-					Labels: map[string]string{
-						kubeletapis.LabelZoneRegion:        "region1",
-						kubeletapis.LabelZoneFailureDomain: "zone1",
-					},
-				},
-			},
-		},
-		Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
-	}
-
-	nodeController, _ := newNodeControllerFromClient(nil, fakeNodeHandler, evictionTimeout,
-		testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, testNodeMonitorGracePeriod,
-		testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, true)
-	nodeController.now = func() metav1.Time { return fakeNow }
-	nodeController.recorder = testutil.NewFakeRecorder()
-
-	deprecatedTaint := &v1.Taint{
-		Key:    algorithm.DeprecatedTaintNodeNotReady,
-		Effect: v1.TaintEffectNoExecute,
-	}
-	nodeNotReadyTaint := &v1.Taint{
-		Key:    algorithm.TaintNodeNotReady,
-		Effect: v1.TaintEffectNoExecute,
-	}
-
-	tests := []struct {
-		Name           string
-		Node           *v1.Node
-		ExpectedTaints []*v1.Taint
-	}{
-		{
-			Name: "Node with deprecated taint key",
-			Node: &v1.Node{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:              "node0",
-					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
-					Labels: map[string]string{
-						kubeletapis.LabelZoneRegion:        "region1",
-						kubeletapis.LabelZoneFailureDomain: "zone1",
-					},
-				},
-				Spec: v1.NodeSpec{
-					Taints: []v1.Taint{
-						*deprecatedTaint,
-					},
-				},
-			},
-			ExpectedTaints: []*v1.Taint{nodeNotReadyTaint},
-		},
-		{
-			Name: "Node with not-ready taint key",
-			Node: &v1.Node{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:              "node0",
-					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
-					Labels: map[string]string{
-						kubeletapis.LabelZoneRegion:        "region1",
-						kubeletapis.LabelZoneFailureDomain: "zone1",
-					},
-				},
-				Spec: v1.NodeSpec{
-					Taints: []v1.Taint{
-						*nodeNotReadyTaint,
-					},
-				},
-			},
-			ExpectedTaints: []*v1.Taint{nodeNotReadyTaint},
-		},
-	}
-
-	for _, test := range tests {
-		fakeNodeHandler.Update(test.Node)
-		if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		nodeController.doFixDeprecatedTaintKeyPass(test.Node)
-		if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		node0, err := nodeController.nodeLister.Get("node0")
-		if err != nil {
-			t.Errorf("Can't get current node0...")
-			return
-		}
-		if len(node0.Spec.Taints) != len(test.ExpectedTaints) {
-			t.Errorf("%s: Unexpected number of taints: expected %d, got %d",
-				test.Name, len(test.ExpectedTaints), len(node0.Spec.Taints))
-		}
-		for _, taint := range test.ExpectedTaints {
-			if !taintutils.TaintExists(node0.Spec.Taints, taint) {
-				t.Errorf("%s: Can't find taint %v in %v", test.Name, taint, node0.Spec.Taints)
-			}
-		}
-	}
-}
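What the test expects from `doFixDeprecatedTaintKeyPass` amounts to re-keying: a taint carrying the deprecated NotReady key is replaced with one carrying the current key, and a node already using the current key is left untouched. An illustrative sketch follows; it mutates the node in place rather than issuing the API update the real pass would, and the literal key strings are an assumption about what `algorithm.DeprecatedTaintNodeNotReady` and `algorithm.TaintNodeNotReady` held at the time:

```go
package sketch

import (
	v1 "k8s.io/api/core/v1"
)

const (
	deprecatedNotReadyKey = "node.alpha.kubernetes.io/notReady" // assumed value of algorithm.DeprecatedTaintNodeNotReady
	notReadyKey           = "node.kubernetes.io/not-ready"      // assumed value of algorithm.TaintNodeNotReady
)

// fixDeprecatedTaintKeys rewrites deprecated NotReady taint keys to the
// current key and reports whether anything changed, so a caller knows
// whether the node would need to be persisted.
func fixDeprecatedTaintKeys(node *v1.Node) bool {
	changed := false
	for i := range node.Spec.Taints {
		if node.Spec.Taints[i].Key == deprecatedNotReadyKey {
			node.Spec.Taints[i].Key = notReadyKey
			changed = true
		}
	}
	return changed
}
```

On the first test node this rewrites the single taint's key (effect preserved); on the second it changes nothing, matching both `ExpectedTaints` entries.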