From 772bf8e14d9046371528c52d837fe42ecbe2b917 Mon Sep 17 00:00:00 2001
From: Random-Liu
Date: Wed, 2 Nov 2016 18:23:57 -0700
Subject: [PATCH] Populate NetworkReady Status.

---
 pkg/kubelet/dockershim/docker_service.go      |  8 +++-
 pkg/kubelet/dockershim/docker_service_test.go | 12 ++++++
 pkg/kubelet/kubelet.go                        | 18 ++++++---
 pkg/kubelet/kubelet_network.go                | 17 ++------
 pkg/kubelet/kubelet_node_status_test.go       | 39 ++++++++++++-------
 5 files changed, 59 insertions(+), 35 deletions(-)

diff --git a/pkg/kubelet/dockershim/docker_service.go b/pkg/kubelet/dockershim/docker_service.go
index 6e61573fb6d..5ffe8829312 100644
--- a/pkg/kubelet/dockershim/docker_service.go
+++ b/pkg/kubelet/dockershim/docker_service.go
@@ -237,11 +237,15 @@ func (ds *dockerService) Status() (*runtimeApi.RuntimeStatus, error) {
 		Status: proto.Bool(true),
 	}
 	conditions := []*runtimeApi.RuntimeCondition{runtimeReady, networkReady}
-	_, err := ds.client.Version()
-	if err != nil {
+	if _, err := ds.client.Version(); err != nil {
 		runtimeReady.Status = proto.Bool(false)
 		runtimeReady.Reason = proto.String("DockerDaemonNotReady")
 		runtimeReady.Message = proto.String(fmt.Sprintf("docker: failed to get docker version: %v", err))
 	}
+	if err := ds.networkPlugin.Status(); err != nil {
+		networkReady.Status = proto.Bool(false)
+		networkReady.Reason = proto.String("NetworkPluginNotReady")
+		networkReady.Message = proto.String(fmt.Sprintf("docker: network plugin is not ready: %v", err))
+	}
 	return &runtimeApi.RuntimeStatus{Conditions: conditions}, nil
 }
diff --git a/pkg/kubelet/dockershim/docker_service_test.go b/pkg/kubelet/dockershim/docker_service_test.go
index ac77b2d93eb..c3a773c1376 100644
--- a/pkg/kubelet/dockershim/docker_service_test.go
+++ b/pkg/kubelet/dockershim/docker_service_test.go
@@ -76,4 +76,16 @@ func TestStatus(t *testing.T) {
 		runtimeApi.RuntimeReady: false,
 		runtimeApi.NetworkReady: true,
 	}, status)
+
+	// Should not report ready status if the network plugin returns an error.
+	mockPlugin := newTestNetworkPlugin(t)
+	ds.networkPlugin = mockPlugin
+	defer mockPlugin.Finish()
+	mockPlugin.EXPECT().Status().Return(errors.New("network error"))
+	status, err = ds.Status()
+	assert.NoError(t, err)
+	assertStatus(map[string]bool{
+		runtimeApi.RuntimeReady: true,
+		runtimeApi.NetworkReady: false,
+	}, status)
 }
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index fa8a612db08..48f8edabef4 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -1951,21 +1951,27 @@ func (kl *Kubelet) updateRuntimeUp() {
 	}
 	// Only check specific conditions when runtime integration type is cri,
 	// because the old integration doesn't populate any runtime condition.
-	// TODO(random-liu): Add runtime error in runtimeState, and update it
-	// when when runtime is not ready, so that the information in RuntimeReady
-	// condition will be propagated to NodeReady condition.
-	// TODO(random-liu): Update network state according to NetworkReady runtime
-	// condition once it is implemented in dockershim.
 	if kl.kubeletConfiguration.ExperimentalRuntimeIntegrationType == "cri" {
 		if s == nil {
 			glog.Errorf("Container runtime status is nil")
 			return
 		}
-		runtimeReady := s.GetRuntimeCondition(kubecontainer.RuntimeReady)
 		// Periodically log the whole runtime status for debugging.
 		// TODO(random-liu): Consider to send node event when optional
 		// condition is unmet.
 		glog.V(4).Infof("Container runtime status: %v", s)
+		networkReady := s.GetRuntimeCondition(kubecontainer.NetworkReady)
+		if networkReady == nil || !networkReady.Status {
+			glog.Errorf("Container runtime network not ready: %v", networkReady)
+			kl.runtimeState.setNetworkState(fmt.Errorf("runtime network not ready: %v", networkReady))
+		} else {
+			// Set nil if the container runtime network is ready.
+			kl.runtimeState.setNetworkState(nil)
+		}
+		// TODO(random-liu): Add runtime error in runtimeState, and update it
+		// when runtime is not ready, so that the information in RuntimeReady
+		// condition will be propagated to NodeReady condition.
+		runtimeReady := s.GetRuntimeCondition(kubecontainer.RuntimeReady)
 		// If RuntimeReady is not set or is false, report an error.
 		if runtimeReady == nil || !runtimeReady.Status {
 			glog.Errorf("Container runtime not ready: %v", runtimeReady)
diff --git a/pkg/kubelet/kubelet_network.go b/pkg/kubelet/kubelet_network.go
index af6cb86940a..8267560841b 100644
--- a/pkg/kubelet/kubelet_network.go
+++ b/pkg/kubelet/kubelet_network.go
@@ -191,19 +191,11 @@ func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error {
 
 // syncNetworkStatus updates the network state
 func (kl *Kubelet) syncNetworkStatus() {
-	// TODO(#35701): cri shim handles network plugin but we currently
-	// don't have a cri status hook, so network plugin status isn't
-	// reported if --experimental-runtime-integration=cri. This isn't
-	// too bad, because kubenet is the only network plugin that
-	// implements status(), and it just checks for plugin binaries
-	// on the filesystem.
+	// For cri integration, network state will be updated in updateRuntimeUp,
+	// where we get the runtime network status through cri directly.
+	// TODO: Remove this once we completely switch to cri integration.
 	if kl.networkPlugin != nil {
 		kl.runtimeState.setNetworkState(kl.networkPlugin.Status())
-	} else if kl.runtimeState.podCIDR() != "" {
-		// Don't mark the node ready till we've successfully executed
-		// the first UpdatePodCIDR call through cri. See comment above
-		// setPodCIDR call.
-		kl.runtimeState.setNetworkState(nil)
 	}
 }
 
@@ -231,9 +223,6 @@ func (kl *Kubelet) updatePodCIDR(cidr string) {
 		return
 	}
 
-	// We need to be careful about setting podCIDR. Till #35839 lands we're
-	// using it to indicate network plugin status for cri shims. See comment
-	// in syncNetworkStatus.
 	glog.Infof("Setting Pod CIDR: %v -> %v", podCIDR, cidr)
 	kl.runtimeState.setPodCIDR(cidr)
 }
diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go
index c32f0109b51..64caeb5c62c 100644
--- a/pkg/kubelet/kubelet_node_status_test.go
+++ b/pkg/kubelet/kubelet_node_status_test.go
@@ -794,7 +794,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 		},
 	}
-	checkNodeStatus := func(status api.ConditionStatus, reason, message string) {
+	checkNodeStatus := func(status api.ConditionStatus, reason string) {
 		kubeClient.ClearActions()
 		if err := kubelet.updateNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -827,11 +827,14 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 		if updatedNode.Status.Conditions[lastIndex].Type != api.NodeReady {
NodeReady should be last.") } + if updatedNode.Status.Conditions[lastIndex].Message == "" { + t.Errorf("unexpected empty condition message") + } + updatedNode.Status.Conditions[lastIndex].Message = "" expectedNode.Status.Conditions[lastIndex] = api.NodeCondition{ Type: api.NodeReady, Status: status, Reason: reason, - Message: message, LastHeartbeatTime: unversioned.Time{}, LastTransitionTime: unversioned.Time{}, } @@ -840,23 +843,21 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { } } - readyMessage := "kubelet is posting ready status" - downMessage := "container runtime is down" - + // TODO(random-liu): Refactor the unit test to be table driven test. // Should report kubelet not ready if the runtime check is out of date clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime)) kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage) + checkNodeStatus(api.ConditionFalse, "KubeletNotReady") // Should report kubelet ready if the runtime check is updated clock.SetTime(time.Now()) kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionTrue, "KubeletReady", readyMessage) + checkNodeStatus(api.ConditionTrue, "KubeletReady") // Should report kubelet not ready if the runtime check is out of date clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime)) kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage) + checkNodeStatus(api.ConditionFalse, "KubeletNotReady") // Should report kubelet not ready if the runtime check failed fakeRuntime := testKubelet.fakeRuntime @@ -864,7 +865,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error") clock.SetTime(time.Now()) kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage) + checkNodeStatus(api.ConditionFalse, "KubeletNotReady") // Test cri integration. kubelet.kubeletConfiguration.ExperimentalRuntimeIntegrationType = "cri" @@ -873,30 +874,42 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { // Should report node not ready if runtime status is nil. fakeRuntime.RuntimeStatus = nil kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage) + checkNodeStatus(api.ConditionFalse, "KubeletNotReady") // Should report node not ready if runtime status is empty. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{} kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage) + checkNodeStatus(api.ConditionFalse, "KubeletNotReady") // Should report node not ready if RuntimeReady is false. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{ Conditions: []kubecontainer.RuntimeCondition{ {Type: kubecontainer.RuntimeReady, Status: false}, + {Type: kubecontainer.NetworkReady, Status: true}, }, } kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage) + checkNodeStatus(api.ConditionFalse, "KubeletNotReady") // Should report node ready if RuntimeReady is true. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{ Conditions: []kubecontainer.RuntimeCondition{ {Type: kubecontainer.RuntimeReady, Status: true}, + {Type: kubecontainer.NetworkReady, Status: true}, }, } kubelet.updateRuntimeUp() - checkNodeStatus(api.ConditionTrue, "KubeletReady", readyMessage) + checkNodeStatus(api.ConditionTrue, "KubeletReady") + + // Should report node not ready if NetworkReady is false. 
+	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
+		Conditions: []kubecontainer.RuntimeCondition{
+			{Type: kubecontainer.RuntimeReady, Status: true},
+			{Type: kubecontainer.NetworkReady, Status: false},
+		},
+	}
+	kubelet.updateRuntimeUp()
+	checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
 }
 
 func TestUpdateNodeStatusError(t *testing.T) {
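
Reviewer note (not part of the patch): the sketch below is a minimal, self-contained illustration of the condition-population pattern that dockerService.Status() follows after this change: start with RuntimeReady and NetworkReady both true, then flip each condition independently when its probe fails. The RuntimeCondition struct, the status() helper, and the probe functions here are simplified stand-ins invented for illustration; they are not the dockershim or CRI API.

package main

import (
	"errors"
	"fmt"
)

// RuntimeCondition is a simplified stand-in for the CRI runtime condition type.
type RuntimeCondition struct {
	Type    string
	Status  bool
	Reason  string
	Message string
}

// status mirrors the pattern in the patched Status(): both conditions start
// out true, and each probe failure only downgrades its own condition.
func status(versionProbe, networkProbe func() error) []*RuntimeCondition {
	runtimeReady := &RuntimeCondition{Type: "RuntimeReady", Status: true}
	networkReady := &RuntimeCondition{Type: "NetworkReady", Status: true}
	if err := versionProbe(); err != nil {
		runtimeReady.Status = false
		runtimeReady.Reason = "DockerDaemonNotReady"
		runtimeReady.Message = fmt.Sprintf("failed to get docker version: %v", err)
	}
	if err := networkProbe(); err != nil {
		networkReady.Status = false
		networkReady.Reason = "NetworkPluginNotReady"
		networkReady.Message = fmt.Sprintf("network plugin is not ready: %v", err)
	}
	return []*RuntimeCondition{runtimeReady, networkReady}
}

func main() {
	ok := func() error { return nil }
	bad := func() error { return errors.New("network error") }
	// A healthy daemon with a failing network plugin: RuntimeReady stays true,
	// NetworkReady goes false — the same shape the new TestStatus case asserts.
	for _, c := range status(ok, bad) {
		fmt.Printf("%s=%v reason=%q message=%q\n", c.Type, c.Status, c.Reason, c.Message)
	}
}

On the kubelet side, updateRuntimeUp consumes the NetworkReady condition and feeds it into runtimeState.setNetworkState, which is what lets a not-ready network surface as NodeReady=false instead of being inferred from podCIDR, as the kubelet_network.go hunks above remove.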