diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go
index 7723abc64b2..46bd7c3579f 100644
--- a/pkg/kubelet/kubelet_node_status.go
+++ b/pkg/kubelet/kubelet_node_status.go
@@ -732,10 +732,6 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(context.Context, *v1.Node) er
 	if kl.cloud != nil {
 		nodeAddressesFunc = kl.cloudResourceSyncManager.NodeAddresses
 	}
-	var validateHostFunc func() error
-	if kl.appArmorValidator != nil {
-		validateHostFunc = kl.appArmorValidator.ValidateHost
-	}
 	var setters []func(ctx context.Context, n *v1.Node) error
 	setters = append(setters,
 		nodestatus.NodeAddress(kl.nodeIPs, kl.nodeIPValidator, kl.hostname, kl.hostnameOverridden, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc),
@@ -754,7 +750,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(context.Context, *v1.Node) er
 		nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent),
 		nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent),
 		nodestatus.ReadyCondition(kl.clock.Now, kl.runtimeState.runtimeErrors, kl.runtimeState.networkErrors, kl.runtimeState.storageErrors,
-			validateHostFunc, kl.containerManager.Status, kl.shutdownManager.ShutdownStatus, kl.recordNodeStatusEvent, kl.supportLocalStorageCapacityIsolation()),
+			kl.containerManager.Status, kl.shutdownManager.ShutdownStatus, kl.recordNodeStatusEvent, kl.supportLocalStorageCapacityIsolation()),
 		nodestatus.VolumesInUse(kl.volumeManager.ReconcilerStatesHasBeenSynced, kl.volumeManager.GetVolumesInUse),
 		// TODO(mtaufen): I decided not to move this setter for now, since all it does is send an event
 		// and record state back to the Kubelet runtime object. In the future, I'd like to isolate
diff --git a/pkg/kubelet/nodestatus/setters.go b/pkg/kubelet/nodestatus/setters.go
index b539cbda86b..58a5eb3c1a8 100644
--- a/pkg/kubelet/nodestatus/setters.go
+++ b/pkg/kubelet/nodestatus/setters.go
@@ -486,7 +486,6 @@ func ReadyCondition(
 	runtimeErrorsFunc func() error, // typically Kubelet.runtimeState.runtimeErrors
 	networkErrorsFunc func() error, // typically Kubelet.runtimeState.networkErrors
 	storageErrorsFunc func() error, // typically Kubelet.runtimeState.storageErrors
-	appArmorValidateHostFunc func() error, // typically Kubelet.appArmorValidator.ValidateHost, might be nil depending on whether there was an appArmorValidator
 	cmStatusFunc func() cm.Status, // typically Kubelet.containerManager.Status
 	nodeShutdownManagerErrorsFunc func() error, // typically kubelet.shutdownManager.errors.
 	recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
@@ -527,13 +526,6 @@
 				LastHeartbeatTime:  currentTime,
 			}
 		}
-		// Append AppArmor status if it's enabled.
-		// TODO(tallclair): This is a temporary message until node feature reporting is added.
-		if appArmorValidateHostFunc != nil && newNodeReadyCondition.Status == v1.ConditionTrue {
-			if err := appArmorValidateHostFunc(); err == nil {
-				newNodeReadyCondition.Message = fmt.Sprintf("%s. AppArmor enabled", newNodeReadyCondition.Message)
-			}
-		}
 		// Record any soft requirements that were not met in the container manager.
 		status := cmStatusFunc()
 		if status.SoftRequirements != nil {
diff --git a/pkg/kubelet/nodestatus/setters_test.go b/pkg/kubelet/nodestatus/setters_test.go
index 61a42feaa96..d3874bfee9c 100644
--- a/pkg/kubelet/nodestatus/setters_test.go
+++ b/pkg/kubelet/nodestatus/setters_test.go
@@ -1509,7 +1509,6 @@ func TestReadyCondition(t *testing.T) {
 		runtimeErrors             error
 		networkErrors             error
 		storageErrors             error
-		appArmorValidateHostFunc  func() error
 		cmStatus                  cm.Status
 		nodeShutdownManagerErrors error
 		expectConditions          []v1.NodeCondition
@@ -1524,19 +1523,6 @@
 			// the reason for this is unclear, so we may want to actually send an event, and change these test cases
 			// to ensure an event is sent.
 		},
-		{
-			desc:                     "new, ready: apparmor validator passed",
-			node:                     withCapacity.DeepCopy(),
-			appArmorValidateHostFunc: func() error { return nil },
-			expectConditions:         []v1.NodeCondition{*makeReadyCondition(true, "kubelet is posting ready status. AppArmor enabled", now, now)},
-		},
-		{
-			desc:                     "new, ready: apparmor validator failed",
-			node:                     withCapacity.DeepCopy(),
-			appArmorValidateHostFunc: func() error { return fmt.Errorf("foo") },
-			// absence of an additional message is understood to mean that AppArmor is disabled
-			expectConditions: []v1.NodeCondition{*makeReadyCondition(true, "kubelet is posting ready status", now, now)},
-		},
 		{
 			desc: "new, ready: soft requirement warning",
 			node: withCapacity.DeepCopy(),
@@ -1655,7 +1641,7 @@
 			})
 		}
 		// construct setter
-		setter := ReadyCondition(nowFunc, runtimeErrorsFunc, networkErrorsFunc, storageErrorsFunc, tc.appArmorValidateHostFunc, cmStatusFunc, nodeShutdownErrorsFunc, recordEventFunc, !tc.disableLocalStorageCapacityIsolation)
+		setter := ReadyCondition(nowFunc, runtimeErrorsFunc, networkErrorsFunc, storageErrorsFunc, cmStatusFunc, nodeShutdownErrorsFunc, recordEventFunc, !tc.disableLocalStorageCapacityIsolation)
 		// call setter on node
 		if err := setter(ctx, tc.node); err != nil {
 			t.Fatalf("unexpected error: %v", err)
diff --git a/staging/src/k8s.io/apimachinery/pkg/util/managedfields/node.yaml b/staging/src/k8s.io/apimachinery/pkg/util/managedfields/node.yaml
index 66e849f23fb..a7f2d54fdf7 100644
--- a/staging/src/k8s.io/apimachinery/pkg/util/managedfields/node.yaml
+++ b/staging/src/k8s.io/apimachinery/pkg/util/managedfields/node.yaml
@@ -120,7 +120,7 @@ status:
     type: PIDPressure
   - lastHeartbeatTime: "2019-09-20T19:32:50Z"
     lastTransitionTime: "2019-07-09T16:17:49Z"
-    message: kubelet is posting ready status. AppArmor enabled
+    message: kubelet is posting ready status
     reason: KubeletReady
     status: "True"
     type: Ready
diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml
index 66e849f23fb..a7f2d54fdf7 100644
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml
@@ -120,7 +120,7 @@ status:
     type: PIDPressure
   - lastHeartbeatTime: "2019-09-20T19:32:50Z"
     lastTransitionTime: "2019-07-09T16:17:49Z"
-    message: kubelet is posting ready status. AppArmor enabled
+    message: kubelet is posting ready status
     reason: KubeletReady
     status: "True"
     type: Ready