Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-14 14:23:37 +00:00
Stop appending AppArmor status to node ready condition
commit 24537a9131 (parent 207a965b3f)
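In short: nodestatus.ReadyCondition used to append an "AppArmor enabled" suffix to the NodeReady condition message whenever the kubelet's AppArmor host validator passed. This commit removes that behavior, drops the appArmorValidateHostFunc parameter from ReadyCondition, and updates the call site, tests, and fixtures accordingly, as the hunks below show. The following is a minimal, self-contained Go sketch of the logic being deleted; the function and variable names are illustrative, not the kubelet's actual identifiers.

package main

import (
	"errors"
	"fmt"
)

// appendAppArmorStatus mimics the block removed from ReadyCondition below:
// when the node is Ready and the AppArmor host validator reports no error,
// the Ready message gains an "AppArmor enabled" suffix. After this commit
// the message is left untouched in all cases.
func appendAppArmorStatus(readyMsg string, ready bool, validateHost func() error) string {
	if validateHost != nil && ready {
		if err := validateHost(); err == nil {
			return fmt.Sprintf("%s. AppArmor enabled", readyMsg)
		}
	}
	return readyMsg
}

func main() {
	msg := "kubelet is posting ready status"

	// Old behavior on a host whose AppArmor validation passes.
	fmt.Println(appendAppArmorStatus(msg, true, func() error { return nil }))
	// -> kubelet is posting ready status. AppArmor enabled

	// Old behavior when validation fails (and the new behavior everywhere).
	fmt.Println(appendAppArmorStatus(msg, true, func() error { return errors.New("AppArmor not supported") }))
	// -> kubelet is posting ready status
}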
@@ -732,10 +732,6 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(context.Context, *v1.Node) er
 	if kl.cloud != nil {
 		nodeAddressesFunc = kl.cloudResourceSyncManager.NodeAddresses
 	}
-	var validateHostFunc func() error
-	if kl.appArmorValidator != nil {
-		validateHostFunc = kl.appArmorValidator.ValidateHost
-	}
 	var setters []func(ctx context.Context, n *v1.Node) error
 	setters = append(setters,
 		nodestatus.NodeAddress(kl.nodeIPs, kl.nodeIPValidator, kl.hostname, kl.hostnameOverridden, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc),
@@ -754,7 +750,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(context.Context, *v1.Node) er
 		nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent),
 		nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent),
 		nodestatus.ReadyCondition(kl.clock.Now, kl.runtimeState.runtimeErrors, kl.runtimeState.networkErrors, kl.runtimeState.storageErrors,
-			validateHostFunc, kl.containerManager.Status, kl.shutdownManager.ShutdownStatus, kl.recordNodeStatusEvent, kl.supportLocalStorageCapacityIsolation()),
+			kl.containerManager.Status, kl.shutdownManager.ShutdownStatus, kl.recordNodeStatusEvent, kl.supportLocalStorageCapacityIsolation()),
 		nodestatus.VolumesInUse(kl.volumeManager.ReconcilerStatesHasBeenSynced, kl.volumeManager.GetVolumesInUse),
 		// TODO(mtaufen): I decided not to move this setter for now, since all it does is send an event
 		// and record state back to the Kubelet runtime object. In the future, I'd like to isolate
@@ -486,7 +486,6 @@ func ReadyCondition(
 	runtimeErrorsFunc func() error, // typically Kubelet.runtimeState.runtimeErrors
 	networkErrorsFunc func() error, // typically Kubelet.runtimeState.networkErrors
 	storageErrorsFunc func() error, // typically Kubelet.runtimeState.storageErrors
-	appArmorValidateHostFunc func() error, // typically Kubelet.appArmorValidator.ValidateHost, might be nil depending on whether there was an appArmorValidator
 	cmStatusFunc func() cm.Status, // typically Kubelet.containerManager.Status
 	nodeShutdownManagerErrorsFunc func() error, // typically kubelet.shutdownManager.errors.
 	recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
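For reference, the setter's post-change shape, pieced together from this hunk and the call sites elsewhere in the diff, looks roughly like the sketch below. The parameter names follow the hunk where they are shown; nowFunc's exact type, the name of the trailing local-storage-capacity-isolation flag, and the placeholder types are assumptions made only so the sketch stands on its own.

package nodestatussketch

import (
	"context"
	"time"
)

// Opaque stand-ins so this sketch compiles by itself; the real code uses
// k8s.io/api/core/v1.Node and the kubelet's cm.Status type.
type Node struct{}
type ContainerManagerStatus struct{}

// ReadyConditionSketch approximates the trimmed constructor: the same
// parameters as before, minus appArmorValidateHostFunc.
func ReadyConditionSketch(
	nowFunc func() time.Time,
	runtimeErrorsFunc func() error,
	networkErrorsFunc func() error,
	storageErrorsFunc func() error,
	cmStatusFunc func() ContainerManagerStatus,
	nodeShutdownManagerErrorsFunc func() error,
	recordEventFunc func(eventType, event string),
	localStorageCapacityIsolation bool,
) func(ctx context.Context, node *Node) error {
	return func(ctx context.Context, node *Node) error {
		// The real setter computes the Ready condition here; no AppArmor
		// message handling remains after this commit.
		return nil
	}
}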
@@ -527,13 +526,6 @@ func ReadyCondition(
 			LastHeartbeatTime: currentTime,
 		}
 	}
-	// Append AppArmor status if it's enabled.
-	// TODO(tallclair): This is a temporary message until node feature reporting is added.
-	if appArmorValidateHostFunc != nil && newNodeReadyCondition.Status == v1.ConditionTrue {
-		if err := appArmorValidateHostFunc(); err == nil {
-			newNodeReadyCondition.Message = fmt.Sprintf("%s. AppArmor enabled", newNodeReadyCondition.Message)
-		}
-	}
 
 	// Record any soft requirements that were not met in the container manager.
 	status := cmStatusFunc()
@@ -1509,7 +1509,6 @@ func TestReadyCondition(t *testing.T) {
 		runtimeErrors             error
 		networkErrors             error
 		storageErrors             error
-		appArmorValidateHostFunc  func() error
 		cmStatus                  cm.Status
 		nodeShutdownManagerErrors error
 		expectConditions          []v1.NodeCondition
@@ -1524,19 +1523,6 @@ func TestReadyCondition(t *testing.T) {
 			// the reason for this is unclear, so we may want to actually send an event, and change these test cases
 			// to ensure an event is sent.
 		},
-		{
-			desc:                     "new, ready: apparmor validator passed",
-			node:                     withCapacity.DeepCopy(),
-			appArmorValidateHostFunc: func() error { return nil },
-			expectConditions:         []v1.NodeCondition{*makeReadyCondition(true, "kubelet is posting ready status. AppArmor enabled", now, now)},
-		},
-		{
-			desc:                     "new, ready: apparmor validator failed",
-			node:                     withCapacity.DeepCopy(),
-			appArmorValidateHostFunc: func() error { return fmt.Errorf("foo") },
-			// absence of an additional message is understood to mean that AppArmor is disabled
-			expectConditions:         []v1.NodeCondition{*makeReadyCondition(true, "kubelet is posting ready status", now, now)},
-		},
 		{
 			desc: "new, ready: soft requirement warning",
 			node: withCapacity.DeepCopy(),
@@ -1655,7 +1641,7 @@ func TestReadyCondition(t *testing.T) {
 				})
 			}
 			// construct setter
-			setter := ReadyCondition(nowFunc, runtimeErrorsFunc, networkErrorsFunc, storageErrorsFunc, tc.appArmorValidateHostFunc, cmStatusFunc, nodeShutdownErrorsFunc, recordEventFunc, !tc.disableLocalStorageCapacityIsolation)
+			setter := ReadyCondition(nowFunc, runtimeErrorsFunc, networkErrorsFunc, storageErrorsFunc, cmStatusFunc, nodeShutdownErrorsFunc, recordEventFunc, !tc.disableLocalStorageCapacityIsolation)
 			// call setter on node
 			if err := setter(ctx, tc.node); err != nil {
 				t.Fatalf("unexpected error: %v", err)
@@ -120,7 +120,7 @@ status:
     type: PIDPressure
   - lastHeartbeatTime: "2019-09-20T19:32:50Z"
     lastTransitionTime: "2019-07-09T16:17:49Z"
-    message: kubelet is posting ready status. AppArmor enabled
+    message: kubelet is posting ready status
     reason: KubeletReady
     status: "True"
     type: Ready
@@ -120,7 +120,7 @@ status:
     type: PIDPressure
   - lastHeartbeatTime: "2019-09-20T19:32:50Z"
     lastTransitionTime: "2019-07-09T16:17:49Z"
-    message: kubelet is posting ready status. AppArmor enabled
+    message: kubelet is posting ready status
     reason: KubeletReady
     status: "True"
     type: Ready
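The two fixture hunks above (their file paths are not shown in this view) capture the user-visible effect: the node's Ready condition message no longer mentions AppArmor. Below is a hedged client-go sketch of how a consumer would read that message; the kubeconfig path and node name are placeholders, not values from this commit.

package main

import (
	"context"
	"fmt"
	"log"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig path and node name; adjust for a real cluster.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	node, err := clientset.CoreV1().Nodes().Get(context.TODO(), "example-node", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}

	// Find the Ready condition and print its message. After this commit the
	// kubelet reports "kubelet is posting ready status" with no AppArmor
	// suffix, so the message can no longer be used to detect AppArmor support.
	for _, cond := range node.Status.Conditions {
		if cond.Type == v1.NodeReady {
			fmt.Printf("Ready=%s message=%q\n", cond.Status, cond.Message)
		}
	}
}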