Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-31 15:25:57 +00:00)
Merge pull request #52063 from mtaufen/dkcfg-e2enode
Automatic merge from submit-queue (batch tested with PRs 52047, 52063, 51528)

Improve dynamic kubelet config e2e node test and fix bugs

Rather than just changing the config once to see if dynamic kubelet config at-least-sort-of-works, this extends the test to check that the Kubelet reports the expected Node condition and the expected configuration values after several possible state transitions.

Additionally, this adds a stress test that changes the configuration 100 times. It is possible for resource leaks across Kubelet restarts to eventually prevent the Kubelet from restarting. For example, this test revealed that cAdvisor's leaking journalctl processes (see: https://github.com/google/cadvisor/issues/1725) could break dynamic kubelet config. This test will help reveal these problems earlier.

This commit also makes better use of const strings and fixes a few bugs that the new testing turned up.

Related issue: #50217

I had been sitting on this until the cAdvisor fix merged in #51751, as these tests fail without that fix.

**Release note**:
```release-note
NONE
```
This commit is contained in:
commit d6df4a5127
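The heart of the new test (shown in full further down) is a small graph walk: each configuration is a vertex, and the test exercises the edge between a designated first state and every other state in both directions, verifying the Kubelet's reported status each time. A minimal, self-contained sketch of that pattern, with a hypothetical `setAndTest` callback standing in for the test's `setAndTestKubeletConfigState`:

```go
package main

import "fmt"

// configState mirrors the struct used by the new test: a description plus the
// expected outcome of pointing the Kubelet at a given config source.
type configState struct {
	desc string
}

// testBothDirections visits the edge between `first` and each other state in
// both directions, so every transition is covered from both sides.
func testBothDirections(first *configState, states []configState, setAndTest func(*configState)) {
	setAndTest(first) // check that the starting state is set up properly
	for i := range states {
		setAndTest(&states[i]) // edge: first -> states[i]
		setAndTest(first)      // edge: states[i] -> first
	}
}

func main() {
	states := []configState{{"correct"}, {"fail-parse"}, {"fail-validate"}}
	testBothDirections(&states[0], states[1:], func(s *configState) {
		fmt.Println("set config source to state:", s.desc)
	})
}
```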
@@ -3275,6 +3275,8 @@ const (
	NodeDiskPressure NodeConditionType = "DiskPressure"
	// NodeNetworkUnavailable means that network for the node is not correctly configured.
	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
+	// NodeConfigOK indicates whether the kubelet is correctly configured
+	NodeConfigOK NodeConditionType = "ConfigOK"
 )

 type NodeCondition struct {
@@ -41,6 +41,7 @@ go_library(
	"//pkg/api:go_default_library",
	"//pkg/kubelet/apis/kubeletconfig:go_default_library",
	"//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library",
+	"//pkg/kubelet/kubeletconfig/status:go_default_library",
	"//pkg/kubelet/kubeletconfig/util/codec:go_default_library",
	"//pkg/kubelet/kubeletconfig/util/log:go_default_library",
	"//vendor/k8s.io/api/core/v1:go_default_library",
@@ -25,6 +25,7 @@ import (
	"k8s.io/apimachinery/pkg/runtime"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
	utilcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec"
	utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log"
 )
@@ -50,8 +51,7 @@ type RemoteConfigSource interface {
 func NewRemoteConfigSource(source *apiv1.NodeConfigSource) (RemoteConfigSource, string, error) {
	// exactly one subfield of the config source must be non-nil, today ConfigMapRef is the only reference
	if source.ConfigMapRef == nil {
-		reason := "invalid NodeConfigSource, exactly one subfield must be non-nil, but all were nil"
-		return nil, reason, fmt.Errorf("%s, NodeConfigSource was: %#v", reason, source)
+		return nil, status.FailSyncReasonAllNilSubfields, fmt.Errorf("%s, NodeConfigSource was: %#v", status.FailSyncReasonAllNilSubfields, source)
	}

	// validate the NodeConfigSource:
@@ -61,8 +61,7 @@ func NewRemoteConfigSource(source *apiv1.NodeConfigSource) (RemoteConfigSource,

	// name, namespace, and UID must all be non-empty for ConfigMapRef
	if ref.Name == "" || ref.Namespace == "" || string(ref.UID) == "" {
-		reason := "invalid ObjectReference, all of UID, Name, and Namespace must be specified"
-		return nil, reason, fmt.Errorf("%s, ObjectReference was: %#v", reason, ref)
+		return nil, status.FailSyncReasonPartialObjectReference, fmt.Errorf("%s, ObjectReference was: %#v", status.FailSyncReasonPartialObjectReference, ref)
	}

	return &remoteConfigMap{source}, "", nil
@@ -120,13 +119,13 @@ func (r *remoteConfigMap) Download(client clientset.Interface) (Checkpoint, stri
	// get the ConfigMap via namespace/name, there doesn't seem to be a way to get it by UID
	cm, err := client.CoreV1().ConfigMaps(r.source.ConfigMapRef.Namespace).Get(r.source.ConfigMapRef.Name, metav1.GetOptions{})
	if err != nil {
-		reason = fmt.Sprintf("could not download ConfigMap with name %q from namespace %q", r.source.ConfigMapRef.Name, r.source.ConfigMapRef.Namespace)
+		reason = fmt.Sprintf(status.FailSyncReasonDownloadFmt, r.source.ConfigMapRef.Name, r.source.ConfigMapRef.Namespace)
		return nil, reason, fmt.Errorf("%s, error: %v", reason, err)
	}

	// ensure that UID matches the UID on the reference, the ObjectReference must be unambiguous
	if r.source.ConfigMapRef.UID != cm.UID {
-		reason = fmt.Sprintf("invalid ObjectReference, UID %q does not match UID of downloaded ConfigMap %q", r.source.ConfigMapRef.UID, cm.UID)
+		reason = fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, r.source.ConfigMapRef.UID, cm.UID)
		return nil, reason, fmt.Errorf(reason)
	}

@@ -116,7 +116,7 @@ func TestRemoteConfigMapDownload(t *testing.T) {
		// object doesn't exist
		{"object doesn't exist",
			&remoteConfigMap{&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "bogus", Namespace: "namespace", UID: "bogus"}}},
-			nil, "could not download ConfigMap"},
+			nil, "failed to download ConfigMap"},
		// UID of downloaded object doesn't match UID of referent found via namespace/name
		{"UID is incorrect for namespace/name",
			&remoteConfigMap{&apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{Name: "name", Namespace: "namespace", UID: "bogus"}}},
@@ -24,6 +24,7 @@ import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
+	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
	utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log"
 )

@@ -55,19 +56,18 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, nodeName stri

	node, err := latestNode(cc.informer.GetStore(), nodeName)
	if err != nil {
-		reason := "unable to read Node from internal object cache"
-		cc.configOK.SetFailedSyncCondition(reason)
-		syncerr = fmt.Errorf("%s, error: %v", reason, err)
+		cc.configOK.SetFailSyncCondition(status.FailSyncReasonInformer)
+		syncerr = fmt.Errorf("%s, error: %v", status.FailSyncReasonInformer, err)
		return
	}

	// check the Node and download any new config
	if updated, reason, err := cc.doSyncConfigSource(client, node.Spec.ConfigSource); err != nil {
-		cc.configOK.SetFailedSyncCondition(reason)
+		cc.configOK.SetFailSyncCondition(reason)
		syncerr = fmt.Errorf("%s, error: %v", reason, err)
		return
	} else if updated {
-		// TODO(mtaufen): Consider adding a "currently restarting" node condition for this case
+		// TODO(mtaufen): Consider adding a "currently restarting kubelet" ConfigOK message for this case
		utillog.Infof("config updated, Kubelet will restart to begin using new config")
		os.Exit(0)
	}
@@ -76,7 +76,7 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, nodeName stri
	// - there is no need to restart to update the current config
	// - there was no error trying to sync configuration
	// - if, previously, there was an error trying to sync configuration, we need to clear that error from the condition
-	cc.configOK.ClearFailedSyncCondition()
+	cc.configOK.ClearFailSyncCondition()
 }

 // doSyncConfigSource checkpoints and sets the store's current config to the new config or resets config,
@@ -115,7 +115,7 @@ func (cc *Controller) checkpointConfigSource(client clientset.Interface, source

	// if the checkpoint already exists, skip downloading
	if ok, err := cc.checkpointStore.Exists(uid); err != nil {
-		reason := fmt.Sprintf("unable to determine whether object with UID %q was already checkpointed", uid)
+		reason := fmt.Sprintf(status.FailSyncReasonCheckpointExistenceFmt, uid)
		return reason, fmt.Errorf("%s, error: %v", reason, err)
	} else if ok {
		utillog.Infof("checkpoint already exists for object with UID %q, skipping download", uid)
@@ -131,7 +131,7 @@ func (cc *Controller) checkpointConfigSource(client clientset.Interface, source
	// save
	err = cc.checkpointStore.Save(checkpoint)
	if err != nil {
-		reason := fmt.Sprintf("failed to save checkpoint for object with UID %q", checkpoint.UID())
+		reason := fmt.Sprintf(status.FailSyncReasonSaveCheckpointFmt, checkpoint.UID())
		return reason, fmt.Errorf("%s, error: %v", reason, err)
	}

@@ -143,11 +143,10 @@ func (cc *Controller) checkpointConfigSource(client clientset.Interface, source
 func (cc *Controller) setCurrentConfig(source checkpoint.RemoteConfigSource) (bool, string, error) {
	updated, err := cc.checkpointStore.SetCurrentUpdated(source)
	if err != nil {
-		str := "default"
-		if source != nil {
-			str = fmt.Sprintf("object with UID %q", source.UID())
+		if source == nil {
+			return false, status.FailSyncReasonSetCurrentDefault, err
		}
-		return false, fmt.Sprintf("failed to set current checkpoint to %s", str), err
+		return false, fmt.Sprintf(status.FailSyncReasonSetCurrentUIDFmt, source.UID()), err
	}
	return updated, "", nil
 }
@@ -157,7 +156,7 @@ func (cc *Controller) setCurrentConfig(source checkpoint.RemoteConfigSource) (bo
 func (cc *Controller) resetConfig() (bool, string, error) {
	updated, err := cc.checkpointStore.Reset()
	if err != nil {
-		return false, "failed to reset to using local (default or init) config", err
+		return false, status.FailSyncReasonReset, err
	}
	return updated, "", nil
 }

@@ -254,7 +254,8 @@ func (cc *Controller) initialize() error {
	return nil
 }

-// localConfig returns the initConfig if it is loaded, otherwise returns the defaultConfig
+// localConfig returns the initConfig if it is loaded, otherwise returns the defaultConfig.
+// It also sets the local configOK condition to match the returned config.
 func (cc *Controller) localConfig() *kubeletconfig.KubeletConfiguration {
	if cc.initConfig != nil {
		cc.configOK.Set(status.CurInitMessage, status.CurInitOKReason, apiv1.ConditionTrue)
@@ -264,14 +265,14 @@ func (cc *Controller) localConfig() *kubeletconfig.KubeletConfiguration {
	return cc.defaultConfig
 }

-// inTrial returns true if the time elapsed since the last modification of the current config exceeds `trialDur`, false otherwise
+// inTrial returns true if the time elapsed since the last modification of the current config does not exceed `trialDur`, false otherwise
 func (cc *Controller) inTrial(trialDur time.Duration) (bool, error) {
	now := time.Now()
	t, err := cc.checkpointStore.CurrentModified()
	if err != nil {
		return false, err
	}
-	if now.Sub(t) > trialDur {
+	if now.Sub(t) <= trialDur {
		return true, nil
	}
	return false, nil
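The `inTrial` change above fixes an inverted comparison: the function's comment promised "true while still inside the trial period", but the old code returned true only after the period had elapsed. A standalone sketch of the corrected predicate (simplified; the real method reads the modification time from the checkpoint store):

```go
package main

import (
	"fmt"
	"time"
)

// inTrial reports whether we are still "in trial": the elapsed time since the
// current config was last modified has NOT yet exceeded the trial duration.
func inTrial(lastModified time.Time, trialDur time.Duration) bool {
	return time.Since(lastModified) <= trialDur
}

func main() {
	fmt.Println(inTrial(time.Now().Add(-30*time.Second), time.Minute)) // true: still in trial
	fmt.Println(inTrial(time.Now().Add(-2*time.Minute), time.Minute))  // false: trial period over
}
```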
@@ -32,8 +32,6 @@ import (
 )

 const (
-	configOKType = "ConfigOK"
-
	// CurDefaultMessage indicates the Kubelet is using its current config, which is the default
	CurDefaultMessage = "using current (default)"
	// LkgDefaultMessage indicates the Kubelet is using its last-known-good config, which is the default
@@ -53,8 +51,8 @@ const (
	CurDefaultOKReason = "current is set to the local default, and no init config was provided"
	// CurInitOKReason indicates that init config files were provided
	CurInitOKReason = "current is set to the local default, and an init config was provided"
-	// CurRemoteOKReason indicates that the config from an API source passed all checks
-	CurRemoteOKReason = "passed all checks"
+	// CurRemoteOKReason indicates that the config referenced by Node.ConfigSource is currently passing all checks
+	CurRemoteOKReason = "passing all checks"

	// CurFailLoadReasonFmt indicates that the Kubelet failed to load the current config checkpoint for an API source
	CurFailLoadReasonFmt = "failed to load current (UID: %q)"
@@ -63,7 +61,9 @@ const (
	// CurFailValidateReasonFmt indicates that the Kubelet failed to validate the current config checkpoint for an API source
	CurFailValidateReasonFmt = "failed to validate current (UID: %q)"
	// CurFailCrashLoopReasonFmt indicates that the Kubelet experienced a crash loop while using the current config checkpoint for an API source
-	CurFailCrashLoopReasonFmt = "current failed trial period due to crash loop (UID %q)"
+	CurFailCrashLoopReasonFmt = "current failed trial period due to crash loop (UID: %q)"
+
+	// LkgFail*ReasonFmt reasons are currently used to print errors in the Kubelet log, but do not appear in Node.Status.Conditions

	// LkgFailLoadReasonFmt indicates that the Kubelet failed to load the last-known-good config checkpoint for an API source
	LkgFailLoadReasonFmt = "failed to load last-known-good (UID: %q)"
@@ -72,18 +72,46 @@ const (
	// LkgFailValidateReasonFmt indicates that the Kubelet failed to validate the last-known-good config checkpoint for an API source
	LkgFailValidateReasonFmt = "failed to validate last-known-good (UID: %q)"

-	emptyMessage = "unknown - message not provided"
-	emptyReason = "unknown - reason not provided"
+	// FailSyncReasonFmt is used when the system couldn't sync the config, due to a malformed Node.Spec.ConfigSource, a download failure, etc.
+	FailSyncReasonFmt = "failed to sync, reason: %s"
+	// FailSyncReasonAllNilSubfields is used when no subfields are set
+	FailSyncReasonAllNilSubfields = "invalid NodeConfigSource, exactly one subfield must be non-nil, but all were nil"
+	// FailSyncReasonPartialObjectReference is used when some required subfields remain unset
+	FailSyncReasonPartialObjectReference = "invalid ObjectReference, all of UID, Name, and Namespace must be specified"
+	// FailSyncReasonUIDMismatchFmt is used when there is a UID mismatch between the referenced and downloaded ConfigMaps,
+	// this can happen because objects must be downloaded by namespace/name, rather than by UID
+	FailSyncReasonUIDMismatchFmt = "invalid ObjectReference, UID %q does not match UID of downloaded ConfigMap %q"
+	// FailSyncReasonDownloadFmt is used when the download fails, e.g. due to network issues
+	FailSyncReasonDownloadFmt = "failed to download ConfigMap with name %q from namespace %q"
+	// FailSyncReasonInformer is used when the informer fails to report the Node object
+	FailSyncReasonInformer = "failed to read Node from informer object cache"
+	// FailSyncReasonReset is used when we can't reset the local configuration references, e.g. due to filesystem issues
+	FailSyncReasonReset = "failed to reset to local (default or init) config"
+	// FailSyncReasonCheckpointExistenceFmt is used when we can't determine if a checkpoint already exists, e.g. due to filesystem issues
+	FailSyncReasonCheckpointExistenceFmt = "failed to determine whether object with UID %q was already checkpointed"
+	// FailSyncReasonSaveCheckpointFmt is used when we can't save a checkpoint, e.g. due to filesystem issues
+	FailSyncReasonSaveCheckpointFmt = "failed to save config checkpoint for object with UID %q"
+	// FailSyncReasonSetCurrentDefault is used when we can't set the current config checkpoint to the local default, e.g. due to filesystem issues
+	FailSyncReasonSetCurrentDefault = "failed to set current config checkpoint to default"
+	// FailSyncReasonSetCurrentUIDFmt is used when we can't set the current config checkpoint to a checkpointed object, e.g. due to filesystem issues
+	FailSyncReasonSetCurrentUIDFmt = "failed to set current config checkpoint to object with UID %q"
+
+	// EmptyMessage is a placeholder in the case that we accidentally set the condition's message to the empty string.
+	// Doing so can result in a partial patch, and thus a confusing status; this makes it clear that the message was not provided.
+	EmptyMessage = "unknown - message not provided"
+	// EmptyReason is a placeholder in the case that we accidentally set the condition's reason to the empty string.
+	// Doing so can result in a partial patch, and thus a confusing status; this makes it clear that the reason was not provided.
+	EmptyReason = "unknown - reason not provided"
 )

 // ConfigOKCondition represents a ConfigOK NodeCondition
 type ConfigOKCondition interface {
	// Set sets the Message, Reason, and Status of the condition
	Set(message, reason string, status apiv1.ConditionStatus)
-	// SetFailedSyncCondition sets the condition for when syncing Kubelet config fails
-	SetFailedSyncCondition(reason string)
-	// ClearFailedSyncCondition clears the overlay from SetFailedSyncCondition
-	ClearFailedSyncCondition()
+	// SetFailSyncCondition sets the condition for when syncing Kubelet config fails
+	SetFailSyncCondition(reason string)
+	// ClearFailSyncCondition clears the overlay from SetFailSyncCondition
+	ClearFailSyncCondition()
	// Sync patches the current condition into the Node identified by `nodeName`
	Sync(client clientset.Interface, nodeName string)
 }
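The FailSyncReason* constants above are designed to compose: the specific reason is formatted into `FailSyncReasonFmt` before it lands in the ConfigOK condition, which is also exactly how the new e2e test builds its expected `Reason` strings. A tiny runnable illustration (constant values copied from the diff):

```go
package main

import "fmt"

// Values copied from the new constants in the status package.
const (
	FailSyncReasonFmt             = "failed to sync, reason: %s"
	FailSyncReasonAllNilSubfields = "invalid NodeConfigSource, exactly one subfield must be non-nil, but all were nil"
)

func main() {
	// The composed Reason that the ConfigOK condition carries on a sync failure:
	reason := fmt.Sprintf(FailSyncReasonFmt, FailSyncReasonAllNilSubfields)
	fmt.Println(reason)
	// failed to sync, reason: invalid NodeConfigSource, exactly one subfield must be non-nil, but all were nil
}
```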
@@ -115,10 +143,10 @@ func (c *configOKCondition) unsafeSet(message, reason string, status apiv1.Condi
	// We avoid an empty Message, Reason, or Status on the condition. Since we use Patch to update conditions, an empty
	// field might cause a value from a previous condition to leak through, which can be very confusing.
	if len(message) == 0 {
-		message = emptyMessage
+		message = EmptyMessage
	}
	if len(reason) == 0 {
-		reason = emptyReason
+		reason = EmptyReason
	}
	if len(string(status)) == 0 {
		status = apiv1.ConditionUnknown
@@ -128,7 +156,7 @@ func (c *configOKCondition) unsafeSet(message, reason string, status apiv1.Condi
		Message: message,
		Reason:  reason,
		Status:  status,
-		Type:    configOKType,
+		Type:    apiv1.NodeConfigOK,
	}

	c.pokeSyncWorker()
@@ -140,18 +168,18 @@ func (c *configOKCondition) Set(message, reason string, status apiv1.ConditionSt
	c.unsafeSet(message, reason, status)
 }

-// SetFailedSyncCondition updates the ConfigOK status to reflect that we failed to sync to the latest config because we couldn't figure out what
-// config to use (e.g. due to a malformed reference, a download failure, etc)
-func (c *configOKCondition) SetFailedSyncCondition(reason string) {
+// SetFailSyncCondition updates the ConfigOK status to reflect that we failed to sync to the latest config,
+// e.g. due to a malformed Node.Spec.ConfigSource, a download failure, etc.
+func (c *configOKCondition) SetFailSyncCondition(reason string) {
	c.conditionMux.Lock()
	defer c.conditionMux.Unlock()
	// set the reason overlay and poke the sync worker to send the update
-	c.failedSyncReason = fmt.Sprintf("failed to sync, desired config unclear, reason: %s", reason)
+	c.failedSyncReason = fmt.Sprintf(FailSyncReasonFmt, reason)
	c.pokeSyncWorker()
 }

-// ClearFailedSyncCondition removes the "failed to sync" reason overlay
-func (c *configOKCondition) ClearFailedSyncCondition() {
+// ClearFailSyncCondition removes the "failed to sync" reason overlay
+func (c *configOKCondition) ClearFailSyncCondition() {
	c.conditionMux.Lock()
	defer c.conditionMux.Unlock()
	// clear the reason overlay and poke the sync worker to send the update
@@ -219,9 +247,10 @@ func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) {
	// overlay the failedSyncReason if necessary
	var condition *apiv1.NodeCondition
	if len(c.failedSyncReason) > 0 {
-		// get a copy of the condition before we edit it
+		// get a copy of the condition before we add the overlay
		condition = c.condition.DeepCopy()
		condition.Reason = c.failedSyncReason
+		condition.Status = apiv1.ConditionFalse
	} else {
		condition = c.condition
	}
@@ -247,7 +276,7 @@ func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) {
		return
	}

-	patchConfigOK(node, c.condition)
+	patchConfigOK(node, condition)
	after, err := kuberuntime.Encode(encoder, node)
	if err != nil {
		err = fmt.Errorf(`failed to encode "after" node while generating patch, error: %v`, err)
@@ -271,7 +300,7 @@ func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) {
 // patchConfigOK replaces or adds the ConfigOK condition to the node
 func patchConfigOK(node *apiv1.Node, configOK *apiv1.NodeCondition) {
	for i := range node.Status.Conditions {
-		if node.Status.Conditions[i].Type == configOKType {
+		if node.Status.Conditions[i].Type == apiv1.NodeConfigOK {
			// edit the condition
			node.Status.Conditions[i] = *configOK
			return
@@ -281,11 +310,11 @@ func patchConfigOK(node *apiv1.Node, configOK *apiv1.NodeCondition) {
	node.Status.Conditions = append(node.Status.Conditions, *configOK)
 }

-// getConfigOK returns the first NodeCondition in `cs` with Type == configOKType,
+// getConfigOK returns the first NodeCondition in `cs` with Type == apiv1.NodeConfigOK,
 // or if no such condition exists, returns nil.
 func getConfigOK(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
	for i := range cs {
-		if cs[i].Type == configOKType {
+		if cs[i].Type == apiv1.NodeConfigOK {
			return &cs[i]
		}
	}
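The `DeepCopy` plus `ConditionFalse` change in Sync above is the interesting part of this file's fix: the failed-sync reason is overlaid on a copy of the stored condition, never on the condition itself, and the overlay now also forces the status to false. A dependency-free sketch of that overlay behavior (plain structs and a value copy stand in for the API types and DeepCopy):

```go
package main

import "fmt"

type nodeCondition struct {
	Type, Status, Reason, Message string
}

// overlay returns the condition that actually gets patched onto the Node: when
// a sync-failure reason is set, it is layered over a copy of the underlying
// condition, so the stored condition itself is never mutated.
func overlay(cond nodeCondition, failedSyncReason string) nodeCondition {
	if failedSyncReason != "" {
		c := cond // value copy stands in for DeepCopy in this sketch
		c.Reason = failedSyncReason
		c.Status = "False" // the fix: the overlay now also forces Status to false
		return c
	}
	return cond
}

func main() {
	base := nodeCondition{Type: "ConfigOK", Status: "True", Reason: "passing all checks"}
	fmt.Printf("patched: %+v\n", overlay(base, "failed to sync, reason: ..."))
	fmt.Printf("stored:  %+v\n", base) // unchanged
}
```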
@@ -3692,6 +3692,8 @@ const (
	NodeDiskPressure NodeConditionType = "DiskPressure"
	// NodeNetworkUnavailable means that network for the node is not correctly configured.
	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
+	// NodeConfigOK indicates whether the kubelet is correctly configured
+	NodeConfigOK NodeConditionType = "ConfigOK"
 )

 // NodeCondition contains condition information for a node.
@@ -49,7 +49,6 @@ go_library(
	"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
	"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
	"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-	"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
	"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
	"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
	"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
@@ -64,6 +63,7 @@ go_library(
	"//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
	"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
	"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
+	"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
	"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
	"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
	"//vendor/k8s.io/client-go/kubernetes:go_default_library",
@@ -82,7 +82,7 @@ go_test(
	"disk_eviction_test.go",
	"docker_test.go",
	"dockershim_checkpoint_test.go",
-	"dynamic_kubelet_configuration_test.go",
+	"dynamic_kubelet_config_test.go",
	"e2e_node_suite_test.go",
	"garbage_collector_test.go",
	"gke_environment_test.go",
@@ -122,6 +122,7 @@ go_test(
	"//pkg/kubelet/container:go_default_library",
	"//pkg/kubelet/dockershim/libdocker:go_default_library",
	"//pkg/kubelet/images:go_default_library",
+	"//pkg/kubelet/kubeletconfig/status:go_default_library",
	"//pkg/kubelet/metrics:go_default_library",
	"//pkg/kubelet/types:go_default_library",
	"//pkg/security/apparmor:go_default_library",
test/e2e_node/dynamic_kubelet_config_test.go (new file, 406 lines)
@@ -0,0 +1,406 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_node

import (
	"fmt"
	"reflect"
	"time"

	"github.com/davecgh/go-spew/spew"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"

	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

type configState struct {
	desc           string
	configSource   *apiv1.NodeConfigSource
	expectConfigOK *apiv1.NodeCondition
	expectConfig   *kubeletconfig.KubeletConfiguration
}

// This test is marked [Disruptive] because the Kubelet restarts several times during this test.
var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKubeletConfig] [Serial] [Disruptive]", func() {
	f := framework.NewDefaultFramework("dynamic-kubelet-configuration-test")
	var originalKC *kubeletconfig.KubeletConfiguration
	var originalConfigMap *apiv1.ConfigMap

	// Dummy context to prevent framework's AfterEach from cleaning up before this test's AfterEach can run
	Context("", func() {
		BeforeEach(func() {
			var err error
			if originalConfigMap == nil {
				originalKC, err = getCurrentKubeletConfig()
				framework.ExpectNoError(err)
				originalConfigMap = newKubeletConfigMap("original-values", originalKC)
				originalConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(originalConfigMap)
				framework.ExpectNoError(err)
			}
			// make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to test
			enabled, err := isKubeletConfigEnabled(f)
			framework.ExpectNoError(err)
			if !enabled {
				framework.ExpectNoError(fmt.Errorf("The Dynamic Kubelet Configuration feature is not enabled.\n" +
					"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n" +
					"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`."))
			}
		})

		AfterEach(func() {
			// Set the config back to the original values before moving on.
			// We care that the values are the same, not where they come from, so it
			// should be fine to reset the values using a remote config, even if they
			// were initially set via the locally provisioned configuration.
			// This is the same strategy several other e2e node tests use.
			setAndTestKubeletConfigState(f, &configState{desc: "reset to original values",
				configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
					UID:       originalConfigMap.UID,
					Namespace: originalConfigMap.Namespace,
					Name:      originalConfigMap.Name}},
				expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
					Message: fmt.Sprintf(status.CurRemoteMessageFmt, originalConfigMap.UID),
					Reason:  status.CurRemoteOKReason},
				expectConfig: originalKC})
		})

		Context("When setting new NodeConfigSources that cause transitions between ConfigOK conditions", func() {
			It("the Kubelet should report the appropriate status and configz", func() {
				var err error
				// we base the "correct" configmap off of the current configuration,
				// but we also set the trial duration very high to prevent changing the last-known-good
				correctKC := originalKC.DeepCopy()
				correctKC.ConfigTrialDuration = &metav1.Duration{Duration: time.Hour}
				correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-correct", correctKC)
				correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(correctConfigMap)
				framework.ExpectNoError(err)

				// fail to parse, we insert some bogus stuff into the configMap
				failParseConfigMap := &apiv1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{Name: "dynamic-kubelet-config-test-fail-parse"},
					Data: map[string]string{
						"kubelet": "{0xdeadbeef}",
					},
				}
				failParseConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failParseConfigMap)
				framework.ExpectNoError(err)

				// fail to validate, we make a copy and set an invalid KubeAPIQPS on kc before serializing
				invalidKC := correctKC.DeepCopy()

				invalidKC.KubeAPIQPS = -1
				failValidateConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-fail-validate", invalidKC)
				failValidateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failValidateConfigMap)
				framework.ExpectNoError(err)

				states := []configState{
					// Node.Spec.ConfigSource is nil
					{desc: "Node.Spec.ConfigSource is nil",
						configSource: nil,
						expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
							Message: status.CurDefaultMessage,
							Reason:  status.CurDefaultOKReason},
						expectConfig: nil},

					// Node.Spec.ConfigSource has all nil subfields
					{desc: "Node.Spec.ConfigSource has all nil subfields",
						configSource: &apiv1.NodeConfigSource{ConfigMapRef: nil},
						expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
							Message: "",
							Reason:  fmt.Sprintf(status.FailSyncReasonFmt, status.FailSyncReasonAllNilSubfields)},
						expectConfig: nil},

					// Node.Spec.ConfigSource.ConfigMapRef is partial
					{desc: "Node.Spec.ConfigSource.ConfigMapRef is partial",
						// TODO(mtaufen): check the other 7 partials in a unit test
						configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
							UID:  "foo",
							Name: "bar"}}, // missing Namespace
						expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
							Message: "",
							Reason:  fmt.Sprintf(status.FailSyncReasonFmt, status.FailSyncReasonPartialObjectReference)},
						expectConfig: nil},

					// Node.Spec.ConfigSource's UID does not align with namespace/name
					{desc: "Node.Spec.ConfigSource's UID does not align with namespace/name",
						configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{UID: "foo",
							Namespace: correctConfigMap.Namespace,
							Name:      correctConfigMap.Name}},
						expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
							Message: "",
							Reason:  fmt.Sprintf(status.FailSyncReasonFmt, fmt.Sprintf(status.FailSyncReasonUIDMismatchFmt, "foo", correctConfigMap.UID))},
						expectConfig: nil},

					// correct
					{desc: "correct",
						configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
							UID:       correctConfigMap.UID,
							Namespace: correctConfigMap.Namespace,
							Name:      correctConfigMap.Name}},
						expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
							Message: fmt.Sprintf(status.CurRemoteMessageFmt, correctConfigMap.UID),
							Reason:  status.CurRemoteOKReason},
						expectConfig: correctKC},

					// fail-parse
					{desc: "fail-parse",
						configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
							UID:       failParseConfigMap.UID,
							Namespace: failParseConfigMap.Namespace,
							Name:      failParseConfigMap.Name}},
						expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
							Message: status.LkgDefaultMessage,
							Reason:  fmt.Sprintf(status.CurFailParseReasonFmt, failParseConfigMap.UID)},
						expectConfig: nil},

					// fail-validate
					{desc: "fail-validate",
						configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
							UID:       failValidateConfigMap.UID,
							Namespace: failValidateConfigMap.Namespace,
							Name:      failValidateConfigMap.Name}},
						expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
							Message: status.LkgDefaultMessage,
							Reason:  fmt.Sprintf(status.CurFailValidateReasonFmt, failValidateConfigMap.UID)},
						expectConfig: nil},
				}

				L := len(states)
				for i := 1; i <= L; i++ { // need one less iteration than the number of states
					testBothDirections(f, &states[i-1 : i][0], states[i:L])
				}

			})
		})

		Context("When a remote config becomes the new last-known-good before the Kubelet is updated to use a new, bad config", func() {
			It("it should report a status and configz indicating that it rolled back to the new last-known-good", func() {
				var err error
				// we base the "lkg" configmap off of the current configuration, but set the trial
				// duration very low so that it quickly becomes the last-known-good
				lkgKC := originalKC.DeepCopy()
				lkgKC.ConfigTrialDuration = &metav1.Duration{Duration: time.Nanosecond}
				lkgConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-intended-lkg", lkgKC)
				lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap)
				framework.ExpectNoError(err)

				// bad config map, we insert some bogus stuff into the configMap
				badConfigMap := &apiv1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{Name: "dynamic-kubelet-config-test-bad"},
					Data: map[string]string{
						"kubelet": "{0xdeadbeef}",
					},
				}
				badConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(badConfigMap)
				framework.ExpectNoError(err)

				states := []configState{
					// intended lkg
					{desc: "intended last-known-good",
						configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
							UID:       lkgConfigMap.UID,
							Namespace: lkgConfigMap.Namespace,
							Name:      lkgConfigMap.Name}},
						expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
							Message: fmt.Sprintf(status.CurRemoteMessageFmt, lkgConfigMap.UID),
							Reason:  status.CurRemoteOKReason},
						expectConfig: lkgKC},

					// bad config
					{desc: "bad config",
						configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
							UID:       badConfigMap.UID,
							Namespace: badConfigMap.Namespace,
							Name:      badConfigMap.Name}},
						expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionFalse,
							Message: fmt.Sprintf(status.LkgRemoteMessageFmt, lkgConfigMap.UID),
							Reason:  fmt.Sprintf(status.CurFailParseReasonFmt, badConfigMap.UID)},
						expectConfig: lkgKC},
				}

				testBothDirections(f, &states[0], states[1:])
			})
		})

		// This stress test will help turn up resource leaks across kubelet restarts that can, over time,
		// break our ability to dynamically update kubelet config
		Context("When changing the configuration 100 times", func() {
			It("the Kubelet should report the appropriate status and configz", func() {
				var err error

				// we just create two configmaps with the same config but different names and toggle between them
				kc1 := originalKC.DeepCopy()
				cm1 := newKubeletConfigMap("dynamic-kubelet-config-test-cm1", kc1)
				cm1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cm1)
				framework.ExpectNoError(err)

				// slightly change the config
				kc2 := kc1.DeepCopy()
				kc2.EventRecordQPS = kc1.EventRecordQPS + 1
				cm2 := newKubeletConfigMap("dynamic-kubelet-config-test-cm2", kc2)
				cm2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cm2)
				framework.ExpectNoError(err)

				states := []configState{
					{desc: "cm1",
						configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
							UID:       cm1.UID,
							Namespace: cm1.Namespace,
							Name:      cm1.Name}},
						expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
							Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm1.UID),
							Reason:  status.CurRemoteOKReason},
						expectConfig: kc1},
					{desc: "cm2",
						configSource: &apiv1.NodeConfigSource{ConfigMapRef: &apiv1.ObjectReference{
							UID:       cm2.UID,
							Namespace: cm2.Namespace,
							Name:      cm2.Name}},
						expectConfigOK: &apiv1.NodeCondition{Type: apiv1.NodeConfigOK, Status: apiv1.ConditionTrue,
							Message: fmt.Sprintf(status.CurRemoteMessageFmt, cm2.UID),
							Reason:  status.CurRemoteOKReason},
						expectConfig: kc2},
				}

				for i := 0; i < 50; i++ { // change the config 101 times (changes 3 times in the first iteration, 2 times in each subsequent iteration)
					testBothDirections(f, &states[0], states[1:])
				}
			})
		})
	})
})

// testBothDirections tests the state change represented by each edge, where each state is a vertex,
// and there are edges in each direction between first and each of the states.
func testBothDirections(f *framework.Framework, first *configState, states []configState) {
	// set to first and check that everything got set up properly
	By(fmt.Sprintf("setting configSource to state %q", first.desc))
	setAndTestKubeletConfigState(f, first)

	// for each state, set to that state, check condition and configz, then reset to first and check again
	for i := range states {
		By(fmt.Sprintf("from %q to %q", first.desc, states[i].desc))
		setAndTestKubeletConfigState(f, &states[i])

		By(fmt.Sprintf("back to %q from %q", first.desc, states[i].desc))
		setAndTestKubeletConfigState(f, first)
	}
}

// setAndTestKubeletConfigState tests that after setting the config source, the ConfigOK condition
// and (if appropriate) configuration exposed via configz are as expected.
// The configuration will be converted to the internal type prior to comparison.
func setAndTestKubeletConfigState(f *framework.Framework, state *configState) {
	// set the desired state, retry a few times in case we are competing with other editors
	Eventually(func() error {
		if err := setNodeConfigSource(f, state.configSource); err != nil {
			return err
		}
		return nil
	}, time.Minute, time.Second).Should(BeNil())
	// check that config source actually got set to what we expect
	checkNodeConfigSource(f, state.configSource)
	// check condition
	checkConfigOKCondition(f, state.expectConfigOK)
	// check expectConfig
	if state.expectConfig != nil {
		checkConfig(f, state.expectConfig)
	}
}

// make sure the node's config source matches what we expect, after setting it
func checkNodeConfigSource(f *framework.Framework, expect *apiv1.NodeConfigSource) {
	const (
		timeout  = time.Minute
		interval = time.Second
	)

	Eventually(func() error {
		node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		actual := node.Spec.ConfigSource
		if !reflect.DeepEqual(expect, actual) {
			return fmt.Errorf(spew.Sprintf("expected %#v but got %#v", expect, actual))
		}
		return nil
	}, timeout, interval).Should(BeNil())
}

// make sure the ConfigOK node condition eventually matches what we expect
func checkConfigOKCondition(f *framework.Framework, expect *apiv1.NodeCondition) {
	const (
		timeout  = time.Minute
		interval = time.Second
	)

	Eventually(func() error {
		node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		actual := getConfigOKCondition(node.Status.Conditions)
		if actual == nil {
			return fmt.Errorf("ConfigOK condition not found on node %q", framework.TestContext.NodeName)
		}
		if err := expectConfigOK(expect, actual); err != nil {
			return err
		}
		return nil
	}, timeout, interval).Should(BeNil())
}

// if the actual matches the expect, return nil, else error explaining the mismatch
// if a subfield of the expect is the empty string, that check is skipped
func expectConfigOK(expect, actual *apiv1.NodeCondition) error {
	if expect.Status != actual.Status {
		return fmt.Errorf("expected condition Status %q but got %q", expect.Status, actual.Status)
	}
	if len(expect.Message) > 0 && expect.Message != actual.Message {
		return fmt.Errorf("expected condition Message %q but got %q", expect.Message, actual.Message)
	}
	if len(expect.Reason) > 0 && expect.Reason != actual.Reason {
		return fmt.Errorf("expected condition Reason %q but got %q", expect.Reason, actual.Reason)
	}
	return nil
}

// make sure config exposed on configz matches what we expect
func checkConfig(f *framework.Framework, expect *kubeletconfig.KubeletConfiguration) {
	const (
		timeout  = time.Minute
		interval = time.Second
	)
	Eventually(func() error {
		actual, err := getCurrentKubeletConfig()
		if err != nil {
			return err
		}
		if !reflect.DeepEqual(expect, actual) {
			return fmt.Errorf(spew.Sprintf("expected %#v but got %#v", expect, actual))
		}
		return nil
	}, timeout, interval).Should(BeNil())
}
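Several helpers in the new file wrap their API writes in Gomega's `Eventually(..., time.Minute, time.Second)` so that a conflicting writer just causes a retry instead of an immediate test failure. The same idea in plain Go, for readers who don't use Gomega (`retry` is a hypothetical helper, not part of the diff):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// retry keeps calling fn until it succeeds or the timeout elapses, sleeping
// `interval` between attempts; this mirrors what the test's
// Eventually(..., time.Minute, time.Second) blocks do.
func retry(timeout, interval time.Duration, fn func() error) error {
	deadline := time.Now().Add(timeout)
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out, last error: %v", err)
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	err := retry(time.Minute, time.Second, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("conflict: another writer updated the Node")
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}
```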
test/e2e_node/dynamic_kubelet_configuration_test.go (file deleted, 62 lines; see the go_test srcs change above)
@@ -1,62 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_node

import (
	"time"

	"github.com/golang/glog"

	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

// This test is marked [Disruptive] because the Kubelet temporarily goes down as part of this test.
var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKubeletConfig] [Serial] [Disruptive]", func() {
	f := framework.NewDefaultFramework("dynamic-kubelet-configuration-test")

	Context("When the config source on a Node is updated to point to new config", func() {
		It("The Kubelet on that node should restart to take up the new config", func() {
			// Get the current KubeletConfiguration (known to be valid) by
			// querying the configz endpoint for the current node.
			kubeCfg, err := getCurrentKubeletConfig()
			framework.ExpectNoError(err)
			glog.Infof("KubeletConfiguration - Initial values: %+v", *kubeCfg)

			// Change a safe value e.g. file check frequency.
			// Make sure we're providing a value distinct from the current one.
			oldFileCheckFrequency := kubeCfg.FileCheckFrequency.Duration
			newFileCheckFrequency := 11 * time.Second
			if kubeCfg.FileCheckFrequency.Duration == newFileCheckFrequency {
				newFileCheckFrequency = 10 * time.Second
			}
			kubeCfg.FileCheckFrequency.Duration = newFileCheckFrequency

			// Use the new config to create a new kube-{node-name} configmap in `kube-system` namespace.
			// Note: setKubeletConfiguration will return an error if the Kubelet does not present the
			// modified configuration via /configz when it comes back up.
			err = setKubeletConfiguration(f, kubeCfg)
			framework.ExpectNoError(err)

			// Change the config back to what it originally was.
			kubeCfg.FileCheckFrequency.Duration = oldFileCheckFrequency
			err = setKubeletConfiguration(f, kubeCfg)
			framework.ExpectNoError(err)
		})
	})
})
@@ -29,10 +29,9 @@ import (

	"github.com/golang/glog"

-	"k8s.io/api/core/v1"
+	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
@@ -133,6 +132,7 @@ func isKubeletConfigEnabled(f *framework.Framework) (bool, error) {
 // Creates or updates the configmap for KubeletConfiguration, waits for the Kubelet to restart
 // with the new configuration. Returns an error if the configuration after waiting for restartGap
 // doesn't match what you attempted to set, or if the dynamic configuration feature is disabled.
+// You should only call this from serial tests.
 func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.KubeletConfiguration) error {
	const (
		restartGap = 40 * time.Second
@@ -140,43 +140,36 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
	)

	// make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to reconfigure
-	configEnabled, err := isKubeletConfigEnabled(f)
-	if err != nil {
-		return fmt.Errorf("could not determine whether 'DynamicKubeletConfig' feature is enabled, err: %v", err)
-	}
-	if !configEnabled {
+	if configEnabled, err := isKubeletConfigEnabled(f); err != nil {
+		return err
+	} else if !configEnabled {
		return fmt.Errorf("The Dynamic Kubelet Configuration feature is not enabled.\n" +
			"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n" +
			"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")
	}
-
-	nodeclient := f.ClientSet.CoreV1().Nodes()
-
	// create the ConfigMap with the new configuration
	cm, err := createConfigMap(f, kubeCfg)
	if err != nil {
		return err
	}

-	// create the correct reference object
-	src := &v1.NodeConfigSource{
-		ConfigMapRef: &v1.ObjectReference{
+	// create the reference and set Node.Spec.ConfigSource
+	src := &apiv1.NodeConfigSource{
+		ConfigMapRef: &apiv1.ObjectReference{
			Namespace: "kube-system",
			Name:      cm.Name,
			UID:       cm.UID,
		},
	}

-	// serialize the new node config source
-	raw, err := json.Marshal(src)
-	framework.ExpectNoError(err)
-	data := []byte(fmt.Sprintf(`{"spec":{"configSource":%s}}`, raw))
-
-	// patch the node
-	_, err = nodeclient.Patch(framework.TestContext.NodeName,
-		types.StrategicMergePatchType,
-		data)
-	framework.ExpectNoError(err)
+	// set the source, retry a few times in case we are competing with other writers
+	Eventually(func() error {
+		if err := setNodeConfigSource(f, src); err != nil {
+			return err
+		}
+		return nil
+	}, time.Minute, time.Second).Should(BeNil())

	// poll for new config, for a maximum wait of restartGap
	Eventually(func() error {
@@ -194,6 +187,41 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
	return nil
 }

+// sets the current node's configSource, this should only be called from Serial tests
+func setNodeConfigSource(f *framework.Framework, source *apiv1.NodeConfigSource) error {
+	// since this is a serial test, we just get the node, change the source, and then update it
+	// this prevents any issues with the patch API from affecting the test results
+	nodeclient := f.ClientSet.CoreV1().Nodes()
+
+	// get the node
+	node, err := nodeclient.Get(framework.TestContext.NodeName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	// set new source
+	node.Spec.ConfigSource = source
+
+	// update to the new source
+	_, err = nodeclient.Update(node)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// getConfigOKCondition returns the first NodeCondition in `cs` with Type == apiv1.NodeConfigOK,
+// or if no such condition exists, returns nil.
+func getConfigOKCondition(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
+	for i := range cs {
+		if cs[i].Type == apiv1.NodeConfigOK {
+			return &cs[i]
+		}
+	}
+	return nil
+}
+
 // Causes the test to fail, or returns a status 200 response from the /configz endpoint
 func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Response {
	endpoint := fmt.Sprintf("http://127.0.0.1:8080/api/v1/proxy/nodes/%s/configz", framework.TestContext.NodeName)
@@ -248,8 +276,8 @@ func decodeConfigz(resp *http.Response) (*kubeletconfig.KubeletConfiguration, er
 }

 // creates a configmap containing kubeCfg in kube-system namespace
-func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*v1.ConfigMap, error) {
-	cmap := makeKubeletConfigMap(internalKC)
+func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*apiv1.ConfigMap, error) {
+	cmap := newKubeletConfigMap("testcfg", internalKC)
	cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Create(cmap)
	if err != nil {
		return nil, err
@@ -257,8 +285,8 @@ func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletCo
	return cmap, nil
 }

-// constructs a ConfigMap, populating one of its keys with the KubeletConfiguration. Uses GenerateName.
-func makeKubeletConfigMap(internalKC *kubeletconfig.KubeletConfiguration) *v1.ConfigMap {
+// constructs a ConfigMap, populating one of its keys with the KubeletConfiguration. Always uses GenerateName to generate a suffix.
+func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfiguration) *apiv1.ConfigMap {
	scheme, _, err := kubeletscheme.NewSchemeAndCodecs()
	framework.ExpectNoError(err)

@@ -272,8 +300,8 @@ func makeKubeletConfigMap(internalKC *kubeletconfig.KubeletConfiguration) *v1.Co
	data, err := runtime.Encode(encoder, versioned)
	framework.ExpectNoError(err)

-	cmap := &v1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{GenerateName: "testcfg"},
+	cmap := &apiv1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{GenerateName: name},
		Data: map[string]string{
			"kubelet": string(data),
		},
@@ -293,7 +321,7 @@ func logNodeEvents(f *framework.Framework) {
	framework.ExpectNoError(err)
 }

-func getLocalNode(f *framework.Framework) *v1.Node {
+func getLocalNode(f *framework.Framework) *apiv1.Node {
	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
	Expect(len(nodeList.Items)).To(Equal(1), "Unexpected number of node objects for node e2e. Expects only one node.")
	return &nodeList.Items[0]
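For reference, `pollConfigz`/`decodeConfigz` above fetch the live Kubelet configuration through the API server's node proxy and decode it with the kubelet scheme codecs. A simplified, dependency-free sketch of the same round trip (it assumes the `kubectl proxy`-style endpoint on 127.0.0.1:8080 used by the test, and decodes to a raw map instead of the internal KubeletConfiguration type):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// getConfigz fetches the Kubelet's /configz for a node via the API server
// proxy and returns the decoded JSON payload as a generic map.
func getConfigz(nodeName string) (map[string]interface{}, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:8080/api/v1/proxy/nodes/%s/configz", nodeName)
	resp, err := http.Get(endpoint)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("/configz returned status %d", resp.StatusCode)
	}
	var payload map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return nil, err
	}
	return payload, nil
}

func main() {
	cfg, err := getConfigz("example-node")
	fmt.Println(cfg, err)
}
```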