component-helpers: Support structured and contextual logging (#120637)

Authored by bells17 on 2024-04-24 19:06:15 +09:00; committed by GitHub
parent 646fbe6d0a
commit 1c917aa463
17 changed files with 113 additions and 78 deletions
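The pattern applied throughout the diff below is the contextual-logging convention from the Kubernetes structured-logging migration: functions accept a context.Context instead of a klog.Logger, recover the logger with klog.FromContext, and thread the same ctx into API calls. A minimal sketch of that shape, for orientation only; the helper below is hypothetical and not part of this commit:

// Illustrative only: a hypothetical helper following the contextual-logging
// pattern this commit applies to RunKubelet, AllocateOrOccupyCIDR,
// PatchNodeCIDRs, and friends.
package example

import (
	"context"

	"k8s.io/klog/v2"
)

func syncNode(ctx context.Context, name string) error {
	// The logger travels inside ctx; callees recover it here instead of
	// receiving a klog.Logger parameter.
	logger := klog.FromContext(ctx)
	logger.V(4).Info("Syncing node", "node", name)

	// The same ctx is what gets passed to client-go calls
	// (e.g. Nodes().Patch(ctx, ...)) instead of context.TODO().
	return nil
}

In the test files below, ktesting.NewTestContext(t) or ktesting.Init(t) supplies a context whose logger writes through the testing.T, which is why the tests swap direct klog calls for ktesting.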

View File

@@ -871,7 +871,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 klog.InfoS("Failed to ApplyOOMScoreAdj", "err", err)
 }
-if err := RunKubelet(s, kubeDeps, s.RunOnce); err != nil {
+if err := RunKubelet(ctx, s, kubeDeps, s.RunOnce); err != nil {
 return err
 }
@@ -1202,7 +1202,7 @@ func setContentTypeForClient(cfg *restclient.Config, contentType string) {
 // 3 Standalone 'kubernetes' binary
 //
 // Eventually, #2 will be replaced with instances of #3
-func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencies, runOnce bool) error {
+func RunKubelet(ctx context.Context, kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencies, runOnce bool) error {
 hostname, err := nodeutil.GetHostname(kubeServer.HostnameOverride)
 if err != nil {
 return err
@@ -1214,12 +1214,15 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
 }
 hostnameOverridden := len(kubeServer.HostnameOverride) > 0
 // Setup event recorder if required.
-makeEventRecorder(context.TODO(), kubeDeps, nodeName)
+makeEventRecorder(ctx, kubeDeps, nodeName)
-nodeIPs, err := nodeutil.ParseNodeIPArgument(kubeServer.NodeIP, kubeServer.CloudProvider)
+nodeIPs, invalidNodeIps, err := nodeutil.ParseNodeIPArgument(kubeServer.NodeIP, kubeServer.CloudProvider)
 if err != nil {
 return fmt.Errorf("bad --node-ip %q: %v", kubeServer.NodeIP, err)
 }
+if len(invalidNodeIps) != 0 {
+klog.FromContext(ctx).Info("Could not parse some node IP(s), ignoring them", "IPs", invalidNodeIps)
+}
 capabilities.Initialize(capabilities.Capabilities{
 AllowPrivileged: true,

View File

@@ -157,7 +157,7 @@ func NewHollowNodeCommand() *cobra.Command {
 RunE: func(cmd *cobra.Command, args []string) error {
 verflag.PrintAndExitIfRequested()
 cliflag.PrintFlags(cmd.Flags())
-return run(s)
+return run(cmd.Context(), s)
 },
 Args: func(cmd *cobra.Command, args []string) error {
 for _, arg := range args {
@@ -176,7 +176,7 @@ func NewHollowNodeCommand() *cobra.Command {
 return cmd
 }
-func run(config *hollowNodeConfig) error {
+func run(ctx context.Context, config *hollowNodeConfig) error {
 // To help debugging, immediately log version and print flags.
 klog.Infof("Version: %+v", version.Get())
@@ -264,7 +264,7 @@ func run(config *hollowNodeConfig) error {
 runtimeService,
 containerManager,
 )
-hollowKubelet.Run()
+hollowKubelet.Run(ctx)
 }
 if config.Morph == "proxy" {

View File

@@ -24,6 +24,8 @@ import (
 "path/filepath"
 "testing"
 "time"
+"k8s.io/kubernetes/test/utils/ktesting"
 )
 const fakeKubeconfig = `
@@ -80,7 +82,8 @@ func TestHollowNode(t *testing.T) {
 go func() {
 data, err := os.ReadFile(kubeconfigPath)
 t.Logf("read %d, err=%v\n", len(data), err)
-errCh <- run(s)
+ctx := ktesting.Init(t)
+errCh <- run(ctx, s)
 }()
 select {

View File

@@ -131,6 +131,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/client-go/metadata/.*
 contextual k8s.io/client-go/tools/events/.*
 contextual k8s.io/client-go/tools/record/.*
+contextual k8s.io/component-helpers/.*
 contextual k8s.io/dynamic-resource-allocation/.*
 contextual k8s.io/kubernetes/cmd/kube-proxy/.*
 contextual k8s.io/kubernetes/cmd/kube-scheduler/.*

View File

@@ -177,6 +177,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/client-go/metadata/.*
 contextual k8s.io/client-go/tools/events/.*
 contextual k8s.io/client-go/tools/record/.*
+contextual k8s.io/component-helpers/.*
 contextual k8s.io/dynamic-resource-allocation/.*
 contextual k8s.io/kubernetes/cmd/kube-proxy/.*
 contextual k8s.io/kubernetes/cmd/kube-scheduler/.*

View File

@@ -180,6 +180,7 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/client-go/metadata/.*
 contextual k8s.io/client-go/tools/events/.*
 contextual k8s.io/client-go/tools/record/.*
+contextual k8s.io/component-helpers/.*
 contextual k8s.io/dynamic-resource-allocation/.*
 contextual k8s.io/kubernetes/cmd/kube-proxy/.*
 contextual k8s.io/kubernetes/cmd/kube-scheduler/.*

View File

@@ -28,6 +28,7 @@ contextual k8s.io/apimachinery/pkg/util/runtime/.*
 contextual k8s.io/client-go/metadata/.*
 contextual k8s.io/client-go/tools/events/.*
 contextual k8s.io/client-go/tools/record/.*
+contextual k8s.io/component-helpers/.*
 contextual k8s.io/dynamic-resource-allocation/.*
 contextual k8s.io/kubernetes/cmd/kube-proxy/.*
 contextual k8s.io/kubernetes/cmd/kube-scheduler/.*

View File

@@ -86,7 +86,7 @@ type CIDRAllocator interface {
 // AllocateOrOccupyCIDR looks at the given node, assigns it a valid
 // CIDR if it doesn't currently have one or mark the CIDR as used if
 // the node already have one.
-AllocateOrOccupyCIDR(logger klog.Logger, node *v1.Node) error
+AllocateOrOccupyCIDR(ctx context.Context, node *v1.Node) error
 // ReleaseCIDR releases the CIDR of the removed node.
 ReleaseCIDR(logger klog.Logger, node *v1.Node) error
 // Run starts all the working logic of the allocator.

View File

@@ -117,18 +117,18 @@ func NewCloudCIDRAllocator(ctx context.Context, client clientset.Interface, clou
 nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: controllerutil.CreateAddNodeHandler(
 func(node *v1.Node) error {
-return ca.AllocateOrOccupyCIDR(logger, node)
+return ca.AllocateOrOccupyCIDR(ctx, node)
 }),
 UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
 if newNode.Spec.PodCIDR == "" {
-return ca.AllocateOrOccupyCIDR(logger, newNode)
+return ca.AllocateOrOccupyCIDR(ctx, newNode)
 }
 // Even if PodCIDR is assigned, but NetworkUnavailable condition is
 // set to true, we need to process the node to set the condition.
 networkUnavailableTaint := &v1.Taint{Key: v1.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
 _, cond := controllerutil.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
 if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) {
-return ca.AllocateOrOccupyCIDR(logger, newNode)
+return ca.AllocateOrOccupyCIDR(ctx, newNode)
 }
 return nil
 }),
@@ -173,7 +173,7 @@ func (ca *cloudCIDRAllocator) worker(ctx context.Context) {
 logger.Info("Channel nodeCIDRUpdateChannel was unexpectedly closed")
 return
 }
-if err := ca.updateCIDRAllocation(logger, workItem); err == nil {
+if err := ca.updateCIDRAllocation(ctx, workItem); err == nil {
 logger.V(3).Info("Updated CIDR", "workItem", workItem)
 } else {
 logger.Error(err, "Error updating CIDR", "workItem", workItem)
@@ -243,10 +243,12 @@ func (ca *cloudCIDRAllocator) removeNodeFromProcessing(nodeName string) {
 // WARNING: If you're adding any return calls or defer any more work from this
 // function you have to make sure to update nodesInProcessing properly with the
 // disposition of the node when the work is done.
-func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(logger klog.Logger, node *v1.Node) error {
+func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(ctx context.Context, node *v1.Node) error {
 if node == nil {
 return nil
 }
+logger := klog.FromContext(ctx)
 if !ca.insertNodeToProcessing(node.Name) {
 logger.V(2).Info("Node is already in a process of CIDR assignment", "node", klog.KObj(node))
 return nil
@@ -258,7 +260,8 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(logger klog.Logger, node *v1.
 }
 // updateCIDRAllocation assigns CIDR to Node and sends an update to the API server.
-func (ca *cloudCIDRAllocator) updateCIDRAllocation(logger klog.Logger, nodeName string) error {
+func (ca *cloudCIDRAllocator) updateCIDRAllocation(ctx context.Context, nodeName string) error {
+logger := klog.FromContext(ctx)
 node, err := ca.nodeLister.Get(nodeName)
 if err != nil {
 if errors.IsNotFound(err) {
@@ -305,7 +308,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(logger klog.Logger, nodeName
 // See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
 }
 for i := 0; i < cidrUpdateRetries; i++ {
-if err = nodeutil.PatchNodeCIDRs(ca.client, types.NodeName(node.Name), cidrStrings); err == nil {
+if err = nodeutil.PatchNodeCIDRs(ctx, ca.client, types.NodeName(node.Name), cidrStrings); err == nil {
 logger.Info("Set the node PodCIDRs", "node", klog.KObj(node), "cidrStrings", cidrStrings)
 break
 }

View File

@@ -51,14 +51,16 @@ func TestBoundedRetries(t *testing.T) {
 nodesSynced: sharedInfomer.Core().V1().Nodes().Informer().HasSynced,
 nodesInProcessing: map[string]*nodeProcessingInfo{},
 }
-logger, ctx := ktesting.NewTestContext(t)
+_, ctx := ktesting.NewTestContext(t)
 go ca.worker(ctx)
 nodeName := "testNode"
-ca.AllocateOrOccupyCIDR(logger, &v1.Node{
+if err := ca.AllocateOrOccupyCIDR(ctx, &v1.Node{
 ObjectMeta: metav1.ObjectMeta{
 Name: nodeName,
 },
-})
+}); err != nil {
+t.Errorf("unexpected error in AllocateOrOccupyCIDR: %v", err)
+}
 for hasNodeInProcessing(ca, nodeName) {
 // wait for node to finish processing (should terminate and not time out)
 }

View File

@@ -61,6 +61,8 @@ type rangeAllocator struct {
 queue workqueue.RateLimitingInterface
 }
+var _ CIDRAllocator = &rangeAllocator{}
 // NewCIDRRangeAllocator returns a CIDRAllocator to allocate CIDRs for node (one from each of clusterCIDRs)
 // Caller must ensure subNetMaskSize is not less than cluster CIDR mask size.
 // Caller must always pass in a list of existing nodes so the new allocator.
@@ -228,7 +230,7 @@ func (r *rangeAllocator) processNextNodeWorkItem(ctx context.Context) bool {
 }
 // Run the syncHandler, passing it the namespace/name string of the
 // Foo resource to be synced.
-if err := r.syncNode(logger, key); err != nil {
+if err := r.syncNode(ctx, key); err != nil {
 // Put the item back on the queue to handle any transient errors.
 r.queue.AddRateLimited(key)
 return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
@@ -248,7 +250,8 @@ func (r *rangeAllocator) processNextNodeWorkItem(ctx context.Context) bool {
 return true
 }
-func (r *rangeAllocator) syncNode(logger klog.Logger, key string) error {
+func (r *rangeAllocator) syncNode(ctx context.Context, key string) error {
+logger := klog.FromContext(ctx)
 startTime := time.Now()
 defer func() {
 logger.V(4).Info("Finished syncing Node request", "node", key, "elapsed", time.Since(startTime))
@@ -269,7 +272,7 @@ func (r *rangeAllocator) syncNode(logger klog.Logger, key string) error {
 logger.V(3).Info("node is being deleted", "node", key)
 return r.ReleaseCIDR(logger, node)
 }
-return r.AllocateOrOccupyCIDR(logger, node)
+return r.AllocateOrOccupyCIDR(ctx, node)
 }
 // marks node.PodCIDRs[...] as used in allocator's tracked cidrSet
@@ -299,7 +302,7 @@ func (r *rangeAllocator) occupyCIDRs(node *v1.Node) error {
 // WARNING: If you're adding any return calls or defer any more work from this
 // function you have to make sure to update nodesInProcessing properly with the
 // disposition of the node when the work is done.
-func (r *rangeAllocator) AllocateOrOccupyCIDR(logger klog.Logger, node *v1.Node) error {
+func (r *rangeAllocator) AllocateOrOccupyCIDR(ctx context.Context, node *v1.Node) error {
 if node == nil {
 return nil
 }
@@ -308,6 +311,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(logger klog.Logger, node *v1.Node)
 return r.occupyCIDRs(node)
 }
+logger := klog.FromContext(ctx)
 allocatedCIDRs := make([]*net.IPNet, len(r.cidrSets))
 for idx := range r.cidrSets {
@@ -321,7 +325,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(logger klog.Logger, node *v1.Node)
 //queue the assignment
 logger.V(4).Info("Putting node with CIDR into the work queue", "node", klog.KObj(node), "CIDRs", allocatedCIDRs)
-return r.updateCIDRsAllocation(logger, node.Name, allocatedCIDRs)
+return r.updateCIDRsAllocation(ctx, node.Name, allocatedCIDRs)
 }
 // ReleaseCIDR marks node.podCIDRs[...] as unused in our tracked cidrSets
@@ -373,9 +377,10 @@ func (r *rangeAllocator) filterOutServiceRange(logger klog.Logger, serviceCIDR *
 }
 // updateCIDRsAllocation assigns CIDR to Node and sends an update to the API server.
-func (r *rangeAllocator) updateCIDRsAllocation(logger klog.Logger, nodeName string, allocatedCIDRs []*net.IPNet) error {
+func (r *rangeAllocator) updateCIDRsAllocation(ctx context.Context, nodeName string, allocatedCIDRs []*net.IPNet) error {
 var err error
 var node *v1.Node
+logger := klog.FromContext(ctx)
 cidrsString := ipnetToStringList(allocatedCIDRs)
 node, err = r.nodeLister.Get(nodeName)
 if err != nil {
@@ -413,7 +418,7 @@ func (r *rangeAllocator) updateCIDRsAllocation(logger klog.Logger, nodeName stri
 // If we reached here, it means that the node has no CIDR currently assigned. So we set it.
 for i := 0; i < cidrUpdateRetries; i++ {
-if err = nodeutil.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil {
+if err = nodeutil.PatchNodeCIDRs(ctx, r.client, types.NodeName(node.Name), cidrsString); err == nil {
 logger.Info("Set node PodCIDR", "node", klog.KObj(node), "podCIDRs", cidrsString)
 return nil
 }

View File

@@ -509,7 +509,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
 }
 // test function
-logger, tCtx := ktesting.NewTestContext(t)
+_, tCtx := ktesting.NewTestContext(t)
 testFunc := func(tc testCase) {
 fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
 nodeList, _ := tc.fakeNodeHandler.List(tCtx, metav1.ListOptions{})
@@ -547,7 +547,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
 if node.Spec.PodCIDRs == nil {
 updateCount++
 }
-if err := allocator.AllocateOrOccupyCIDR(logger, node); err != nil {
+if err := allocator.AllocateOrOccupyCIDR(tCtx, node); err != nil {
 t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
 }
 }
@@ -610,7 +610,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
 },
 },
 }
-logger, tCtx := ktesting.NewTestContext(t)
+_, tCtx := ktesting.NewTestContext(t)
 testFunc := func(tc testCase) {
 // Initialize the range allocator.
 allocator, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
@@ -639,7 +639,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
 }
 }
 }
-if err := allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0]); err == nil {
+if err := allocator.AllocateOrOccupyCIDR(tCtx, tc.fakeNodeHandler.Existing[0]); err == nil {
 t.Errorf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err)
 }
 // We don't expect any updates, so just sleep for some time
@@ -782,7 +782,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
 }
 }
-err := allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0])
+err := allocator.AllocateOrOccupyCIDR(tCtx, tc.fakeNodeHandler.Existing[0])
 if len(tc.expectedAllocatedCIDRFirstRound) != 0 {
 if err != nil {
 t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
@@ -812,7 +812,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
 t.Fatalf("%v: unexpected error in ReleaseCIDR: %v", tc.description, err)
 }
 }
-if err = allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0]); err != nil {
+if err = allocator.AllocateOrOccupyCIDR(tCtx, tc.fakeNodeHandler.Existing[0]); err != nil {
 t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
 }
 if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package kubemark
 import (
+"context"
 "fmt"
 "time"
@@ -126,8 +127,8 @@ func NewHollowKubelet(
 }
 // Starts this HollowKubelet and blocks.
-func (hk *HollowKubelet) Run() {
+func (hk *HollowKubelet) Run(ctx context.Context) {
-if err := kubeletapp.RunKubelet(&options.KubeletServer{
+if err := kubeletapp.RunKubelet(ctx, &options.KubeletServer{
 KubeletFlags: *hk.KubeletFlags,
 KubeletConfiguration: *hk.KubeletConfiguration,
 }, hk.KubeletDeps, false); err != nil {

View File

@@ -198,7 +198,8 @@ func TestNewNodeLease(t *testing.T) {
 for _, tc := range cases {
 t.Run(tc.desc, func(t *testing.T) {
-tc.controller.newLeasePostProcessFunc = setNodeOwnerFunc(tc.controller.client, node.Name)
+logger, _ := ktesting.NewTestContext(t)
+tc.controller.newLeasePostProcessFunc = setNodeOwnerFunc(logger, tc.controller.client, node.Name)
 tc.controller.leaseNamespace = corev1.NamespaceNodeLease
 newLease, _ := tc.controller.newLease(tc.base)
 if newLease == tc.base {
@@ -286,7 +287,7 @@ func TestRetryUpdateNodeLease(t *testing.T) {
 }
 for _, tc := range cases {
 t.Run(tc.desc, func(t *testing.T) {
-_, ctx := ktesting.NewTestContext(t)
+logger, ctx := ktesting.NewTestContext(t)
 cl := tc.client
 if tc.updateReactor != nil {
 cl.PrependReactor("update", "leases", tc.updateReactor)
@@ -302,7 +303,7 @@ func TestRetryUpdateNodeLease(t *testing.T) {
 leaseNamespace: corev1.NamespaceNodeLease,
 leaseDurationSeconds: 10,
 onRepeatedHeartbeatFailure: tc.onRepeatedHeartbeatFailure,
-newLeasePostProcessFunc: setNodeOwnerFunc(cl, node.Name),
+newLeasePostProcessFunc: setNodeOwnerFunc(logger, cl, node.Name),
 }
 if err := c.retryUpdateLease(ctx, nil); tc.expectErr != (err != nil) {
 t.Fatalf("got %v, expected %v", err != nil, tc.expectErr)
@@ -422,7 +423,7 @@ func TestUpdateUsingLatestLease(t *testing.T) {
 }
 for _, tc := range cases {
 t.Run(tc.desc, func(t *testing.T) {
-_, ctx := ktesting.NewTestContext(t)
+logger, ctx := ktesting.NewTestContext(t)
 cl := fake.NewSimpleClientset(tc.existingObjs...)
 if tc.updateReactor != nil {
 cl.PrependReactor("update", "leases", tc.updateReactor)
@@ -441,7 +442,7 @@ func TestUpdateUsingLatestLease(t *testing.T) {
 leaseNamespace: corev1.NamespaceNodeLease,
 leaseDurationSeconds: 10,
 latestLease: tc.latestLease,
-newLeasePostProcessFunc: setNodeOwnerFunc(cl, node.Name),
+newLeasePostProcessFunc: setNodeOwnerFunc(logger, cl, node.Name),
 }
 c.sync(ctx)
@@ -461,7 +462,7 @@ func TestUpdateUsingLatestLease(t *testing.T) {
 // setNodeOwnerFunc helps construct a newLeasePostProcessFunc which sets
 // a node OwnerReference to the given lease object
-func setNodeOwnerFunc(c clientset.Interface, nodeName string) func(lease *coordinationv1.Lease) error {
+func setNodeOwnerFunc(logger klog.Logger, c clientset.Interface, nodeName string) func(lease *coordinationv1.Lease) error {
 return func(lease *coordinationv1.Lease) error {
 // Setting owner reference needs node's UID. Note that it is different from
 // kubelet.nodeRef.UID. When lease is initially created, it is possible that
@@ -478,7 +479,7 @@ func setNodeOwnerFunc(c clientset.Interface, nodeName string) func(lease *coordi
 },
 }
 } else {
-klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", nodeName, err)
+logger.Error(err, "failed to get node when trying to set owner ref to the node lease", "node", nodeName)
 return err
 }
 }
 }

View File

@@ -37,7 +37,7 @@ type nodeSpecForMergePatch struct {
 }
 // PatchNodeCIDRs patches the specified node.CIDR=cidrs[0] and node.CIDRs to the given value.
-func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) error {
+func PatchNodeCIDRs(ctx context.Context, c clientset.Interface, node types.NodeName, cidrs []string) error {
 // set the pod cidrs list and set the old pod cidr field
 patch := nodeForCIDRMergePatch{
 Spec: nodeSpecForMergePatch{
@@ -50,8 +50,8 @@ func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string)
 if err != nil {
 return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
 }
-klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes))
+klog.FromContext(ctx).V(4).Info("cidrs patch bytes", "patchBytes", string(patchBytes))
-if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
+if _, err := c.CoreV1().Nodes().Patch(ctx, string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
 return fmt.Errorf("failed to patch node CIDR: %v", err)
 }
 return nil
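Callers of the helper above now supply the request context, which also replaces the context.TODO() previously hard-coded into the Patch call. A hedged caller sketch, assuming the k8s.io/component-helpers/node/util import path used elsewhere in this commit; the wrapper function itself is hypothetical:

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	nodeutil "k8s.io/component-helpers/node/util"
)

// setPodCIDRs is a hypothetical wrapper showing the new call shape: the
// caller's ctx now reaches both the log line and the Nodes().Patch request,
// so cancelling ctx aborts the patch.
func setPodCIDRs(ctx context.Context, client kubernetes.Interface, nodeName string, cidrs []string) error {
	return nodeutil.PatchNodeCIDRs(ctx, client, types.NodeName(nodeName), cidrs)
}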

View File

@@ -21,7 +21,6 @@ import (
 "net"
 "strings"
-"k8s.io/klog/v2"
 netutils "k8s.io/utils/net"
 )
@@ -31,8 +30,9 @@
 )
 // parseNodeIP implements ParseNodeIPArgument and ParseNodeIPAnnotation
-func parseNodeIP(nodeIP string, allowDual, sloppy bool) ([]net.IP, error) {
+func parseNodeIP(nodeIP string, allowDual, sloppy bool) ([]net.IP, []string, error) {
 var nodeIPs []net.IP
+var invalidIPs []string
 if nodeIP != "" || !sloppy {
 for _, ip := range strings.Split(nodeIP, ",") {
 if sloppy {
@@ -40,10 +40,9 @@ func parseNodeIP(nodeIP string, allowDual, sloppy bool) ([]net.IP, error) {
 }
 parsedNodeIP := netutils.ParseIPSloppy(ip)
 if parsedNodeIP == nil {
-if sloppy {
-klog.InfoS("Could not parse node IP. Ignoring", "IP", ip)
-} else {
-return nil, fmt.Errorf("could not parse %q", ip)
+invalidIPs = append(invalidIPs, ip)
+if !sloppy {
+return nil, invalidIPs, fmt.Errorf("could not parse %q", ip)
 }
 } else {
 nodeIPs = append(nodeIPs, parsedNodeIP)
@@ -52,20 +51,22 @@ func parseNodeIP(nodeIP string, allowDual, sloppy bool) ([]net.IP, error) {
 }
 if len(nodeIPs) > 2 || (len(nodeIPs) == 2 && netutils.IsIPv6(nodeIPs[0]) == netutils.IsIPv6(nodeIPs[1])) {
-return nil, fmt.Errorf("must contain either a single IP or a dual-stack pair of IPs")
+return nil, invalidIPs, fmt.Errorf("must contain either a single IP or a dual-stack pair of IPs")
 } else if len(nodeIPs) == 2 && !allowDual {
-return nil, fmt.Errorf("dual-stack not supported in this configuration")
+return nil, invalidIPs, fmt.Errorf("dual-stack not supported in this configuration")
 } else if len(nodeIPs) == 2 && (nodeIPs[0].IsUnspecified() || nodeIPs[1].IsUnspecified()) {
-return nil, fmt.Errorf("dual-stack node IP cannot include '0.0.0.0' or '::'")
+return nil, invalidIPs, fmt.Errorf("dual-stack node IP cannot include '0.0.0.0' or '::'")
 }
-return nodeIPs, nil
+return nodeIPs, invalidIPs, nil
 }
-// ParseNodeIPArgument parses kubelet's --node-ip argument. If nodeIP contains invalid
-// values, they will be logged and ignored. Dual-stack node IPs are allowed if
-// cloudProvider is unset or `"external"`.
-func ParseNodeIPArgument(nodeIP, cloudProvider string) ([]net.IP, error) {
+// ParseNodeIPArgument parses kubelet's --node-ip argument.
+// If nodeIP contains invalid values, they will be returned as strings.
+// This is done also when an error is returned.
+// The caller then can decide what to do with the invalid values.
+// Dual-stack node IPs are allowed if cloudProvider is unset or `"external"`.
+func ParseNodeIPArgument(nodeIP, cloudProvider string) ([]net.IP, []string, error) {
 var allowDualStack bool
 if cloudProvider == cloudProviderNone || cloudProvider == cloudProviderExternal {
 allowDualStack = true
@@ -77,5 +78,6 @@ func ParseNodeIPArgument(nodeIP, cloudProvider string) ([]net.IP, error) {
 // which can be either a single IP address or a comma-separated pair of IP addresses.
 // Unlike with ParseNodeIPArgument, invalid values are considered an error.
 func ParseNodeIPAnnotation(nodeIP string) ([]net.IP, error) {
-return parseNodeIP(nodeIP, true, false)
+nodeIps, _, err := parseNodeIP(nodeIP, true, false)
+return nodeIps, err
 }
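The new doc comment above shifts responsibility for reporting unparseable --node-ip values to the caller, which is exactly what the RunKubelet hunk at the top of this commit does. A short caller sketch under the same assumption about the k8s.io/component-helpers/node/util import path; the function name and flag variable are illustrative only:

package example

import (
	"context"
	"fmt"
	"net"

	nodeutil "k8s.io/component-helpers/node/util"
	"k8s.io/klog/v2"
)

// resolveNodeIPs is a hypothetical caller: it fails hard on a real parse
// error but merely logs the values the helper skipped, mirroring the
// RunKubelet change earlier in this commit.
func resolveNodeIPs(ctx context.Context, nodeIPFlag, cloudProvider string) ([]net.IP, error) {
	nodeIPs, invalidIPs, err := nodeutil.ParseNodeIPArgument(nodeIPFlag, cloudProvider)
	if err != nil {
		return nil, fmt.Errorf("bad --node-ip %q: %v", nodeIPFlag, err)
	}
	if len(invalidIPs) != 0 {
		klog.FromContext(ctx).Info("Could not parse some node IP(s), ignoring them", "IPs", invalidIPs)
	}
	return nodeIPs, nil
}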

View File

@@ -31,6 +31,7 @@ func TestParseNodeIPArgument(t *testing.T) {
 desc string
 in string
 out []net.IP
+invalids []string
 err string
 ssErr string
 }{
@@ -43,11 +44,13 @@ func TestParseNodeIPArgument(t *testing.T) {
 desc: "just whitespace (ignored)",
 in: " ",
 out: nil,
+invalids: []string{""},
 },
 {
 desc: "garbage (ignored)",
 in: "blah",
 out: nil,
+invalids: []string{"blah"},
 },
 {
 desc: "single IPv4",
@@ -74,11 +77,13 @@ func TestParseNodeIPArgument(t *testing.T) {
 desc: "single IPv4 invalid (ignored)",
 in: "1.2.3",
 out: nil,
+invalids: []string{"1.2.3"},
 },
 {
 desc: "single IPv4 CIDR (ignored)",
 in: "1.2.3.0/24",
 out: nil,
+invalids: []string{"1.2.3.0/24"},
 },
 {
 desc: "single IPv4 unspecified",
@@ -93,6 +98,7 @@ func TestParseNodeIPArgument(t *testing.T) {
 out: []net.IP{
 netutils.ParseIPSloppy("1.2.3.4"),
 },
+invalids: []string{"not-an-IPv6-address"},
 },
 {
 desc: "single IPv6",
@@ -155,6 +161,7 @@ func TestParseNodeIPArgument(t *testing.T) {
 netutils.ParseIPSloppy("abcd::ef01"),
 netutils.ParseIPSloppy("1.2.3.4"),
 },
+invalids: []string{"something else"},
 ssErr: "not supported in this configuration",
 },
 {
@@ -177,9 +184,10 @@ func TestParseNodeIPArgument(t *testing.T) {
 for _, conf := range configurations {
 desc := fmt.Sprintf("%s, cloudProvider=%q", tc.desc, conf.cloudProvider)
 t.Run(desc, func(t *testing.T) {
-parsed, err := ParseNodeIPArgument(tc.in, conf.cloudProvider)
+parsed, invalidIPs, err := ParseNodeIPArgument(tc.in, conf.cloudProvider)
 expectedOut := tc.out
+expectedInvalids := tc.invalids
 expectedErr := tc.err
 if !conf.dualStackSupported {
@@ -194,6 +202,9 @@ func TestParseNodeIPArgument(t *testing.T) {
 if !reflect.DeepEqual(parsed, expectedOut) {
 t.Errorf("expected %#v, got %#v", expectedOut, parsed)
 }
+if !reflect.DeepEqual(invalidIPs, expectedInvalids) {
+t.Errorf("[invalidIps] expected %#v, got %#v", expectedInvalids, invalidIPs)
+}
 if err != nil {
 if expectedErr == "" {
 t.Errorf("unexpected error %v", err)