Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-11 13:02:14 +00:00)
nodeipam: poll nodes immediately
commit 5e94ffe90b
parent f4e246bc93
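The diff below replaces the wait.Poll call used for the initial node listing with wait.PollUntilContextTimeout and immediate=true, so the first List request is issued right away instead of after one full poll interval, and it threads the caller's context (and its logger) down into the helper. A standalone sketch of that polling difference, with made-up intervals and messages rather than anything taken from the commit:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// Older (since-deprecated) helper: the condition only runs after the first interval has elapsed.
	_ = wait.Poll(time.Second, 5*time.Second, func() (bool, error) {
		fmt.Println("wait.Poll: first check after ~1s")
		return true, nil
	})

	// New helper with immediate=true: the condition runs before any sleep,
	// then on the given interval until the timeout or the context ends.
	_ = wait.PollUntilContextTimeout(ctx, time.Second, 5*time.Second, true,
		func(ctx context.Context) (bool, error) {
			fmt.Println("PollUntilContextTimeout: first check runs immediately")
			return true, nil
		})
}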
@@ -75,11 +75,10 @@ const (
 	// updateMaxRetries is the max retries for a failed node
 	updateMaxRetries = 10
-)
-
-// nodePollInterval is used in listing node
-// This is a variable instead of a const to enable testing.
-var nodePollInterval = 10 * time.Second
+
+	// nodePollInterval is used in listing node
+	nodePollInterval = 10 * time.Second
+)
 
 // CIDRAllocator is an interface implemented by things that know how
 // to allocate/occupy/recycle CIDR for nodes.
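With the test override deleted further down, nodePollInterval no longer has to be a package-level var; a Go const cannot be reassigned, which, per the removed comment, was the only reason it was a var. A throwaway illustration with stand-in names, not the real identifiers:

package main

import "time"

const pollConst = 10 * time.Second

var pollVar = 10 * time.Second

func main() {
	pollVar = 10 * time.Millisecond // fine: a test can shorten a var
	// pollConst = 10 * time.Millisecond // compile error: cannot assign to pollConst
	_ = pollConst
	_ = pollVar
}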
@@ -116,8 +115,7 @@ type nodeReservedCIDRs struct {
 
 // New creates a new CIDR range allocator.
 func New(ctx context.Context, kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) {
-	logger := klog.FromContext(ctx)
-	nodeList, err := listNodes(logger, kubeClient)
+	nodeList, err := listNodes(ctx, kubeClient)
 	if err != nil {
 		return nil, err
 	}
@@ -132,13 +130,15 @@ func New(ctx context.Context, kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) {
 	}
 }
 
-func listNodes(logger klog.Logger, kubeClient clientset.Interface) (*v1.NodeList, error) {
+func listNodes(ctx context.Context, kubeClient clientset.Interface) (*v1.NodeList, error) {
 	var nodeList *v1.NodeList
+	logger := klog.FromContext(ctx)
+
 	// We must poll because apiserver might not be up. This error causes
 	// controller manager to restart.
-	if pollErr := wait.Poll(nodePollInterval, apiserverStartupGracePeriod, func() (bool, error) {
+	if pollErr := wait.PollUntilContextTimeout(ctx, nodePollInterval, apiserverStartupGracePeriod, true, func(ctx context.Context) (bool, error) {
 		var err error
-		nodeList, err = kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
+		nodeList, err = kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{
 			FieldSelector: fields.Everything().String(),
 			LabelSelector: labels.Everything().String(),
 		})
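For orientation, a self-contained sketch of the pattern the patched listNodes follows: take a context, recover the logger with klog.FromContext, and poll immediately until the node list succeeds. The fake clientset, the 2s/30s timings, and the function names are illustrative assumptions, not code from this commit:

package main

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/klog/v2"
)

// listNodesSketch mirrors the shape of the patched listNodes: the context carries
// both cancellation and the logger, and polling starts immediately.
func listNodesSketch(ctx context.Context, client clientset.Interface) (*v1.NodeList, error) {
	logger := klog.FromContext(ctx)
	var nodeList *v1.NodeList
	err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 30*time.Second, true,
		func(ctx context.Context) (bool, error) {
			var listErr error
			nodeList, listErr = client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
			if listErr != nil {
				logger.Error(listErr, "Failed to list all nodes, retrying")
				return false, nil
			}
			return true, nil
		})
	return nodeList, err
}

func main() {
	ctx := klog.NewContext(context.Background(), klog.Background())
	client := fake.NewSimpleClientset(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}})
	nodes, err := listNodesSketch(ctx, client)
	if err != nil {
		panic(err)
	}
	klog.FromContext(ctx).Info("Listed nodes", "count", len(nodes.Items))
}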
@@ -118,7 +118,8 @@ func NewController(
 func (c *Controller) Start(logger klog.Logger, nodeInformer informers.NodeInformer) error {
 	logger.Info("Starting IPAM controller", "config", c.config)
 
-	nodes, err := listNodes(logger, c.adapter.k8s)
+	ctx := klog.NewContext(context.TODO(), logger)
+	nodes, err := listNodes(ctx, c.adapter.k8s)
 	if err != nil {
 		return err
 	}
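This hunk is the caller-side half of the same plumbing: Start only has a klog.Logger, so it wraps it into a context with klog.NewContext and listNodes recovers it with klog.FromContext. A tiny sketch of that round trip, with invented function names:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// callee only receives a context but can still log with the caller's logger.
func callee(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.Info("doing work")
}

func main() {
	logger := klog.Background()
	ctx := klog.NewContext(context.TODO(), logger)
	callee(ctx)
}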
@@ -292,13 +292,6 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
 }
 
 func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
-	// Non-parallel test (overrides global var)
-	oldNodePollInterval := nodePollInterval
-	nodePollInterval = test.NodePollInterval
-	defer func() {
-		nodePollInterval = oldNodePollInterval
-	}()
-
 	// all tests operate on a single node
 	testCases := []testCase{
 		{
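This hunk and the matching one below drop the override that shortened nodePollInterval so tests would not sit through the old wait.Poll delay before the first List against the fake client. With immediate polling, the first attempt runs before any sleep, so the default 10-second interval never comes into play when the condition succeeds on the first try. A rough illustration with arbitrary timings, not code from the tests:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Even with a 10s interval, a condition that is already true returns at once
	// because immediate=true checks it before the first sleep.
	_ = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Minute, true,
		func(ctx context.Context) (bool, error) { return true, nil })
	fmt.Println("returned after", time.Since(start))
}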
@@ -680,13 +673,6 @@ type releaseTestCase struct {
 }
 
 func TestReleaseCIDRSuccess(t *testing.T) {
-	// Non-parallel test (overrides global var)
-	oldNodePollInterval := nodePollInterval
-	nodePollInterval = test.NodePollInterval
-	defer func() {
-		nodePollInterval = oldNodePollInterval
-	}()
-
 	testCases := []releaseTestCase{
 		{
 			description: "Correctly release preallocated CIDR",