Extract config common across CIDR allocators

This commit is contained in:
Shyam Jeedigunta 2017-09-11 14:27:29 +02:00
parent db809c0eb7
commit 9be91e42c7
2 changed files with 14 additions and 11 deletions

View File

@@ -54,9 +54,21 @@ const (
 	// IPAMFromCloudAllocatorType uses the ipam controller sync'ing the node
 	// CIDR range allocations from the cloud to the cluster.
 	IPAMFromCloudAllocatorType = "IPAMFromCloud"
+)
+
+// TODO: figure out the good setting for those constants.
+const (
 	// The amount of time the nodecontroller polls on the list nodes endpoint.
 	apiserverStartupGracePeriod = 10 * time.Minute
+	// The no. of NodeSpec updates NC can process concurrently.
+	cidrUpdateWorkers = 10
+	// The max no. of NodeSpec updates that can be enqueued.
+	cidrUpdateQueueSize = 5000
+	// cidrUpdateRetries is the no. of times a NodeSpec update will be retried before dropping it.
+	cidrUpdateRetries = 10
 )

 // CIDRAllocator is an interface implemented by things that know how

View File

@@ -39,15 +39,6 @@ import (
 	"k8s.io/kubernetes/pkg/controller/node/util"
 )

-// TODO: figure out the good setting for those constants.
-const (
-	// controls how many NodeSpec updates NC can process concurrently.
-	cidrUpdateWorkers = 10
-	cidrUpdateQueueSize = 5000
-	// podCIDRUpdateRetry controls the number of retries of writing Node.Spec.PodCIDR update.
-	podCIDRUpdateRetry = 5
-)
-
 type rangeAllocator struct {
 	client clientset.Interface
 	cidrs  *cidrset.CidrSet
@@ -227,7 +218,7 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
 	var err error
 	var node *v1.Node
 	defer r.removeNodeFromProcessing(data.nodeName)
-	for rep := 0; rep < podCIDRUpdateRetry; rep++ {
+	for rep := 0; rep < cidrUpdateRetries; rep++ {
 		// TODO: change it to using PATCH instead of full Node updates.
 		node, err = r.client.Core().Nodes().Get(data.nodeName, metav1.GetOptions{})
 		if err != nil {
@@ -247,7 +238,7 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
 		}
 		node.Spec.PodCIDR = data.cidr.String()
 		if _, err := r.client.Core().Nodes().Update(node); err != nil {
-			glog.Errorf("Failed while updating Node.Spec.PodCIDR (%d retries left): %v", podCIDRUpdateRetry-rep-1, err)
+			glog.Errorf("Failed while updating Node.Spec.PodCIDR (%d retries left): %v", cidrUpdateRetries-rep-1, err)
 		} else {
 			break
 		}