diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index a6a77237818..6a0406ec7f7 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -37,7 +37,7 @@ MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
 MINION_TAG="${INSTANCE_PREFIX}-minion"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
-CLUSTER_IP_RANGE="${KUBE_GCE_CLUSTER_CLASS_B:-10.244}.0.0/16"
+CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
 MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/monitoring" "https://www.googleapis.com/auth/logging.write")
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index 1b1c614115f..2c90f796fbd 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -36,7 +36,7 @@ INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}"
 MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
 MINION_TAG="${INSTANCE_PREFIX}-minion"
-CLUSTER_IP_RANGE="${KUBE_GCE_CLUSTER_CLASS_B:-10.245}.0.0/16"
+CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/logging.write" "https://www.googleapis.com/auth/monitoring")
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh
index 1e9cfffed9c..3d8b3ae2183 100644
--- a/cluster/gce/configure-vm.sh
+++ b/cluster/gce/configure-vm.sh
@@ -236,7 +236,7 @@ function create-salt-pillar() {
   cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
 instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
 node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
-cluster_class_b: '$(echo "$KUBE_GCE_CLUSTER_CLASS_B" | sed -e "s/'/''/g")'
+cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
 allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
 portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
 enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
diff --git a/cluster/gce/coreos/helper.sh b/cluster/gce/coreos/helper.sh
index 6db9beb28ab..0d4fd4d9c3b 100644
--- a/cluster/gce/coreos/helper.sh
+++ b/cluster/gce/coreos/helper.sh
@@ -28,7 +28,7 @@ function build-kube-env {
 ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
 INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
 NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
-KUBE_GCE_CLUSTER_CLASS_B: $(yaml-quote ${KUBE_GCE_CLUSTER_CLASS_B:-10.244})
+CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
 SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
 SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
 PORTAL_NET: $(yaml-quote ${PORTAL_NET})
diff --git a/cluster/gce/debian/helper.sh b/cluster/gce/debian/helper.sh
index 8551d821e52..c38f18b6064 100644
--- a/cluster/gce/debian/helper.sh
+++ b/cluster/gce/debian/helper.sh
@@ -26,7 +26,7 @@ function build-kube-env {
 ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
 INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
 NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
-KUBE_GCE_CLUSTER_CLASS_B: $(yaml-quote ${KUBE_GCE_CLUSTER_CLASS_B:-10.244})
+CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
 SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
 SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
 PORTAL_NET: $(yaml-quote ${PORTAL_NET})
diff --git a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
index cb2f4c02d6a..1c41d06a1eb 100644
--- a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
+++ b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
@@ -1,6 +1,6 @@
 {% set machines = ""-%}
 {% set cluster_name = "" -%}
-{% set cluster_class_b = "" -%}
+{% set cluster_cidr = "" -%}
 {% set allocate_node_cidrs = "" -%}
 {% set minion_regexp = "--minion_regexp=.*" -%}
 {% set sync_nodes = "--sync_nodes=true" -%}
@@ -11,8 +11,8 @@
 {% if pillar['instance_prefix'] is defined -%}
   {% set cluster_name = "--cluster_name=" + pillar['instance_prefix'] -%}
 {% endif -%}
-{% if pillar['cluster_class_b'] is defined -%}
-  {% set cluster_class_b = "--cluster-class-b=" + pillar['cluster_class_b'] -%}
+{% if pillar['cluster_cidr'] is defined -%}
+  {% set cluster_cidr = "--cluster-cidr=" + pillar['cluster_cidr'] -%}
 {% endif -%}
 {% if pillar['allocate_node_cidrs'] is defined -%}
   {% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
@@ -55,7 +55,7 @@
   {% endif -%}
 {% endif -%}
 
-{% set params = "--master=127.0.0.1:8080" + " " + machines + " " + cluster_name + " " + cluster_class_b + " " + allocate_node_cidrs + " " + minion_regexp + " " + cloud_provider + " " + sync_nodes + " " + cloud_config + " " + pillar['log_level'] -%}
+{% set params = "--master=127.0.0.1:8080" + " " + machines + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + minion_regexp + " " + cloud_provider + " " + sync_nodes + " " + cloud_config + " " + pillar['log_level'] -%}
 
 {
 "apiVersion": "v1beta3",
diff --git a/cmd/integration/integration.go b/cmd/integration/integration.go
index 9749a1f688e..4b67b3688d9 100644
--- a/cmd/integration/integration.go
+++ b/cmd/integration/integration.go
@@ -225,7 +225,7 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
 		api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
 	}}
 
-	nodeController := nodecontroller.NewNodeController(nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(), 40*time.Second, 60*time.Second, 5*time.Second, "", "", false)
+	nodeController := nodecontroller.NewNodeController(nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(), 40*time.Second, 60*time.Second, 5*time.Second, "", nil, false)
 	nodeController.Run(5*time.Second, true)
 
 	cadvisorInterface := new(cadvisor.Fake)
diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go
index 74f2078fcbf..ac8fcef9b48 100644
--- a/cmd/kube-controller-manager/app/controllermanager.go
+++ b/cmd/kube-controller-manager/app/controllermanager.go
@@ -79,7 +79,7 @@ type CMServer struct {
 	NodeMemory resource.Quantity
 
 	ClusterName       string
-	ClusterClassB     string
+	ClusterCIDR       util.IPNet
 	AllocateNodeCIDRs bool
 	EnableProfiling   bool
interface host:port/debug/pprof/") - fs.StringVar(&s.ClusterClassB, "cluster-class-b", "10.244", "Class B network address for Pods in cluster.") + fs.Var(&s.ClusterCIDR, "cluster-cidr", "CIDR Range for Pods in cluster.") fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.") fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") @@ -230,7 +230,7 @@ func (s *CMServer) Run(_ []string) error { nodeController := nodecontroller.NewNodeController(cloud, s.MinionRegexp, s.MachineList, nodeResources, kubeClient, s.RegisterRetryCount, s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst), - s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, s.ClusterName, s.ClusterClassB, s.AllocateNodeCIDRs) + s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs) nodeController.Run(s.NodeSyncPeriod, s.SyncNodeList) serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName) diff --git a/cmd/kubernetes/kubernetes.go b/cmd/kubernetes/kubernetes.go index 559460473a9..87c7cdf4ebf 100644 --- a/cmd/kubernetes/kubernetes.go +++ b/cmd/kubernetes/kubernetes.go @@ -132,7 +132,7 @@ func runControllerManager(machineList []string, cl *client.Client, nodeMilliCPU, const nodeSyncPeriod = 10 * time.Second nodeController := nodecontroller.NewNodeController( - nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst), 40*time.Second, 60*time.Second, 5*time.Second, "", "", false) + nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst), 40*time.Second, 60*time.Second, 5*time.Second, "", nil, false) nodeController.Run(nodeSyncPeriod, true) serviceController := servicecontroller.New(nil, cl, "kubernetes") diff --git a/hack/parallel-e2e.sh b/hack/parallel-e2e.sh index fa0bccb25f0..ea0084e5135 100755 --- a/hack/parallel-e2e.sh +++ b/hack/parallel-e2e.sh @@ -34,7 +34,7 @@ function down-clusters { function up-clusters { for count in $(seq 1 ${clusters}); do export KUBE_GCE_INSTANCE_PREFIX=e2e-test-${USER}-${count} - export KUBE_GCE_CLUSTER_CLASS_B="10.$((${count}*2-1))" + export CLUSTER_IP_RANGE="10.$((${count}*2-1)).0.0/16" export MASTER_IP_RANGE="10.$((${count}*2)).0.0/24" local cluster_dir=${KUBE_ROOT}/_output/e2e/${KUBE_GCE_INSTANCE_PREFIX} diff --git a/pkg/cloudprovider/nodecontroller/nodecontroller.go b/pkg/cloudprovider/nodecontroller/nodecontroller.go index 1b7f444e64d..81cc159f7a2 100644 --- a/pkg/cloudprovider/nodecontroller/nodecontroller.go +++ b/pkg/cloudprovider/nodecontroller/nodecontroller.go @@ -89,7 +89,7 @@ type NodeController struct { // TODO: Change node status monitor to watch based. nodeMonitorPeriod time.Duration clusterName string - clusterClassB string + clusterCIDR *net.IPNet allocateNodeCIDRs bool // Method for easy mocking in unittest. 
diff --git a/pkg/cloudprovider/nodecontroller/nodecontroller.go b/pkg/cloudprovider/nodecontroller/nodecontroller.go
index 1b7f444e64d..81cc159f7a2 100644
--- a/pkg/cloudprovider/nodecontroller/nodecontroller.go
+++ b/pkg/cloudprovider/nodecontroller/nodecontroller.go
@@ -89,7 +89,7 @@ type NodeController struct {
 	// TODO: Change node status monitor to watch based.
 	nodeMonitorPeriod time.Duration
 	clusterName       string
-	clusterClassB     string
+	clusterCIDR       *net.IPNet
 	allocateNodeCIDRs bool
 	// Method for easy mocking in unittest.
 	lookupIP func(host string) ([]net.IP, error)
@@ -110,7 +110,7 @@ func NewNodeController(
 	nodeStartupGracePeriod time.Duration,
 	nodeMonitorPeriod time.Duration,
 	clusterName string,
-	clusterClassB string,
+	clusterCIDR *net.IPNet,
 	allocateNodeCIDRs bool) *NodeController {
 	eventBroadcaster := record.NewBroadcaster()
 	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
@@ -120,6 +120,9 @@ func NewNodeController(
 	} else {
 		glog.Infof("No api server defined - no events will be sent to API server.")
 	}
+	if allocateNodeCIDRs && clusterCIDR == nil {
+		glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
+	}
 	return &NodeController{
 		cloud:   cloud,
 		matchRE: matchRE,
@@ -137,7 +140,7 @@ func NewNodeController(
 		lookupIP:          net.LookupIP,
 		now:               util.Now,
 		clusterName:       clusterName,
-		clusterClassB:     clusterClassB,
+		clusterCIDR:       clusterCIDR,
 		allocateNodeCIDRs: allocateNodeCIDRs,
 	}
 }
@@ -145,9 +148,12 @@
 // Generates num pod CIDRs that could be assigned to nodes.
 func (nc *NodeController) generateCIDRs(num int) util.StringSet {
 	res := util.NewStringSet()
+	cidrIP := nc.clusterCIDR.IP.To4()
 	for i := 0; i < num; i++ {
 		// TODO: Make the CIDRs configurable.
-		res.Insert(fmt.Sprintf("%v.%v.0/24", nc.clusterClassB, i))
+		b1 := byte(i >> 8)
+		b2 := byte(i % 256)
+		res.Insert(fmt.Sprintf("%d.%d.%d.0/24", cidrIP[0], cidrIP[1]+b1, cidrIP[2]+b2))
 	}
 	return res
 }
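For reference, the patched generateCIDRs above carves per-node /24 ranges out of the cluster CIDR by splitting the node index across the second and third octets (i>>8 and i%256). A standalone sketch of the same arithmetic, assuming a /16 cluster CIDR; the helper name generatePodCIDRs is made up for illustration:

```go
package main

import (
	"fmt"
	"net"
)

// generatePodCIDRs mirrors the arithmetic of the patched generateCIDRs: each
// node gets a /24 carved out of the cluster CIDR, with the node index split
// across the second and third octets. Illustration only, not controller code.
func generatePodCIDRs(clusterCIDR *net.IPNet, num int) []string {
	ip := clusterCIDR.IP.To4()
	res := make([]string, 0, num)
	for i := 0; i < num; i++ {
		b1 := byte(i >> 8)
		b2 := byte(i % 256)
		res = append(res, fmt.Sprintf("%d.%d.%d.0/24", ip[0], ip[1]+b1, ip[2]+b2))
	}
	return res
}

func main() {
	_, cidr, _ := net.ParseCIDR("10.244.0.0/16")
	// Yields 10.244.0.0/24, 10.244.1.0/24, 10.244.2.0/24; once the index
	// exceeds 255 the second octet is bumped, so node 256 lands in 10.245.0.0/24.
	fmt.Println(generatePodCIDRs(cidr, 3))
}
```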
diff --git a/pkg/cloudprovider/nodecontroller/nodecontroller_test.go b/pkg/cloudprovider/nodecontroller/nodecontroller_test.go
index 3995c5f73f5..14ecc9d53dc 100644
--- a/pkg/cloudprovider/nodecontroller/nodecontroller_test.go
+++ b/pkg/cloudprovider/nodecontroller/nodecontroller_test.go
@@ -246,7 +246,7 @@ func TestRegisterNodes(t *testing.T) {
 			nodes.Items = append(nodes.Items, *newNode(machine))
 		}
 		nodeController := NewNodeController(nil, "", item.machines, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		err := nodeController.registerNodes(&nodes, item.retryCount, time.Millisecond)
 		if !item.expectedFail && err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -332,7 +332,7 @@ func TestCreateGetStaticNodesWithSpec(t *testing.T) {
 	}
 	for _, item := range table {
 		nodeController := NewNodeController(nil, "", item.machines, &resources, nil, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		nodes, err := nodeController.getStaticNodesWithSpec()
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -394,7 +394,7 @@ func TestCreateGetCloudNodesWithSpec(t *testing.T) {
 	for _, item := range table {
 		nodeController := NewNodeController(item.fakeCloud, ".*", nil, &api.NodeResources{}, nil, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		nodes, err := nodeController.getCloudNodesWithSpec()
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -504,7 +504,7 @@ func TestSyncCloudNodes(t *testing.T) {
 			item.fakeNodeHandler.Fake = testclient.NewSimpleFake()
 		}
 		nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		if err := nodeController.syncCloudNodes(); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -588,7 +588,7 @@ func TestSyncCloudNodesEvictPods(t *testing.T) {
 			item.fakeNodeHandler.Fake = testclient.NewSimpleFake()
 		}
 		nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		if err := nodeController.syncCloudNodes(); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -628,7 +628,7 @@ func TestPopulateNodeAddresses(t *testing.T) {
 	for _, item := range table {
 		nodeController := NewNodeController(item.fakeCloud, ".*", nil, nil, nil, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		result, err := nodeController.populateAddresses(item.nodes)
 		// In case of IP querying error, we should continue.
 		if err != nil {
@@ -828,7 +828,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
 	for _, item := range table {
 		nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, 10,
 			evictionTimeout, util.NewFakeRateLimiter(), testNodeMonitorGracePeriod,
-			testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		nodeController.now = func() util.Time { return fakeNow }
 		if err := nodeController.monitorNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -1030,7 +1030,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
 	for _, item := range table {
 		nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, 10,
 			5*time.Minute, util.NewFakeRateLimiter(),
-			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		nodeController.now = func() util.Time { return fakeNow }
 		if err := nodeController.monitorNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
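Aside (an assumption, not code from this patch): because --cluster-cidr is registered with fs.Var above, util.IPNet has to satisfy pflag's Value interface. A rough, self-contained stand-in using the standard flag package might look like the sketch below; the real util.IPNet may differ in detail.

```go
package main

import (
	"flag"
	"fmt"
	"net"
)

// ipNetValue is a hypothetical stand-in for the util.IPNet flag type
// referenced in the diff: a flag.Value that parses a "--cluster-cidr"
// argument into a net.IPNet.
type ipNetValue net.IPNet

func (v *ipNetValue) String() string {
	return (*net.IPNet)(v).String()
}

func (v *ipNetValue) Set(s string) error {
	_, cidr, err := net.ParseCIDR(s)
	if err != nil {
		return err
	}
	*v = ipNetValue(*cidr)
	return nil
}

func main() {
	var clusterCIDR ipNetValue
	flag.Var(&clusterCIDR, "cluster-cidr", "CIDR Range for Pods in cluster.")
	flag.Parse()
	fmt.Println("cluster CIDR:", clusterCIDR.String())
}
```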