Remove restriction that cluster-cidr be a class-b
Commit fbd125e4e2 (parent 7ce75689a0)
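In short: the GCE configs and Salt pillar now carry a full cluster CIDR (CLUSTER_IP_RANGE / cluster_cidr) instead of a class-B prefix, the controller manager replaces --cluster-class-b with a --cluster-cidr flag backed by util.IPNet, and NodeController carves per-node /24 ranges out of whatever cluster CIDR it is given, failing fast if node-CIDR allocation is requested without one.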
@@ -37,7 +37,7 @@ MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
 MINION_TAG="${INSTANCE_PREFIX}-minion"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
-CLUSTER_IP_RANGE="${KUBE_GCE_CLUSTER_CLASS_B:-10.244}.0.0/16"
+CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
 MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/monitoring" "https://www.googleapis.com/auth/logging.write")
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3
@@ -36,7 +36,7 @@ INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}"
 MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
 MINION_TAG="${INSTANCE_PREFIX}-minion"
-CLUSTER_IP_RANGE="${KUBE_GCE_CLUSTER_CLASS_B:-10.245}.0.0/16"
+CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/logging.write" "https://www.googleapis.com/auth/monitoring")
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
@@ -236,7 +236,7 @@ function create-salt-pillar() {
   cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
 instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
 node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
-cluster_class_b: '$(echo "$KUBE_GCE_CLUSTER_CLASS_B" | sed -e "s/'/''/g")'
+cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
 allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
 portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
 enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
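With the default CLUSTER_IP_RANGE of 10.244.0.0/16, this heredoc now emits cluster_cidr: '10.244.0.0/16' into cluster-params.sls; the Salt template further below turns that pillar value into the controller manager's --cluster-cidr flag.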
@@ -28,7 +28,7 @@ function build-kube-env {
 ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
 INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
 NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
-KUBE_GCE_CLUSTER_CLASS_B: $(yaml-quote ${KUBE_GCE_CLUSTER_CLASS_B:-10.244})
+CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
 SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
 SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
 PORTAL_NET: $(yaml-quote ${PORTAL_NET})
@@ -26,7 +26,7 @@ function build-kube-env {
 ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
 INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
 NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
-KUBE_GCE_CLUSTER_CLASS_B: $(yaml-quote ${KUBE_GCE_CLUSTER_CLASS_B:-10.244})
+CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
 SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
 SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
 PORTAL_NET: $(yaml-quote ${PORTAL_NET})
@@ -1,6 +1,6 @@
 {% set machines = ""-%}
 {% set cluster_name = "" -%}
-{% set cluster_class_b = "" -%}
+{% set cluster_cidr = "" -%}
 {% set allocate_node_cidrs = "" -%}
 {% set minion_regexp = "--minion_regexp=.*" -%}
 {% set sync_nodes = "--sync_nodes=true" -%}
@@ -11,8 +11,8 @@
 {% if pillar['instance_prefix'] is defined -%}
   {% set cluster_name = "--cluster_name=" + pillar['instance_prefix'] -%}
 {% endif -%}
-{% if pillar['cluster_class_b'] is defined -%}
-  {% set cluster_class_b = "--cluster-class-b=" + pillar['cluster_class_b'] -%}
+{% if pillar['cluster_cidr'] is defined -%}
+  {% set cluster_cidr = "--cluster-cidr=" + pillar['cluster_cidr'] -%}
 {% endif -%}
 {% if pillar['allocate_node_cidrs'] is defined -%}
   {% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
@@ -55,7 +55,7 @@
 {% endif -%}
 {% endif -%}
 
-{% set params = "--master=127.0.0.1:8080" + " " + machines + " " + cluster_name + " " + cluster_class_b + " " + allocate_node_cidrs + " " + minion_regexp + " " + cloud_provider + " " + sync_nodes + " " + cloud_config + " " + pillar['log_level'] -%}
+{% set params = "--master=127.0.0.1:8080" + " " + machines + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + minion_regexp + " " + cloud_provider + " " + sync_nodes + " " + cloud_config + " " + pillar['log_level'] -%}
 
 {
 "apiVersion": "v1beta3",
@@ -225,7 +225,7 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
 		api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
 	}}
 
-	nodeController := nodecontroller.NewNodeController(nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(), 40*time.Second, 60*time.Second, 5*time.Second, "", "", false)
+	nodeController := nodecontroller.NewNodeController(nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(), 40*time.Second, 60*time.Second, 5*time.Second, "", nil, false)
 	nodeController.Run(5*time.Second, true)
 	cadvisorInterface := new(cadvisor.Fake)
 
@@ -79,7 +79,7 @@ type CMServer struct {
 	NodeMemory resource.Quantity
 
 	ClusterName       string
-	ClusterClassB     string
+	ClusterCIDR       util.IPNet
 	AllocateNodeCIDRs bool
 	EnableProfiling   bool
 
@@ -147,7 +147,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.Var(resource.NewQuantityFlagValue(&s.NodeMemory), "node-memory", "The amount of memory (in bytes) provisioned on each node")
 	fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster")
 	fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
-	fs.StringVar(&s.ClusterClassB, "cluster-class-b", "10.244", "Class B network address for Pods in cluster.")
+	fs.Var(&s.ClusterCIDR, "cluster-cidr", "CIDR Range for Pods in cluster.")
 	fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
 	fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
 	fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
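The new flag is registered with fs.Var rather than fs.StringVar, which means util.IPNet must satisfy the flag Value interface: Set parses a CIDR string, String prints it back. A minimal standalone sketch of such a wrapper, assuming util.IPNet is essentially a named net.IPNet; the type and method bodies here are illustrative, not the actual util package code:

package main

import (
	"fmt"
	"net"
)

// IPNet is a hypothetical stand-in for util.IPNet: a named type whose
// underlying type is net.IPNet, so it can carry a CIDR flag value.
type IPNet net.IPNet

// Set parses a CIDR string such as "10.244.0.0/16" (flag.Value).
func (ipnet *IPNet) Set(value string) error {
	_, n, err := net.ParseCIDR(value)
	if err != nil {
		return err
	}
	*ipnet = IPNet(*n)
	return nil
}

// String prints the stored CIDR back out (flag.Value).
func (ipnet *IPNet) String() string {
	n := net.IPNet(*ipnet)
	if n.IP == nil {
		return ""
	}
	return n.String()
}

// Type is additionally required by spf13/pflag's Value interface.
func (ipnet *IPNet) Type() string {
	return "ipNet"
}

func main() {
	var cidr IPNet
	if err := cidr.Set("10.244.0.0/16"); err != nil {
		panic(err)
	}
	fmt.Println(cidr.String()) // 10.244.0.0/16
}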
@@ -230,7 +230,7 @@ func (s *CMServer) Run(_ []string) error {
 
 	nodeController := nodecontroller.NewNodeController(cloud, s.MinionRegexp, s.MachineList, nodeResources,
 		kubeClient, s.RegisterRetryCount, s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
-		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, s.ClusterName, s.ClusterClassB, s.AllocateNodeCIDRs)
+		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
 	nodeController.Run(s.NodeSyncPeriod, s.SyncNodeList)
 
 	serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
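The (*net.IPNet)(&s.ClusterCIDR) conversion at this call site is only legal if util.IPNet's underlying type is net.IPNet, the same assumption as the flag sketch above. A minimal illustration:

package main

import (
	"fmt"
	"net"
)

// IPNet again stands in for util.IPNet (assumed underlying type net.IPNet).
type IPNet net.IPNet

func main() {
	_, parsed, err := net.ParseCIDR("10.244.0.0/16")
	if err != nil {
		panic(err)
	}
	cidr := IPNet(*parsed)

	// Identical underlying types make the pointer conversion legal and free;
	// this mirrors (*net.IPNet)(&s.ClusterCIDR) in the diff above.
	std := (*net.IPNet)(&cidr)
	fmt.Println(std.String()) // 10.244.0.0/16
}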
@@ -132,7 +132,7 @@ func runControllerManager(machineList []string, cl *client.Client, nodeMilliCPU,
 
 	const nodeSyncPeriod = 10 * time.Second
 	nodeController := nodecontroller.NewNodeController(
-		nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst), 40*time.Second, 60*time.Second, 5*time.Second, "", "", false)
+		nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst), 40*time.Second, 60*time.Second, 5*time.Second, "", nil, false)
 	nodeController.Run(nodeSyncPeriod, true)
 
 	serviceController := servicecontroller.New(nil, cl, "kubernetes")
@@ -34,7 +34,7 @@ function down-clusters {
 function up-clusters {
   for count in $(seq 1 ${clusters}); do
     export KUBE_GCE_INSTANCE_PREFIX=e2e-test-${USER}-${count}
-    export KUBE_GCE_CLUSTER_CLASS_B="10.$((${count}*2-1))"
+    export CLUSTER_IP_RANGE="10.$((${count}*2-1)).0.0/16"
     export MASTER_IP_RANGE="10.$((${count}*2)).0.0/24"
 
     local cluster_dir=${KUBE_ROOT}/_output/e2e/${KUBE_GCE_INSTANCE_PREFIX}
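With this numbering, the first test cluster gets CLUSTER_IP_RANGE=10.1.0.0/16 and MASTER_IP_RANGE=10.2.0.0/24, the second gets 10.3.0.0/16 and 10.4.0.0/24, and so on, so neighbouring clusters' pod and master ranges never overlap.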
@@ -89,7 +89,7 @@ type NodeController struct {
 	// TODO: Change node status monitor to watch based.
 	nodeMonitorPeriod time.Duration
 	clusterName       string
-	clusterClassB     string
+	clusterCIDR       *net.IPNet
 	allocateNodeCIDRs bool
 	// Method for easy mocking in unittest.
 	lookupIP func(host string) ([]net.IP, error)
@@ -110,7 +110,7 @@ func NewNodeController(
 	nodeStartupGracePeriod time.Duration,
 	nodeMonitorPeriod time.Duration,
 	clusterName string,
-	clusterClassB string,
+	clusterCIDR *net.IPNet,
 	allocateNodeCIDRs bool) *NodeController {
 	eventBroadcaster := record.NewBroadcaster()
 	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
@@ -120,6 +120,9 @@ func NewNodeController(
 	} else {
 		glog.Infof("No api server defined - no events will be sent to API server.")
 	}
+	if allocateNodeCIDRs && clusterCIDR == nil {
+		glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
+	}
 	return &NodeController{
 		cloud:   cloud,
 		matchRE: matchRE,
@@ -137,7 +140,7 @@ func NewNodeController(
 		lookupIP:          net.LookupIP,
 		now:               util.Now,
 		clusterName:       clusterName,
-		clusterClassB:     clusterClassB,
+		clusterCIDR:       clusterCIDR,
 		allocateNodeCIDRs: allocateNodeCIDRs,
 	}
 }
@@ -145,9 +148,12 @@ func NewNodeController(
 // Generates num pod CIDRs that could be assigned to nodes.
 func (nc *NodeController) generateCIDRs(num int) util.StringSet {
 	res := util.NewStringSet()
+	cidrIP := nc.clusterCIDR.IP.To4()
 	for i := 0; i < num; i++ {
 		// TODO: Make the CIDRs configurable.
-		res.Insert(fmt.Sprintf("%v.%v.0/24", nc.clusterClassB, i))
+		b1 := byte(i >> 8)
+		b2 := byte(i % 256)
+		res.Insert(fmt.Sprintf("%d.%d.%d.0/24", cidrIP[0], cidrIP[1]+b1, cidrIP[2]+b2))
 	}
 	return res
 }
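Node CIDRs are now carved out of the configured cluster CIDR instead of being stamped from a class-B prefix. A small standalone sketch of that arithmetic, replicating the loop above outside the controller (the helper below is illustrative, not the real NodeController method):

package main

import (
	"fmt"
	"net"
)

// generateCIDRs reproduces the per-node CIDR arithmetic from the diff:
// node i gets a.(b + i/256).(c + i%256).0/24, where a.b.c.0 is the
// cluster CIDR's base address.
func generateCIDRs(clusterCIDR *net.IPNet, num int) []string {
	cidrIP := clusterCIDR.IP.To4()
	res := make([]string, 0, num)
	for i := 0; i < num; i++ {
		b1 := byte(i >> 8)
		b2 := byte(i % 256)
		res = append(res, fmt.Sprintf("%d.%d.%d.0/24", cidrIP[0], cidrIP[1]+b1, cidrIP[2]+b2))
	}
	return res
}

func main() {
	_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16")
	// First three ranges plus the 257th, to show the carry into the second octet.
	cidrs := generateCIDRs(clusterCIDR, 257)
	fmt.Println(cidrs[0], cidrs[1], cidrs[2], cidrs[256])
	// Output: 10.244.0.0/24 10.244.1.0/24 10.244.2.0/24 10.245.0.0/24
}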
@@ -246,7 +246,7 @@ func TestRegisterNodes(t *testing.T) {
 			nodes.Items = append(nodes.Items, *newNode(machine))
 		}
 		nodeController := NewNodeController(nil, "", item.machines, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		err := nodeController.registerNodes(&nodes, item.retryCount, time.Millisecond)
 		if !item.expectedFail && err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -332,7 +332,7 @@ func TestCreateGetStaticNodesWithSpec(t *testing.T) {
 	}
 	for _, item := range table {
 		nodeController := NewNodeController(nil, "", item.machines, &resources, nil, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		nodes, err := nodeController.getStaticNodesWithSpec()
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -394,7 +394,7 @@ func TestCreateGetCloudNodesWithSpec(t *testing.T) {
 
 	for _, item := range table {
 		nodeController := NewNodeController(item.fakeCloud, ".*", nil, &api.NodeResources{}, nil, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		nodes, err := nodeController.getCloudNodesWithSpec()
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -504,7 +504,7 @@ func TestSyncCloudNodes(t *testing.T) {
 			item.fakeNodeHandler.Fake = testclient.NewSimpleFake()
 		}
 		nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		if err := nodeController.syncCloudNodes(); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -588,7 +588,7 @@ func TestSyncCloudNodesEvictPods(t *testing.T) {
 			item.fakeNodeHandler.Fake = testclient.NewSimpleFake()
 		}
 		nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		if err := nodeController.syncCloudNodes(); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -628,7 +628,7 @@ func TestPopulateNodeAddresses(t *testing.T) {
 
 	for _, item := range table {
 		nodeController := NewNodeController(item.fakeCloud, ".*", nil, nil, nil, 10, time.Minute,
-			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		result, err := nodeController.populateAddresses(item.nodes)
 		// In case of IP querying error, we should continue.
 		if err != nil {
@@ -828,7 +828,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
 	for _, item := range table {
 		nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, 10,
 			evictionTimeout, util.NewFakeRateLimiter(), testNodeMonitorGracePeriod,
-			testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		nodeController.now = func() util.Time { return fakeNow }
 		if err := nodeController.monitorNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -1030,7 +1030,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
 
 	for _, item := range table {
 		nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, 10, 5*time.Minute, util.NewFakeRateLimiter(),
-			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", "", false)
+			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, "", nil, false)
 		nodeController.now = func() util.Time { return fakeNow }
 		if err := nodeController.monitorNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)