From 94f580ef03fd8e2cb0d4d9c2550fd231e8406dac Mon Sep 17 00:00:00 2001
From: Yu-Ju Hong
Date: Tue, 25 Oct 2016 08:38:59 -0700
Subject: [PATCH] Revert "bootstrap: Start hostNetwork pods even if network plugin not ready"

---
 cluster/aws/templates/configure-vm-aws.sh |  2 ++
 cluster/gce/configure-vm.sh               |  2 ++
 cluster/gce/gci/configure-helper.sh       |  6 +++++-
 cluster/gce/trusty/configure-helper.sh    |  2 +-
 cluster/saltbase/salt/kubelet/default     |  1 +
 pkg/kubelet/kubelet.go                    |  8 +-------
 pkg/kubelet/kubelet_node_status.go        |  3 +--
 pkg/kubelet/runonce_test.go               |  1 -
 pkg/kubelet/runtime.go                    | 15 ++++-----------
 9 files changed, 17 insertions(+), 23 deletions(-)

diff --git a/cluster/aws/templates/configure-vm-aws.sh b/cluster/aws/templates/configure-vm-aws.sh
index 0be5203add1..6abca6ed436 100755
--- a/cluster/aws/templates/configure-vm-aws.sh
+++ b/cluster/aws/templates/configure-vm-aws.sh
@@ -91,6 +91,7 @@ EOF
   if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
     cat <<EOF >>/etc/salt/minion.d/grains.conf
   kubelet_api_servers: '${KUBELET_APISERVER}'
+  cbr-cidr: 10.123.45.0/29
 EOF
   else
     # If the kubelet is running disconnected from a master, give it a fixed
@@ -109,6 +110,7 @@ salt-node-role() {
 grains:
   roles:
     - kubernetes-pool
+  cbr-cidr: 10.123.45.0/29
   cloud: aws
   api_servers: '${API_SERVERS}'
 EOF
diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh
index 558a019d84c..44a56f2ab78 100755
--- a/cluster/gce/configure-vm.sh
+++ b/cluster/gce/configure-vm.sh
@@ -949,6 +949,7 @@ EOF
   if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
     cat <<EOF >>/etc/salt/minion.d/grains.conf
   kubelet_api_servers: '${KUBELET_APISERVER}'
+  cbr-cidr: 10.123.45.0/29
 EOF
   else
     # If the kubelet is running disconnected from a master, give it a fixed
@@ -967,6 +968,7 @@ function salt-node-role() {
 grains:
   roles:
     - kubernetes-pool
+  cbr-cidr: 10.123.45.0/29
   cloud: gce
   api_servers: '${KUBERNETES_MASTER_NAME}'
 EOF
diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh
index b4d3e0ce84d..32de3d840a4 100644
--- a/cluster/gce/gci/configure-helper.sh
+++ b/cluster/gce/gci/configure-helper.sh
@@ -486,8 +486,12 @@ function start-kubelet {
     if [[ ! -z "${KUBELET_APISERVER:-}" && ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
       flags+=" --api-servers=https://${KUBELET_APISERVER}"
       flags+=" --register-schedulable=false"
+      # need at least a /29 pod cidr for now due to #32844
+      # TODO: determine if we still allow non-hostnetwork pods to run on master, clean up master pod setup
+      # WARNING: potential ip range collision with 10.123.45.0/29
+      flags+=" --pod-cidr=10.123.45.0/29"
+      reconcile_cidr="false"
     else
-      # Standalone mode (not widely used?)
       flags+=" --pod-cidr=${MASTER_IP_RANGE}"
     fi
   else # For nodes
diff --git a/cluster/gce/trusty/configure-helper.sh b/cluster/gce/trusty/configure-helper.sh
index 18b74f9617f..02056a1ab5f 100644
--- a/cluster/gce/trusty/configure-helper.sh
+++ b/cluster/gce/trusty/configure-helper.sh
@@ -155,7 +155,7 @@ assemble_kubelet_flags() {
   if [ ! -z "${KUBELET_APISERVER:-}" ] && \
      [ ! -z "${KUBELET_CERT:-}" ] && \
      [ ! -z "${KUBELET_KEY:-}" ]; then
-    KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --api-servers=https://${KUBELET_APISERVER} --register-schedulable=false"
+    KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --api-servers=https://${KUBELET_APISERVER} --register-schedulable=false --reconcile-cidr=false --pod-cidr=10.123.45.0/29"
   else
     KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --pod-cidr=${MASTER_IP_RANGE}"
   fi
diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default
index 6ea1972efca..e836f0c3fcf 100644
--- a/cluster/saltbase/salt/kubelet/default
+++ b/cluster/saltbase/salt/kubelet/default
@@ -35,6 +35,7 @@
 {% if grains.kubelet_api_servers is defined -%}
   {% set api_servers_with_port = "--api-servers=https://" + grains.kubelet_api_servers -%}
   {% set master_kubelet_args = master_kubelet_args + "--register-schedulable=false" -%}
+  {% set reconcile_cidr_args = "--reconcile-cidr=false" -%}
 {% else -%}
   {% set api_servers_with_port = "" -%}
 {% endif -%}
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 82369ae8693..e6269f04c42 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -1496,12 +1496,6 @@ func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) {
 // can be admitted, a brief single-word reason and a message explaining why
 // the pod cannot be admitted.
 func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, string) {
-	if rs := kl.runtimeState.networkErrors(); len(rs) != 0 {
-		if !podUsesHostNetwork(pod) {
-			return false, "NetworkNotReady", fmt.Sprintf("Network is not ready: %v", rs)
-		}
-	}
-
 	// the kubelet will invoke each pod admit handler in sequence
 	// if any handler rejects, the pod is rejected.
 	// TODO: move out of disk check into a pod admitter
@@ -1538,7 +1532,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
 	defer housekeepingTicker.Stop()
 	plegCh := kl.pleg.Watch()
 	for {
-		if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 {
+		if rs := kl.runtimeState.errors(); len(rs) != 0 {
 			glog.Infof("skipping pod synchronization - %v", rs)
 			time.Sleep(5 * time.Second)
 			continue
diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go
index dbcf8f0c721..a8bc9ee18a9 100644
--- a/pkg/kubelet/kubelet_node_status.go
+++ b/pkg/kubelet/kubelet_node_status.go
@@ -568,8 +568,7 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
 	// ref: https://github.com/kubernetes/kubernetes/issues/16961
 	currentTime := unversioned.NewTime(kl.clock.Now())
 	var newNodeReadyCondition api.NodeCondition
-	rs := append(kl.runtimeState.runtimeErrors(), kl.runtimeState.networkErrors()...)
-	if len(rs) == 0 {
+	if rs := kl.runtimeState.errors(); len(rs) == 0 {
 		newNodeReadyCondition = api.NodeCondition{
 			Type:   api.NodeReady,
 			Status: api.ConditionTrue,
diff --git a/pkg/kubelet/runonce_test.go b/pkg/kubelet/runonce_test.go
index 6d7d2664494..6e595ad9c72 100644
--- a/pkg/kubelet/runonce_test.go
+++ b/pkg/kubelet/runonce_test.go
@@ -83,7 +83,6 @@ func TestRunOnce(t *testing.T) {
 		kubeClient:   &fake.Clientset{},
 		hostname:     testKubeletHostname,
 		nodeName:     testKubeletHostname,
-		runtimeState: newRuntimeState(time.Second),
 	}
 	kb.containerManager = cm.NewStubContainerManager()
 
diff --git a/pkg/kubelet/runtime.go b/pkg/kubelet/runtime.go
index 6cb74fe364c..90a83898a31 100644
--- a/pkg/kubelet/runtime.go
+++ b/pkg/kubelet/runtime.go
@@ -68,13 +68,16 @@ func (s *runtimeState) setInitError(err error) {
 	s.initError = err
 }
 
-func (s *runtimeState) runtimeErrors() []string {
+func (s *runtimeState) errors() []string {
 	s.RLock()
 	defer s.RUnlock()
 	var ret []string
 	if s.initError != nil {
 		ret = append(ret, s.initError.Error())
 	}
+	if s.networkError != nil {
+		ret = append(ret, s.networkError.Error())
+	}
 	if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
 		ret = append(ret, "container runtime is down")
 	}
@@ -84,16 +87,6 @@ func (s *runtimeState) runtimeErrors() []string {
 	return ret
 }
 
-func (s *runtimeState) networkErrors() []string {
-	s.RLock()
-	defer s.RUnlock()
-	var ret []string
-	if s.networkError != nil {
-		ret = append(ret, s.networkError.Error())
-	}
-	return ret
-}
-
 func newRuntimeState(
 	runtimeSyncThreshold time.Duration,
 ) *runtimeState {
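
Editor's note (illustration only, not part of the patch): the core of this revert is folding networkErrors() back into a single runtimeState accessor, so a network-plugin failure once again surfaces through the one errors() list that both syncLoop and the NodeReady condition consult, instead of being special-cased so that hostNetwork pods could still be admitted. The following is a minimal, self-contained Go sketch of that consolidated accessor; the type is a simplified stand-in for the kubelet's internal runtimeState (pkg/kubelet/runtime.go), with names taken from the hunks above but not the real implementation.

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// runtimeState is a simplified stand-in for the kubelet's internal
// runtimeState; only the fields touched by the patch are modeled here.
type runtimeState struct {
	sync.RWMutex
	lastBaseRuntimeSync      time.Time
	baseRuntimeSyncThreshold time.Duration
	networkError             error
	initError                error
}

func (s *runtimeState) setNetworkState(err error) {
	s.Lock()
	defer s.Unlock()
	s.networkError = err
}

// errors reports init, network, and runtime-sync problems through one
// accessor, as restored by the revert: network errors are no longer
// segregated into a separate networkErrors() method.
func (s *runtimeState) errors() []string {
	s.RLock()
	defer s.RUnlock()
	var ret []string
	if s.initError != nil {
		ret = append(ret, s.initError.Error())
	}
	if s.networkError != nil {
		ret = append(ret, s.networkError.Error())
	}
	if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
		ret = append(ret, "container runtime is down")
	}
	return ret
}

func main() {
	s := &runtimeState{
		lastBaseRuntimeSync:      time.Now(),
		baseRuntimeSyncThreshold: 30 * time.Second,
	}
	s.setNetworkState(errors.New("network plugin not ready"))
	// Post-revert, the sync loop and the NodeReady condition both consult this
	// single list, so a network error pauses all pod synchronization
	// (hostNetwork pods included) until the plugin reports ready.
	fmt.Println(s.errors()) // prints: [network plugin not ready]
}

Because the master kubelet can no longer rely on hostNetwork-only admission while its network plugin comes up, the cluster scripts above re-pin it to the fixed 10.123.45.0/29 pod CIDR with --reconcile-cidr=false, as shown in the shell and Salt hunks of the patch.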