Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 11:50:44 +00:00
Revert "bootstrap: Start hostNetwork pods even if network plugin not ready"
parent 0dc983a4c0
commit 94f580ef03
@@ -91,6 +91,7 @@ EOF
 if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
   cat <<EOF >>/etc/salt/minion.d/grains.conf
 kubelet_api_servers: '${KUBELET_APISERVER}'
+cbr-cidr: 10.123.45.0/29
 EOF
 else
   # If the kubelet is running disconnected from a master, give it a fixed
@@ -109,6 +110,7 @@ salt-node-role() {
 grains:
   roles:
     - kubernetes-pool
+  cbr-cidr: 10.123.45.0/29
   cloud: aws
   api_servers: '${API_SERVERS}'
 EOF
@@ -949,6 +949,7 @@ EOF
 if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
   cat <<EOF >>/etc/salt/minion.d/grains.conf
 kubelet_api_servers: '${KUBELET_APISERVER}'
+cbr-cidr: 10.123.45.0/29
 EOF
 else
   # If the kubelet is running disconnected from a master, give it a fixed
@@ -967,6 +968,7 @@ function salt-node-role() {
 grains:
   roles:
     - kubernetes-pool
+  cbr-cidr: 10.123.45.0/29
   cloud: gce
   api_servers: '${KUBERNETES_MASTER_NAME}'
 EOF
@@ -486,8 +486,12 @@ function start-kubelet {
   if [[ ! -z "${KUBELET_APISERVER:-}" && ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
     flags+=" --api-servers=https://${KUBELET_APISERVER}"
     flags+=" --register-schedulable=false"
+    # need at least a /29 pod cidr for now due to #32844
+    # TODO: determine if we still allow non-hostnetwork pods to run on master, clean up master pod setup
+    # WARNING: potential ip range collision with 10.123.45.0/29
+    flags+=" --pod-cidr=10.123.45.0/29"
+    reconcile_cidr="false"
   else
-    # Standalone mode (not widely used?)
     flags+=" --pod-cidr=${MASTER_IP_RANGE}"
   fi
 else # For nodes
@@ -155,7 +155,7 @@ assemble_kubelet_flags() {
   if [ ! -z "${KUBELET_APISERVER:-}" ] && \
      [ ! -z "${KUBELET_CERT:-}" ] && \
      [ ! -z "${KUBELET_KEY:-}" ]; then
-    KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --api-servers=https://${KUBELET_APISERVER} --register-schedulable=false"
+    KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --api-servers=https://${KUBELET_APISERVER} --register-schedulable=false --reconcile-cidr=false --pod-cidr=10.123.45.0/29"
   else
     KUBELET_CMD_FLAGS="${KUBELET_CMD_FLAGS} --pod-cidr=${MASTER_IP_RANGE}"
   fi
@@ -35,6 +35,7 @@
 {% if grains.kubelet_api_servers is defined -%}
   {% set api_servers_with_port = "--api-servers=https://" + grains.kubelet_api_servers -%}
   {% set master_kubelet_args = master_kubelet_args + "--register-schedulable=false" -%}
+  {% set reconcile_cidr_args = "--reconcile-cidr=false" -%}
 {% else -%}
   {% set api_servers_with_port = "" -%}
 {% endif -%}
@@ -1496,12 +1496,6 @@ func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) {
 // can be admitted, a brief single-word reason and a message explaining why
 // the pod cannot be admitted.
 func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, string) {
-    if rs := kl.runtimeState.networkErrors(); len(rs) != 0 {
-        if !podUsesHostNetwork(pod) {
-            return false, "NetworkNotReady", fmt.Sprintf("Network is not ready: %v", rs)
-        }
-    }
-
     // the kubelet will invoke each pod admit handler in sequence
     // if any handler rejects, the pod is rejected.
     // TODO: move out of disk check into a pod admitter
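For context, the lines removed above were the admission gate this revert rolls back: while the network plugin reports errors, only pods that use the host network are admitted. A minimal, self-contained sketch of that behavior under assumed types (Pod, runtimeState, and the helper are simplified stand-ins, not the real kubelet definitions):

package main

import "fmt"

// Simplified stand-ins for the kubelet types involved; the real definitions
// in k8s.io/kubernetes are more elaborate.
type Pod struct {
    Name        string
    HostNetwork bool
}

type runtimeState struct {
    networkError error
}

func (s *runtimeState) networkErrors() []string {
    if s.networkError != nil {
        return []string{s.networkError.Error()}
    }
    return nil
}

// canAdmitPod sketches the gate removed by this revert: while the network is
// not ready, reject every pod that does not run on the host network.
func canAdmitPod(s *runtimeState, pod *Pod) (bool, string, string) {
    if rs := s.networkErrors(); len(rs) != 0 {
        if !pod.HostNetwork {
            return false, "NetworkNotReady", fmt.Sprintf("Network is not ready: %v", rs)
        }
    }
    return true, "", ""
}

func main() {
    state := &runtimeState{networkError: fmt.Errorf("cni config uninitialized")}
    for _, p := range []*Pod{{Name: "kube-apiserver", HostNetwork: true}, {Name: "nginx"}} {
        ok, reason, msg := canAdmitPod(state, p)
        fmt.Printf("%s: admit=%v reason=%q message=%q\n", p.Name, ok, reason, msg)
    }
}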
@@ -1538,7 +1532,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
     defer housekeepingTicker.Stop()
     plegCh := kl.pleg.Watch()
     for {
-        if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 {
+        if rs := kl.runtimeState.errors(); len(rs) != 0 {
             glog.Infof("skipping pod synchronization - %v", rs)
             time.Sleep(5 * time.Second)
             continue
@@ -568,8 +568,7 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
     // ref: https://github.com/kubernetes/kubernetes/issues/16961
     currentTime := unversioned.NewTime(kl.clock.Now())
     var newNodeReadyCondition api.NodeCondition
-    rs := append(kl.runtimeState.runtimeErrors(), kl.runtimeState.networkErrors()...)
-    if len(rs) == 0 {
+    if rs := kl.runtimeState.errors(); len(rs) == 0 {
        newNodeReadyCondition = api.NodeCondition{
            Type: api.NodeReady,
            Status: api.ConditionTrue,
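To illustrate the node-status path this hunk restores, here is a hedged sketch of how a single merged error list could drive the Ready condition; the condition type, reasons, and message strings are assumptions for illustration, not the exact kubelet values:

package main

import (
    "fmt"
    "time"
)

// Minimal stand-in for api.NodeCondition.
type NodeCondition struct {
    Type              string
    Status            string
    Reason            string
    Message           string
    LastHeartbeatTime time.Time
}

// readyCondition mirrors the restored branch: the node reports Ready only
// when the consolidated runtime/network error list is empty.
func readyCondition(errs []string, now time.Time) NodeCondition {
    if len(errs) == 0 {
        return NodeCondition{
            Type:              "Ready",
            Status:            "True",
            Reason:            "KubeletReady",
            Message:           "kubelet is posting ready status",
            LastHeartbeatTime: now,
        }
    }
    return NodeCondition{
        Type:              "Ready",
        Status:            "False",
        Reason:            "KubeletNotReady",
        Message:           fmt.Sprint(errs),
        LastHeartbeatTime: now,
    }
}

func main() {
    fmt.Println(readyCondition(nil, time.Now()).Status)                               // True
    fmt.Println(readyCondition([]string{"network state unknown"}, time.Now()).Status) // False
}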
@@ -83,7 +83,6 @@ func TestRunOnce(t *testing.T) {
        kubeClient: &fake.Clientset{},
        hostname: testKubeletHostname,
        nodeName: testKubeletHostname,
-       runtimeState: newRuntimeState(time.Second),
    }
    kb.containerManager = cm.NewStubContainerManager()
 
@@ -68,13 +68,16 @@ func (s *runtimeState) setInitError(err error) {
     s.initError = err
 }
 
-func (s *runtimeState) runtimeErrors() []string {
+func (s *runtimeState) errors() []string {
     s.RLock()
     defer s.RUnlock()
     var ret []string
     if s.initError != nil {
         ret = append(ret, s.initError.Error())
     }
+    if s.networkError != nil {
+        ret = append(ret, s.networkError.Error())
+    }
     if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
         ret = append(ret, "container runtime is down")
     }
@@ -84,16 +87,6 @@ func (s *runtimeState) runtimeErrors() []string {
     return ret
 }
 
-func (s *runtimeState) networkErrors() []string {
-    s.RLock()
-    defer s.RUnlock()
-    var ret []string
-    if s.networkError != nil {
-        ret = append(ret, s.networkError.Error())
-    }
-    return ret
-}
-
 func newRuntimeState(
     runtimeSyncThreshold time.Duration,
 ) *runtimeState {
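With networkErrors() removed, the kubelet is back to a single accessor that folds init, network, and runtime-sync problems into one list. A self-contained sketch of the resulting shape (field names follow the diff; the struct layout around them is assumed):

package main

import (
    "errors"
    "fmt"
    "sync"
    "time"
)

// Trimmed-down model of runtimeState after the revert; only the fields the
// diff touches are kept, everything else is omitted.
type runtimeState struct {
    sync.RWMutex
    initError                error
    networkError             error
    lastBaseRuntimeSync      time.Time
    baseRuntimeSyncThreshold time.Duration
}

// errors merges every known problem into one slice, as restored above.
func (s *runtimeState) errors() []string {
    s.RLock()
    defer s.RUnlock()
    var ret []string
    if s.initError != nil {
        ret = append(ret, s.initError.Error())
    }
    if s.networkError != nil {
        ret = append(ret, s.networkError.Error())
    }
    if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
        ret = append(ret, "container runtime is down")
    }
    return ret
}

func main() {
    s := &runtimeState{
        networkError:             errors.New("cni config uninitialized"),
        lastBaseRuntimeSync:      time.Now(),
        baseRuntimeSyncThreshold: 30 * time.Second,
    }
    fmt.Println(s.errors()) // [cni config uninitialized]
}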