mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-31 15:25:57 +00:00
Create hostNetwork pods even if network plugin not ready
We now admit pods (unlike the first attempt), but we stop non-hostNetwork pods from starting if the network is not ready. Issue #35409
This commit is contained in:
parent
b4e84d3e3b
commit
f8eb179c2d
@ -1393,6 +1393,11 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
|
||||
return errOuter
|
||||
}
|
||||
|
||||
// If the network plugin is not ready, only start the pod if it uses the host network
|
||||
if rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !podUsesHostNetwork(pod) {
|
||||
return fmt.Errorf("network is not ready: %v", rs)
|
||||
}
|
||||
|
||||
// Create Cgroups for the pod and apply resource parameters
|
||||
// to them if cgroup-per-qos flag is enabled.
|
||||
pcm := kl.containerManager.NewPodContainerManager()
|
||||
@ -1644,7 +1649,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
|
||||
defer housekeepingTicker.Stop()
|
||||
plegCh := kl.pleg.Watch()
|
||||
for {
|
||||
if rs := kl.runtimeState.errors(); len(rs) != 0 {
|
||||
if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 {
|
||||
glog.Infof("skipping pod synchronization - %v", rs)
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
|
@ -577,7 +577,8 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
|
||||
// ref: https://github.com/kubernetes/kubernetes/issues/16961
|
||||
currentTime := unversioned.NewTime(kl.clock.Now())
|
||||
var newNodeReadyCondition api.NodeCondition
|
||||
if rs := kl.runtimeState.errors(); len(rs) == 0 {
|
||||
rs := append(kl.runtimeState.runtimeErrors(), kl.runtimeState.networkErrors()...)
|
||||
if len(rs) == 0 {
|
||||
newNodeReadyCondition = api.NodeCondition{
|
||||
Type: api.NodeReady,
|
||||
Status: api.ConditionTrue,
|
||||
|
@ -83,6 +83,7 @@ func TestRunOnce(t *testing.T) {
|
||||
kubeClient: &fake.Clientset{},
|
||||
hostname: testKubeletHostname,
|
||||
nodeName: testKubeletHostname,
|
||||
runtimeState: newRuntimeState(time.Second),
|
||||
}
|
||||
kb.containerManager = cm.NewStubContainerManager()
|
||||
|
||||
|
@ -68,16 +68,13 @@ func (s *runtimeState) setInitError(err error) {
|
||||
s.initError = err
|
||||
}
|
||||
|
||||
func (s *runtimeState) errors() []string {
|
||||
func (s *runtimeState) runtimeErrors() []string {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
var ret []string
|
||||
if s.initError != nil {
|
||||
ret = append(ret, s.initError.Error())
|
||||
}
|
||||
if s.networkError != nil {
|
||||
ret = append(ret, s.networkError.Error())
|
||||
}
|
||||
if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
|
||||
ret = append(ret, "container runtime is down")
|
||||
}
|
||||
@ -87,6 +84,16 @@ func (s *runtimeState) errors() []string {
|
||||
return ret
|
||||
}
|
||||
|
||||
func (s *runtimeState) networkErrors() []string {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
var ret []string
|
||||
if s.networkError != nil {
|
||||
ret = append(ret, s.networkError.Error())
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func newRuntimeState(
|
||||
runtimeSyncThreshold time.Duration,
|
||||
) *runtimeState {
|
||||
|
Loading…
Reference in New Issue
Block a user