Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-09-14 13:45:06 +00:00)
Fix the container bridge so that it can create cbr0
Fix the kubelet so that it tries to sync status, even if Docker is down
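In short, the kubelet now kicks off node-status and network-status syncing from NewMainKubelet itself, before it waits for the container runtime, and a background loop periodically reconciles cbr0 and caches the outcome in a networkConfigured flag that status reporting and the sync loop read. A minimal, self-contained Go sketch of that pattern follows; the type and names here (kubeletish, nodeReady) are illustrative stand-ins, the reconcile step is stubbed, and the atomic flag is a simplification (the diff below uses a plain struct field), so treat it as a distillation of the change rather than the actual kubelet code:

package main

import (
	"fmt"
	"log"
	"sync/atomic"
	"time"
)

// kubeletish caches whether the node network (cbr0) is configured, mirroring
// the networkConfigured field this commit adds to the Kubelet struct.
type kubeletish struct {
	configureCBR0     bool
	podCIDR           string
	networkConfigured atomic.Bool // illustrative; the real field is a plain bool
}

// reconcileCBR0 stands in for the real bridge-reconcile helper; here it only
// checks that a pod CIDR has been assigned.
func (k *kubeletish) reconcileCBR0(cidr string) error {
	if cidr == "" {
		return fmt.Errorf("no pod CIDR assigned yet")
	}
	// The real helper would create/configure the cbr0 bridge here.
	return nil
}

// syncNetworkStatus recomputes the cached flag forever, the same shape as the
// loop added in the diff (which sleeps 30s between iterations).
func (k *kubeletish) syncNetworkStatus(interval time.Duration) {
	for {
		configured := true
		if k.configureCBR0 {
			if err := k.reconcileCBR0(k.podCIDR); err != nil {
				configured = false
				log.Printf("Error configuring cbr0: %v", err)
			}
		}
		k.networkConfigured.Store(configured)
		time.Sleep(interval)
	}
}

// nodeReady shows how status reporting consumes the cached flag instead of
// reconciling the bridge inline, which is the change made to setNodeStatus.
func (k *kubeletish) nodeReady(runtimeUp bool) bool {
	return runtimeUp && k.networkConfigured.Load()
}

func main() {
	k := &kubeletish{configureCBR0: true, podCIDR: "10.244.1.0/24"}
	go k.syncNetworkStatus(time.Second) // started from the constructor in the diff
	time.Sleep(2 * time.Second)
	fmt.Println("node ready:", k.nodeReady(true))
}

The key behavioral change is ordering: these goroutines start before the runtime-readiness wait, so node status keeps being reported even when Docker is down.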
@@ -147,6 +147,7 @@ func NewMainKubelet(
 	dockerDaemonContainer string,
 	systemContainer string,
 	configureCBR0 bool,
+	podCIDR string,
 	pods int,
 	dockerExecHandler dockertools.ExecHandler) (*Kubelet, error) {
 	if rootDirectory == "" {
@@ -261,6 +262,7 @@ func NewMainKubelet(
 		cgroupRoot: cgroupRoot,
 		mounter: mounter,
 		configureCBR0: configureCBR0,
+		podCIDR: podCIDR,
 		pods: pods,
 		syncLoopMonitor: util.AtomicValue{},
 	}
@@ -318,6 +320,10 @@ func NewMainKubelet(
 	}
 	klet.containerManager = containerManager
 
+	// Start syncing node status immediately, this may set up things the runtime needs to run.
+	go klet.syncNodeStatus()
+	go klet.syncNetworkStatus()
+
 	// Wait for the runtime to be up with a timeout.
 	if err := waitUntilRuntimeIsUp(klet.containerRuntime, maxWaitForContainerRuntime); err != nil {
 		return nil, fmt.Errorf("timed out waiting for %q to come up: %v", containerRuntime, err)
@@ -412,6 +418,9 @@ type Kubelet struct {
 	runtimeUpThreshold time.Duration
 	lastTimestampRuntimeUp time.Time
 
+	// Network Status information
+	networkConfigured bool
+
 	// Volume plugins.
 	volumePluginMgr volume.VolumePluginMgr
 
@@ -489,6 +498,7 @@ type Kubelet struct {
 	// Whether or not kubelet should take responsibility for keeping cbr0 in
 	// the correct state.
 	configureCBR0 bool
+	podCIDR string
 
 	// Number of Pods which can be run by this Kubelet
 	pods int
@@ -707,7 +717,6 @@ func (kl *Kubelet) Run(updates <-chan PodUpdate) {
 	}
 
 	go util.Until(kl.updateRuntimeUp, 5*time.Second, util.NeverStop)
-	go kl.syncNodeStatus()
 	// Run the system oom watcher forever.
 	kl.statusManager.Start()
 	kl.syncLoop(updates, kl)
@@ -1705,6 +1714,10 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan PodUpdate, handler SyncHandl
 		glog.Infof("Skipping pod synchronization, container runtime is not up.")
 		return
 	}
+	if !kl.networkConfigured {
+		time.Sleep(5 * time.Second)
+		glog.Infof("Skipping pod synchronization, network is not configured")
+	}
 	unsyncedPod := false
 	podSyncTypes := make(map[types.UID]SyncPodType)
 	select {
@@ -1892,6 +1905,22 @@ func (kl *Kubelet) recordNodeStatusEvent(event string) {
 // Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
 var oldNodeUnschedulable bool
 
+func (kl *Kubelet) syncNetworkStatus() {
+	for {
+		networkConfigured := true
+		if kl.configureCBR0 {
+			if len(kl.podCIDR) == 0 {
+				networkConfigured = false
+			} else if err := kl.reconcileCBR0(kl.podCIDR); err != nil {
+				networkConfigured = false
+				glog.Errorf("Error configuring cbr0: %v", err)
+			}
+		}
+		kl.networkConfigured = networkConfigured
+		time.Sleep(30 * time.Second)
+	}
+}
+
 // setNodeStatus fills in the Status fields of the given Node, overwriting
 // any fields that are currently set.
 func (kl *Kubelet) setNodeStatus(node *api.Node) error {
@@ -1925,16 +1954,6 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 		}
 	}
 
-	networkConfigured := true
-	if kl.configureCBR0 {
-		if len(node.Spec.PodCIDR) == 0 {
-			networkConfigured = false
-		} else if err := kl.reconcileCBR0(node.Spec.PodCIDR); err != nil {
-			networkConfigured = false
-			glog.Errorf("Error configuring cbr0: %v", err)
-		}
-	}
-
 	// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
 	// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
 	info, err := kl.GetCachedMachineInfo()
@@ -1982,7 +2001,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 	currentTime := util.Now()
 	var newNodeReadyCondition api.NodeCondition
 	var oldNodeReadyConditionStatus api.ConditionStatus
-	if containerRuntimeUp && networkConfigured {
+	if containerRuntimeUp && kl.networkConfigured {
 		newNodeReadyCondition = api.NodeCondition{
 			Type: api.NodeReady,
 			Status: api.ConditionTrue,
@@ -1994,7 +2013,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 		if !containerRuntimeUp {
 			reasons = append(reasons, "container runtime is down")
 		}
-		if !networkConfigured {
+		if !kl.networkConfigured {
 			reasons = append(reasons, "network not configured correctly")
 		}
 		newNodeReadyCondition = api.NodeCondition{
@@ -2056,6 +2075,8 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
 	if node == nil {
 		return fmt.Errorf("no node instance returned for %q", kl.nodeName)
 	}
+	kl.podCIDR = node.Spec.PodCIDR
+
 	if err := kl.setNodeStatus(node); err != nil {
 		return err
 	}
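The hunks above call kl.reconcileCBR0, but the bridge-side half of the fix ("so that it can create cbr0") lives in a file not shown in this excerpt. Purely as an illustration of what such a reconcile step involves (ensure the bridge exists, address it from the node's pod CIDR, bring it up), here is a hypothetical sketch; the function name, the iproute2 invocations, and the error handling are assumptions, not the actual implementation:

package main

import (
	"fmt"
	"net"
	"os/exec"
)

// ensureCBR0 is a hypothetical reconcile step: make sure a bridge named cbr0
// exists, carries the first address of the pod CIDR, and is up. Requires root.
func ensureCBR0(podCIDR string) error {
	ip, ipnet, err := net.ParseCIDR(podCIDR)
	if err != nil {
		return fmt.Errorf("invalid pod CIDR %q: %v", podCIDR, err)
	}
	// Use the first host address in the range as the bridge address.
	gw := ip.Mask(ipnet.Mask)
	gw[len(gw)-1]++
	ones, _ := ipnet.Mask.Size()
	bridgeAddr := fmt.Sprintf("%s/%d", gw, ones)

	// Create the bridge only if it does not already exist.
	if exec.Command("ip", "link", "show", "cbr0").Run() != nil {
		if out, err := exec.Command("ip", "link", "add", "name", "cbr0", "type", "bridge").CombinedOutput(); err != nil {
			return fmt.Errorf("creating cbr0: %v: %s", err, out)
		}
	}
	// "addr replace" is idempotent, so rerunning the reconcile loop is safe.
	if out, err := exec.Command("ip", "addr", "replace", bridgeAddr, "dev", "cbr0").CombinedOutput(); err != nil {
		return fmt.Errorf("assigning %s to cbr0: %v: %s", bridgeAddr, err, out)
	}
	if out, err := exec.Command("ip", "link", "set", "dev", "cbr0", "up").CombinedOutput(); err != nil {
		return fmt.Errorf("bringing cbr0 up: %v: %s", err, out)
	}
	return nil
}

func main() {
	if err := ensureCBR0("10.244.1.0/24"); err != nil {
		fmt.Println("reconcile failed:", err)
	}
}

A real implementation would also have to handle MTU, stale addresses left over from a previous CIDR, and re-creation after reboot; the sketch deliberately skips all of that.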