Revert "Fix the race between configuring cbr0 and restarting static pods"
parent 5b4dc4edaa
commit fd0a95dd12
@@ -2,5 +2,5 @@ DOCKER_OPTS=""
 {% if grains.docker_opts is defined and grains.docker_opts %}
 DOCKER_OPTS="${DOCKER_OPTS} {{grains.docker_opts}}"
 {% endif %}
-DOCKER_OPTS="${DOCKER_OPTS} --bridge=cbr0 --iptables=false --ip-masq=false"
+DOCKER_OPTS="${DOCKER_OPTS} --bridge cbr0 --iptables=false --ip-masq=false"
 DOCKER_NOFILE=1000000
@@ -48,6 +48,11 @@ net.ipv4.ip_forward:
   sysctl.present:
     - value: 1
 
+cbr0:
+  container_bridge.ensure:
+    - cidr: {{ grains['cbr-cidr'] }}
+    - mtu: 1460
+
 {{ environment_file }}:
   file.managed:
     - source: salt://docker/docker-defaults
@@ -119,6 +124,7 @@ docker:
     - enable: True
     - watch:
       - file: {{ environment_file }}
+      - container_bridge: cbr0
 {% if override_docker_ver != '' %}
     - require:
       - pkg: lxc-docker-{{ override_docker_ver }}
@@ -37,26 +37,10 @@ master-docker-image-tags:
   file.touch:
     - name: /srv/pillar/docker-images.sls
 
-# Current containervm image by default has both docker and kubelet
-# running. But during cluster creation stage, docker and kubelet
-# could be overwritten completely, or restarted due to flag changes.
-# The ordering of salt states for service docker, kubelet and
-# master-addon below is very important to avoid the race between
-# salt restart docker or kubelet and kubelet start master components.
-# Without the ordering of salt states, when gce instance boot up,
-# configure-vm.sh will run and download the release. At the end of
-# boot, run-salt will run kube-master-addons service which installs
-# master component manifest files to kubelet config directory before
-# the installation of proper version kubelet. Please see
-# https://github.com/GoogleCloudPlatform/kubernetes/issues/10122#issuecomment-114566063
-# for detail explanation on this very issue.
 kube-master-addons:
   service.running:
     - enable: True
     - restart: True
-    - require:
-      - service: docker
-      - service: kubelet
     - watch:
      - file: master-docker-image-tags
      - file: /etc/kubernetes/kube-master-addons.sh
@@ -76,9 +76,4 @@
 {% set cgroup_root = "--cgroup_root=/" -%}
 {% endif -%}
 
-{% set pod_cidr = "" %}
-{% if grains['roles'][0] == 'kubernetes-master' %}
-{% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %}
-{% endif %}
-
-DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}}"
+DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}}"
@@ -115,7 +115,6 @@ type KubeletServer struct {
     DockerDaemonContainer string
     SystemContainer string
     ConfigureCBR0 bool
-    PodCIDR string
     MaxPods int
     DockerExecHandlerName string
 
@@ -242,7 +241,6 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
     fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
     fs.IntVar(&s.MaxPods, "max-pods", 100, "Number of Pods that can run on this Kubelet.")
     fs.StringVar(&s.DockerExecHandlerName, "docker-exec-handler", s.DockerExecHandlerName, "Handler to use when executing a command in a container. Valid values are 'native' and 'nsenter'. Defaults to 'native'.")
-    fs.StringVar(&s.PodCIDR, "pod-cidr", "", "The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.")
     // Flags intended for testing, not recommended used in production environments.
     fs.BoolVar(&s.ReallyCrashForTesting, "really-crash-for-testing", s.ReallyCrashForTesting, "If true, when panics occur crash. Intended for testing.")
     fs.Float64Var(&s.ChaosChance, "chaos-chance", s.ChaosChance, "If > 0.0, introduce random client errors and latency. Intended for testing. [default=0.0]")
@@ -362,7 +361,6 @@ func (s *KubeletServer) Run(_ []string) error {
         DockerDaemonContainer: s.DockerDaemonContainer,
         SystemContainer: s.SystemContainer,
         ConfigureCBR0: s.ConfigureCBR0,
-        PodCIDR: s.PodCIDR,
         MaxPods: s.MaxPods,
         DockerExecHandler: dockerExecHandler,
     }
@@ -716,7 +714,6 @@ type KubeletConfig struct {
     DockerDaemonContainer string
     SystemContainer string
     ConfigureCBR0 bool
-    PodCIDR string
     MaxPods int
     DockerExecHandler dockertools.ExecHandler
 }
@@ -774,7 +771,6 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
         kc.DockerDaemonContainer,
         kc.SystemContainer,
         kc.ConfigureCBR0,
-        kc.PodCIDR,
         kc.MaxPods,
         kc.DockerExecHandler)
 
@@ -354,7 +354,6 @@ func (ks *KubeletExecutorServer) createAndInitKubelet(
         kc.DockerDaemonContainer,
         kc.SystemContainer,
         kc.ConfigureCBR0,
-        kc.PodCIDR,
         kc.MaxPods,
         kc.DockerExecHandler,
     )
@@ -19,17 +19,27 @@ package kubelet
 import (
     "bytes"
     "net"
-    "os"
     "os/exec"
     "regexp"
 
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
     "github.com/golang/glog"
 )
 
 var cidrRegexp = regexp.MustCompile(`inet ([0-9a-fA-F.:]*/[0-9]*)`)
 
-func createCBR0(wantCIDR *net.IPNet) error {
+func ensureCbr0(wantCIDR *net.IPNet) error {
+    if !cbr0CidrCorrect(wantCIDR) {
+        glog.V(2).Infof("Attempting to recreate cbr0 with address range: %s", wantCIDR)
+
+        // delete cbr0
+        if err := exec.Command("ip", "link", "set", "dev", "cbr0", "down").Run(); err != nil {
+            glog.Error(err)
+            return err
+        }
+        if err := exec.Command("brctl", "delbr", "cbr0").Run(); err != nil {
+            glog.Error(err)
+            return err
+        }
         // recreate cbr0 with wantCIDR
         if err := exec.Command("brctl", "addbr", "cbr0").Run(); err != nil {
             glog.Error(err)
@@ -44,60 +54,16 @@ func createCBR0(wantCIDR *net.IPNet) error {
             return err
         }
         // restart docker
-    // For now just log the error. The containerRuntime check will catch docker failures.
-    // TODO (dawnchen) figure out what we should do for rkt here.
-    if util.UsingSystemdInitSystem() {
-        if err := exec.Command("systemctl", "restart", "docker").Run(); err != nil {
-            glog.Error(err)
-        }
-    } else {
         if err := exec.Command("service", "docker", "restart").Run(); err != nil {
             glog.Error(err)
-        }
+            // For now just log the error. The containerRuntime check will catch docker failures.
+            // TODO (dawnchen) figure out what we should do for rkt here.
         }
         glog.V(2).Info("Recreated cbr0 and restarted docker")
-    return nil
-}
-
-func ensureCbr0(wantCIDR *net.IPNet) error {
-    exists, err := cbr0Exists()
-    if err != nil {
-        return err
-    }
-    if !exists {
-        glog.V(2).Infof("CBR0 doesn't exist, attempting to create it with range: %s", wantCIDR)
-        return createCBR0(wantCIDR)
-    }
-    if !cbr0CidrCorrect(wantCIDR) {
-        glog.V(2).Infof("Attempting to recreate cbr0 with address range: %s", wantCIDR)
-
-        // delete cbr0
-        if err := exec.Command("ip", "link", "set", "dev", "cbr0", "down").Run(); err != nil {
-            glog.Error(err)
-            return err
-        }
-        if err := exec.Command("brctl", "delbr", "cbr0").Run(); err != nil {
-            glog.Error(err)
-            return err
-        }
-        return createCBR0(wantCIDR)
     }
     return nil
 }
 
-// Check if cbr0 network interface is configured or not, and take action
-// when the configuration is missing on the node, and propagate the rest
-// error to kubelet to handle.
-func cbr0Exists() (bool, error) {
-    if _, err := os.Stat("/sys/class/net/cbr0"); err != nil {
-        if os.IsNotExist(err) {
-            return false, nil
-        }
-        return false, err
-    }
-    return true, nil
-}
-
 func cbr0CidrCorrect(wantCIDR *net.IPNet) bool {
     output, err := exec.Command("ip", "addr", "show", "cbr0").Output()
     if err != nil {
@@ -113,7 +79,6 @@ func cbr0CidrCorrect(wantCIDR *net.IPNet) bool {
         return false
     }
     cbr0CIDR.IP = cbr0IP
-
     glog.V(5).Infof("Want cbr0 CIDR: %s, have cbr0 CIDR: %s", wantCIDR, cbr0CIDR)
     return wantCIDR.IP.Equal(cbr0IP) && bytes.Equal(wantCIDR.Mask, cbr0CIDR.Mask)
 }
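The container_bridge.go hunks above restore ensureCbr0 and cbr0CidrCorrect, which shell out to `ip addr show cbr0` and pull the bridge's current CIDR out of the output with cidrRegexp before deciding whether to tear the bridge down and recreate it. As a reading aid, here is a small standalone Go sketch of that extraction-and-comparison step; the sample `ip addr` output and the 10.244.1.1/24 address are illustrative assumptions, not values taken from the patch.

```go
package main

import (
    "fmt"
    "net"
    "regexp"
)

// Same pattern as cidrRegexp in the hunk above.
var cidrRegexp = regexp.MustCompile(`inet ([0-9a-fA-F.:]*/[0-9]*)`)

func main() {
    // Hypothetical `ip addr show cbr0` output; the values are made up.
    sample := "4: cbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1460 state UP\n" +
        "    link/ether 02:42:ac:11:00:01 brd ff:ff:ff:ff:ff:ff\n" +
        "    inet 10.244.1.1/24 scope global cbr0\n"

    match := cidrRegexp.FindStringSubmatch(sample)
    if match == nil {
        fmt.Println("no inet address found on cbr0")
        return
    }

    // match[1] is the captured "address/prefix" string, e.g. "10.244.1.1/24".
    ip, cbr0CIDR, err := net.ParseCIDR(match[1])
    if err != nil {
        fmt.Println("could not parse CIDR:", err)
        return
    }

    // ParseCIDR returns the network (10.244.1.0/24), so keep the interface IP
    // and compare IP and mask separately, as cbr0CidrCorrect does above.
    cbr0CIDR.IP = ip
    fmt.Println("cbr0 is currently configured with", cbr0CIDR) // 10.244.1.1/24
}
```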
@@ -147,7 +147,6 @@ func NewMainKubelet(
     dockerDaemonContainer string,
     systemContainer string,
     configureCBR0 bool,
-    podCIDR string,
     pods int,
     dockerExecHandler dockertools.ExecHandler) (*Kubelet, error) {
     if rootDirectory == "" {
@@ -262,7 +261,6 @@ func NewMainKubelet(
         cgroupRoot: cgroupRoot,
         mounter: mounter,
         configureCBR0: configureCBR0,
-        podCIDR: podCIDR,
         pods: pods,
         syncLoopMonitor: util.AtomicValue{},
     }
@@ -320,10 +318,6 @@ func NewMainKubelet(
     }
     klet.containerManager = containerManager
 
-    // Start syncing node status immediately, this may set up things the runtime needs to run.
-    go util.Until(klet.syncNetworkStatus, 30*time.Second, util.NeverStop)
-    go klet.syncNodeStatus()
-
     // Wait for the runtime to be up with a timeout.
     if err := waitUntilRuntimeIsUp(klet.containerRuntime, maxWaitForContainerRuntime); err != nil {
         return nil, fmt.Errorf("timed out waiting for %q to come up: %v", containerRuntime, err)
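The lines removed above had NewMainKubelet start syncNetworkStatus in the background on a 30-second period via util.Until(klet.syncNetworkStatus, 30*time.Second, util.NeverStop); after the revert, cbr0 reconciliation happens during node-status updates instead (see the setNodeStatus hunk further down). For readers unfamiliar with util.Until, the following is a rough standalone sketch of its run-then-wait loop using only the standard library; the function and message names are illustrative, not part of the patch. In the kubelet the stop channel is util.NeverStop, a channel that is never closed, so the loop runs for the life of the process.

```go
package main

import (
    "fmt"
    "time"
)

// until runs f, then waits period, and repeats until stop is closed.
// It is a rough stand-in for the util.Until helper referenced above.
func until(f func(), period time.Duration, stop <-chan struct{}) {
    for {
        select {
        case <-stop:
            return
        default:
        }
        f()
        select {
        case <-stop:
            return
        case <-time.After(period):
        }
    }
}

func main() {
    stop := make(chan struct{})

    // Illustrative worker; the removed code ran klet.syncNetworkStatus here.
    go until(func() { fmt.Println("sync network status") }, 30*time.Second, stop)

    time.Sleep(100 * time.Millisecond) // let the first iteration run
    close(stop)
}
```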
@@ -418,10 +412,6 @@ type Kubelet struct {
     runtimeUpThreshold time.Duration
     lastTimestampRuntimeUp time.Time
 
-    // Network Status information
-    networkConfigMutex sync.Mutex
-    networkConfigured bool
-
     // Volume plugins.
     volumePluginMgr volume.VolumePluginMgr
 
@@ -499,7 +489,6 @@ type Kubelet struct {
     // Whether or not kubelet should take responsibility for keeping cbr0 in
     // the correct state.
     configureCBR0 bool
-    podCIDR string
 
     // Number of Pods which can be run by this Kubelet
     pods int
@@ -718,6 +707,7 @@ func (kl *Kubelet) Run(updates <-chan PodUpdate) {
     }
 
     go util.Until(kl.updateRuntimeUp, 5*time.Second, util.NeverStop)
+    go kl.syncNodeStatus()
     // Run the system oom watcher forever.
     kl.statusManager.Start()
     kl.syncLoop(updates, kl)
@@ -1716,11 +1705,6 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan PodUpdate, handler SyncHandl
         glog.Infof("Skipping pod synchronization, container runtime is not up.")
         return
     }
-    if !kl.doneNetworkConfigure() {
-        time.Sleep(5 * time.Second)
-        glog.Infof("Skipping pod synchronization, network is not configured")
-        return
-    }
     unsyncedPod := false
     podSyncTypes := make(map[types.UID]SyncPodType)
     select {
@@ -1877,7 +1861,6 @@ func (kl *Kubelet) reconcileCBR0(podCIDR string) error {
         glog.V(5).Info("PodCIDR not set. Will not configure cbr0.")
         return nil
     }
-    glog.V(5).Infof("PodCIDR is set to %q", podCIDR)
     _, cidr, err := net.ParseCIDR(podCIDR)
     if err != nil {
         return err
@@ -1912,22 +1895,6 @@ func (kl *Kubelet) recordNodeStatusEvent(event string) {
 // Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
 var oldNodeUnschedulable bool
 
-func (kl *Kubelet) syncNetworkStatus() {
-    kl.networkConfigMutex.Lock()
-    defer kl.networkConfigMutex.Unlock()
-
-    networkConfigured := true
-    if kl.configureCBR0 {
-        if len(kl.podCIDR) == 0 {
-            networkConfigured = false
-        } else if err := kl.reconcileCBR0(kl.podCIDR); err != nil {
-            networkConfigured = false
-            glog.Errorf("Error configuring cbr0: %v", err)
-        }
-    }
-    kl.networkConfigured = networkConfigured
-}
-
 // setNodeStatus fills in the Status fields of the given Node, overwriting
 // any fields that are currently set.
 func (kl *Kubelet) setNodeStatus(node *api.Node) error {
@@ -1961,6 +1928,16 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
         }
     }
 
+    networkConfigured := true
+    if kl.configureCBR0 {
+        if len(node.Spec.PodCIDR) == 0 {
+            networkConfigured = false
+        } else if err := kl.reconcileCBR0(node.Spec.PodCIDR); err != nil {
+            networkConfigured = false
+            glog.Errorf("Error configuring cbr0: %v", err)
+        }
+    }
+
     // TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
     // cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
     info, err := kl.GetCachedMachineInfo()
@@ -2004,8 +1981,6 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 
     // Check whether container runtime can be reported as up.
     containerRuntimeUp := kl.containerRuntimeUp()
-    // Check whether network is configured properly
-    networkConfigured := kl.doneNetworkConfigure()
 
     currentTime := util.Now()
     var newNodeReadyCondition api.NodeCondition
@@ -2074,12 +2049,6 @@ func (kl *Kubelet) containerRuntimeUp() bool {
     return kl.lastTimestampRuntimeUp.Add(kl.runtimeUpThreshold).After(time.Now())
 }
 
-func (kl *Kubelet) doneNetworkConfigure() bool {
-    kl.networkConfigMutex.Lock()
-    defer kl.networkConfigMutex.Unlock()
-    return kl.networkConfigured
-}
-
 // tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
 // is set, this function will also confirm that cbr0 is configured correctly.
 func (kl *Kubelet) tryUpdateNodeStatus() error {
@@ -2090,8 +2059,6 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
     if node == nil {
         return fmt.Errorf("no node instance returned for %q", kl.nodeName)
     }
-    kl.podCIDR = node.Spec.PodCIDR
-
     if err := kl.setNodeStatus(node); err != nil {
         return err
     }
@@ -127,7 +127,6 @@ func newTestKubelet(t *testing.T) *TestKubelet {
     }
     kubelet.volumeManager = newVolumeManager()
     kubelet.containerManager, _ = newContainerManager(mockCadvisor, "", "", "")
-    kubelet.networkConfigured = true
     return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient}
 }
 
@@ -17,7 +17,6 @@ limitations under the License.
 package kubelet
 
 import (
-    "errors"
     "fmt"
     "reflect"
     "sort"
@@ -143,9 +142,6 @@ func (s *statusManager) RemoveOrphanedStatuses(podFullNames map[string]bool) {
 
 // syncBatch syncs pods statuses with the apiserver.
 func (s *statusManager) syncBatch() error {
-    if s.kubeClient == nil {
-        return errors.New("Kubernetes client is nil, skipping pod status updates")
-    }
     syncRequest := <-s.podStatusChannel
     pod := syncRequest.pod
     podFullName := kubecontainer.GetPodFullName(pod)
@@ -198,20 +198,6 @@ func CompileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
     return regexps, nil
 }
 
-// Detects if using systemd as the init system
-// Please note that simply reading /proc/1/cmdline can be misleading because
-// some installation of various init programs can automatically make /sbin/init
-// a symlink or even a renamed version of their main program.
-// TODO(dchen1107): realiably detects the init system using on the system:
-// systemd, upstart, initd, etc.
-func UsingSystemdInitSystem() bool {
-    if _, err := os.Stat("/run/systemd/system"); err != nil {
-        return true
-    }
-
-    return false
-}
-
 // Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
 func ApplyOomScoreAdj(pid int, value int) error {
     if value < -1000 || value > 1000 {
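The UsingSystemdInitSystem helper deleted above is what let the earlier container_bridge.go code choose between `systemctl restart docker` and `service docker restart`, keyed off the presence of /run/systemd/system. Below is a minimal standalone sketch of that probe. Note that this sketch reports systemd when os.Stat succeeds, i.e. when the directory exists, which is the usual convention; the deleted helper returned true on the error path instead.

```go
package main

import (
    "fmt"
    "os"
)

// usingSystemdInit reports whether the host appears to be booted with systemd.
// systemd creates /run/systemd/system early in boot, so if the directory
// exists we assume systemd is the init system.
func usingSystemdInit() bool {
    info, err := os.Stat("/run/systemd/system")
    return err == nil && info.IsDir()
}

func main() {
    if usingSystemdInit() {
        fmt.Println("systemd detected: restart docker with `systemctl restart docker`")
    } else {
        fmt.Println("no systemd: fall back to `service docker restart`")
    }
}
```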