Change kubelet update frequency to 2s, and make it a constant.

Deyuan Deng 2015-03-13 20:59:25 -04:00 committed by Deyuan Deng
parent 0d5f8dfde1
commit cf548765c9
3 changed files with 27 additions and 24 deletions

View File

@@ -51,7 +51,6 @@ type KubeletServer struct {
 	SyncFrequency time.Duration
 	FileCheckFrequency time.Duration
 	HTTPCheckFrequency time.Duration
-	StatusUpdateFrequency time.Duration
 	ManifestURL string
 	EnableServer bool
 	Address util.IP
@@ -85,13 +84,12 @@ type KubeletServer struct {
 // NewKubeletServer will create a new KubeletServer with default values.
 func NewKubeletServer() *KubeletServer {
 	return &KubeletServer{
-		SyncFrequency: 10 * time.Second,
-		FileCheckFrequency: 20 * time.Second,
-		HTTPCheckFrequency: 20 * time.Second,
-		StatusUpdateFrequency: 20 * time.Second,
-		EnableServer: true,
-		Address: util.IP(net.ParseIP("127.0.0.1")),
-		Port: ports.KubeletPort,
+		SyncFrequency: 10 * time.Second,
+		FileCheckFrequency: 20 * time.Second,
+		HTTPCheckFrequency: 20 * time.Second,
+		EnableServer: true,
+		Address: util.IP(net.ParseIP("127.0.0.1")),
+		Port: ports.KubeletPort,
 		PodInfraContainerImage: kubelet.PodInfraContainerImage,
 		RootDirectory: defaultRootDir,
 		RegistryBurst: 10,
@@ -112,7 +110,6 @@ func NewKubeletServer() *KubeletServer {
 func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.Config, "config", s.Config, "Path to the config file or directory of files")
 	fs.DurationVar(&s.SyncFrequency, "sync_frequency", s.SyncFrequency, "Max period between synchronizing running containers and config")
-	fs.DurationVar(&s.StatusUpdateFrequency, "status_update_frequency", s.StatusUpdateFrequency, "Duration between posting node status to master")
 	fs.DurationVar(&s.FileCheckFrequency, "file_check_frequency", s.FileCheckFrequency, "Duration between checking config files for new data")
 	fs.DurationVar(&s.HTTPCheckFrequency, "http_check_frequency", s.HTTPCheckFrequency, "Duration between checking http for new data")
 	fs.StringVar(&s.ManifestURL, "manifest_url", s.ManifestURL, "URL for accessing the container manifest")
@@ -179,7 +176,6 @@ func (s *KubeletServer) Run(_ []string) error {
 		RootDirectory: s.RootDirectory,
 		ConfigFile: s.Config,
 		ManifestURL: s.ManifestURL,
-		StatusUpdateFrequency: s.StatusUpdateFrequency,
 		FileCheckFrequency: s.FileCheckFrequency,
 		HTTPCheckFrequency: s.HTTPCheckFrequency,
 		PodInfraContainerImage: s.PodInfraContainerImage,
@@ -285,7 +281,6 @@ func SimpleKubelet(client *client.Client,
 		EnableDebuggingHandlers: true,
 		HTTPCheckFrequency: 1 * time.Second,
 		FileCheckFrequency: 1 * time.Second,
-		StatusUpdateFrequency: 3 * time.Second,
 		SyncFrequency: 3 * time.Second,
 		MinimumGCAge: 10 * time.Second,
 		MaxPerPodContainerCount: 5,
@@ -380,7 +375,6 @@ type KubeletConfig struct {
 	RootDirectory string
 	ConfigFile string
 	ManifestURL string
-	StatusUpdateFrequency time.Duration
 	FileCheckFrequency time.Duration
 	HTTPCheckFrequency time.Duration
 	Hostname string
@@ -446,7 +440,6 @@ func createAndInitKubelet(kc *KubeletConfig, pc *config.PodConfig) (*kubelet.Kub
 		kc.StreamingConnectionIdleTimeout,
 		kc.Recorder,
 		kc.CadvisorInterface,
-		kc.StatusUpdateFrequency,
 		kc.ImageGCPolicy)
 	if err != nil {

View File

@@ -39,8 +39,12 @@ const (
 	// sync node status in this case, but will monitor node status updates from the kubelet. If
 	// it doesn't receive an update for this amount of time, it will start posting a node NotReady
 	// condition. The amount of time before the NodeController starts evicting pods is controlled
-	// via flag 'pod_eviction_timeout'.
+	// via flag 'pod_eviction_timeout'. Note: be cautious when changing nodeMonitorGracePeriod;
+	// it must be kept in sync with kubelet.nodeStatusUpdateFrequency.
 	nodeMonitorGracePeriod = 8 * time.Second
+	// This constant is used if sync_nodes_status=False, and for node startup. When a node has just
+	// been created, e.g. at cluster bootstrap or node creation, we give it a longer grace period.
+	nodeStartupGracePeriod = 30 * time.Second
 	// This constant is used if sync_nodes_status=False. It controls the NodeController monitoring
 	// period, i.e. how often the NodeController checks node status posted by the kubelet.
 	// Theoretically, this value should be lower than nodeMonitorGracePeriod.
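The comments above pin down a cross-component timing contract. As a concrete illustration, here is a minimal standalone sketch (not part of this commit) that shows how many 2s kubelet update intervals fit into each NodeController grace period, using only the values introduced or referenced by this change:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Values from this change: the kubelet posts node status every 2s; the
	// NodeController waits 8s in steady state (30s for a freshly created node)
	// before posting a NotReady condition on the node's behalf.
	nodeStatusUpdateFrequency := 2 * time.Second
	nodeMonitorGracePeriod := 8 * time.Second
	nodeStartupGracePeriod := 30 * time.Second

	// Each grace period must span several update intervals so that a single
	// slow or dropped status post does not immediately mark the node NotReady.
	fmt.Printf("steady state: %d update intervals per grace period\n",
		int(nodeMonitorGracePeriod/nodeStatusUpdateFrequency)) // 4
	fmt.Printf("node startup: %d update intervals per grace period\n",
		int(nodeStartupGracePeriod/nodeStatusUpdateFrequency)) // 15
}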
@@ -412,12 +416,18 @@ func (s *NodeController) MonitorNodeStatus() error {
 	}
 	for i := range nodes.Items {
 		node := &nodes.Items[i]
-		// Precompute condition times to avoid deep copy of node status (We'll modify node for updating,
-		// and NodeStatus.Conditions is an array, which makes assignment copy not useful).
+		// Precompute all condition times to avoid a deep copy of node status (we'll modify node for
+		// updating, and NodeStatus.Conditions is an array, which makes an assignment copy of little use).
 		latestConditionTime := s.latestConditionTime(node, api.NodeReady)
+		var gracePeriod time.Duration
+		if latestConditionTime == node.CreationTimestamp {
+			gracePeriod = nodeStartupGracePeriod
+		} else {
+			gracePeriod = nodeMonitorGracePeriod
+		}
 		latestFullConditionTime := s.latestConditionTimeWithStatus(node, api.NodeReady, api.ConditionFull)
 		// The grace period has passed; post a node NotReady condition to the master without contacting the kubelet.
-		if util.Now().After(latestConditionTime.Add(nodeMonitorGracePeriod)) {
+		if util.Now().After(latestConditionTime.Add(gracePeriod)) {
 			readyCondition := s.getCondition(node, api.NodeReady)
 			if readyCondition == nil {
 				node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{

View File

@@ -75,8 +75,10 @@ const (
 	initialNodeStatusUpdateFrequency = 100 * time.Millisecond
 	nodeStatusUpdateFrequencyInc = 500 * time.Millisecond
-	// The retry count for updating node status at each sync period.
-	nodeStatusUpdateRetry = 5
+	// Node status update frequency and retry count. Note: be cautious when changing
+	// nodeStatusUpdateFrequency; it must be kept in sync with nodecontroller.nodeMonitorGracePeriod.
+	nodeStatusUpdateFrequency = 2 * time.Second
+	nodeStatusUpdateRetry = 5
 )

 var (
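As a quick illustration of the schedule these constants produce in syncNodeStatus (shown in the hunks below), here is a standalone sketch (not part of the commit) that prints the waits instead of sleeping through them; names mirror the constants above:

package main

import (
	"fmt"
	"time"
)

const (
	initialNodeStatusUpdateFrequency = 100 * time.Millisecond
	nodeStatusUpdateFrequencyInc     = 500 * time.Millisecond
	nodeStatusUpdateFrequency        = 2 * time.Second
)

func main() {
	// Mirror the ramp-up loop in syncNodeStatus: the kubelet posts status
	// early and often right after startup, then settles into the 2s period.
	elapsed := time.Duration(0)
	for freq := initialNodeStatusUpdateFrequency; freq < nodeStatusUpdateFrequency; freq += nodeStatusUpdateFrequencyInc {
		elapsed += freq
		fmt.Printf("ramp-up update after a %v wait (t=%v)\n", freq, elapsed)
	}
	// Prints waits of 100ms, 600ms, 1.1s, and 1.6s, then the steady period.
	fmt.Printf("thereafter: one update every %v\n", nodeStatusUpdateFrequency)
}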
@@ -124,7 +126,6 @@ func NewMainKubelet(
 	streamingConnectionIdleTimeout time.Duration,
 	recorder record.EventRecorder,
 	cadvisorInterface cadvisor.Interface,
-	statusUpdateFrequency time.Duration,
 	imageGCPolicy ImageGCPolicy) (*Kubelet, error) {
 	if rootDirectory == "" {
 		return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
@@ -202,7 +203,6 @@ func NewMainKubelet(
 		dockerClient: dockerClient,
 		kubeClient: kubeClient,
 		rootDirectory: rootDirectory,
-		statusUpdateFrequency: statusUpdateFrequency,
 		resyncInterval: resyncInterval,
 		podInfraContainerImage: podInfraContainerImage,
 		containerIDToRef: map[string]*api.ObjectReference{},
@@ -275,7 +275,6 @@ type Kubelet struct {
 	rootDirectory string
 	podInfraContainerImage string
 	podWorkers *podWorkers
-	statusUpdateFrequency time.Duration
 	resyncInterval time.Duration
 	sourcesReady SourcesReadyFn
@@ -532,7 +531,8 @@ func (kl *Kubelet) syncNodeStatus() {
 	if kl.kubeClient == nil {
 		return
 	}
-	for feq := initialNodeStatusUpdateFrequency; feq < kl.statusUpdateFrequency; feq += nodeStatusUpdateFrequencyInc {
+	for feq := initialNodeStatusUpdateFrequency; feq < nodeStatusUpdateFrequency; feq += nodeStatusUpdateFrequencyInc {
 		select {
 		case <-time.After(feq):
 			if err := kl.updateNodeStatus(); err != nil {
@@ -542,7 +542,7 @@ func (kl *Kubelet) syncNodeStatus() {
 	}
 	for {
 		select {
-		case <-time.After(kl.statusUpdateFrequency):
+		case <-time.After(nodeStatusUpdateFrequency):
 			if err := kl.updateNodeStatus(); err != nil {
 				glog.Errorf("Unable to update node status: %v", err)
 			}
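Read together, the two hunks above implement a ramp-up phase followed by steady-state posting. Here is a self-contained variant that can be run directly (an illustrative sketch only: the updateNodeStatus stub and the stop channel are additions for the demo, not part of the kubelet code):

package main

import (
	"fmt"
	"time"
)

const (
	initialNodeStatusUpdateFrequency = 100 * time.Millisecond
	nodeStatusUpdateFrequencyInc     = 500 * time.Millisecond
	nodeStatusUpdateFrequency        = 2 * time.Second
)

// updateNodeStatus stands in for the kubelet's real post of node status to the master.
func updateNodeStatus() error {
	fmt.Println("node status posted at", time.Now().Format("15:04:05.000"))
	return nil
}

// syncNodeStatus mirrors the kubelet loop above: a short ramp-up phase, then
// one update per nodeStatusUpdateFrequency. The stop channel is an addition
// for this sketch so the demo can terminate cleanly.
func syncNodeStatus(stop <-chan struct{}) {
	for freq := initialNodeStatusUpdateFrequency; freq < nodeStatusUpdateFrequency; freq += nodeStatusUpdateFrequencyInc {
		select {
		case <-time.After(freq):
			if err := updateNodeStatus(); err != nil {
				fmt.Println("unable to update node status:", err)
			}
		case <-stop:
			return
		}
	}
	for {
		select {
		case <-time.After(nodeStatusUpdateFrequency):
			if err := updateNodeStatus(); err != nil {
				fmt.Println("unable to update node status:", err)
			}
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	go syncNodeStatus(stop)
	time.Sleep(8 * time.Second) // covers the ramp-up and a couple of steady-state posts
	close(stop)
}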