Added pods-per-core to kubelet. #25762

Robert Rati 2016-05-18 11:18:10 -04:00
parent 35922bdcbd
commit 2d487f7c06
11 changed files with 599 additions and 531 deletions

View File

@@ -240,6 +240,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
10*time.Second, /* OutOfDiskTransitionFrequency */
10*time.Second, /* EvictionPressureTransitionPeriod */
40, /* MaxPods */
0, /* PodsPerCore */
cm, net.ParseIP("127.0.0.1"))
kubeletapp.RunKubelet(kcfg)
@@ -273,6 +274,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
10*time.Second, /* OutOfDiskTransitionFrequency */
10*time.Second, /* EvictionPressureTransitionPeriod */
40, /* MaxPods */
0, /* PodsPerCore */
cm,
net.ParseIP("127.0.0.1"))

View File

@@ -146,6 +146,7 @@ func NewKubeletServer() *KubeletServer {
HairpinMode: componentconfig.PromiscuousBridge,
BabysitDaemons: false,
EvictionPressureTransitionPeriod: unversioned.Duration{Duration: 5 * time.Minute},
PodsPerCore: 0,
},
}
}
@@ -264,4 +265,5 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.EvictionSoftGracePeriod, "eviction-soft-grace-period", s.EvictionSoftGracePeriod, "A set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a pod eviction.")
fs.DurationVar(&s.EvictionPressureTransitionPeriod.Duration, "eviction-pressure-transition-period", s.EvictionPressureTransitionPeriod.Duration, "Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.")
fs.Int32Var(&s.EvictionMaxPodGracePeriod, "eviction-max-pod-grace-period", s.EvictionMaxPodGracePeriod, "Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. If negative, defer to pod specified value.")
fs.Int32Var(&s.PodsPerCore, "pods-per-core", s.PodsPerCore, "Number of Pods per core that can run on this Kubelet. The total number of Pods on this Kubelet cannot exceed max-pods, so max-pods will be used if this calculation results in a larger number of Pods allowed on the Kubelet. A value of 0 disables this limit.")
}
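As a worked example of the flag text above: on an 8-core node started with --pods-per-core=10 and the default --max-pods=110, the kubelet advertises min(8*10, 110) = 80 pods of capacity; leaving --pods-per-core at its default of 0 preserves the flat max-pods limit.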

View File

@@ -277,6 +277,7 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
ExperimentalFlannelOverlay: s.ExperimentalFlannelOverlay,
NodeIP: net.ParseIP(s.NodeIP),
EvictionConfig: evictionConfig,
PodsPerCore: int(s.PodsPerCore),
}, nil
}
@@ -533,7 +534,7 @@ func SimpleKubelet(client *clientset.Clientset,
cloud cloudprovider.Interface,
osInterface kubecontainer.OSInterface,
fileCheckFrequency, httpCheckFrequency, minimumGCAge, nodeStatusUpdateFrequency, syncFrequency, outOfDiskTransitionFrequency, evictionPressureTransitionPeriod time.Duration,
maxPods int,
maxPods int, podsPerCore int,
containerManager cm.ContainerManager, clusterDNS net.IP) *KubeletConfig {
imageGCPolicy := kubelet.ImageGCPolicy{
HighThresholdPercent: 90,
@@ -604,6 +605,7 @@
Writer: &io.StdWriter{},
OutOfDiskTransitionFrequency: outOfDiskTransitionFrequency,
EvictionConfig: evictionConfig,
PodsPerCore: podsPerCore,
}
return &kcfg
}
@@ -814,6 +816,7 @@ type KubeletConfig struct {
OOMAdjuster *oom.OOMAdjuster
OSInterface kubecontainer.OSInterface
PodCIDR string
PodsPerCore int
ReconcileCIDR bool
PodConfig *config.PodConfig
PodInfraContainerImage string
@@ -923,6 +926,7 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
kc.PodCIDR,
kc.ReconcileCIDR,
kc.MaxPods,
kc.PodsPerCore,
kc.NvidiaGPUs,
kc.DockerExecHandler,
kc.ResolverConfig,

View File

@@ -49,7 +49,8 @@ type HollowNodeConfig struct {
}
const (
maxPods = 110
maxPods     = 110
podsPerCore = 0
)
var knownMorphs = sets.NewString("kubelet", "proxy")
@@ -115,6 +116,7 @@ func main() {
config.KubeletReadOnlyPort,
containerManager,
maxPods,
podsPerCore,
)
hollowKubelet.Run()
}

View File

@@ -135,6 +135,7 @@ kubelet
--outofdisk-transition-frequency=5m0s: Duration for which the kubelet has to wait before transitioning out of out-of-disk node condition status. Default: 5m0s
--pod-cidr="": The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.
--pod-infra-container-image="gcr.io/google_containers/pause-amd64:3.0": The image whose network/ipc namespaces containers in each pod will use.
--pods-per-core=0: Number of Pods per core that can run on this Kubelet. The total number of Pods on this Kubelet cannot exceed max-pods, so max-pods will be used if this calculation results in a larger number of Pods allowed on the Kubelet. A value of 0 disables this limit.
--port=10250: The port for the Kubelet to serve on.
--read-only-port=10255: The read-only port for the Kubelet to serve on with no authentication/authorization (set to 0 to disable)
--really-crash-for-testing[=false]: If true, when panics occur crash. Intended for testing.
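Put compactly, the rule the --pods-per-core text describes is: when the flag is non-zero, pod capacity = min(num_cores * pods-per-core, max-pods); when it is 0, pod capacity = max-pods.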

View File

@@ -329,6 +329,7 @@ pod-cidr
pod-eviction-timeout
pod-infra-container-image
pod-running
pods-per-core
policy-config-file
poll-interval
portal-net

View File

@@ -316,6 +316,7 @@ func DeepCopy_componentconfig_KubeletConfiguration(in KubeletConfiguration, out
return err
}
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.PodsPerCore = in.PodsPerCore
return nil
}

File diff suppressed because it is too large

View File

@@ -360,6 +360,8 @@ type KubeletConfiguration struct {
EvictionPressureTransitionPeriod unversioned.Duration `json:"evictionPressureTransitionPeriod,omitempty"`
// Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"`
// Maximum number of pods per core. Cannot exceed MaxPods.
PodsPerCore int32 `json:"podsPerCore"`
}
type KubeSchedulerConfiguration struct {
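Since the new field carries a json tag (and, unlike its neighbors, no omitempty), a zero value is serialized explicitly. A minimal sketch of the round-trip, using a pared-down stand-in struct rather than the real KubeletConfiguration:

package main

import (
	"encoding/json"
	"fmt"
)

// kubeletConfiguration is a pared-down stand-in for
// componentconfig.KubeletConfiguration, keeping only the fields
// relevant to this change.
type kubeletConfiguration struct {
	MaxPods     int32 `json:"maxPods,omitempty"`
	PodsPerCore int32 `json:"podsPerCore"`
}

func main() {
	b, err := json.Marshal(kubeletConfiguration{MaxPods: 110, PodsPerCore: 10})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"maxPods":110,"podsPerCore":10}

	// A zero PodsPerCore still appears in the output because the
	// field has no omitempty tag.
	b, _ = json.Marshal(kubeletConfiguration{MaxPods: 110})
	fmt.Println(string(b)) // {"maxPods":110,"podsPerCore":0}
}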

View File

@@ -21,6 +21,7 @@ import (
"fmt"
"io"
"io/ioutil"
"math"
"net"
"net/http"
"os"
@@ -216,6 +217,7 @@ func NewMainKubelet(
podCIDR string,
reconcileCIDR bool,
maxPods int,
podsPerCore int,
nvidiaGPUs int,
dockerExecHandler dockertools.ExecHandler,
resolverConfig string,
@@ -343,6 +345,7 @@
nonMasqueradeCIDR: nonMasqueradeCIDR,
reconcileCIDR: reconcileCIDR,
maxPods: maxPods,
podsPerCore: podsPerCore,
nvidiaGPUs: nvidiaGPUs,
syncLoopMonitor: atomic.Value{},
resolverConfig: resolverConfig,
@@ -817,6 +820,9 @@ type Kubelet struct {
// the list of handlers to call during pod sync.
lifecycle.PodSyncHandlers
// the number of allowed pods per core
podsPerCore int
}
// Validate given node IP belongs to the current host
@@ -3048,8 +3054,13 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) {
node.Status.NodeInfo.MachineID = info.MachineID
node.Status.NodeInfo.SystemUUID = info.SystemUUID
node.Status.Capacity = cadvisor.CapacityFromMachineInfo(info)
node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
int64(kl.maxPods), resource.DecimalSI)
if kl.podsPerCore > 0 {
node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
int64(math.Min(float64(info.NumCores*kl.podsPerCore), float64(kl.maxPods))), resource.DecimalSI)
} else {
node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
int64(kl.maxPods), resource.DecimalSI)
}
node.Status.Capacity[api.ResourceNvidiaGPU] = *resource.NewQuantity(
int64(kl.nvidiaGPUs), resource.DecimalSI)
if node.Status.NodeInfo.BootID != "" &&
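The capacity rule added above is self-contained enough to sketch in isolation. A minimal runnable illustration of the same min() logic (not the kubelet's actual code path, which also sets the GPU and machine-info fields):

package main

import (
	"fmt"
	"math"
)

// podCapacity mirrors the logic added to setNodeStatusMachineInfo:
// with podsPerCore > 0, capacity is the smaller of numCores*podsPerCore
// and maxPods; with podsPerCore == 0 the per-core limit is disabled.
func podCapacity(numCores, podsPerCore, maxPods int) int64 {
	if podsPerCore > 0 {
		return int64(math.Min(float64(numCores*podsPerCore), float64(maxPods)))
	}
	return int64(maxPods)
}

func main() {
	fmt.Println(podCapacity(4, 10, 110))  // 40: the per-core limit wins
	fmt.Println(podCapacity(16, 10, 110)) // 110: max-pods wins
	fmt.Println(podCapacity(4, 0, 110))   // 110: per-core limit disabled
}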

View File

@@ -43,7 +43,7 @@ func NewHollowKubelet(
dockerClient dockertools.DockerInterface,
kubeletPort, kubeletReadOnlyPort int,
containerManager cm.ContainerManager,
maxPods int,
maxPods int, podsPerCore int,
) *HollowKubelet {
testRootDir := integration.MakeTempDirOrDie("hollow-kubelet.", "")
manifestFilePath := integration.MakeTempDirOrDie("manifest", testRootDir)
@@ -74,6 +74,7 @@
5*time.Minute, /* OutOfDiskTransitionFrequency */
5*time.Minute, /* EvictionPressureTransitionPeriod */
maxPods,
podsPerCore,
containerManager,
nil,
),