fix: rename

parent 5494b30ce5
commit 12d49b6bfb
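This commit renames the kubelet's CPU-related identifiers (CpuShares, CpuQuota, CpuPeriod, getCpuWeight) to the initialism-consistent forms (CPUShares, CPUQuota, CPUPeriod, getCPUWeight), matching the Go convention that initialisms such as CPU keep a uniform case. A minimal sketch of the renamed ResourceConfig fields, with pointer types inferred from the diff rather than copied from source (the real struct has more fields, e.g. HugePageLimit):

// ResourceConfig (sketch, not verbatim source): per-cgroup resource parameters.
type ResourceConfig struct {
	Memory    *int64  // memory limit in bytes (name unchanged by this commit)
	CPUShares *uint64 // was CpuShares
	CPUQuota  *int64  // was CpuQuota
	CPUPeriod *uint64 // was CpuPeriod
	PidsLimit *int64  // name unchanged by this commit
}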
@@ -307,8 +307,8 @@ func (m *cgroupManagerImpl) Destroy(cgroupConfig *CgroupConfig) error {
 	return nil
 }
 
-// getCpuWeight converts from the range [2, 262144] to [1, 10000]
-func getCpuWeight(cpuShares *uint64) uint64 {
+// getCPUWeight converts from the range [2, 262144] to [1, 10000]
+func getCPUWeight(cpuShares *uint64) uint64 {
 	if cpuShares == nil {
 		return 0
 	}
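The hunk above shows only the head of getCPUWeight. Per its doc comment, it maps cgroup v1 CPU shares, range [2, 262144], onto the cgroup v2 cpu.weight range [1, 10000]. The body falls outside the hunk, so the following linear mapping is a sketch consistent with those documented ranges, not the verbatim implementation:

func getCPUWeight(cpuShares *uint64) uint64 {
	if cpuShares == nil {
		return 0
	}
	if *cpuShares >= 262144 {
		return 10000
	}
	// Linear interpolation: 2 maps to 1, 262144 maps to 10000.
	return 1 + ((*cpuShares-2)*9999)/262142
}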
@@ -360,18 +360,18 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont
 	if resourceConfig.Memory != nil {
 		resources.Memory = *resourceConfig.Memory
 	}
-	if resourceConfig.CpuShares != nil {
+	if resourceConfig.CPUShares != nil {
 		if libcontainercgroups.IsCgroup2UnifiedMode() {
-			resources.CpuWeight = getCpuWeight(resourceConfig.CpuShares)
+			resources.CpuWeight = getCPUWeight(resourceConfig.CPUShares)
 		} else {
-			resources.CpuShares = *resourceConfig.CpuShares
+			resources.CpuShares = *resourceConfig.CPUShares
 		}
 	}
-	if resourceConfig.CpuQuota != nil {
-		resources.CpuQuota = *resourceConfig.CpuQuota
+	if resourceConfig.CPUQuota != nil {
+		resources.CpuQuota = *resourceConfig.CPUQuota
 	}
-	if resourceConfig.CpuPeriod != nil {
-		resources.CpuPeriod = *resourceConfig.CpuPeriod
+	if resourceConfig.CPUPeriod != nil {
+		resources.CpuPeriod = *resourceConfig.CPUPeriod
 	}
 	if resourceConfig.PidsLimit != nil {
 		resources.PidsLimit = *resourceConfig.PidsLimit
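Note that only the kubelet-side names change here: the libcontainer Resources fields (resources.CpuShares, resources.CpuWeight, resources.CpuQuota, resources.CpuPeriod) keep their upstream spelling. The branch itself encodes the cgroup v1/v2 split: a unified (v2) hierarchy exposes cpu.weight, so shares are converted through getCPUWeight, while v1 takes cpu.shares directly. A standalone sketch of that branch (hypothetical helper, not part of the patch, assuming the libcontainer imports this file already uses):

// applyCPUShares is a hypothetical extraction of the branch above.
func applyCPUShares(resources *libcontainerconfigs.Resources, shares *uint64) {
	if shares == nil {
		return
	}
	if libcontainercgroups.IsCgroup2UnifiedMode() {
		resources.CpuWeight = getCPUWeight(shares) // cgroup v2: cpu.weight in [1, 10000]
	} else {
		resources.CpuShares = *shares // cgroup v1: cpu.shares in [2, 262144]
	}
}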
@@ -531,7 +531,7 @@ func (m *cgroupManagerImpl) ReduceCPULimits(cgroupName CgroupName) error {
 	// Set lowest possible CpuShares value for the cgroup
 	minimumCPUShares := uint64(MinShares)
 	resources := &ResourceConfig{
-		CpuShares: &minimumCPUShares,
+		CPUShares: &minimumCPUShares,
 	}
 	containerConfig := &CgroupConfig{
 		Name: cgroupName,
@@ -182,22 +182,22 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64,
 	// build the result
 	result := &ResourceConfig{}
 	if qosClass == v1.PodQOSGuaranteed {
-		result.CpuShares = &cpuShares
-		result.CpuQuota = &cpuQuota
-		result.CpuPeriod = &cpuPeriod
+		result.CPUShares = &cpuShares
+		result.CPUQuota = &cpuQuota
+		result.CPUPeriod = &cpuPeriod
 		result.Memory = &memoryLimits
 	} else if qosClass == v1.PodQOSBurstable {
-		result.CpuShares = &cpuShares
+		result.CPUShares = &cpuShares
 		if cpuLimitsDeclared {
-			result.CpuQuota = &cpuQuota
-			result.CpuPeriod = &cpuPeriod
+			result.CPUQuota = &cpuQuota
+			result.CPUPeriod = &cpuPeriod
 		}
 		if memoryLimitsDeclared {
 			result.Memory = &memoryLimits
 		}
 	} else {
 		shares := uint64(MinShares)
-		result.CpuShares = &shares
+		result.CPUShares = &shares
 	}
 	result.HugePageLimit = hugePageLimits
 
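The cpuShares value assigned above is derived from the pod's CPU request earlier in ResourceConfigForPod. A sketch of that conversion under the kubelet's usual constants (1024 shares per CPU, 1000 millicores per CPU, minimum of 2 shares; treat the exact constants as assumptions to verify against the tree):

// MilliCPUToShares (sketch): convert CPU millicores to cgroup v1 shares.
func MilliCPUToShares(milliCPU int64) uint64 {
	const (
		minShares     = 2
		sharesPerCPU  = 1024
		milliCPUToCPU = 1000
	)
	if milliCPU == 0 {
		// No request: fall back to the kernel minimum.
		return minShares
	}
	shares := milliCPU * sharesPerCPU / milliCPUToCPU
	if shares < minShares {
		return minShares
	}
	return uint64(shares)
}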
@@ -40,7 +40,7 @@ const (
 	defaultNodeAllocatableCgroupName = "kubepods"
 )
 
-//createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true
+// createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true
 func (cm *containerManagerImpl) createNodeAllocatableCgroups() error {
 	nodeAllocatable := cm.internalCapacity
 	// Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable.
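This hunk is purely cosmetic: Go doc comments conventionally have a space after the slashes and begin with the name of the identifier they document. A hypothetical illustration of the convention:

// add returns the sum of a and b. (conventional: space after "//", identifier name first)
func add(a, b int) int { return a + b }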
@@ -155,7 +155,7 @@ func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.
 		Name: cName,
 		ResourceParameters: rp,
 	}
-	klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CpuShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit)
+	klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CPUShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit)
 	if err := cgroupManager.Validate(cgroupConfig.Name); err != nil {
 		return err
 	}
@@ -180,7 +180,7 @@ func getCgroupConfig(rl v1.ResourceList) *ResourceConfig {
 	if q, exists := rl[v1.ResourceCPU]; exists {
 		// CPU is defined in milli-cores.
 		val := MilliCPUToShares(q.MilliValue())
-		rc.CpuShares = &val
+		rc.CPUShares = &val
 	}
 	if q, exists := rl[pidlimit.PIDs]; exists {
 		val := q.Value()
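getCgroupConfig goes from an API quantity to shares via MilliValue. A hypothetical, self-contained usage sketch (the quantity type is from k8s.io/apimachinery/pkg/api/resource; the arithmetic mirrors the MilliCPUToShares sketch above):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("500m") // a CPU quantity of 500 millicores
	// Same arithmetic as MilliCPUToShares: (500 * 1024) / 1000 = 512.
	shares := uint64(q.MilliValue() * 1024 / 1000)
	fmt.Println(shares) // 512
}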
@@ -98,7 +98,7 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis
 	// the BestEffort QoS class has a statically configured minShares value
 	if qosClass == v1.PodQOSBestEffort {
 		minShares := uint64(MinShares)
-		resourceParameters.CpuShares = &minShares
+		resourceParameters.CPUShares = &minShares
 	}
 
 	// containerConfig object stores the cgroup specifications
@@ -184,11 +184,11 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]
 
 	// make sure best effort is always 2 shares
 	bestEffortCPUShares := uint64(MinShares)
-	configs[v1.PodQOSBestEffort].ResourceParameters.CpuShares = &bestEffortCPUShares
+	configs[v1.PodQOSBestEffort].ResourceParameters.CPUShares = &bestEffortCPUShares
 
 	// set burstable shares based on current observe state
 	burstableCPUShares := MilliCPUToShares(burstablePodCPURequest)
-	configs[v1.PodQOSBurstable].ResourceParameters.CpuShares = &burstableCPUShares
+	configs[v1.PodQOSBurstable].ResourceParameters.CPUShares = &burstableCPUShares
 	return nil
 }
 