Merge pull request #46926 from k82cn/k8s_46924
Automatic merge from submit-queue (batch tested with PRs 46926, 48468)

Added helper funcs to schedulercache.Resource.

**What this PR does / why we need it**: Avoids duplicated code by moving the resource-accumulation logic into helper funcs on `schedulercache.Resource`.

**Which issue this PR fixes**: fixes #46924

**Release note**: NONE
Commit 3bfcd74ace
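The change is mechanical: every hand-rolled `switch rName { ... }` loop that folds a `v1.ResourceList` into per-resource totals is replaced by calls to the new `NewResource`/`Add` helpers on `schedulercache.Resource`. Below is a minimal, self-contained sketch of that accumulation pattern, assuming the `k8s.io/api` and `k8s.io/apimachinery` modules are available; the `resourceTotals` type and its `add` method are illustrative stand-ins, not the scheduler's internal `Resource`.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// resourceTotals loosely mirrors the MilliCPU/Memory fields of
// schedulercache.Resource; it is a stand-in for illustration only.
type resourceTotals struct {
	MilliCPU int64
	Memory   int64
}

// add folds one ResourceList into the running totals, so callers no longer
// repeat the switch over resource names at every call site.
func (r *resourceTotals) add(rl v1.ResourceList) {
	for name, quant := range rl {
		switch name {
		case v1.ResourceCPU:
			r.MilliCPU += quant.MilliValue()
		case v1.ResourceMemory:
			r.Memory += quant.Value()
		}
	}
}

func main() {
	total := &resourceTotals{}
	// Requests of two containers, each expressed as a v1.ResourceList.
	requests := []v1.ResourceList{
		{v1.ResourceCPU: resource.MustParse("1500m"), v1.ResourceMemory: resource.MustParse("1Gi")},
		{v1.ResourceCPU: resource.MustParse("1500m"), v1.ResourceMemory: resource.MustParse("2Gi")},
	}
	for _, rl := range requests {
		total.add(rl)
	}
	fmt.Printf("CPU: %dm, Memory: %d bytes\n", total.MilliCPU, total.Memory)
}
```

The real helpers introduced further down also handle NvidiaGPU, Pods, Storage, StorageOverlay, and opaque integer resources, and guard against a nil receiver.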
```diff
@@ -499,30 +499,15 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *s
 //
 // Result: CPU: 3, Memory: 3G
 func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
-	result := schedulercache.Resource{}
+	result := &schedulercache.Resource{}
 	for _, container := range pod.Spec.Containers {
-		for rName, rQuantity := range container.Resources.Requests {
-			switch rName {
-			case v1.ResourceMemory:
-				result.Memory += rQuantity.Value()
-			case v1.ResourceCPU:
-				result.MilliCPU += rQuantity.MilliValue()
-			case v1.ResourceNvidiaGPU:
-				result.NvidiaGPU += rQuantity.Value()
-			case v1.ResourceStorageOverlay:
-				result.StorageOverlay += rQuantity.Value()
-			default:
-				if v1helper.IsOpaqueIntResourceName(rName) {
-					result.AddOpaque(rName, rQuantity.Value())
-				}
-			}
-		}
+		result.Add(container.Resources.Requests)
 	}
 
 	// Account for storage requested by emptydir volumes
 	// If the storage medium is memory, should exclude the size
 	for _, vol := range pod.Spec.Volumes {
 		if vol.EmptyDir != nil && vol.EmptyDir.Medium != v1.StorageMediumMemory {
 			result.StorageScratch += vol.EmptyDir.SizeLimit.Value()
 		}
 	}
```
```diff
@@ -557,7 +542,8 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 			}
 		}
 	}
-	return &result
+
+	return result
 }
 
 func podName(pod *v1.Pod) string {
```
```diff
@@ -35,7 +35,6 @@ go_test(
     library = ":go_default_library",
     tags = ["automanaged"],
     deps = [
-        "//pkg/api/v1/helper:go_default_library",
         "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library",
         "//plugin/pkg/scheduler/util:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
```
```diff
@@ -26,7 +26,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
 	priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
 	schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util"
 )
```
```diff
@@ -507,76 +506,32 @@ func TestForgetPod(t *testing.T) {
 	}
 }
 
-// addResource adds ResourceList into Resource.
-func addResource(r *Resource, rl v1.ResourceList) {
-	if r == nil {
-		return
-	}
-
-	for rName, rQuant := range rl {
-		switch rName {
-		case v1.ResourceCPU:
-			r.MilliCPU += rQuant.MilliValue()
-		case v1.ResourceMemory:
-			r.Memory += rQuant.Value()
-		case v1.ResourceNvidiaGPU:
-			r.NvidiaGPU += rQuant.Value()
-		default:
-			if v1helper.IsOpaqueIntResourceName(rName) {
-				r.AddOpaque(rName, rQuant.Value())
-			}
-		}
-	}
-}
-
 // getResourceRequest returns the resource request of all containers in Pods;
 // excuding initContainers.
 func getResourceRequest(pod *v1.Pod) v1.ResourceList {
 	result := &Resource{}
 	for _, container := range pod.Spec.Containers {
-		addResource(result, container.Resources.Requests)
+		result.Add(container.Resources.Requests)
 	}
 
 	return result.ResourceList()
 }
 
-// newResource returns a new Resource by ResourceList.
-func newResource(rl v1.ResourceList) *Resource {
-	res := &Resource{}
-
-	for rName, rQuantity := range rl {
-		switch rName {
-		case v1.ResourceMemory:
-			res.Memory = rQuantity.Value()
-		case v1.ResourceCPU:
-			res.MilliCPU = rQuantity.MilliValue()
-		case v1.ResourceNvidiaGPU:
-			res.NvidiaGPU += rQuantity.Value()
-		default:
-			if v1helper.IsOpaqueIntResourceName(rName) {
-				res.SetOpaque(rName, rQuantity.Value())
-			}
-		}
-	}
-
-	return res
-}
-
 // buildNodeInfo creates a NodeInfo by simulating node operations in cache.
 func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *NodeInfo {
 	expected := NewNodeInfo()
 
 	// Simulate SetNode.
 	expected.node = node
-	expected.allocatableResource = newResource(node.Status.Allocatable)
+	expected.allocatableResource = NewResource(node.Status.Allocatable)
 	expected.taints = node.Spec.Taints
 	expected.generation++
 
 	for _, pod := range pods {
 		// Simulate AddPod
 		expected.pods = append(expected.pods, pod)
-		addResource(expected.requestedResource, getResourceRequest(pod))
-		addResource(expected.nonzeroRequest, getResourceRequest(pod))
+		expected.requestedResource.Add(getResourceRequest(pod))
+		expected.nonzeroRequest.Add(getResourceRequest(pod))
 		expected.usedPorts = schedutil.GetUsedPorts(pod)
 		expected.generation++
 	}
```
```diff
@@ -47,9 +47,6 @@ type NodeInfo struct {
 	// We store allocatedResources (which is Node.Status.Allocatable.*) explicitly
 	// as int64, to avoid conversions and accessing map.
 	allocatableResource *Resource
-	// We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value())
-	// explicitly as int, to avoid conversions and improve performance.
-	allowedPodNumber int
 
 	// Cached tains of the node for faster lookup.
 	taints []v1.Taint
```
```diff
@@ -71,15 +68,55 @@ type Resource struct {
 	NvidiaGPU      int64
 	StorageScratch int64
 	StorageOverlay int64
+	// We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value())
+	// explicitly as int, to avoid conversions and improve performance.
+	AllowedPodNumber int
 	OpaqueIntResources map[v1.ResourceName]int64
 }
 
+// New creates a Resource from ResourceList
+func NewResource(rl v1.ResourceList) *Resource {
+	r := &Resource{}
+	r.Add(rl)
+	return r
+}
+
+// Add adds ResourceList into Resource.
+func (r *Resource) Add(rl v1.ResourceList) {
+	if r == nil {
+		return
+	}
+
+	for rName, rQuant := range rl {
+		switch rName {
+		case v1.ResourceCPU:
+			r.MilliCPU += rQuant.MilliValue()
+		case v1.ResourceMemory:
+			r.Memory += rQuant.Value()
+		case v1.ResourceNvidiaGPU:
+			r.NvidiaGPU += rQuant.Value()
+		case v1.ResourcePods:
+			r.AllowedPodNumber += int(rQuant.Value())
+		case v1.ResourceStorage:
+			r.StorageScratch += rQuant.Value()
+		case v1.ResourceStorageOverlay:
+			r.StorageOverlay += rQuant.Value()
+		default:
+			if v1helper.IsOpaqueIntResourceName(rName) {
+				r.AddOpaque(rName, rQuant.Value())
+			}
+		}
+	}
+}
+
 func (r *Resource) ResourceList() v1.ResourceList {
 	result := v1.ResourceList{
 		v1.ResourceCPU:            *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
 		v1.ResourceMemory:         *resource.NewQuantity(r.Memory, resource.BinarySI),
 		v1.ResourceNvidiaGPU:      *resource.NewQuantity(r.NvidiaGPU, resource.DecimalSI),
+		v1.ResourcePods:           *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),
 		v1.ResourceStorageOverlay: *resource.NewQuantity(r.StorageOverlay, resource.BinarySI),
+		v1.ResourceStorage:        *resource.NewQuantity(r.StorageScratch, resource.BinarySI),
 	}
 	for rName, rQuant := range r.OpaqueIntResources {
 		result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
```
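`Add` begins with an `if r == nil { return }` guard, so invoking it through a nil `*Resource` is a no-op rather than a panic. A small standalone sketch of why this works in Go (a pointer-receiver method may be called on a nil pointer; only touching the fields would panic); `counters` is a hypothetical stand-in type, not the scheduler's:

```go
package main

import "fmt"

// counters is a stand-in for the Resource accumulator.
type counters struct{ cpu int64 }

// add mirrors the guard in Resource.Add: a nil receiver is a no-op.
func (c *counters) add(delta int64) {
	if c == nil {
		return
	}
	c.cpu += delta
}

func main() {
	var c *counters // nil pointer
	c.add(100)      // safe: the guard returns early instead of panicking
	fmt.Println(c)  // <nil>

	c = &counters{}
	c.add(100)
	fmt.Println(c.cpu) // 100
}
```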
```diff
@@ -92,6 +129,7 @@ func (r *Resource) Clone() *Resource {
 		MilliCPU:         r.MilliCPU,
 		Memory:           r.Memory,
 		NvidiaGPU:        r.NvidiaGPU,
+		AllowedPodNumber: r.AllowedPodNumber,
 		StorageOverlay:   r.StorageOverlay,
 		StorageScratch:   r.StorageScratch,
 	}
```
```diff
@@ -124,7 +162,6 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
 		requestedResource:   &Resource{},
 		nonzeroRequest:      &Resource{},
 		allocatableResource: &Resource{},
-		allowedPodNumber:    0,
 		generation:          0,
 		usedPorts:           make(map[int]bool),
 	}
```
```diff
@@ -166,10 +203,10 @@ func (n *NodeInfo) PodsWithAffinity() []*v1.Pod {
 }
 
 func (n *NodeInfo) AllowedPodNumber() int {
-	if n == nil {
+	if n == nil || n.allocatableResource == nil {
 		return 0
 	}
-	return n.allowedPodNumber
+	return n.allocatableResource.AllowedPodNumber
 }
 
 func (n *NodeInfo) Taints() ([]v1.Taint, error) {
```
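With `allowedPodNumber` folded into `Resource.AllowedPodNumber`, the accessor now has to guard against a nil `allocatableResource` as well as a nil `NodeInfo` before dereferencing, which is what the extra `|| n.allocatableResource == nil` check above does. A hedged, standalone sketch of that accessor pattern, with illustrative stand-in names:

```go
package main

import "fmt"

// allocatable stands in for the Resource that now carries the pod count.
type allocatable struct{ allowedPodNumber int }

// nodeInfo stands in for NodeInfo after the change.
type nodeInfo struct{ allocatableResource *allocatable }

// AllowedPodNumber guards both pointers before dereferencing, mirroring the diff.
func (n *nodeInfo) AllowedPodNumber() int {
	if n == nil || n.allocatableResource == nil {
		return 0
	}
	return n.allocatableResource.allowedPodNumber
}

func main() {
	var missing *nodeInfo
	fmt.Println(missing.AllowedPodNumber()) // 0, no panic

	n := &nodeInfo{allocatableResource: &allocatable{allowedPodNumber: 110}}
	fmt.Println(n.AllowedPodNumber()) // 110
}
```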
```diff
@@ -223,7 +260,6 @@ func (n *NodeInfo) Clone() *NodeInfo {
 		requestedResource:       n.requestedResource.Clone(),
 		nonzeroRequest:          n.nonzeroRequest.Clone(),
 		allocatableResource:     n.allocatableResource.Clone(),
-		allowedPodNumber:        n.allowedPodNumber,
 		taintsErr:               n.taintsErr,
 		memoryPressureCondition: n.memoryPressureCondition,
 		diskPressureCondition:   n.diskPressureCondition,
```
```diff
@@ -253,7 +289,8 @@ func (n *NodeInfo) String() string {
 	for i, pod := range n.pods {
 		podKeys[i] = pod.Name
 	}
-	return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v, UsedPort: %#v}", podKeys, n.requestedResource, n.nonzeroRequest, n.usedPorts)
+	return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v, UsedPort: %#v, AllocatableResource:%#v}",
+		podKeys, n.requestedResource, n.nonzeroRequest, n.usedPorts, n.allocatableResource)
 }
 
 func hasPodAffinityConstraints(pod *v1.Pod) bool {
```
```diff
@@ -345,23 +382,9 @@ func (n *NodeInfo) removePod(pod *v1.Pod) error {
 }
 
 func calculateResource(pod *v1.Pod) (res Resource, non0_cpu int64, non0_mem int64) {
+	resPtr := &res
 	for _, c := range pod.Spec.Containers {
-		for rName, rQuant := range c.Resources.Requests {
-			switch rName {
-			case v1.ResourceCPU:
-				res.MilliCPU += rQuant.MilliValue()
-			case v1.ResourceMemory:
-				res.Memory += rQuant.Value()
-			case v1.ResourceNvidiaGPU:
-				res.NvidiaGPU += rQuant.Value()
-			case v1.ResourceStorageOverlay:
-				res.StorageOverlay += rQuant.Value()
-			default:
-				if v1helper.IsOpaqueIntResourceName(rName) {
-					res.AddOpaque(rName, rQuant.Value())
-				}
-			}
-		}
+		resPtr.Add(c.Resources.Requests)
 
 		non0_cpu_req, non0_mem_req := priorityutil.GetNonzeroRequests(&c.Resources.Requests)
 		non0_cpu += non0_cpu_req
```
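`calculateResource` keeps `res` as a named value return but takes its address once (`resPtr := &res`) so the pointer-receiver `Add` accumulates directly into the value being returned. A standalone sketch of that idiom with a stand-in `tally` type:

```go
package main

import "fmt"

// tally is a stand-in accumulator with a pointer-receiver add, like Resource.Add.
type tally struct{ milliCPU int64 }

func (t *tally) add(milli int64) { t.milliCPU += milli }

// sum accumulates into the named return value through an explicit pointer,
// mirroring resPtr := &res in calculateResource.
func sum(requests []int64) (res tally) {
	resPtr := &res
	for _, r := range requests {
		resPtr.add(r) // mutates res in place
	}
	return
}

func main() {
	fmt.Println(sum([]int64{1000, 500, 1500}).milliCPU) // 3000
}
```

Since `res` is addressable, calling `res.Add(...)` directly would also compile; the explicit pointer mostly makes the in-place mutation obvious.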
```diff
@@ -397,26 +420,9 @@ func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, used bool) {
 // Sets the overall node information.
 func (n *NodeInfo) SetNode(node *v1.Node) error {
 	n.node = node
-	for rName, rQuant := range node.Status.Allocatable {
-		switch rName {
-		case v1.ResourceCPU:
-			n.allocatableResource.MilliCPU = rQuant.MilliValue()
-		case v1.ResourceMemory:
-			n.allocatableResource.Memory = rQuant.Value()
-		case v1.ResourceNvidiaGPU:
-			n.allocatableResource.NvidiaGPU = rQuant.Value()
-		case v1.ResourcePods:
-			n.allowedPodNumber = int(rQuant.Value())
-		case v1.ResourceStorage:
-			n.allocatableResource.StorageScratch = rQuant.Value()
-		case v1.ResourceStorageOverlay:
-			n.allocatableResource.StorageOverlay = rQuant.Value()
-		default:
-			if v1helper.IsOpaqueIntResourceName(rName) {
-				n.allocatableResource.SetOpaque(rName, rQuant.Value())
-			}
-		}
-	}
+
+	n.allocatableResource = NewResource(node.Status.Allocatable)
+
 	n.taints = node.Spec.Taints
 	for i := range node.Status.Conditions {
 		cond := &node.Status.Conditions[i]
```
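In `SetNode`, the per-field assignments are replaced by building a fresh accumulator from `node.Status.Allocatable` and swapping the pointer, so values from an earlier call cannot linger in `allocatableResource`. A hedged sketch of that replace-rather-than-mutate choice, using stand-in names rather than the scheduler's types:

```go
package main

import "fmt"

// totals is a stand-in for the allocatable Resource.
type totals struct{ milliCPU, memory int64 }

// newTotals plays the role of NewResource: build a fresh accumulator.
func newTotals(milliCPU, memory int64) *totals {
	return &totals{milliCPU: milliCPU, memory: memory}
}

type nodeInfo struct{ allocatable *totals }

// setNode swaps in a freshly built accumulator instead of assigning fields
// one by one, so nothing from a previous call survives.
func (n *nodeInfo) setNode(milliCPU, memory int64) {
	n.allocatable = newTotals(milliCPU, memory)
}

func main() {
	n := &nodeInfo{allocatable: &totals{milliCPU: 4000, memory: 8 << 30}}
	n.setNode(2000, 4<<30) // old totals fully replaced
	fmt.Printf("%+v\n", *n.allocatable)
}
```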
```diff
@@ -441,7 +447,6 @@ func (n *NodeInfo) RemoveNode(node *v1.Node) error {
 	// node removal. This is handled correctly in cache.go file.
 	n.node = nil
 	n.allocatableResource = &Resource{}
-	n.allowedPodNumber = 0
 	n.taints, n.taintsErr = nil, nil
 	n.memoryPressureCondition = v1.ConditionUnknown
 	n.diskPressureCondition = v1.ConditionUnknown
```