Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 19:01:49 +00:00)

commit 0c9245a29f
Merge pull request #89974 from ahg-g/ahg-info

scheduler's NodeInfo tracks PodInfos instead of Pods
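This merge changes NodeInfo to carry framework.PodInfo elements: NodeInfo.Pods() and PodsWithAffinity() now return []*framework.PodInfo instead of []*v1.Pod, so every call site reaches the Pod object through the .Pod field. Below is a minimal sketch of the new access pattern, assuming the framework import path that appears elsewhere in this diff; the pod values are placeholders, not part of the change itself.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func main() {
    // AddPod still accepts a plain *v1.Pod; the NodeInfo wraps it into a PodInfo.
    ni := framework.NewNodeInfo()
    ni.AddPod(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "demo", Name: "pod-1"}})

    // Pods() now yields []*framework.PodInfo, so callers go through p.Pod.
    for _, p := range ni.Pods() {
        fmt.Println(p.Pod.Namespace, p.Pod.Name)
    }
}

The hunks that follow apply this same p.Pod dereference across the extender, preemption, filtering, and scoring code.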
@@ -287,9 +287,9 @@ func (h *HTTPExtender) convertToNodeToVictims(
 func (h *HTTPExtender) convertPodUIDToPod(
     metaPod *extenderv1.MetaPod,
     nodeInfo *framework.NodeInfo) (*v1.Pod, error) {
-    for _, pod := range nodeInfo.Pods() {
-        if string(pod.UID) == metaPod.UID {
-            return pod, nil
+    for _, p := range nodeInfo.Pods() {
+        if string(p.Pod.UID) == metaPod.UID {
+            return p.Pod, nil
         }
     }
     return nil, fmt.Errorf("extender: %v claims to preempt pod (UID: %v) on node: %v, but the pod is not found on that node",
@@ -226,9 +226,9 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(pod *v1.Pod, node *v1.Node)
     // check if the given pod can be scheduled.
     podPriority := podutil.GetPodPriority(pod)
     for _, p := range nodeInfoCopy.Pods() {
-        if podutil.GetPodPriority(p) < podPriority {
-            potentialVictims = append(potentialVictims, p)
-            removePod(p)
+        if podutil.GetPodPriority(p.Pod) < podPriority {
+            potentialVictims = append(potentialVictims, p.Pod)
+            removePod(p.Pod)
         }
     }
     sort.Slice(potentialVictims, func(i, j int) bool { return util.MoreImportantPod(potentialVictims[i], potentialVictims[j]) })
@@ -971,9 +971,9 @@ func (g *genericScheduler) selectVictimsOnNode(
     // check if the given pod can be scheduled.
     podPriority := podutil.GetPodPriority(pod)
     for _, p := range nodeInfo.Pods() {
-        if podutil.GetPodPriority(p) < podPriority {
-            potentialVictims = append(potentialVictims, p)
-            if err := removePod(p); err != nil {
+        if podutil.GetPodPriority(p.Pod) < podPriority {
+            potentialVictims = append(potentialVictims, p.Pod)
+            if err := removePod(p.Pod); err != nil {
                 return nil, 0, false
             }
         }
@@ -1063,7 +1063,7 @@ func podEligibleToPreemptOthers(pod *v1.Pod, nodeInfos framework.NodeInfoLister,
     if nodeInfo, _ := nodeInfos.Get(nomNodeName); nodeInfo != nil {
         podPriority := podutil.GetPodPriority(pod)
         for _, p := range nodeInfo.Pods() {
-            if p.DeletionTimestamp != nil && podutil.GetPodPriority(p) < podPriority {
+            if p.Pod.DeletionTimestamp != nil && podutil.GetPodPriority(p.Pod) < podPriority {
                 // There is a terminating pod on the nominated node.
                 return false
             }
@@ -200,11 +200,11 @@ func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *fra
         return 0
     }
     count := 0
-    for _, pod := range nodeInfo.Pods() {
+    for _, p := range nodeInfo.Pods() {
         // Ignore pods being deleted for spreading purposes
         // Similar to how it is done for SelectorSpreadPriority
-        if namespace == pod.Namespace && pod.DeletionTimestamp == nil {
-            if selector.Matches(labels.Set(pod.Labels)) {
+        if namespace == p.Pod.Namespace && p.Pod.DeletionTimestamp == nil {
+            if selector.Matches(labels.Set(p.Pod.Labels)) {
                 count++
             }
         }
@@ -229,7 +229,7 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*framework.Nod
             return
         }
         for _, existingPod := range nodeInfo.PodsWithAffinity() {
-            existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, node)
+            existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod.Pod, node)
             if err != nil {
                 errCh.SendErrorWithCancel(err, cancel)
                 return
@@ -293,10 +293,10 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, allNodes []*frame
         nodeTopologyPairsAntiAffinityPodsMap := make(topologyToMatchedTermCount)
         for _, existingPod := range nodeInfo.Pods() {
             // Check affinity terms.
-            nodeTopologyPairsAffinityPodsMap.updateWithAffinityTerms(existingPod, node, affinityTerms, 1)
+            nodeTopologyPairsAffinityPodsMap.updateWithAffinityTerms(existingPod.Pod, node, affinityTerms, 1)

             // Check anti-affinity terms.
-            nodeTopologyPairsAntiAffinityPodsMap.updateWithAntiAffinityTerms(existingPod, node, antiAffinityTerms, 1)
+            nodeTopologyPairsAntiAffinityPodsMap.updateWithAntiAffinityTerms(existingPod.Pod, node, antiAffinityTerms, 1)
         }

         if len(nodeTopologyPairsAffinityPodsMap) > 0 || len(nodeTopologyPairsAntiAffinityPodsMap) > 0 {
@@ -247,7 +247,7 @@ func (pl *InterPodAffinity) PreScore(

         topoScore := make(scoreMap)
         for _, existingPod := range podsToProcess {
-            if err := pl.processExistingPod(state, existingPod, nodeInfo, pod, topoScore); err != nil {
+            if err := pl.processExistingPod(state, existingPod.Pod, nodeInfo, pod, topoScore); err != nil {
                 errCh.SendErrorWithCancel(err, cancel)
                 return
             }
@@ -32,10 +32,10 @@ import (
 )

 // getExistingVolumeCountForNode gets the current number of volumes on node.
-func getExistingVolumeCountForNode(pods []*v1.Pod, maxVolumes int) int {
+func getExistingVolumeCountForNode(podInfos []*framework.PodInfo, maxVolumes int) int {
     volumeCount := 0
-    for _, pod := range pods {
-        volumeCount += len(pod.Spec.Volumes)
+    for _, p := range podInfos {
+        volumeCount += len(p.Pod.Spec.Volumes)
     }
     if maxVolumes-volumeCount > 0 {
         return maxVolumes - volumeCount
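Since the helper above now takes []*framework.PodInfo, the slice returned by NodeInfo.Pods() has exactly the element type it expects. A hedged sketch of a call site, assuming a *framework.NodeInfo named nodeInfo and a maxVolumes value from the surrounding test; both names are illustrative:

// nodeInfo.Pods() already has the []*framework.PodInfo element type the helper expects.
free := getExistingVolumeCountForNode(nodeInfo.Pods(), maxVolumes)
_ = free // remaining volume slots on the node, per the arithmetic shown above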
@@ -103,7 +103,7 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v

     attachedVolumes := make(map[string]string)
     for _, existingPod := range nodeInfo.Pods() {
-        if err := pl.filterAttachableVolumes(csiNode, existingPod.Spec.Volumes, existingPod.Namespace, attachedVolumes); err != nil {
+        if err := pl.filterAttachableVolumes(csiNode, existingPod.Pod.Spec.Volumes, existingPod.Pod.Namespace, attachedVolumes); err != nil {
             return framework.NewStatus(framework.Error, err.Error())
         }
     }
@@ -236,7 +236,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod
     // count unique volumes
     existingVolumes := make(map[string]bool)
     for _, existingPod := range nodeInfo.Pods() {
-        if err := pl.filterVolumes(existingPod.Spec.Volumes, existingPod.Namespace, existingVolumes); err != nil {
+        if err := pl.filterVolumes(existingPod.Pod.Spec.Volumes, existingPod.Pod.Namespace, existingVolumes); err != nil {
             return framework.NewStatus(framework.Error, err.Error())
         }
     }
@@ -21,6 +21,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
+    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )

 type topologyPair struct {
@@ -83,14 +84,14 @@ func filterTopologySpreadConstraints(constraints []v1.TopologySpreadConstraint,
     return result, nil
 }

-func countPodsMatchSelector(pods []*v1.Pod, selector labels.Selector, ns string) int {
+func countPodsMatchSelector(podInfos []*framework.PodInfo, selector labels.Selector, ns string) int {
     count := 0
-    for _, p := range pods {
+    for _, p := range podInfos {
         // Bypass terminating Pod (see #87621).
-        if p.DeletionTimestamp != nil || p.Namespace != ns {
+        if p.Pod.DeletionTimestamp != nil || p.Pod.Namespace != ns {
             continue
         }
-        if selector.Matches(labels.Set(p.Labels)) {
+        if selector.Matches(labels.Set(p.Pod.Labels)) {
             count++
         }
     }
@@ -285,8 +285,8 @@ func (pl *ServiceAffinity) Score(ctx context.Context, state *framework.CycleStat
     for _, existingPod := range nodeInfo.Pods() {
         // Ignore pods being deleted for spreading purposes
         // Similar to how it is done for SelectorSpreadPriority
-        if pod.Namespace == existingPod.Namespace && existingPod.DeletionTimestamp == nil {
-            if selector.Matches(labels.Set(existingPod.Labels)) {
+        if pod.Namespace == existingPod.Pod.Namespace && existingPod.Pod.DeletionTimestamp == nil {
+            if selector.Matches(labels.Set(existingPod.Pod.Labels)) {
                 score++
             }
         }
@@ -120,7 +120,7 @@ func haveOverlap(a1, a2 []string) bool {
 func (pl *VolumeRestrictions) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
     for _, v := range pod.Spec.Volumes {
         for _, ev := range nodeInfo.Pods() {
-            if isVolumeConflict(v, ev) {
+            if isVolumeConflict(v, ev.Pod) {
                 return framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
             }
         }
@@ -210,32 +210,6 @@ type Plugin interface {
     Name() string
 }

-// PodInfo is a wrapper to a Pod with additional information for purposes such as tracking
-// the timestamp when it's added to the queue or recording per-pod metrics.
-type PodInfo struct {
-    Pod *v1.Pod
-    // The time pod added to the scheduling queue.
-    Timestamp time.Time
-    // Number of schedule attempts before successfully scheduled.
-    // It's used to record the # attempts metric.
-    Attempts int
-    // The time when the pod is added to the queue for the first time. The pod may be added
-    // back to the queue multiple times before it's successfully scheduled.
-    // It shouldn't be updated once initialized. It's used to record the e2e scheduling
-    // latency for a pod.
-    InitialAttemptTimestamp time.Time
-}
-
-// DeepCopy returns a deep copy of the PodInfo object.
-func (podInfo *PodInfo) DeepCopy() *PodInfo {
-    return &PodInfo{
-        Pod:                     podInfo.Pod.DeepCopy(),
-        Timestamp:               podInfo.Timestamp,
-        Attempts:                podInfo.Attempts,
-        InitialAttemptTimestamp: podInfo.InitialAttemptTimestamp,
-    }
-}
-
 // LessFunc is the function to sort pod info
 type LessFunc func(podInfo1, podInfo2 *PodInfo) bool

@@ -21,8 +21,9 @@ import (
     "fmt"
     "sync"
     "sync/atomic"
+    "time"

-    v1 "k8s.io/api/core/v1"
+    "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/klog"
@@ -36,6 +37,39 @@ var (
     generation int64
 )

+// PodInfo is a wrapper to a Pod with additional information for purposes such as tracking
+// the timestamp when it's added to the queue or recording per-pod metrics.
+type PodInfo struct {
+    Pod *v1.Pod
+    // The time pod added to the scheduling queue.
+    Timestamp time.Time
+    // Number of schedule attempts before successfully scheduled.
+    // It's used to record the # attempts metric.
+    Attempts int
+    // The time when the pod is added to the queue for the first time. The pod may be added
+    // back to the queue multiple times before it's successfully scheduled.
+    // It shouldn't be updated once initialized. It's used to record the e2e scheduling
+    // latency for a pod.
+    InitialAttemptTimestamp time.Time
+}
+
+// DeepCopy returns a deep copy of the PodInfo object.
+func (podInfo *PodInfo) DeepCopy() *PodInfo {
+    return &PodInfo{
+        Pod:                     podInfo.Pod.DeepCopy(),
+        Timestamp:               podInfo.Timestamp,
+        Attempts:                podInfo.Attempts,
+        InitialAttemptTimestamp: podInfo.InitialAttemptTimestamp,
+    }
+}
+
+// NewPodInfo return a new PodInfo
+func NewPodInfo(pod *v1.Pod) *PodInfo {
+    return &PodInfo{
+        Pod: pod,
+    }
+}
+
 // ImageStateSummary provides summarized information about the state of an image.
 type ImageStateSummary struct {
     // Size of the image
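A short sketch of the constructor and DeepCopy added above, assuming the framework import path used elsewhere in this diff; the names and field values are illustrative only:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func main() {
    // NewPodInfo only sets the Pod field; Timestamp, Attempts and
    // InitialAttemptTimestamp stay at their zero values until the queue records them.
    pi := framework.NewPodInfo(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}})

    // DeepCopy duplicates the wrapped Pod, so mutating the copy leaves the original alone.
    clone := pi.DeepCopy()
    clone.Pod.Name = "demo-copy"
    fmt.Println(pi.Pod.Name, clone.Pod.Name) // demo demo-copy
}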
@@ -49,8 +83,8 @@ type NodeInfo struct {
     // Overall node information.
     node *v1.Node

-    pods             []*v1.Pod
-    podsWithAffinity []*v1.Pod
+    pods             []*PodInfo
+    podsWithAffinity []*PodInfo
     usedPorts        HostPortInfo

     // Total requested resources of all pods on this node. This includes assumed
@@ -290,18 +324,13 @@ func (n *NodeInfo) Node() *v1.Node {
 }

 // Pods return all pods scheduled (including assumed to be) on this node.
-func (n *NodeInfo) Pods() []*v1.Pod {
+func (n *NodeInfo) Pods() []*PodInfo {
     if n == nil {
         return nil
     }
     return n.pods
 }

-// SetPods sets all pods scheduled (including assumed to be) on this node.
-func (n *NodeInfo) SetPods(pods []*v1.Pod) {
-    n.pods = pods
-}
-
 // UsedPorts returns used ports on this node.
 func (n *NodeInfo) UsedPorts() HostPortInfo {
     if n == nil {
@@ -329,7 +358,7 @@ func (n *NodeInfo) SetImageStates(newImageStates map[string]*ImageStateSummary)
 }

 // PodsWithAffinity return all pods with (anti)affinity constraints on this node.
-func (n *NodeInfo) PodsWithAffinity() []*v1.Pod {
+func (n *NodeInfo) PodsWithAffinity() []*PodInfo {
     if n == nil {
         return nil
     }
@@ -427,7 +456,7 @@ func (n *NodeInfo) Clone() *NodeInfo {
         generation: n.generation,
     }
     if len(n.pods) > 0 {
-        clone.pods = append([]*v1.Pod(nil), n.pods...)
+        clone.pods = append([]*PodInfo(nil), n.pods...)
     }
     if len(n.usedPorts) > 0 {
         // HostPortInfo is a map-in-map struct
@@ -440,7 +469,7 @@ func (n *NodeInfo) Clone() *NodeInfo {
         }
     }
     if len(n.podsWithAffinity) > 0 {
-        clone.podsWithAffinity = append([]*v1.Pod(nil), n.podsWithAffinity...)
+        clone.podsWithAffinity = append([]*PodInfo(nil), n.podsWithAffinity...)
     }
     if len(n.taints) > 0 {
         clone.taints = append([]v1.Taint(nil), n.taints...)
@@ -462,8 +491,8 @@ func (n *NodeInfo) VolumeLimits() map[v1.ResourceName]int64 {
 // String returns representation of human readable format of this NodeInfo.
 func (n *NodeInfo) String() string {
     podKeys := make([]string, len(n.pods))
-    for i, pod := range n.pods {
-        podKeys[i] = pod.Name
+    for i, p := range n.pods {
+        podKeys[i] = p.Pod.Name
     }
     return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v, UsedPort: %#v, AllocatableResource:%#v}",
         podKeys, n.requestedResource, n.nonzeroRequest, n.usedPorts, n.allocatableResource)
@@ -476,7 +505,9 @@ func hasPodAffinityConstraints(pod *v1.Pod) bool {

 // AddPod adds pod information to this NodeInfo.
 func (n *NodeInfo) AddPod(pod *v1.Pod) {
-    res, non0CPU, non0Mem := calculateResource(pod)
+    // TODO(#89528): AddPod should accept a PodInfo as an input argument.
+    podInfo := NewPodInfo(pod)
+    res, non0CPU, non0Mem := calculateResource(podInfo.Pod)
     n.requestedResource.MilliCPU += res.MilliCPU
     n.requestedResource.Memory += res.Memory
     n.requestedResource.EphemeralStorage += res.EphemeralStorage
@@ -488,13 +519,13 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
     }
     n.nonzeroRequest.MilliCPU += non0CPU
     n.nonzeroRequest.Memory += non0Mem
-    n.pods = append(n.pods, pod)
-    if hasPodAffinityConstraints(pod) {
-        n.podsWithAffinity = append(n.podsWithAffinity, pod)
+    n.pods = append(n.pods, podInfo)
+    if hasPodAffinityConstraints(podInfo.Pod) {
+        n.podsWithAffinity = append(n.podsWithAffinity, podInfo)
     }

     // Consume ports when pods added.
-    n.UpdateUsedPorts(pod, true)
+    n.UpdateUsedPorts(podInfo.Pod, true)

     n.generation = nextGeneration()
 }
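AddPod keeps its *v1.Pod parameter and wraps the argument via NewPodInfo internally (per the TODO referencing issue #89528 above), so existing callers do not change. A sketch of populating and inspecting a NodeInfo after this change, assuming the same framework import as in the test files below; the helper name is hypothetical and only loosely mirrors the simplified buildNodeInfo test helper further down in this diff:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// buildExpected builds a NodeInfo the way the updated tests do: SetNode plus
// one AddPod call per pod.
func buildExpected(node *v1.Node, pods []*v1.Pod) *framework.NodeInfo {
    ni := framework.NewNodeInfo()
    ni.SetNode(node)
    for _, pod := range pods {
        ni.AddPod(pod) // wraps pod into a PodInfo and updates requested resources and ports
    }
    return ni
}

func main() {
    ni := buildExpected(&v1.Node{}, []*v1.Pod{{}})
    fmt.Println(len(ni.Pods())) // 1
}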
@@ -507,7 +538,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
     }

     for i := range n.podsWithAffinity {
-        k2, err := GetPodKey(n.podsWithAffinity[i])
+        k2, err := GetPodKey(n.podsWithAffinity[i].Pod)
         if err != nil {
             klog.Errorf("Cannot get pod key, err: %v", err)
             continue
@@ -520,7 +551,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
         }
     }
     for i := range n.pods {
-        k2, err := GetPodKey(n.pods[i])
+        k2, err := GetPodKey(n.pods[i].Pod)
         if err != nil {
             klog.Errorf("Cannot get pod key, err: %v", err)
             continue
@@ -656,7 +687,7 @@ func (n *NodeInfo) FilterOutPods(pods []*v1.Pod) []*v1.Pod {
             continue
         }
         for _, np := range n.Pods() {
-            npodkey, _ := GetPodKey(np)
+            npodkey, _ := GetPodKey(np.Pod)
             if npodkey == podKey {
                 filtered = append(filtered, p)
                 break
@@ -675,21 +706,6 @@ func GetPodKey(pod *v1.Pod) (string, error) {
     return uid, nil
 }

-// Filter implements PodFilter interface. It returns false only if the pod node name
-// matches NodeInfo.node and the pod is not found in the pods list. Otherwise,
-// returns true.
-func (n *NodeInfo) Filter(pod *v1.Pod) bool {
-    if pod.Spec.NodeName != n.node.Name {
-        return true
-    }
-    for _, p := range n.pods {
-        if p.Name == pod.Name && p.Namespace == pod.Namespace {
-            return true
-        }
-    }
-    return false
-}
-
 // DefaultBindAllHostIP defines the default ip address used to bind to all host.
 const DefaultBindAllHostIP = "0.0.0.0"

@@ -309,8 +309,9 @@ func TestNewNodeInfo(t *testing.T) {
             },
         },
         imageStates: map[string]*ImageStateSummary{},
-        pods: []*v1.Pod{
+        pods: []*PodInfo{
             {
+                Pod: &v1.Pod{
                     ObjectMeta: metav1.ObjectMeta{
                         Namespace: "node_info_cache_test",
                         Name: "test-1",
@@ -337,7 +338,9 @@ func TestNewNodeInfo(t *testing.T) {
                         NodeName: nodeName,
                     },
                 },
+            },
             {
+                Pod: &v1.Pod{
                     ObjectMeta: metav1.ObjectMeta{
                         Namespace: "node_info_cache_test",
                         Name: "test-2",
@@ -365,6 +368,7 @@ func TestNewNodeInfo(t *testing.T) {
                     },
                 },
             },
+        },
     }

     gen := generation
@@ -398,8 +402,9 @@ func TestNodeInfoClone(t *testing.T) {
             },
         },
         imageStates: map[string]*ImageStateSummary{},
-        pods: []*v1.Pod{
+        pods: []*PodInfo{
             {
+                Pod: &v1.Pod{
                     ObjectMeta: metav1.ObjectMeta{
                         Namespace: "node_info_cache_test",
                         Name: "test-1",
@@ -426,7 +431,9 @@ func TestNodeInfoClone(t *testing.T) {
                         NodeName: nodeName,
                     },
                 },
+            },
             {
+                Pod: &v1.Pod{
                     ObjectMeta: metav1.ObjectMeta{
                         Namespace: "node_info_cache_test",
                         Name: "test-2",
@@ -455,6 +462,7 @@ func TestNodeInfoClone(t *testing.T) {
                     },
                 },
             },
+        },
         expected: &NodeInfo{
             requestedResource: &Resource{},
             nonzeroRequest: &Resource{},
@@ -468,8 +476,9 @@ func TestNodeInfoClone(t *testing.T) {
             },
         },
         imageStates: map[string]*ImageStateSummary{},
-        pods: []*v1.Pod{
+        pods: []*PodInfo{
             {
+                Pod: &v1.Pod{
                     ObjectMeta: metav1.ObjectMeta{
                         Namespace: "node_info_cache_test",
                         Name: "test-1",
@@ -496,7 +505,9 @@ func TestNodeInfoClone(t *testing.T) {
                         NodeName: nodeName,
                     },
                 },
+            },
             {
+                Pod: &v1.Pod{
                     ObjectMeta: metav1.ObjectMeta{
                         Namespace: "node_info_cache_test",
                         Name: "test-2",
@@ -526,6 +537,7 @@ func TestNodeInfoClone(t *testing.T) {
                     },
                 },
             },
+        },
     }

     for _, test := range tests {
@@ -633,8 +645,9 @@ func TestNodeInfoAddPod(t *testing.T) {
             },
         },
         imageStates: map[string]*ImageStateSummary{},
-        pods: []*v1.Pod{
+        pods: []*PodInfo{
             {
+                Pod: &v1.Pod{
                     ObjectMeta: metav1.ObjectMeta{
                         Namespace: "node_info_cache_test",
                         Name: "test-1",
@@ -664,7 +677,9 @@ func TestNodeInfoAddPod(t *testing.T) {
                     },
                 },
             },
+        },
             {
+                Pod: &v1.Pod{
                     ObjectMeta: metav1.ObjectMeta{
                         Namespace: "node_info_cache_test",
                         Name: "test-2",
@@ -695,6 +710,7 @@ func TestNodeInfoAddPod(t *testing.T) {
                     },
                 },
             },
+        },
     }

     ni := fakeNodeInfo()
@@ -717,6 +733,7 @@ func TestNodeInfoRemovePod(t *testing.T) {
     nodeName := "test-node"
     pods := []*v1.Pod{
         makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
+
         makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}),
     }

@@ -766,8 +783,9 @@ func TestNodeInfoRemovePod(t *testing.T) {
             },
         },
         imageStates: map[string]*ImageStateSummary{},
-        pods: []*v1.Pod{
+        pods: []*PodInfo{
             {
+                Pod: &v1.Pod{
                     ObjectMeta: metav1.ObjectMeta{
                         Namespace: "node_info_cache_test",
                         Name: "test-1",
@@ -798,7 +816,9 @@ func TestNodeInfoRemovePod(t *testing.T) {
                     },
                 },
             },
+        },
             {
+                Pod: &v1.Pod{
                     ObjectMeta: metav1.ObjectMeta{
                         Namespace: "node_info_cache_test",
                         Name: "test-2",
@@ -832,6 +852,7 @@ func TestNodeInfoRemovePod(t *testing.T) {
                     },
                 },
             },
+        },
         {
             pod: &v1.Pod{
                 ObjectMeta: metav1.ObjectMeta{
@@ -894,8 +915,9 @@ func TestNodeInfoRemovePod(t *testing.T) {
             },
         },
         imageStates: map[string]*ImageStateSummary{},
-        pods: []*v1.Pod{
+        pods: []*PodInfo{
             {
+                Pod: &v1.Pod{
                     ObjectMeta: metav1.ObjectMeta{
                         Namespace: "node_info_cache_test",
                         Name: "test-2",
@@ -929,6 +951,7 @@ func TestNodeInfoRemovePod(t *testing.T) {
                     },
                 },
             },
+        },
     }

     for _, test := range tests {

pkg/scheduler/internal/cache/cache.go (vendored, 13 lines changed)
@@ -314,12 +314,7 @@ func (cache *schedulerCache) removeDeletedNodesFromSnapshot(snapshot *Snapshot)
     }
 }

-func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) {
-    alwaysTrue := func(p *v1.Pod) bool { return true }
-    return cache.FilteredList(alwaysTrue, selector)
-}
-
-func (cache *schedulerCache) FilteredList(podFilter framework.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
+func (cache *schedulerCache) ListPods(selector labels.Selector) ([]*v1.Pod, error) {
     cache.mu.RLock()
     defer cache.mu.RUnlock()
     // podFilter is expected to return true for most or all of the pods. We
@@ -331,9 +326,9 @@ func (cache *schedulerCache) FilteredList(podFilter framework.PodFilter, selecto
     }
     pods := make([]*v1.Pod, 0, maxSize)
     for _, n := range cache.nodes {
-        for _, pod := range n.info.Pods() {
-            if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) {
-                pods = append(pods, pod)
+        for _, p := range n.info.Pods() {
+            if selector.Matches(labels.Set(p.Pod.Labels)) {
+                pods = append(pods, p.Pod)
             }
         }
     }
pkg/scheduler/internal/cache/cache_test.go (vendored, 29 lines changed)
@@ -927,42 +927,15 @@ func TestForgetPod(t *testing.T) {
     }
 }

-// getResourceRequest returns the resource request of all containers in Pods;
-// excluding initContainers.
-func getResourceRequest(pod *v1.Pod) v1.ResourceList {
-    result := &framework.Resource{}
-    for _, container := range pod.Spec.Containers {
-        result.Add(container.Resources.Requests)
-    }
-
-    return result.ResourceList()
-}
-
 // buildNodeInfo creates a NodeInfo by simulating node operations in cache.
 func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *framework.NodeInfo {
     expected := framework.NewNodeInfo()
-
-    // Simulate SetNode.
     expected.SetNode(node)
-
     expected.SetAllocatableResource(framework.NewResource(node.Status.Allocatable))
     expected.SetTaints(node.Spec.Taints)
     expected.SetGeneration(expected.GetGeneration() + 1)
-
     for _, pod := range pods {
-        // Simulate AddPod
-        pods := append(expected.Pods(), pod)
-        expected.SetPods(pods)
-        requestedResource := expected.RequestedResource()
-        newRequestedResource := &requestedResource
-        newRequestedResource.Add(getResourceRequest(pod))
-        expected.SetRequestedResource(newRequestedResource)
-        nonZeroRequest := expected.NonZeroRequest()
-        newNonZeroRequest := &nonZeroRequest
-        newNonZeroRequest.Add(getResourceRequest(pod))
-        expected.SetNonZeroRequest(newNonZeroRequest)
-        expected.UpdateUsedPorts(pod, true)
-        expected.SetGeneration(expected.GetGeneration() + 1)
+        expected.AddPod(pod)
     }

     return expected
@@ -91,8 +91,8 @@ func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[s

     cached := []string{}
     for _, nodeinfo := range nodeinfos {
-        for _, pod := range nodeinfo.Pods() {
-            cached = append(cached, string(pod.UID))
+        for _, p := range nodeinfo.Pods() {
+            cached = append(cached, string(p.Pod.UID))
         }
     }
     for _, pod := range waitingPods {
@@ -67,7 +67,7 @@ func (d *CacheDumper) printNodeInfo(n *framework.NodeInfo) string {
         n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods())))
     // Dumping Pod Info
     for _, p := range n.Pods() {
-        nodeData.WriteString(printPod(p))
+        nodeData.WriteString(printPod(p.Pod))
     }
     // Dumping nominated pods info on the node
     nominatedPods := d.podQueue.NominatedPodsForNode(n.Node().Name)

pkg/scheduler/internal/cache/fake/BUILD (vendored, 1 line changed)
@@ -6,7 +6,6 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake",
     visibility = ["//pkg/scheduler:__subpackages__"],
     deps = [
-        "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/scheduler/internal/cache:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",

pkg/scheduler/internal/cache/fake/fake_cache.go (vendored, 10 lines changed)
@@ -19,7 +19,6 @@ package fake
 import (
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/labels"
-    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

@@ -79,13 +78,8 @@ func (c *Cache) UpdateSnapshot(snapshot *internalcache.Snapshot) error {
     return nil
 }

-// List is a fake method for testing.
-func (c *Cache) List(s labels.Selector) ([]*v1.Pod, error) { return nil, nil }
-
-// FilteredList is a fake method for testing.
-func (c *Cache) FilteredList(filter framework.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
-    return nil, nil
-}
+// ListPods is a fake method for testing.
+func (c *Cache) ListPods(s labels.Selector) ([]*v1.Pod, error) { return nil, nil }

 // Dump is a fake method for testing.
 func (c *Cache) Dump() *internalcache.Dump {

pkg/scheduler/internal/cache/interface.go (vendored, 6 lines changed)
@@ -17,7 +17,8 @@ limitations under the License.
 package cache

 import (
-    v1 "k8s.io/api/core/v1"
+    "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/labels"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )

@@ -56,7 +57,8 @@ import (
 // - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue,
 //   a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache.
 type Cache interface {
-    framework.PodLister
+    // ListPods lists all pods in the cache.
+    ListPods(selector labels.Selector) ([]*v1.Pod, error)

     // AssumePod assumes a pod scheduled and aggregates the pod's information into its node.
     // The implementation also decides the policy to expire pod before being confirmed (receiving Add event).
pkg/scheduler/internal/cache/snapshot.go (vendored, 6 lines changed)
@@ -157,9 +157,9 @@ func (p podLister) FilteredList(filter framework.PodFilter, selector labels.Sele
     }
     pods := make([]*v1.Pod, 0, maxSize)
     for _, n := range p {
-        for _, pod := range n.Pods() {
-            if filter(pod) && selector.Matches(labels.Set(pod.Labels)) {
-                pods = append(pods, pod)
+        for _, p := range n.Pods() {
+            if filter(p.Pod) && selector.Matches(labels.Set(p.Pod.Labels)) {
+                pods = append(pods, p.Pod)
             }
         }
     }
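The snapshot's FilteredList keeps its signature; only the iteration inside changed to dereference p.Pod. A sketch of a caller, assuming a framework.PodLister in hand (for example, one exposed by a scheduler snapshot) and the framework.PodFilter shape implied by the always-true filter removed from cache.go above; the function and filter names are illustrative:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/labels"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// listRunning returns the listed pods that are not being deleted.
func listRunning(lister framework.PodLister) ([]*v1.Pod, error) {
    notTerminating := func(pod *v1.Pod) bool { return pod.DeletionTimestamp == nil }
    return lister.FilteredList(notTerminating, labels.Everything())
}

func main() {
    fmt.Println("wire listRunning to a framework.PodLister, e.g. a scheduler snapshot")
}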
@@ -555,7 +555,7 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
             return
         default:
         }
-        pods, err := scache.List(labels.Everything())
+        pods, err := scache.ListPods(labels.Everything())
         if err != nil {
             errChan <- fmt.Errorf("cache.List failed: %v", err)
             return
@@ -424,7 +424,7 @@ func waitForPDBsStable(testCtx *testutils.TestContext, pdbs []*policy.PodDisrupt
 // waitCachedPodsStable waits until scheduler cache has the given pods.
 func waitCachedPodsStable(testCtx *testutils.TestContext, pods []*v1.Pod) error {
     return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
-        cachedPods, err := testCtx.Scheduler.SchedulerCache.List(labels.Everything())
+        cachedPods, err := testCtx.Scheduler.SchedulerCache.ListPods(labels.Everything())
         if err != nil {
             return false, err
         }