Move Snapshot from nodeinfo/snapshot to internal/cache
Signed-off-by: Aldo Culquicondor <acondor@google.com>
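A minimal usage sketch of the change (not part of the commit): cache.NewSnapshot builds the NodeInfo map from the given pods and nodes internally, replacing the two-step nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(...)) pattern. The package clause and helper name below are hypothetical; the call signatures are copied from the hunks that follow.

package noderesources_test // hypothetical package name, for illustration only

import (
	v1 "k8s.io/api/core/v1"

	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

// newFrameworkWithSnapshot is a hypothetical helper mirroring the pattern the
// tests below switch to: build the snapshot with cache.NewSnapshot and hand it
// to the framework as its shared lister.
func newFrameworkWithSnapshot(pods []*v1.Pod, nodes []*v1.Node) (framework.Framework, error) {
	// Old: nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, nodes))
	// New: one call; the NodeInfo map is created inside NewSnapshot.
	snapshot := cache.NewSnapshot(pods, nodes)
	return framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
}
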
@@ -60,8 +60,8 @@ go_test(
         "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -28,7 +28,7 @@ import (
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/features"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
 
 // getExistingVolumeCountForNode gets the current number of volumes on node.
@@ -379,7 +379,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+			snapshot := cache.NewSnapshot(test.pods, test.nodes)
 			if len(test.pod.Spec.Volumes) > 0 {
 				maxVolumes := 5
 				nodeInfoList, _ := snapshot.NodeInfos().List()
@@ -25,7 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
 
 func TestNodeResourcesLeastAllocated(t *testing.T) {
@@ -233,7 +233,7 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+			snapshot := cache.NewSnapshot(test.pods, test.nodes)
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			p, _ := NewLeastAllocated(nil, fh)
 			for i := range test.nodes {
@@ -25,7 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
 
 func TestNodeResourcesMostAllocated(t *testing.T) {
@@ -196,7 +196,7 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+			snapshot := cache.NewSnapshot(test.pods, test.nodes)
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			p, _ := NewMostAllocated(nil, fh)
 			for i := range test.nodes {
@@ -26,7 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
 
 func TestRequestedToCapacityRatio(t *testing.T) {
@@ -65,7 +65,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			state := framework.NewCycleState()
-			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.scheduledPods, test.nodes))
+			snapshot := cache.NewSnapshot(test.scheduledPods, test.nodes)
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 10}, {"utilization" : 100, "score" : 0}], "resources" : [{"name" : "memory", "weight" : 1}, {"name" : "cpu", "weight" : 1}]}`)}
 			p, err := NewRequestedToCapacityRatio(args, fh)
@@ -349,7 +349,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			state := framework.NewCycleState()
-			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+			snapshot := cache.NewSnapshot(test.pods, test.nodes)
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 0}, {"utilization" : 100, "score" : 1}], "resources" : [{"name" : "intel.com/foo", "weight" : 1}]}`)}
 			p, err := NewRequestedToCapacityRatio(args, fh)
@@ -584,7 +584,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			state := framework.NewCycleState()
-			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+			snapshot := cache.NewSnapshot(test.pods, test.nodes)
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 0}, {"utilization" : 100, "score" : 1}], "resources" : [{"name" : "intel.com/foo", "weight" : 3}, {"name" : "intel.com/bar", "weight": 5}]}`)}
 			p, err := NewRequestedToCapacityRatio(args, fh)
@@ -23,7 +23,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
 
 func TestResourceLimits(t *testing.T) {
@@ -146,7 +146,7 @@ func TestResourceLimits(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
+			snapshot := cache.NewSnapshot(nil, test.nodes)
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			p := &ResourceLimits{handle: fh}
 			for i := range test.nodes {