rename framework pkg to schedulerframework for all instances under pkg/kubelet

Author: Abdullah Gharaibeh
Date:   2020-04-14 14:24:07 -04:00
Parent: bed9b2f23b
Commit: d6522e0e74
11 changed files with 36 additions and 36 deletions

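The change is mechanical: every file under pkg/kubelet that imported the scheduler framework under the bare alias framework now imports it as schedulerframework, and all references are updated to match, making the cross-component dependency on the scheduler visible at every call site. A minimal before/after sketch of the pattern (the surrounding function is hypothetical):

    // Before: inside kubelet code, the bare alias reads as if the
    // framework were a kubelet-local package.
    import (
        framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    )

    func admit(nodeInfo *framework.NodeInfo) error { return nil }

    // After: the alias names the owning component at every reference.
    import (
        schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    )

    func admit(nodeInfo *schedulerframework.NodeInfo) error { return nil }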
File: pkg/kubelet/cm/container_manager.go

@@ -31,7 +31,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/status"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"fmt"
"strconv"
@@ -91,7 +91,7 @@ type ContainerManager interface {
// Otherwise, it updates allocatableResource in nodeInfo if necessary,
// to make sure it is at least equal to the pod's requested capacity for
// any registered device plugin resource
-UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error
+UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error
InternalContainerLifecycle() InternalContainerLifecycle

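The contract documented above is exercised from the kubelet's pod-admission path (see the lifecycle/predicate.go hunks further down); a condensed caller-side sketch, with hypothetical wiring and variable names:

    // Build scheduler-style node state from the pods already admitted,
    // then let device plugins adjust allocatable resources before the
    // admission predicates run.
    nodeInfo := schedulerframework.NewNodeInfo(otherPods...)
    nodeInfo.SetNode(node)
    attrs := &lifecycle.PodAdmitAttributes{Pod: pod, OtherPods: otherPods}
    if err := containerManager.UpdatePluginResources(nodeInfo, attrs); err != nil {
        return err // the pod cannot be admitted with current plugin resources
    }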
File: pkg/kubelet/cm/container_manager_linux.go

@@ -62,7 +62,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
"k8s.io/kubernetes/pkg/kubelet/status"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
@@ -675,7 +675,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe
return opts, nil
}
-func (cm *containerManagerImpl) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
return cm.deviceManager.UpdatePluginResources(node, attrs)
}

File: pkg/kubelet/cm/container_manager_stub.go

@@ -30,7 +30,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/status"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
type containerManagerStub struct {
@@ -97,7 +97,7 @@ func (cm *containerManagerStub) GetResources(pod *v1.Pod, container *v1.Containe
return &kubecontainer.RunContainerOptions{}, nil
}
-func (cm *containerManagerStub) UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerStub) UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
return nil
}

File: pkg/kubelet/cm/container_manager_windows.go

@@ -42,7 +42,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/status"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
type containerManagerImpl struct {
@@ -165,7 +165,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe
return &kubecontainer.RunContainerOptions{}, nil
}
-func (cm *containerManagerImpl) UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerImpl) UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
return nil
}

File: pkg/kubelet/cm/devicemanager/manager.go

@@ -48,7 +48,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/util/selinux"
)
@@ -391,7 +391,7 @@ func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error {
}
// UpdatePluginResources updates node resources based on devices already allocated to pods.
-func (m *ManagerImpl) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (m *ManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
pod := attrs.Pod
m.mutex.Lock()
@@ -924,8 +924,8 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s
// and if necessary, updates allocatableResource in nodeInfo to at least equal to
// the allocated capacity. This allows pods that have already been scheduled on
// the node to pass GeneralPredicates admission checking even upon device plugin failure.
-func (m *ManagerImpl) sanitizeNodeAllocatable(node *framework.NodeInfo) {
-var newAllocatableResource *framework.Resource
+func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulerframework.NodeInfo) {
+var newAllocatableResource *schedulerframework.Resource
allocatableResource := node.Allocatable
if allocatableResource.ScalarResources == nil {
allocatableResource.ScalarResources = make(map[v1.ResourceName]int64)

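The hunk above truncates sanitizeNodeAllocatable. A plausible continuation consistent with its doc comment; the m.allocatedDevices bookkeeping field is an assumption, not shown in this diff:

    // For every device-plugin resource with outstanding allocations, raise
    // the node's allocatable to at least the amount already handed out, so
    // already-scheduled pods keep passing GeneralPredicates.
    for resource, devices := range m.allocatedDevices { // assumed field name
        needed := devices.Len()
        quant, ok := allocatableResource.ScalarResources[v1.ResourceName(resource)]
        if ok && int(quant) >= needed {
            continue
        }
        // Copy-on-write: only clone the Resource once an update is needed.
        if newAllocatableResource == nil {
            newAllocatableResource = allocatableResource.Clone()
        }
        newAllocatableResource.ScalarResources[v1.ResourceName(resource)] = int64(needed)
    }
    if newAllocatableResource != nil {
        node.Allocatable = newAllocatableResource
    }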
File: pkg/kubelet/cm/devicemanager/manager_stub.go

@@ -23,7 +23,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// ManagerStub provides a simple stub implementation for the Device Manager.
@@ -50,7 +50,7 @@ func (h *ManagerStub) Allocate(pod *v1.Pod, container *v1.Container) error {
}
// UpdatePluginResources simply returns nil.
-func (h *ManagerStub) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (h *ManagerStub) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
return nil
}

File: pkg/kubelet/cm/devicemanager/manager_test.go

@@ -42,7 +42,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
const (
@@ -889,7 +889,7 @@ func TestUpdatePluginResources(t *testing.T) {
},
},
}
-nodeInfo := &framework.NodeInfo{}
+nodeInfo := &schedulerframework.NodeInfo{}
nodeInfo.SetNode(cachedNode)
testManager.UpdatePluginResources(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod})

File: pkg/kubelet/cm/devicemanager/types.go

@@ -26,7 +26,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// Manager manages all the Device Plugins running on a node.
@@ -44,7 +44,7 @@ type Manager interface {
// UpdatePluginResources updates node resources based on devices already
// allocated to pods. The node object is provided for the device manager to
// update the node capacity to reflect the currently available devices.
-UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error
+UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error
// Stop stops the manager.
Stop() error

File: pkg/kubelet/kubelet_test.go

@@ -67,7 +67,7 @@ import (
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/awsebs"
"k8s.io/kubernetes/pkg/volume/azure_dd"
@@ -658,7 +658,7 @@ func TestHandlePluginResources(t *testing.T) {
}
kl.nodeLister = testNodeLister{nodes: nodes}
-updatePluginResourcesFunc := func(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+updatePluginResourcesFunc := func(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
// Maps from resourceName to the value we use to set node.allocatableResource[resourceName].
// A resource with invalid value (< 0) causes the function to return an error
// to emulate resource Allocation failure.

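The fake's body is truncated above; a sketch of how it might continue under that comment, with the adjustment map as a hypothetical stand-in for the test's table:

    // adjustment is assumed to be a map[v1.ResourceName]int64 holding the
    // values the test wants written into node.Allocatable (hypothetical name).
    if node.Allocatable.ScalarResources == nil {
        node.Allocatable.ScalarResources = make(map[v1.ResourceName]int64)
    }
    for name, value := range adjustment {
        if value < 0 {
            return fmt.Errorf("allocation failed for resource %q", name)
        }
        node.Allocatable.ScalarResources[name] = value
    }
    return nil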
File: pkg/kubelet/lifecycle/predicate.go

@@ -28,12 +28,12 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
type getNodeAnyWayFuncType func() (*v1.Node, error)
-type pluginResourceUpdateFuncType func(*framework.NodeInfo, *PodAdmitAttributes) error
+type pluginResourceUpdateFuncType func(*schedulerframework.NodeInfo, *PodAdmitAttributes) error
// AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod.
// This allows for the graceful handling of pod admission failure.
@@ -69,7 +69,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
}
admitPod := attrs.Pod
pods := attrs.OtherPods
-nodeInfo := framework.NewNodeInfo(pods...)
+nodeInfo := schedulerframework.NewNodeInfo(pods...)
nodeInfo.SetNode(node)
// ensure the node has enough plugin resources for that required in pods
if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil {
@@ -155,7 +155,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
}
}
-func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *framework.NodeInfo) *v1.Pod {
+func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) *v1.Pod {
podCopy := pod.DeepCopy()
for i, c := range pod.Spec.Containers {
// We only handle requests in Requests but not Limits because the
@@ -219,7 +219,7 @@ func (e *PredicateFailureError) GetReason() string {
}
// GeneralPredicates checks a group of predicates that the kubelet cares about.
-func GeneralPredicates(pod *v1.Pod, nodeInfo *framework.NodeInfo) ([]PredicateFailureReason, error) {
+func GeneralPredicates(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) ([]PredicateFailureReason, error) {
if nodeInfo.Node() == nil {
return nil, fmt.Errorf("node not found")
}

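For orientation, a caller-side sketch of the renamed GeneralPredicates API (pod, node, and otherPods are assumed to exist; klog is used for illustration):

    // Assemble node state the way predicateAdmitHandler.Admit does, then
    // surface each admission failure reason.
    nodeInfo := schedulerframework.NewNodeInfo(otherPods...)
    nodeInfo.SetNode(node)
    reasons, err := GeneralPredicates(pod, nodeInfo)
    if err != nil {
        return err
    }
    for _, r := range reasons {
        // Each PredicateFailureReason explains one reason the kubelet
        // would reject the pod.
        klog.Warningf("pod %s not admitted: %s", pod.Name, r.GetReason())
    }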
File: pkg/kubelet/lifecycle/predicate_test.go

@@ -26,7 +26,7 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
var (
@@ -83,7 +83,7 @@ func TestRemoveMissingExtendedResources(t *testing.T) {
),
},
} {
-nodeInfo := framework.NewNodeInfo()
+nodeInfo := schedulerframework.NewNodeInfo()
nodeInfo.SetNode(test.node)
pod := removeMissingExtendedResources(test.pod, nodeInfo)
if !reflect.DeepEqual(pod, test.expectedPod) {
@@ -144,7 +144,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
}
}
-func newResourcePod(usage ...framework.Resource) *v1.Pod {
+func newResourcePod(usage ...schedulerframework.Resource) *v1.Pod {
containers := []v1.Container{}
for _, req := range usage {
containers = append(containers, v1.Container{
@@ -177,7 +177,7 @@ func newPodWithPort(hostPorts ...int) *v1.Pod {
func TestGeneralPredicates(t *testing.T) {
resourceTests := []struct {
pod *v1.Pod
-nodeInfo *framework.NodeInfo
+nodeInfo *schedulerframework.NodeInfo
node *v1.Node
fits bool
name string
@@ -186,8 +186,8 @@ func TestGeneralPredicates(t *testing.T) {
}{
{
pod: &v1.Pod{},
-nodeInfo: framework.NewNodeInfo(
-newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
+nodeInfo: schedulerframework.NewNodeInfo(
+newResourcePod(schedulerframework.Resource{MilliCPU: 9, Memory: 19})),
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -197,9 +197,9 @@ func TestGeneralPredicates(t *testing.T) {
name: "no resources/port/host requested always fits",
},
{
-pod: newResourcePod(framework.Resource{MilliCPU: 8, Memory: 10}),
-nodeInfo: framework.NewNodeInfo(
-newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
+pod: newResourcePod(schedulerframework.Resource{MilliCPU: 8, Memory: 10}),
+nodeInfo: schedulerframework.NewNodeInfo(
+newResourcePod(schedulerframework.Resource{MilliCPU: 5, Memory: 19})),
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -218,7 +218,7 @@ func TestGeneralPredicates(t *testing.T) {
NodeName: "machine2",
},
},
-nodeInfo: framework.NewNodeInfo(),
+nodeInfo: schedulerframework.NewNodeInfo(),
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -230,7 +230,7 @@ func TestGeneralPredicates(t *testing.T) {
},
{
pod: newPodWithPort(123),
-nodeInfo: framework.NewNodeInfo(newPodWithPort(123)),
+nodeInfo: schedulerframework.NewNodeInfo(newPodWithPort(123)),
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},