rename framework pkg to schedulerframework for all instances under pkg/kubelet

Abdullah Gharaibeh 2020-04-14 14:24:07 -04:00
parent bed9b2f23b
commit d6522e0e74
11 changed files with 36 additions and 36 deletions
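The change itself is mechanical: in every affected file the import alias for the scheduler framework package changes from framework to schedulerframework, and every identifier qualified with the old alias is updated to match. A minimal sketch of the pattern, using a trimmed, illustrative interface fragment rather than a full file from this commit:

// Before (old alias):
//   import framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
//   UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error
//
// After (new alias), as applied throughout pkg/kubelet:
package example

import (
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// deviceAwareManager is an illustrative fragment showing the renamed
// qualifier in a method signature; it is not a type from this commit.
type deviceAwareManager interface {
	UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error
}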


@@ -31,7 +31,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
 "k8s.io/kubernetes/pkg/kubelet/status"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 "fmt"
 "strconv"
@@ -91,7 +91,7 @@ type ContainerManager interface {
 // Otherwise, it updates allocatableResource in nodeInfo if necessary,
 // to make sure it is at least equal to the pod's requested capacity for
 // any registered device plugin resource
-UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error
+UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error
 InternalContainerLifecycle() InternalContainerLifecycle


@@ -62,7 +62,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/qos"
 "k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
 "k8s.io/kubernetes/pkg/kubelet/status"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 "k8s.io/kubernetes/pkg/util/oom"
 "k8s.io/kubernetes/pkg/util/procfs"
 utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
@@ -675,7 +675,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe
 return opts, nil
 }
-func (cm *containerManagerImpl) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
 return cm.deviceManager.UpdatePluginResources(node, attrs)
 }


@@ -30,7 +30,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
 "k8s.io/kubernetes/pkg/kubelet/status"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 type containerManagerStub struct {
@@ -97,7 +97,7 @@ func (cm *containerManagerStub) GetResources(pod *v1.Pod, container *v1.Containe
 return &kubecontainer.RunContainerOptions{}, nil
 }
-func (cm *containerManagerStub) UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerStub) UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
 return nil
 }


@@ -42,7 +42,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
 "k8s.io/kubernetes/pkg/kubelet/status"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 type containerManagerImpl struct {
@@ -165,7 +165,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe
 return &kubecontainer.RunContainerOptions{}, nil
 }
-func (cm *containerManagerImpl) UpdatePluginResources(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerImpl) UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
 return nil
 }


@@ -48,7 +48,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 "k8s.io/kubernetes/pkg/kubelet/metrics"
 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 "k8s.io/kubernetes/pkg/util/selinux"
 )
@@ -391,7 +391,7 @@ func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error {
 }
 // UpdatePluginResources updates node resources based on devices already allocated to pods.
-func (m *ManagerImpl) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (m *ManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
 pod := attrs.Pod
 m.mutex.Lock()
@@ -924,8 +924,8 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s
 // and if necessary, updates allocatableResource in nodeInfo to at least equal to
 // the allocated capacity. This allows pods that have already been scheduled on
 // the node to pass GeneralPredicates admission checking even upon device plugin failure.
-func (m *ManagerImpl) sanitizeNodeAllocatable(node *framework.NodeInfo) {
-var newAllocatableResource *framework.Resource
+func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulerframework.NodeInfo) {
+var newAllocatableResource *schedulerframework.Resource
 allocatableResource := node.Allocatable
 if allocatableResource.ScalarResources == nil {
 allocatableResource.ScalarResources = make(map[v1.ResourceName]int64)
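For context on the hunk above: node.Allocatable (a *schedulerframework.Resource) exposes a ScalarResources map that carries extended resources such as device-plugin resources, and sanitizeNodeAllocatable raises those entries to at least the amount already allocated. A rough sketch of that idea, using a hypothetical helper rather than the kubelet's actual implementation:

package example

import (
	v1 "k8s.io/api/core/v1"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// ensureAllocatableAtLeast is a hypothetical helper: it bumps the node's
// allocatable count for a scalar resource to at least the allocated amount,
// so pods that already hold devices keep passing admission. It assumes
// node.Allocatable is non-nil, as it is for a NodeInfo built via NewNodeInfo.
func ensureAllocatableAtLeast(node *schedulerframework.NodeInfo, name v1.ResourceName, allocated int64) {
	allocatable := node.Allocatable
	if allocatable.ScalarResources == nil {
		allocatable.ScalarResources = make(map[v1.ResourceName]int64)
	}
	if allocatable.ScalarResources[name] < allocated {
		allocatable.ScalarResources[name] = allocated
	}
}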


@@ -23,7 +23,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/config"
 "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 // ManagerStub provides a simple stub implementation for the Device Manager.
@@ -50,7 +50,7 @@ func (h *ManagerStub) Allocate(pod *v1.Pod, container *v1.Container) error {
 }
 // UpdatePluginResources simply returns nil.
-func (h *ManagerStub) UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (h *ManagerStub) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
 return nil
 }


@@ -42,7 +42,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/config"
 "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 "k8s.io/kubernetes/pkg/kubelet/pluginmanager"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 const (
@@ -889,7 +889,7 @@ func TestUpdatePluginResources(t *testing.T) {
 },
 },
 }
-nodeInfo := &framework.NodeInfo{}
+nodeInfo := &schedulerframework.NodeInfo{}
 nodeInfo.SetNode(cachedNode)
 testManager.UpdatePluginResources(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod})


@@ -26,7 +26,7 @@ import (
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 // Manager manages all the Device Plugins running on a node.
@@ -44,7 +44,7 @@ type Manager interface {
 // UpdatePluginResources updates node resources based on devices already
 // allocated to pods. The node object is provided for the device manager to
 // update the node capacity to reflect the currently available devices.
-UpdatePluginResources(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error
+UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error
 // Stop stops the manager.
 Stop() error


@@ -67,7 +67,7 @@ import (
 kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/kubelet/util/queue"
 kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 "k8s.io/kubernetes/pkg/volume"
 "k8s.io/kubernetes/pkg/volume/awsebs"
 "k8s.io/kubernetes/pkg/volume/azure_dd"
@@ -658,7 +658,7 @@ func TestHandlePluginResources(t *testing.T) {
 }
 kl.nodeLister = testNodeLister{nodes: nodes}
-updatePluginResourcesFunc := func(node *framework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+updatePluginResourcesFunc := func(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
 // Maps from resourceName to the value we use to set node.allocatableResource[resourceName].
 // A resource with invalid value (< 0) causes the function to return an error
 // to emulate resource Allocation failure.


@@ -28,12 +28,12 @@ import (
 "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
 "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
 "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 type getNodeAnyWayFuncType func() (*v1.Node, error)
-type pluginResourceUpdateFuncType func(*framework.NodeInfo, *PodAdmitAttributes) error
+type pluginResourceUpdateFuncType func(*schedulerframework.NodeInfo, *PodAdmitAttributes) error
 // AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod.
 // This allows for the graceful handling of pod admission failure.
@@ -69,7 +69,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
 }
 admitPod := attrs.Pod
 pods := attrs.OtherPods
-nodeInfo := framework.NewNodeInfo(pods...)
+nodeInfo := schedulerframework.NewNodeInfo(pods...)
 nodeInfo.SetNode(node)
 // ensure the node has enough plugin resources for that required in pods
 if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil {
@@ -155,7 +155,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
 }
 }
-func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *framework.NodeInfo) *v1.Pod {
+func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) *v1.Pod {
 podCopy := pod.DeepCopy()
 for i, c := range pod.Spec.Containers {
 // We only handle requests in Requests but not Limits because the
@@ -219,7 +219,7 @@ func (e *PredicateFailureError) GetReason() string {
 }
 // GeneralPredicates checks a group of predicates that the kubelet cares about.
-func GeneralPredicates(pod *v1.Pod, nodeInfo *framework.NodeInfo) ([]PredicateFailureReason, error) {
+func GeneralPredicates(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) ([]PredicateFailureReason, error) {
 if nodeInfo.Node() == nil {
 return nil, fmt.Errorf("node not found")
 }
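The admission path shown above builds a NodeInfo for the node's existing pods, lets the plugin resource updater adjust it, and then runs GeneralPredicates. A condensed, hypothetical usage sketch of those renamed entry points (error handling trimmed; this is not the actual predicateAdmitHandler):

package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// checkAdmission is an illustrative wrapper around GeneralPredicates; it is
// not code from this commit.
func checkAdmission(node *v1.Node, pod *v1.Pod, otherPods []*v1.Pod) error {
	// Build a NodeInfo from the pods already running on the node.
	nodeInfo := schedulerframework.NewNodeInfo(otherPods...)
	nodeInfo.SetNode(node)

	// Run the kubelet's general admission predicates against it.
	reasons, err := lifecycle.GeneralPredicates(pod, nodeInfo)
	if err != nil {
		return err
	}
	if len(reasons) > 0 {
		return fmt.Errorf("pod %s does not fit on node %s: %v", pod.Name, node.Name, reasons)
	}
	return nil
}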


@@ -26,7 +26,7 @@ import (
 v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
 "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
-framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 var (
@@ -83,7 +83,7 @@ func TestRemoveMissingExtendedResources(t *testing.T) {
 ),
 },
 } {
-nodeInfo := framework.NewNodeInfo()
+nodeInfo := schedulerframework.NewNodeInfo()
 nodeInfo.SetNode(test.node)
 pod := removeMissingExtendedResources(test.pod, nodeInfo)
 if !reflect.DeepEqual(pod, test.expectedPod) {
@@ -144,7 +144,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
 }
 }
-func newResourcePod(usage ...framework.Resource) *v1.Pod {
+func newResourcePod(usage ...schedulerframework.Resource) *v1.Pod {
 containers := []v1.Container{}
 for _, req := range usage {
 containers = append(containers, v1.Container{
@@ -177,7 +177,7 @@ func newPodWithPort(hostPorts ...int) *v1.Pod {
 func TestGeneralPredicates(t *testing.T) {
 resourceTests := []struct {
 pod *v1.Pod
-nodeInfo *framework.NodeInfo
+nodeInfo *schedulerframework.NodeInfo
 node *v1.Node
 fits bool
 name string
@@ -186,8 +186,8 @@ func TestGeneralPredicates(t *testing.T) {
 }{
 {
 pod: &v1.Pod{},
-nodeInfo: framework.NewNodeInfo(
-newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
+nodeInfo: schedulerframework.NewNodeInfo(
+newResourcePod(schedulerframework.Resource{MilliCPU: 9, Memory: 19})),
 node: &v1.Node{
 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
 Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -197,9 +197,9 @@ func TestGeneralPredicates(t *testing.T) {
 name: "no resources/port/host requested always fits",
 },
 {
-pod: newResourcePod(framework.Resource{MilliCPU: 8, Memory: 10}),
-nodeInfo: framework.NewNodeInfo(
-newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
+pod: newResourcePod(schedulerframework.Resource{MilliCPU: 8, Memory: 10}),
+nodeInfo: schedulerframework.NewNodeInfo(
+newResourcePod(schedulerframework.Resource{MilliCPU: 5, Memory: 19})),
 node: &v1.Node{
 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
 Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -218,7 +218,7 @@ func TestGeneralPredicates(t *testing.T) {
 NodeName: "machine2",
 },
 },
-nodeInfo: framework.NewNodeInfo(),
+nodeInfo: schedulerframework.NewNodeInfo(),
 node: &v1.Node{
 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
 Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -230,7 +230,7 @@ func TestGeneralPredicates(t *testing.T) {
 },
 {
 pod: newPodWithPort(123),
-nodeInfo: framework.NewNodeInfo(newPodWithPort(123)),
+nodeInfo: schedulerframework.NewNodeInfo(newPodWithPort(123)),
 node: &v1.Node{
 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
 Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},