memory manager: provide the new type to contain resources for each NUMA node

Signed-off-by: Artyom Lukianov <alukiano@redhat.com>
Artyom Lukianov 2020-11-12 13:58:12 +02:00
parent 74eeef2a0a
commit ff2a110920
8 changed files with 41 additions and 33 deletions
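The diff below replaces the verbose map[int]map[v1.ResourceName]resource.Quantity signatures with a named alias, kubetypes.NUMANodeResources, defined in k8s.io/kubernetes/pkg/kubelet/types. As a minimal sketch of the shape the new type describes (the quantities are illustrative, not taken from the commit):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

func main() {
	// Reserved memory keyed by NUMA node index, then by resource name.
	reserved := kubetypes.NUMANodeResources{
		0: {
			v1.ResourceMemory:                *resource.NewQuantity(1<<30, resource.BinarySI), // 1Gi on node 0
			v1.ResourceName("hugepages-2Mi"): *resource.NewQuantity(2<<20, resource.BinarySI), // illustrative hugepages reservation
		},
		1: {
			v1.ResourceMemory: *resource.NewQuantity(1<<30, resource.BinarySI), // 1Gi on node 1
		},
	}
	fmt.Println(reserved)
}

This is the same shape that NodeConfig.ExperimentalMemoryManagerReservedMemory and memorymanager.NewManager accept in the hunks that follow.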

View File

@@ -1305,7 +1305,7 @@ func parseResourceList(m map[string]string) (v1.ResourceList, error) {
 return rl, nil
 }
-func parseReservedMemoryConfig(config []map[string]string) (map[int]map[v1.ResourceName]resource.Quantity, error) {
+func parseReservedMemoryConfig(config []map[string]string) (kubetypes.NUMANodeResources, error) {
 if len(config) == 0 {
 return nil, nil
 }
@@ -1327,7 +1327,7 @@ func parseReservedMemoryConfig(config []map[string]string) (map[int]map[v1.Resou
 }
 }
-parsed := make(map[int]map[v1.ResourceName]resource.Quantity, len(config))
+parsed := make(kubetypes.NUMANodeResources, len(config))
 for _, m := range config {
 idxInString, _ := m[indexKey]
 idx, err := strconv.Atoi(idxInString)
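For orientation, here is a simplified, standalone sketch of the parsing that this hunk retypes: each entry of the reserved-memory configuration arrives as a map[string]string carrying a NUMA-node index plus resource quantities, and the entries are folded into a kubetypes.NUMANodeResources. The "numa-node" key name and the parseReserved helper below are hypothetical stand-ins, not the kubelet's actual option code.

package example

import (
	"strconv"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// parseReserved is a hypothetical, simplified stand-in for parseReservedMemoryConfig above.
func parseReserved(config []map[string]string) (kubetypes.NUMANodeResources, error) {
	if len(config) == 0 {
		return nil, nil
	}
	const indexKey = "numa-node" // assumed key name for the NUMA node index
	parsed := make(kubetypes.NUMANodeResources, len(config))
	for _, m := range config {
		idx, err := strconv.Atoi(m[indexKey])
		if err != nil {
			return nil, err
		}
		parsed[idx] = map[v1.ResourceName]resource.Quantity{}
		for name, value := range m {
			if name == indexKey {
				continue
			}
			q, err := resource.ParseQuantity(value)
			if err != nil {
				return nil, err
			}
			parsed[idx][v1.ResourceName(name)] = q
		}
	}
	return parsed, nil
}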

View File

@@ -41,6 +41,7 @@ go_library(
 "//pkg/kubelet/lifecycle:go_default_library",
 "//pkg/kubelet/pluginmanager/cache:go_default_library",
 "//pkg/kubelet/status:go_default_library",
+"//pkg/kubelet/types:go_default_library",
 "//pkg/scheduler/framework:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -69,7 +70,6 @@ go_library(
 "//pkg/kubelet/metrics:go_default_library",
 "//pkg/kubelet/qos:go_default_library",
 "//pkg/kubelet/stats/pidlimit:go_default_library",
-"//pkg/kubelet/types:go_default_library",
 "//pkg/util/oom:go_default_library",
 "//pkg/util/procfs:go_default_library",
 "//pkg/util/sysctl:go_default_library",
@@ -130,7 +130,6 @@ go_library(
 "//pkg/kubelet/metrics:go_default_library",
 "//pkg/kubelet/qos:go_default_library",
 "//pkg/kubelet/stats/pidlimit:go_default_library",
-"//pkg/kubelet/types:go_default_library",
 "//pkg/util/oom:go_default_library",
 "//pkg/util/procfs:go_default_library",
 "//pkg/util/sysctl:go_default_library",

View File

@@ -19,7 +19,6 @@ package cm
 import (
 "time"
-"k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/apimachinery/pkg/util/sets"
 // TODO: Migrate kubelet to either use its own internal objects or client library.
 v1 "k8s.io/api/core/v1"
@@ -32,6 +31,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
 "k8s.io/kubernetes/pkg/kubelet/status"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 "fmt"
@@ -137,7 +137,7 @@ type NodeConfig struct {
 ExperimentalTopologyManagerScope string
 ExperimentalCPUManagerReconcilePeriod time.Duration
 ExperimentalMemoryManagerPolicy string
-ExperimentalMemoryManagerReservedMemory map[int]map[v1.ResourceName]resource.Quantity
+ExperimentalMemoryManagerReservedMemory kubetypes.NUMANodeResources
 ExperimentalPodPidsLimit int64
 EnforceCPULimits bool
 CPUCFSQuotaPeriod time.Duration

View File

@@ -20,6 +20,7 @@ go_library(
 "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library",
 "//pkg/kubelet/config:go_default_library",
 "//pkg/kubelet/status:go_default_library",
+"//pkg/kubelet/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
 "//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library",
@@ -40,6 +41,7 @@ go_test(
 "//pkg/kubelet/cm/memorymanager/state:go_default_library",
 "//pkg/kubelet/cm/topologymanager:go_default_library",
 "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library",
+"//pkg/kubelet/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -34,6 +34,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 "k8s.io/kubernetes/pkg/kubelet/config"
 "k8s.io/kubernetes/pkg/kubelet/status"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
 // memoryManagerStateFileName is the file name where memory manager stores its state
@@ -118,7 +119,7 @@ type manager struct {
 var _ Manager = &manager{}
 // NewManager returns new instance of the memory manager
-func NewManager(policyName string, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory map[int]map[v1.ResourceName]resource.Quantity, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
+func NewManager(policyName string, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory kubetypes.NUMANodeResources, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
 var policy Policy
 switch policyType(policyName) {
@@ -320,7 +321,7 @@ func (m *manager) policyRemoveContainerByRef(podUID string, containerName string
 return err
 }
-func getTotalMemoryTypeReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory map[int]map[v1.ResourceName]resource.Quantity) map[v1.ResourceName]resource.Quantity {
+func getTotalMemoryTypeReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory kubetypes.NUMANodeResources) map[v1.ResourceName]resource.Quantity {
 totalMemoryType := map[v1.ResourceName]resource.Quantity{}
 numaNodes := map[int]bool{}
@@ -345,7 +346,7 @@ func getTotalMemoryTypeReserved(machineInfo *cadvisorapi.MachineInfo, reservedMe
 return totalMemoryType
 }
-func validateReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory map[int]map[v1.ResourceName]resource.Quantity) error {
+func validateReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory kubetypes.NUMANodeResources) error {
 totalMemoryType := getTotalMemoryTypeReserved(machineInfo, reservedMemory)
 commonMemoryTypeSet := make(map[v1.ResourceName]bool)
@@ -381,7 +382,7 @@ func validateReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatabl
 return nil
 }
-func convertReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory map[int]map[v1.ResourceName]resource.Quantity) (systemReservedMemory, error) {
+func convertReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory kubetypes.NUMANodeResources) (systemReservedMemory, error) {
 preReservedMemoryConverted := make(map[int]map[v1.ResourceName]uint64)
 for _, node := range machineInfo.Topology {
 preReservedMemoryConverted[node.Id] = make(map[v1.ResourceName]uint64)
@@ -401,7 +402,7 @@ func convertReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory map[in
 return preReservedMemoryConverted, nil
 }
-func getSystemReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, preReservedMemory map[int]map[v1.ResourceName]resource.Quantity) (systemReservedMemory, error) {
+func getSystemReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, preReservedMemory kubetypes.NUMANodeResources) (systemReservedMemory, error) {
 if err := validateReservedMemory(machineInfo, nodeAllocatableReservation, preReservedMemory); err != nil {
 return nil, err
 }
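The retyped convertReserved and getSystemReservedMemory ultimately flatten these Quantity values into raw byte counts per NUMA node. Below is a minimal sketch of that flattening step, simplified from the function above: the flattenReserved helper is illustrative and takes a plain slice of NUMA node IDs instead of the cAdvisor MachineInfo to stay self-contained.

package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// flattenReserved is a simplified sketch in the spirit of convertReserved: it turns
// NUMANodeResources into plain uint64 values (bytes) per NUMA node and resource name.
func flattenReserved(numaNodeIDs []int, reserved kubetypes.NUMANodeResources) (map[int]map[v1.ResourceName]uint64, error) {
	converted := make(map[int]map[v1.ResourceName]uint64, len(numaNodeIDs))
	for _, id := range numaNodeIDs {
		converted[id] = map[v1.ResourceName]uint64{}
	}
	for id, resources := range reserved {
		bucket, ok := converted[id]
		if !ok {
			return nil, fmt.Errorf("reserved memory references NUMA node %d that the machine does not have", id)
		}
		for name, q := range resources {
			// Quantity.Value() returns the amount in base units, i.e. bytes for memory resources.
			bucket[name] = uint64(q.Value())
		}
	}
	return converted, nil
}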

View File

@@ -34,6 +34,7 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/cm/containermap"
 "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
 "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
+kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
 const (
@@ -58,7 +59,7 @@ type testMemoryManager struct {
 nodeAllocatableReservation v1.ResourceList
 policyName policyType
 affinity topologymanager.Store
-systemReservedMemory map[int]map[v1.ResourceName]resource.Quantity
+systemReservedMemory kubetypes.NUMANodeResources
 expectedHints map[string][]topologymanager.TopologyHint
 expectedReserved systemReservedMemory
 reserved systemReservedMemory
@@ -157,21 +158,21 @@ func TestValidateReservedMemory(t *testing.T) {
 description string
 nodeAllocatableReservation v1.ResourceList
 machineInfo *cadvisorapi.MachineInfo
-systemReservedMemory map[int]map[v1.ResourceName]resource.Quantity
+systemReservedMemory kubetypes.NUMANodeResources
 expectedError string
 }{
 {
 "Node Allocatable not set, reserved not set",
 v1.ResourceList{},
 machineInfo,
-map[int]map[v1.ResourceName]resource.Quantity{},
+kubetypes.NUMANodeResources{},
 "",
 },
 {
 "Node Allocatable set to zero, reserved set to zero",
 v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(0, resource.DecimalSI)},
 machineInfo,
-map[int]map[v1.ResourceName]resource.Quantity{
+kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(0, resource.DecimalSI)},
 },
 "",
@@ -180,7 +181,7 @@ func TestValidateReservedMemory(t *testing.T) {
 "Node Allocatable not set (equal zero), reserved set",
 v1.ResourceList{},
 machineInfo,
-map[int]map[v1.ResourceName]resource.Quantity{
+kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI)},
 },
 fmt.Sprintf(msgNotEqual, v1.ResourceMemory),
@@ -189,14 +190,14 @@ func TestValidateReservedMemory(t *testing.T) {
 "Node Allocatable set, reserved not set",
 v1.ResourceList{hugepages2M: *resource.NewQuantity(5, resource.DecimalSI)},
 machineInfo,
-map[int]map[v1.ResourceName]resource.Quantity{},
+kubetypes.NUMANodeResources{},
 fmt.Sprintf(msgNotEqual, hugepages2M),
 },
 {
 "Reserved not equal to Node Allocatable",
 v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(5, resource.DecimalSI)},
 machineInfo,
-map[int]map[v1.ResourceName]resource.Quantity{
+kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI)},
 },
 fmt.Sprintf(msgNotEqual, v1.ResourceMemory),
@@ -205,7 +206,7 @@ func TestValidateReservedMemory(t *testing.T) {
 "Reserved contains the NUMA node that does not exist under the machine",
 v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(17, resource.DecimalSI)},
 machineInfo,
-map[int]map[v1.ResourceName]resource.Quantity{
+kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI)},
 2: nodeResources{v1.ResourceMemory: *resource.NewQuantity(5, resource.DecimalSI)},
 },
@@ -217,7 +218,7 @@
 hugepages2M: *resource.NewQuantity(77, resource.DecimalSI),
 hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
 machineInfo,
-map[int]map[v1.ResourceName]resource.Quantity{
+kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI),
 hugepages2M: *resource.NewQuantity(70, resource.DecimalSI),
 hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
@@ -232,7 +233,7 @@
 hugepages2M: *resource.NewQuantity(14, resource.DecimalSI),
 hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
 machineInfo,
-map[int]map[v1.ResourceName]resource.Quantity{
+kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI),
 hugepages2M: *resource.NewQuantity(70, resource.DecimalSI),
 hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
@@ -265,13 +266,13 @@ func TestConvertPreReserved(t *testing.T) {
 testCases := []struct {
 description string
-systemReserved map[int]map[v1.ResourceName]resource.Quantity
+systemReserved kubetypes.NUMANodeResources
 systemReservedExpected systemReservedMemory
 expectedError string
 }{
 {
 "Empty",
-map[int]map[v1.ResourceName]resource.Quantity{},
+kubetypes.NUMANodeResources{},
 systemReservedMemory{
 0: map[v1.ResourceName]uint64{},
 1: map[v1.ResourceName]uint64{},
@@ -280,7 +281,7 @@ func TestConvertPreReserved(t *testing.T) {
 },
 {
 "Single NUMA node is reserved",
-map[int]map[v1.ResourceName]resource.Quantity{
+kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI),
 hugepages2M: *resource.NewQuantity(70, resource.DecimalSI),
 hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
@@ -297,7 +298,7 @@ func TestConvertPreReserved(t *testing.T) {
 },
 {
 "Both NUMA nodes are reserved",
-map[int]map[v1.ResourceName]resource.Quantity{
+kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI),
 hugepages2M: *resource.NewQuantity(70, resource.DecimalSI),
 hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
@@ -335,7 +336,7 @@ func TestGetSystemReservedMemory(t *testing.T) {
 {
 description: "Should return empty map when reservation is not done",
 nodeAllocatableReservation: v1.ResourceList{},
-systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{},
+systemReservedMemory: kubetypes.NUMANodeResources{},
 expectedReserved: systemReservedMemory{
 0: {},
 1: {},
@@ -346,7 +347,7 @@ func TestGetSystemReservedMemory(t *testing.T) {
 {
 description: "Should return error when Allocatable reservation is not equal pre reserved memory",
 nodeAllocatableReservation: v1.ResourceList{},
-systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{
+systemReservedMemory: kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
 },
 expectedReserved: nil,
@@ -356,7 +357,7 @@ func TestGetSystemReservedMemory(t *testing.T) {
 {
 description: "Reserved should be equal to systemReservedMemory",
 nodeAllocatableReservation: v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(2*gb, resource.BinarySI)},
-systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{
+systemReservedMemory: kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
 1: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
 },
@@ -2062,7 +2063,7 @@ func TestNewManager(t *testing.T) {
 policyName: policyTypeStatic,
 machineInfo: machineInfo,
 nodeAllocatableReservation: v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(2*gb, resource.BinarySI)},
-systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{
+systemReservedMemory: kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
 1: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
 },
@@ -2075,7 +2076,7 @@ func TestNewManager(t *testing.T) {
 policyName: policyTypeStatic,
 machineInfo: machineInfo,
 nodeAllocatableReservation: v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(2*gb, resource.BinarySI)},
-systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{
+systemReservedMemory: kubetypes.NUMANodeResources{
 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
 1: nodeResources{v1.ResourceMemory: *resource.NewQuantity(2*gb, resource.BinarySI)},
 },
@@ -2088,7 +2089,7 @@ func TestNewManager(t *testing.T) {
 policyName: policyTypeStatic,
 machineInfo: machineInfo,
 nodeAllocatableReservation: v1.ResourceList{},
-systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{},
+systemReservedMemory: kubetypes.NUMANodeResources{},
 affinity: topologymanager.NewFakeManager(),
 expectedError: fmt.Errorf("[memorymanager] you should specify the system reserved memory"),
 expectedReserved: expectedReserved,
@@ -2098,7 +2099,7 @@ func TestNewManager(t *testing.T) {
 policyName: "fake",
 machineInfo: machineInfo,
 nodeAllocatableReservation: v1.ResourceList{},
-systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{},
+systemReservedMemory: kubetypes.NUMANodeResources{},
 affinity: topologymanager.NewFakeManager(),
 expectedError: fmt.Errorf("unknown policy: \"fake\""),
 expectedReserved: expectedReserved,
@@ -2108,7 +2109,7 @@ func TestNewManager(t *testing.T) {
 policyName: policyTypeNone,
 machineInfo: machineInfo,
 nodeAllocatableReservation: v1.ResourceList{},
-systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{},
+systemReservedMemory: kubetypes.NUMANodeResources{},
 affinity: topologymanager.NewFakeManager(),
 expectedError: nil,
 expectedReserved: expectedReserved,

View File

@@ -20,6 +20,7 @@ go_library(
 deps = [
 "//pkg/apis/scheduling:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
 ],

View File

@@ -21,6 +21,7 @@ import (
 "time"
 v1 "k8s.io/api/core/v1"
+"k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/apimachinery/pkg/types"
 )
@@ -113,3 +114,6 @@ type ResolvedPodUID types.UID
 // MirrorPodUID is a pod UID for a mirror pod.
 type MirrorPodUID types.UID
+// NUMANodeResources is a set of (resource name, quantity) pairs for each NUMA node.
+type NUMANodeResources map[int]map[v1.ResourceName]resource.Quantity
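With the alias defined here, callers such as getTotalMemoryTypeReserved in the memory manager can range over the type directly. A small illustrative sketch of summing the reservations per resource name across all NUMA nodes (the totalPerResource helper is not code from the commit):

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// totalPerResource sums the reserved quantities across NUMA nodes for each resource name.
func totalPerResource(reserved kubetypes.NUMANodeResources) map[v1.ResourceName]resource.Quantity {
	totals := map[v1.ResourceName]resource.Quantity{}
	for _, resources := range reserved {
		for name, q := range resources {
			total := totals[name] // zero Quantity if this resource has not been seen yet
			total.Add(q)
			totals[name] = total
		}
	}
	return totals
}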