memory manager: provide the new type to contain resources for each NUMA node
Signed-off-by: Artyom Lukianov <alukiano@redhat.com>
parent 74eeef2a0a
commit ff2a110920
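The change itself is mechanical: every `map[int]map[v1.ResourceName]resource.Quantity` that describes per-NUMA-node reserved memory is replaced with the named type `kubetypes.NUMANodeResources`, which this commit defines in pkg/kubelet/types (see the last hunk of the diff). For orientation, the standalone sketch below shows roughly what a value of the new type looks like; it is illustrative only, not code from the commit, and the hugepages resource name is just an example.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// NUMANodeResources mirrors the type this commit adds to pkg/kubelet/types:
// reserved resource quantities, keyed by NUMA node index.
type NUMANodeResources map[int]map[v1.ResourceName]resource.Quantity

func main() {
	// Illustrative value: 1Gi of memory reserved on each of two NUMA nodes,
	// plus some 2Mi hugepages on node 0.
	reserved := NUMANodeResources{
		0: {
			v1.ResourceMemory:                resource.MustParse("1Gi"),
			v1.ResourceName("hugepages-2Mi"): resource.MustParse("64Mi"),
		},
		1: {
			v1.ResourceMemory: resource.MustParse("1Gi"),
		},
	}
	for node, resources := range reserved {
		for name, quantity := range resources {
			fmt.Printf("NUMA node %d: %s = %s\n", node, name, quantity.String())
		}
	}
}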
@@ -1305,7 +1305,7 @@ func parseResourceList(m map[string]string) (v1.ResourceList, error) {
     return rl, nil
 }
 
-func parseReservedMemoryConfig(config []map[string]string) (map[int]map[v1.ResourceName]resource.Quantity, error) {
+func parseReservedMemoryConfig(config []map[string]string) (kubetypes.NUMANodeResources, error) {
     if len(config) == 0 {
         return nil, nil
     }
@@ -1327,7 +1327,7 @@ func parseReservedMemoryConfig(config []map[string]string) (map[int]map[v1.Resou
         }
     }
 
-    parsed := make(map[int]map[v1.ResourceName]resource.Quantity, len(config))
+    parsed := make(kubetypes.NUMANodeResources, len(config))
     for _, m := range config {
         idxInString, _ := m[indexKey]
         idx, err := strconv.Atoi(idxInString)
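For readers unfamiliar with this path: parseReservedMemoryConfig turns the kubelet's reserved-memory configuration (a list of string maps, one per NUMA node, containing a numeric index key plus resource name/quantity pairs) into the per-NUMA-node map the memory manager consumes, now expressed as the new named type. The standalone sketch below reproduces that flow in simplified form; the "numa-node" index key, error messages, and helper name are assumptions for illustration, not the kubelet's actual indexKey constant or implementation.

package main

import (
	"fmt"
	"strconv"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// NUMANodeResources mirrors kubetypes.NUMANodeResources from this commit.
type NUMANodeResources map[int]map[v1.ResourceName]resource.Quantity

// parseReservedMemory is a simplified stand-in for parseReservedMemoryConfig:
// each entry carries a NUMA node index plus resource name/quantity pairs.
func parseReservedMemory(config []map[string]string) (NUMANodeResources, error) {
	if len(config) == 0 {
		return nil, nil
	}
	const indexKey = "numa-node" // assumed key name, for illustration only
	parsed := make(NUMANodeResources, len(config))
	for _, m := range config {
		idx, err := strconv.Atoi(m[indexKey])
		if err != nil {
			return nil, fmt.Errorf("invalid NUMA node index %q: %v", m[indexKey], err)
		}
		parsed[idx] = map[v1.ResourceName]resource.Quantity{}
		for name, value := range m {
			if name == indexKey {
				continue
			}
			q, err := resource.ParseQuantity(value)
			if err != nil {
				return nil, fmt.Errorf("invalid quantity %q for resource %q: %v", value, name, err)
			}
			parsed[idx][v1.ResourceName(name)] = q
		}
	}
	return parsed, nil
}

func main() {
	reserved, err := parseReservedMemory([]map[string]string{
		{"numa-node": "0", "memory": "1Gi", "hugepages-2Mi": "64Mi"},
		{"numa-node": "1", "memory": "1Gi"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", reserved)
}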
@@ -41,6 +41,7 @@ go_library(
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/pluginmanager/cache:go_default_library",
         "//pkg/kubelet/status:go_default_library",
+        "//pkg/kubelet/types:go_default_library",
         "//pkg/scheduler/framework:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -69,7 +70,6 @@ go_library(
         "//pkg/kubelet/metrics:go_default_library",
         "//pkg/kubelet/qos:go_default_library",
         "//pkg/kubelet/stats/pidlimit:go_default_library",
-        "//pkg/kubelet/types:go_default_library",
         "//pkg/util/oom:go_default_library",
         "//pkg/util/procfs:go_default_library",
         "//pkg/util/sysctl:go_default_library",
@@ -130,7 +130,6 @@ go_library(
         "//pkg/kubelet/metrics:go_default_library",
         "//pkg/kubelet/qos:go_default_library",
         "//pkg/kubelet/stats/pidlimit:go_default_library",
-        "//pkg/kubelet/types:go_default_library",
         "//pkg/util/oom:go_default_library",
         "//pkg/util/procfs:go_default_library",
         "//pkg/util/sysctl:go_default_library",
@@ -19,7 +19,6 @@ package cm
 import (
     "time"
 
-    "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/apimachinery/pkg/util/sets"
     // TODO: Migrate kubelet to either use its own internal objects or client library.
     v1 "k8s.io/api/core/v1"
@@ -32,6 +31,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
     "k8s.io/kubernetes/pkg/kubelet/status"
+    kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
     schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 
     "fmt"
@@ -137,7 +137,7 @@ type NodeConfig struct {
     ExperimentalTopologyManagerScope string
     ExperimentalCPUManagerReconcilePeriod time.Duration
     ExperimentalMemoryManagerPolicy string
-    ExperimentalMemoryManagerReservedMemory map[int]map[v1.ResourceName]resource.Quantity
+    ExperimentalMemoryManagerReservedMemory kubetypes.NUMANodeResources
     ExperimentalPodPidsLimit int64
     EnforceCPULimits bool
     CPUCFSQuotaPeriod time.Duration
@@ -20,6 +20,7 @@ go_library(
         "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library",
         "//pkg/kubelet/config:go_default_library",
         "//pkg/kubelet/status:go_default_library",
+        "//pkg/kubelet/types:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library",
@@ -40,6 +41,7 @@ go_test(
         "//pkg/kubelet/cm/memorymanager/state:go_default_library",
         "//pkg/kubelet/cm/topologymanager:go_default_library",
         "//pkg/kubelet/cm/topologymanager/bitmask:go_default_library",
+        "//pkg/kubelet/types:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -34,6 +34,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
     "k8s.io/kubernetes/pkg/kubelet/config"
     "k8s.io/kubernetes/pkg/kubelet/status"
+    kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
 
 // memoryManagerStateFileName is the file name where memory manager stores its state
@@ -118,7 +119,7 @@ type manager struct {
 var _ Manager = &manager{}
 
 // NewManager returns new instance of the memory manager
-func NewManager(policyName string, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory map[int]map[v1.ResourceName]resource.Quantity, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
+func NewManager(policyName string, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory kubetypes.NUMANodeResources, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
     var policy Policy
 
     switch policyType(policyName) {
@@ -320,7 +321,7 @@ func (m *manager) policyRemoveContainerByRef(podUID string, containerName string
     return err
 }
 
-func getTotalMemoryTypeReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory map[int]map[v1.ResourceName]resource.Quantity) map[v1.ResourceName]resource.Quantity {
+func getTotalMemoryTypeReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory kubetypes.NUMANodeResources) map[v1.ResourceName]resource.Quantity {
     totalMemoryType := map[v1.ResourceName]resource.Quantity{}
 
     numaNodes := map[int]bool{}
@@ -345,7 +346,7 @@ func getTotalMemoryTypeReserved(machineInfo *cadvisorapi.MachineInfo, reservedMe
     return totalMemoryType
 }
 
-func validateReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory map[int]map[v1.ResourceName]resource.Quantity) error {
+func validateReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory kubetypes.NUMANodeResources) error {
     totalMemoryType := getTotalMemoryTypeReserved(machineInfo, reservedMemory)
 
     commonMemoryTypeSet := make(map[v1.ResourceName]bool)
@@ -381,7 +382,7 @@ func validateReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatabl
     return nil
 }
 
-func convertReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory map[int]map[v1.ResourceName]resource.Quantity) (systemReservedMemory, error) {
+func convertReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory kubetypes.NUMANodeResources) (systemReservedMemory, error) {
     preReservedMemoryConverted := make(map[int]map[v1.ResourceName]uint64)
     for _, node := range machineInfo.Topology {
         preReservedMemoryConverted[node.Id] = make(map[v1.ResourceName]uint64)
@@ -401,7 +402,7 @@ func convertReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory map[in
     return preReservedMemoryConverted, nil
 }
 
-func getSystemReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, preReservedMemory map[int]map[v1.ResourceName]resource.Quantity) (systemReservedMemory, error) {
+func getSystemReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, preReservedMemory kubetypes.NUMANodeResources) (systemReservedMemory, error) {
     if err := validateReservedMemory(machineInfo, nodeAllocatableReservation, preReservedMemory); err != nil {
         return nil, err
     }
@@ -34,6 +34,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/cm/containermap"
     "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
     "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
+    kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
 
 const (
@@ -58,7 +59,7 @@ type testMemoryManager struct {
     nodeAllocatableReservation v1.ResourceList
     policyName policyType
     affinity topologymanager.Store
-    systemReservedMemory map[int]map[v1.ResourceName]resource.Quantity
+    systemReservedMemory kubetypes.NUMANodeResources
     expectedHints map[string][]topologymanager.TopologyHint
     expectedReserved systemReservedMemory
     reserved systemReservedMemory
@@ -157,21 +158,21 @@ func TestValidateReservedMemory(t *testing.T) {
         description string
         nodeAllocatableReservation v1.ResourceList
         machineInfo *cadvisorapi.MachineInfo
-        systemReservedMemory map[int]map[v1.ResourceName]resource.Quantity
+        systemReservedMemory kubetypes.NUMANodeResources
         expectedError string
     }{
         {
             "Node Allocatable not set, reserved not set",
             v1.ResourceList{},
             machineInfo,
-            map[int]map[v1.ResourceName]resource.Quantity{},
+            kubetypes.NUMANodeResources{},
             "",
         },
         {
             "Node Allocatable set to zero, reserved set to zero",
             v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(0, resource.DecimalSI)},
             machineInfo,
-            map[int]map[v1.ResourceName]resource.Quantity{
+            kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(0, resource.DecimalSI)},
             },
             "",
@@ -180,7 +181,7 @@ func TestValidateReservedMemory(t *testing.T) {
             "Node Allocatable not set (equal zero), reserved set",
             v1.ResourceList{},
             machineInfo,
-            map[int]map[v1.ResourceName]resource.Quantity{
+            kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI)},
             },
             fmt.Sprintf(msgNotEqual, v1.ResourceMemory),
@@ -189,14 +190,14 @@ func TestValidateReservedMemory(t *testing.T) {
             "Node Allocatable set, reserved not set",
             v1.ResourceList{hugepages2M: *resource.NewQuantity(5, resource.DecimalSI)},
             machineInfo,
-            map[int]map[v1.ResourceName]resource.Quantity{},
+            kubetypes.NUMANodeResources{},
             fmt.Sprintf(msgNotEqual, hugepages2M),
         },
         {
             "Reserved not equal to Node Allocatable",
             v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(5, resource.DecimalSI)},
             machineInfo,
-            map[int]map[v1.ResourceName]resource.Quantity{
+            kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI)},
             },
             fmt.Sprintf(msgNotEqual, v1.ResourceMemory),
@@ -205,7 +206,7 @@ func TestValidateReservedMemory(t *testing.T) {
             "Reserved contains the NUMA node that does not exist under the machine",
             v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(17, resource.DecimalSI)},
             machineInfo,
-            map[int]map[v1.ResourceName]resource.Quantity{
+            kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI)},
                 2: nodeResources{v1.ResourceMemory: *resource.NewQuantity(5, resource.DecimalSI)},
             },
@@ -217,7 +218,7 @@ func TestValidateReservedMemory(t *testing.T) {
                 hugepages2M: *resource.NewQuantity(77, resource.DecimalSI),
                 hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
             machineInfo,
-            map[int]map[v1.ResourceName]resource.Quantity{
+            kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI),
                     hugepages2M: *resource.NewQuantity(70, resource.DecimalSI),
                     hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
@@ -232,7 +233,7 @@ func TestValidateReservedMemory(t *testing.T) {
                 hugepages2M: *resource.NewQuantity(14, resource.DecimalSI),
                 hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
             machineInfo,
-            map[int]map[v1.ResourceName]resource.Quantity{
+            kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI),
                     hugepages2M: *resource.NewQuantity(70, resource.DecimalSI),
                     hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
@@ -265,13 +266,13 @@ func TestConvertPreReserved(t *testing.T) {
 
     testCases := []struct {
         description string
-        systemReserved map[int]map[v1.ResourceName]resource.Quantity
+        systemReserved kubetypes.NUMANodeResources
        systemReservedExpected systemReservedMemory
         expectedError string
     }{
         {
             "Empty",
-            map[int]map[v1.ResourceName]resource.Quantity{},
+            kubetypes.NUMANodeResources{},
             systemReservedMemory{
                 0: map[v1.ResourceName]uint64{},
                 1: map[v1.ResourceName]uint64{},
@@ -280,7 +281,7 @@ func TestConvertPreReserved(t *testing.T) {
         },
         {
             "Single NUMA node is reserved",
-            map[int]map[v1.ResourceName]resource.Quantity{
+            kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI),
                     hugepages2M: *resource.NewQuantity(70, resource.DecimalSI),
                     hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
@@ -297,7 +298,7 @@ func TestConvertPreReserved(t *testing.T) {
         },
         {
             "Both NUMA nodes are reserved",
-            map[int]map[v1.ResourceName]resource.Quantity{
+            kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(12, resource.DecimalSI),
                     hugepages2M: *resource.NewQuantity(70, resource.DecimalSI),
                     hugepages1G: *resource.NewQuantity(13, resource.DecimalSI)},
@@ -335,7 +336,7 @@ func TestGetSystemReservedMemory(t *testing.T) {
         {
             description: "Should return empty map when reservation is not done",
             nodeAllocatableReservation: v1.ResourceList{},
-            systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{},
+            systemReservedMemory: kubetypes.NUMANodeResources{},
             expectedReserved: systemReservedMemory{
                 0: {},
                 1: {},
@@ -346,7 +347,7 @@ func TestGetSystemReservedMemory(t *testing.T) {
         {
             description: "Should return error when Allocatable reservation is not equal pre reserved memory",
             nodeAllocatableReservation: v1.ResourceList{},
-            systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{
+            systemReservedMemory: kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
             },
             expectedReserved: nil,
@@ -356,7 +357,7 @@ func TestGetSystemReservedMemory(t *testing.T) {
         {
             description: "Reserved should be equal to systemReservedMemory",
             nodeAllocatableReservation: v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(2*gb, resource.BinarySI)},
-            systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{
+            systemReservedMemory: kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
                 1: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
             },
@@ -2062,7 +2063,7 @@ func TestNewManager(t *testing.T) {
             policyName: policyTypeStatic,
             machineInfo: machineInfo,
             nodeAllocatableReservation: v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(2*gb, resource.BinarySI)},
-            systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{
+            systemReservedMemory: kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
                 1: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
             },
@@ -2075,7 +2076,7 @@ func TestNewManager(t *testing.T) {
             policyName: policyTypeStatic,
             machineInfo: machineInfo,
             nodeAllocatableReservation: v1.ResourceList{v1.ResourceMemory: *resource.NewQuantity(2*gb, resource.BinarySI)},
-            systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{
+            systemReservedMemory: kubetypes.NUMANodeResources{
                 0: nodeResources{v1.ResourceMemory: *resource.NewQuantity(gb, resource.BinarySI)},
                 1: nodeResources{v1.ResourceMemory: *resource.NewQuantity(2*gb, resource.BinarySI)},
             },
@@ -2088,7 +2089,7 @@ func TestNewManager(t *testing.T) {
             policyName: policyTypeStatic,
             machineInfo: machineInfo,
             nodeAllocatableReservation: v1.ResourceList{},
-            systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{},
+            systemReservedMemory: kubetypes.NUMANodeResources{},
             affinity: topologymanager.NewFakeManager(),
             expectedError: fmt.Errorf("[memorymanager] you should specify the system reserved memory"),
             expectedReserved: expectedReserved,
@@ -2098,7 +2099,7 @@ func TestNewManager(t *testing.T) {
             policyName: "fake",
             machineInfo: machineInfo,
             nodeAllocatableReservation: v1.ResourceList{},
-            systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{},
+            systemReservedMemory: kubetypes.NUMANodeResources{},
             affinity: topologymanager.NewFakeManager(),
             expectedError: fmt.Errorf("unknown policy: \"fake\""),
             expectedReserved: expectedReserved,
@@ -2108,7 +2109,7 @@ func TestNewManager(t *testing.T) {
             policyName: policyTypeNone,
             machineInfo: machineInfo,
             nodeAllocatableReservation: v1.ResourceList{},
-            systemReservedMemory: map[int]map[v1.ResourceName]resource.Quantity{},
+            systemReservedMemory: kubetypes.NUMANodeResources{},
             affinity: topologymanager.NewFakeManager(),
             expectedError: nil,
             expectedReserved: expectedReserved,
@@ -20,6 +20,7 @@ go_library(
     deps = [
         "//pkg/apis/scheduling:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
     ],
@@ -21,6 +21,7 @@ import (
     "time"
 
     v1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/apimachinery/pkg/types"
 )
 
@@ -113,3 +114,6 @@ type ResolvedPodUID types.UID
 
 // MirrorPodUID is a pod UID for a mirror pod.
 type MirrorPodUID types.UID
+
+// NUMANodeResources is a set of (resource name, quantity) pairs for each NUMA node.
+type NUMANodeResources map[int]map[v1.ResourceName]resource.Quantity
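As a closing note on how the new type is consumed: getTotalMemoryTypeReserved (changed above) sums the reserved quantity of each resource across all NUMA nodes so the total can be compared against the node allocatable reservation. The standalone sketch below reproduces only that summing step, not the machine-topology filtering the real function also performs; it is illustrative, not the kubelet implementation.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// NUMANodeResources mirrors kubetypes.NUMANodeResources introduced by this commit.
type NUMANodeResources map[int]map[v1.ResourceName]resource.Quantity

// totalReservedByType sums each resource across all NUMA nodes, roughly the
// aggregation getTotalMemoryTypeReserved performs before validation.
func totalReservedByType(reserved NUMANodeResources) map[v1.ResourceName]resource.Quantity {
	total := map[v1.ResourceName]resource.Quantity{}
	for _, nodeReserved := range reserved {
		for name, q := range nodeReserved {
			if existing, ok := total[name]; ok {
				existing.Add(q)
				total[name] = existing
			} else {
				total[name] = q.DeepCopy()
			}
		}
	}
	return total
}

func main() {
	reserved := NUMANodeResources{
		0: {v1.ResourceMemory: resource.MustParse("1Gi")},
		1: {v1.ResourceMemory: resource.MustParse("1Gi")},
	}
	// With the value above this prints: total reserved memory: 2Gi
	for name, q := range totalReservedByType(reserved) {
		fmt.Printf("total reserved %s: %s\n", name, q.String())
	}
}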