port setNodeStatusMachineInfo to Setter abstraction, add test

Michael Taufen 2018-06-27 14:51:38 -07:00
parent aa94a3ba4e
commit 596fa89af0
5 changed files with 598 additions and 128 deletions
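For context, the Setter abstraction this change ports to is, per the diff below, simply a function that mutates a *v1.Node in place and may return an error (nodestatus.MachineInfo is built as return func(node *v1.Node) error { ... } and returns the package's Setter type). A minimal sketch of the pattern, with an illustrative trivial setter and apply loop that are not part of this commit:

package main

import (
        "fmt"

        "k8s.io/api/core/v1"
)

// Setter mirrors the shape used by pkg/kubelet/nodestatus: a function that
// decorates the node in place and may return an error.
type Setter func(node *v1.Node) error

// exampleBootID is an illustrative setter, not taken from this commit.
func exampleBootID(id string) Setter {
        return func(node *v1.Node) error {
                node.Status.NodeInfo.BootID = id
                return nil
        }
}

func main() {
        node := &v1.Node{}
        setters := []Setter{exampleBootID("example-boot-id")}
        for _, set := range setters {
                if err := set(node); err != nil {
                        fmt.Printf("setter failed: %v\n", err)
                }
        }
        fmt.Println(node.Status.NodeInfo.BootID) // example-boot-id
}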


@@ -19,7 +19,6 @@ package kubelet
import (
"context"
"fmt"
"math"
"net"
goruntime "runtime"
"time"
@@ -36,7 +35,6 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/nodestatus"
"k8s.io/kubernetes/pkg/kubelet/util"
@@ -442,131 +440,9 @@ func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) {
kl.recorder.Eventf(kl.nodeRef, eventType, event, "Node %s status is now: %s", kl.nodeName, event)
}
func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
// Note: avoid blindly overwriting the capacity in case opaque
// resources are being advertised.
if node.Status.Capacity == nil {
node.Status.Capacity = v1.ResourceList{}
}
var devicePluginAllocatable v1.ResourceList
var devicePluginCapacity v1.ResourceList
var removedDevicePlugins []string
// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
info, err := kl.GetCachedMachineInfo()
if err != nil {
// TODO(roberthbailey): This is required for test-cmd.sh to pass.
// See if the test should be updated instead.
node.Status.Capacity[v1.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI)
node.Status.Capacity[v1.ResourceMemory] = resource.MustParse("0Gi")
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(int64(kl.maxPods), resource.DecimalSI)
glog.Errorf("Error getting machine info: %v", err)
} else {
node.Status.NodeInfo.MachineID = info.MachineID
node.Status.NodeInfo.SystemUUID = info.SystemUUID
for rName, rCap := range cadvisor.CapacityFromMachineInfo(info) {
node.Status.Capacity[rName] = rCap
}
if kl.podsPerCore > 0 {
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
int64(math.Min(float64(info.NumCores*kl.podsPerCore), float64(kl.maxPods))), resource.DecimalSI)
} else {
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
int64(kl.maxPods), resource.DecimalSI)
}
if node.Status.NodeInfo.BootID != "" &&
node.Status.NodeInfo.BootID != info.BootID {
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.NodeRebooted,
"Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
}
node.Status.NodeInfo.BootID = info.BootID
if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
// TODO: all the node resources should use GetCapacity instead of deriving the
// capacity for every node status request
initialCapacity := kl.containerManager.GetCapacity()
if initialCapacity != nil {
node.Status.Capacity[v1.ResourceEphemeralStorage] = initialCapacity[v1.ResourceEphemeralStorage]
}
}
devicePluginCapacity, devicePluginAllocatable, removedDevicePlugins = kl.containerManager.GetDevicePluginResourceCapacity()
if devicePluginCapacity != nil {
for k, v := range devicePluginCapacity {
if old, ok := node.Status.Capacity[k]; !ok || old.Value() != v.Value() {
glog.V(2).Infof("Update capacity for %s to %d", k, v.Value())
}
node.Status.Capacity[k] = v
}
}
for _, removedResource := range removedDevicePlugins {
glog.V(2).Infof("Set capacity for %s to 0 on device removal", removedResource)
// Set the capacity of the removed resource to 0 instead of
// removing the resource from the node status. This is to indicate
// that the resource is managed by device plugin and had been
// registered before.
//
// This is required to differentiate the device plugin managed
// resources and the cluster-level resources, which are absent in
// node status.
node.Status.Capacity[v1.ResourceName(removedResource)] = *resource.NewQuantity(int64(0), resource.DecimalSI)
}
}
// Set Allocatable.
if node.Status.Allocatable == nil {
node.Status.Allocatable = make(v1.ResourceList)
}
// Remove extended resources from allocatable that are no longer
// present in capacity.
for k := range node.Status.Allocatable {
_, found := node.Status.Capacity[k]
if !found && v1helper.IsExtendedResourceName(k) {
delete(node.Status.Allocatable, k)
}
}
allocatableReservation := kl.containerManager.GetNodeAllocatableReservation()
for k, v := range node.Status.Capacity {
value := *(v.Copy())
if res, exists := allocatableReservation[k]; exists {
value.Sub(res)
}
if value.Sign() < 0 {
// Negative Allocatable resources don't make sense.
value.Set(0)
}
node.Status.Allocatable[k] = value
}
if devicePluginAllocatable != nil {
for k, v := range devicePluginAllocatable {
if old, ok := node.Status.Allocatable[k]; !ok || old.Value() != v.Value() {
glog.V(2).Infof("Update allocatable for %s to %d", k, v.Value())
}
node.Status.Allocatable[k] = v
}
}
// for every huge page reservation, we need to remove it from allocatable memory
for k, v := range node.Status.Capacity {
if v1helper.IsHugePageResourceName(k) {
allocatableMemory := node.Status.Allocatable[v1.ResourceMemory]
value := *(v.Copy())
allocatableMemory.Sub(value)
if allocatableMemory.Sign() < 0 {
// Negative Allocatable resources don't make sense.
allocatableMemory.Set(0)
}
node.Status.Allocatable[v1.ResourceMemory] = allocatableMemory
}
}
}
// recordEvent records an event for this node, the Kubelet's nodeRef is passed to the recorder
func (kl *Kubelet) recordEvent(eventType, event, message string) {
kl.recorder.Eventf(kl.nodeRef, eventType, event, message)
}
// Set versioninfo for the node.
@@ -693,7 +569,8 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
var setters []func(n *v1.Node) error
setters = append(setters,
nodestatus.NodeAddress(kl.nodeIP, kl.nodeIPValidator, kl.hostname, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc),
withoutError(kl.setNodeStatusMachineInfo),
nodestatus.MachineInfo(string(kl.nodeName), kl.maxPods, kl.podsPerCore, kl.GetCachedMachineInfo, kl.containerManager.GetCapacity,
kl.containerManager.GetDevicePluginResourceCapacity, kl.containerManager.GetNodeAllocatableReservation, kl.recordEvent),
withoutError(kl.setNodeStatusVersionInfo),
withoutError(kl.setNodeStatusDaemonEndpoints),
withoutError(kl.setNodeStatusImages),
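One detail worth calling out in the hunk above: the removed entry was wrapped in withoutError because setNodeStatusMachineInfo had no return value, whereas nodestatus.MachineInfo already returns an error-returning setter and is appended directly. The adapter's body does not appear in this diff; the sketch below is an assumption of what such a wrapper presumably looks like, with only its call sites taken from the hunk:

package main

import (
        "fmt"

        "k8s.io/api/core/v1"
)

// withoutError is assumed to adapt a setter that cannot fail into the
// func(*v1.Node) error shape used by the setters slice; only its call
// sites appear in this diff, so this body is illustrative.
func withoutError(f func(*v1.Node)) func(*v1.Node) error {
        return func(n *v1.Node) error {
                f(n)
                return nil
        }
}

func main() {
        node := &v1.Node{}
        set := withoutError(func(n *v1.Node) { n.Status.NodeInfo.OperatingSystem = "linux" })
        if err := set(node); err != nil {
                fmt.Printf("unexpected error: %v\n", err)
        }
        fmt.Println(node.Status.NodeInfo.OperatingSystem) // linux
}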


@@ -196,6 +196,10 @@ func TestUpdateNewNodeStatus(t *testing.T) {
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
// Since this test retroactively overrides the stub container manager,
// we have to regenerate default status setters.
kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
kubeClient := testKubelet.fakeKubeClient
existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
@@ -329,6 +333,9 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
// Since this test retroactively overrides the stub container manager,
// we have to regenerate default status setters.
kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
kubeClient := testKubelet.fakeKubeClient
existingNode := v1.Node{
@@ -595,6 +602,9 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
},
}
// Since this test retroactively overrides the stub container manager,
// we have to regenerate default status setters.
kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
clock := testKubelet.fakeClock
kubeClient := testKubelet.fakeKubeClient
@@ -1036,6 +1046,10 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
}
// Since this test retroactively overrides the stub container manager,
// we have to regenerate default status setters.
kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
kubeClient := testKubelet.fakeKubeClient
existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain


@@ -6,16 +6,20 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/kubelet/nodestatus",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/cadvisor:go_default_library",
"//pkg/kubelet/cm:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
],
)
@@ -46,6 +50,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
],


@@ -18,17 +18,23 @@ package nodestatus
import (
"fmt"
"math"
"net"
"strings"
"time"
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilnet "k8s.io/apimachinery/pkg/util/net"
utilfeature "k8s.io/apiserver/pkg/util/feature"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/events"
@@ -147,6 +153,145 @@ func NodeAddress(nodeIP net.IP, // typically Kubelet.nodeIP
}
}
// MachineInfo returns a Setter that updates machine-related information on the node.
func MachineInfo(nodeName string,
maxPods int,
podsPerCore int,
machineInfoFunc func() (*cadvisorapiv1.MachineInfo, error), // typically Kubelet.GetCachedMachineInfo
capacityFunc func() v1.ResourceList, // typically Kubelet.containerManager.GetCapacity
devicePluginResourceCapacityFunc func() (v1.ResourceList, v1.ResourceList, []string), // typically Kubelet.containerManager.GetDevicePluginResourceCapacity
nodeAllocatableReservationFunc func() v1.ResourceList, // typically Kubelet.containerManager.GetNodeAllocatableReservation
recordEventFunc func(eventType, event, message string), // typically Kubelet.recordEvent
) Setter {
return func(node *v1.Node) error {
// Note: avoid blindly overwriting the capacity in case opaque
// resources are being advertised.
if node.Status.Capacity == nil {
node.Status.Capacity = v1.ResourceList{}
}
var devicePluginAllocatable v1.ResourceList
var devicePluginCapacity v1.ResourceList
var removedDevicePlugins []string
// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
info, err := machineInfoFunc()
if err != nil {
// TODO(roberthbailey): This is required for test-cmd.sh to pass.
// See if the test should be updated instead.
node.Status.Capacity[v1.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI)
node.Status.Capacity[v1.ResourceMemory] = resource.MustParse("0Gi")
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(int64(maxPods), resource.DecimalSI)
glog.Errorf("Error getting machine info: %v", err)
} else {
node.Status.NodeInfo.MachineID = info.MachineID
node.Status.NodeInfo.SystemUUID = info.SystemUUID
for rName, rCap := range cadvisor.CapacityFromMachineInfo(info) {
node.Status.Capacity[rName] = rCap
}
if podsPerCore > 0 {
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
int64(math.Min(float64(info.NumCores*podsPerCore), float64(maxPods))), resource.DecimalSI)
} else {
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
int64(maxPods), resource.DecimalSI)
}
if node.Status.NodeInfo.BootID != "" &&
node.Status.NodeInfo.BootID != info.BootID {
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
recordEventFunc(v1.EventTypeWarning, events.NodeRebooted,
fmt.Sprintf("Node %s has been rebooted, boot id: %s", nodeName, info.BootID))
}
node.Status.NodeInfo.BootID = info.BootID
if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
// TODO: all the node resources should use ContainerManager.GetCapacity instead of deriving the
// capacity for every node status request
initialCapacity := capacityFunc()
if initialCapacity != nil {
node.Status.Capacity[v1.ResourceEphemeralStorage] = initialCapacity[v1.ResourceEphemeralStorage]
}
}
devicePluginCapacity, devicePluginAllocatable, removedDevicePlugins = devicePluginResourceCapacityFunc()
if devicePluginCapacity != nil {
for k, v := range devicePluginCapacity {
if old, ok := node.Status.Capacity[k]; !ok || old.Value() != v.Value() {
glog.V(2).Infof("Update capacity for %s to %d", k, v.Value())
}
node.Status.Capacity[k] = v
}
}
for _, removedResource := range removedDevicePlugins {
glog.V(2).Infof("Set capacity for %s to 0 on device removal", removedResource)
// Set the capacity of the removed resource to 0 instead of
// removing the resource from the node status. This is to indicate
// that the resource is managed by device plugin and had been
// registered before.
//
// This is required to differentiate the device plugin managed
// resources and the cluster-level resources, which are absent in
// node status.
node.Status.Capacity[v1.ResourceName(removedResource)] = *resource.NewQuantity(int64(0), resource.DecimalSI)
}
}
// Set Allocatable.
if node.Status.Allocatable == nil {
node.Status.Allocatable = make(v1.ResourceList)
}
// Remove extended resources from allocatable that are no longer
// present in capacity.
for k := range node.Status.Allocatable {
_, found := node.Status.Capacity[k]
if !found && v1helper.IsExtendedResourceName(k) {
delete(node.Status.Allocatable, k)
}
}
allocatableReservation := nodeAllocatableReservationFunc()
for k, v := range node.Status.Capacity {
value := *(v.Copy())
if res, exists := allocatableReservation[k]; exists {
value.Sub(res)
}
if value.Sign() < 0 {
// Negative Allocatable resources don't make sense.
value.Set(0)
}
node.Status.Allocatable[k] = value
}
if devicePluginAllocatable != nil {
for k, v := range devicePluginAllocatable {
if old, ok := node.Status.Allocatable[k]; !ok || old.Value() != v.Value() {
glog.V(2).Infof("Update allocatable for %s to %d", k, v.Value())
}
node.Status.Allocatable[k] = v
}
}
// for every huge page reservation, we need to remove it from allocatable memory
for k, v := range node.Status.Capacity {
if v1helper.IsHugePageResourceName(k) {
allocatableMemory := node.Status.Allocatable[v1.ResourceMemory]
value := *(v.Copy())
allocatableMemory.Sub(value)
if allocatableMemory.Sign() < 0 {
// Negative Allocatable resources don't make sense.
allocatableMemory.Set(0)
}
node.Status.Allocatable[v1.ResourceMemory] = allocatableMemory
}
}
return nil
}
}
// ReadyCondition returns a Setter that updates the v1.NodeReady condition on the node.
func ReadyCondition(
nowFunc func() time.Time, // typically Kubelet.clock.Now


@@ -23,6 +23,8 @@ import (
"testing"
"time"
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/resource"
@@ -190,6 +192,432 @@ func TestNodeAddress(t *testing.T) {
}
}
func TestMachineInfo(t *testing.T) {
const nodeName = "test-node"
type dprc struct {
capacity v1.ResourceList
allocatable v1.ResourceList
inactive []string
}
cases := []struct {
desc string
node *v1.Node
maxPods int
podsPerCore int
machineInfo *cadvisorapiv1.MachineInfo
machineInfoError error
capacity v1.ResourceList
devicePluginResourceCapacity dprc
nodeAllocatableReservation v1.ResourceList
expectNode *v1.Node
expectEvents []testEvent
}{
{
desc: "machine identifiers, basic capacity and allocatable",
node: &v1.Node{},
maxPods: 110,
machineInfo: &cadvisorapiv1.MachineInfo{
MachineID: "MachineID",
SystemUUID: "SystemUUID",
NumCores: 2,
MemoryCapacity: 1024,
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
NodeInfo: v1.NodeSystemInfo{
MachineID: "MachineID",
SystemUUID: "SystemUUID",
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
},
{
desc: "podsPerCore greater than zero, but less than maxPods/cores",
node: &v1.Node{},
maxPods: 10,
podsPerCore: 4,
machineInfo: &cadvisorapiv1.MachineInfo{
NumCores: 2,
MemoryCapacity: 1024,
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(8, resource.DecimalSI),
},
},
},
},
{
desc: "podsPerCore greater than maxPods/cores",
node: &v1.Node{},
maxPods: 10,
podsPerCore: 6,
machineInfo: &cadvisorapiv1.MachineInfo{
NumCores: 2,
MemoryCapacity: 1024,
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
},
},
},
},
{
desc: "allocatable should equal capacity minus reservations",
node: &v1.Node{},
maxPods: 110,
machineInfo: &cadvisorapiv1.MachineInfo{
NumCores: 2,
MemoryCapacity: 1024,
},
nodeAllocatableReservation: v1.ResourceList{
// reserve 1 unit for each resource
v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(1, resource.BinarySI),
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1999, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1023, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(109, resource.DecimalSI),
},
},
},
},
{
desc: "allocatable memory does not double-count hugepages reservations",
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
// it's impossible on any real system to reserve 1 byte,
// but we just need to test that the setter does the math
v1.ResourceHugePagesPrefix + "test": *resource.NewQuantity(1, resource.BinarySI),
},
},
},
maxPods: 110,
machineInfo: &cadvisorapiv1.MachineInfo{
NumCores: 2,
MemoryCapacity: 1024,
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourceHugePagesPrefix + "test": *resource.NewQuantity(1, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
// memory has 1-unit difference for hugepages reservation
v1.ResourceMemory: *resource.NewQuantity(1023, resource.BinarySI),
v1.ResourceHugePagesPrefix + "test": *resource.NewQuantity(1, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
},
{
desc: "negative capacity resources should be set to 0 in allocatable",
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
"negative-resource": *resource.NewQuantity(-1, resource.BinarySI),
},
},
},
maxPods: 110,
machineInfo: &cadvisorapiv1.MachineInfo{
NumCores: 2,
MemoryCapacity: 1024,
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
"negative-resource": *resource.NewQuantity(-1, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
"negative-resource": *resource.NewQuantity(0, resource.BinarySI),
},
},
},
},
{
desc: "ephemeral storage is reflected in capacity and allocatable",
node: &v1.Node{},
maxPods: 110,
machineInfo: &cadvisorapiv1.MachineInfo{
NumCores: 2,
MemoryCapacity: 1024,
},
capacity: v1.ResourceList{
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
},
{
desc: "device plugin resources are reflected in capacity and allocatable",
node: &v1.Node{},
maxPods: 110,
machineInfo: &cadvisorapiv1.MachineInfo{
NumCores: 2,
MemoryCapacity: 1024,
},
devicePluginResourceCapacity: dprc{
capacity: v1.ResourceList{
"device-plugin": *resource.NewQuantity(1, resource.BinarySI),
},
allocatable: v1.ResourceList{
"device-plugin": *resource.NewQuantity(1, resource.BinarySI),
},
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
"device-plugin": *resource.NewQuantity(1, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
"device-plugin": *resource.NewQuantity(1, resource.BinarySI),
},
},
},
},
{
desc: "inactive device plugin resources should have their capacity set to 0",
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
"inactive": *resource.NewQuantity(1, resource.BinarySI),
},
},
},
maxPods: 110,
machineInfo: &cadvisorapiv1.MachineInfo{
NumCores: 2,
MemoryCapacity: 1024,
},
devicePluginResourceCapacity: dprc{
inactive: []string{"inactive"},
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
"inactive": *resource.NewQuantity(0, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
"inactive": *resource.NewQuantity(0, resource.BinarySI),
},
},
},
},
{
desc: "extended resources not present in capacity are removed from allocatable",
node: &v1.Node{
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
"example.com/extended": *resource.NewQuantity(1, resource.BinarySI),
},
},
},
maxPods: 110,
machineInfo: &cadvisorapiv1.MachineInfo{
NumCores: 2,
MemoryCapacity: 1024,
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
},
{
desc: "on failure to get machine info, allocatable and capacity for memory and cpu are set to 0, pods to maxPods",
node: &v1.Node{},
maxPods: 110,
// podsPerCore is not accounted for when getting machine info fails
podsPerCore: 1,
machineInfoError: fmt.Errorf("foo"),
expectNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: resource.MustParse("0Gi"),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: resource.MustParse("0Gi"),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
},
{
desc: "node reboot event is recorded",
node: &v1.Node{
Status: v1.NodeStatus{
NodeInfo: v1.NodeSystemInfo{
BootID: "foo",
},
},
},
maxPods: 110,
machineInfo: &cadvisorapiv1.MachineInfo{
BootID: "bar",
NumCores: 2,
MemoryCapacity: 1024,
},
expectNode: &v1.Node{
Status: v1.NodeStatus{
NodeInfo: v1.NodeSystemInfo{
BootID: "bar",
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
expectEvents: []testEvent{
{
eventType: v1.EventTypeWarning,
event: events.NodeRebooted,
message: fmt.Sprintf("Node %s has been rebooted, boot id: %s", nodeName, "bar"),
},
},
},
}
for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
machineInfoFunc := func() (*cadvisorapiv1.MachineInfo, error) {
return tc.machineInfo, tc.machineInfoError
}
capacityFunc := func() v1.ResourceList {
return tc.capacity
}
devicePluginResourceCapacityFunc := func() (v1.ResourceList, v1.ResourceList, []string) {
c := tc.devicePluginResourceCapacity
return c.capacity, c.allocatable, c.inactive
}
nodeAllocatableReservationFunc := func() v1.ResourceList {
return tc.nodeAllocatableReservation
}
events := []testEvent{}
recordEventFunc := func(eventType, event, message string) {
events = append(events, testEvent{
eventType: eventType,
event: event,
message: message,
})
}
// construct setter
setter := MachineInfo(nodeName, tc.maxPods, tc.podsPerCore, machineInfoFunc, capacityFunc,
devicePluginResourceCapacityFunc, nodeAllocatableReservationFunc, recordEventFunc)
// call setter on node
if err := setter(tc.node); err != nil {
t.Fatalf("unexpected error: %v", err)
}
// check expected node
assert.True(t, apiequality.Semantic.DeepEqual(tc.expectNode, tc.node),
"Diff: %s", diff.ObjectDiff(tc.expectNode, tc.node))
// check expected events
require.Equal(t, len(tc.expectEvents), len(events))
for i := range tc.expectEvents {
assert.Equal(t, tc.expectEvents[i], events[i])
}
})
}
}
func TestReadyCondition(t *testing.T) {
now := time.Now()
before := now.Add(-time.Second)
@@ -792,6 +1220,7 @@ func sortNodeAddresses(addrs sortableNodeAddress) {
type testEvent struct {
eventType string
event string
message string
}
func makeReadyCondition(ready bool, message string, transition, heartbeat time.Time) *v1.NodeCondition {