Mirror of https://github.com/k3s-io/kubernetes.git
Synced 2025-07-25 04:33:26 +00:00
Merge pull request #47819 from verult/AlphaStorageStatus
Automatic merge from submit-queue (batch tested with PRs 34515, 47236, 46694, 47819, 47792)

Adding an alpha feature gate to the node statuses from local storage capacity isolation.

**What this PR does / why we need it**: The Capacity.storage node attribute should not be exposed, since it is part of an alpha feature. This PR adds a feature gate around it.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #47809

There should be a test for the new statuses in the alpha feature; it will be included in a different PR.
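Operationally, the new gate follows the usual alpha convention: off by default, enabled per kubelet via --feature-gates=LocalStorageCapacityIsolation=true. For the follow-up test mentioned above, a minimal sketch of flipping the gate programmatically, assuming the mutable DefaultFeatureGate API of this era (the Set call and its string form are how gates were toggled in tests at the time, but treat this as illustrative rather than part of this PR):

    // Hedged sketch: enable the alpha gate inside a test so the kubelet
    // populates the scratch storage statuses, and restore the default after.
    if err := utilfeature.DefaultFeatureGate.Set("LocalStorageCapacityIsolation=true"); err != nil {
        t.Fatalf("enabling LocalStorageCapacityIsolation: %v", err)
    }
    defer utilfeature.DefaultFeatureGate.Set("LocalStorageCapacityIsolation=false")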
This commit is contained in commit 2f4df7ffa6.
@@ -37,7 +37,7 @@ func CapacityFromMachineInfo(info *cadvisorapi.MachineInfo) v1.ResourceList {
 func StorageScratchCapacityFromFsInfo(info cadvisorapi2.FsInfo) v1.ResourceList {
     c := v1.ResourceList{
-        v1.ResourceStorage: *resource.NewQuantity(
+        v1.ResourceStorageScratch: *resource.NewQuantity(
             int64(info.Capacity),
             resource.BinarySI),
     }
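The rename means callers of this helper now receive the rootfs capacity keyed under the scratch resource rather than the general storage resource. A usage illustration, not part of the diff: the FsInfo value is made up, cadvisorapi2 is cadvisor's v2 info package with a uint64 Capacity field, and fmt is assumed imported in the enclosing file.

    // Illustrative 100Gi rootfs; Capacity is bytes.
    info := cadvisorapi2.FsInfo{Capacity: 100 * 1024 * 1024 * 1024}
    caps := cadvisor.StorageScratchCapacityFromFsInfo(info)
    scratch := caps[v1.ResourceStorageScratch]
    fmt.Println(scratch.String()) // prints "100Gi" (BinarySI canonical form)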
@@ -32,9 +32,11 @@ import (
     "k8s.io/apimachinery/pkg/conversion"
     "k8s.io/apimachinery/pkg/types"
     utilnet "k8s.io/apimachinery/pkg/util/net"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/kubernetes/pkg/api/v1"
     v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
     "k8s.io/kubernetes/pkg/cloudprovider"
+    "k8s.io/kubernetes/pkg/features"
     kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/pkg/kubelet/cadvisor"
     "k8s.io/kubernetes/pkg/kubelet/events"
@@ -553,9 +555,10 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
         node.Status.NodeInfo.BootID = info.BootID
     }

+    if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
         rootfs, err := kl.GetCachedRootFsInfo()
         if err != nil {
-            node.Status.Capacity[v1.ResourceStorage] = resource.MustParse("0Gi")
+            node.Status.Capacity[v1.ResourceStorageScratch] = resource.MustParse("0Gi")
         } else {
             for rName, rCap := range cadvisor.StorageScratchCapacityFromFsInfo(rootfs) {
                 node.Status.Capacity[rName] = rCap
@@ -572,6 +575,7 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
             }
         }
     }
+    }

     // Set Allocatable.
     if node.Status.Allocatable == nil {
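Because the change to setNodeStatusMachineInfo spans two hunks, the post-patch shape of the guarded block is easier to read in one piece. The reconstruction below is for readability only; the file's lines between the two hunks are not shown in this diff and are marked as elided.

    if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
        rootfs, err := kl.GetCachedRootFsInfo()
        if err != nil {
            // Could not stat the root filesystem: report zero scratch
            // capacity rather than omitting the key entirely.
            node.Status.Capacity[v1.ResourceStorageScratch] = resource.MustParse("0Gi")
        } else {
            for rName, rCap := range cadvisor.StorageScratchCapacityFromFsInfo(rootfs) {
                node.Status.Capacity[rName] = rCap
                // ... (lines between the two hunks elided in this diff) ...
            }
        }
    }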
@@ -210,13 +210,11 @@ func TestUpdateNewNodeStatus(t *testing.T) {
         Capacity: v1.ResourceList{
             v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
             v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-            v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
             v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
         },
         Allocatable: v1.ResourceList{
             v1.ResourceCPU:    *resource.NewMilliQuantity(1800, resource.DecimalSI),
             v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
-            v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
             v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
         },
         Addresses: []v1.NodeAddress{
@@ -365,13 +363,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
         Capacity: v1.ResourceList{
             v1.ResourceCPU:    *resource.NewMilliQuantity(3000, resource.DecimalSI),
             v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
-            v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
             v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
         },
         Allocatable: v1.ResourceList{
             v1.ResourceCPU:    *resource.NewMilliQuantity(2800, resource.DecimalSI),
             v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
-            v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
             v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
         },
     },
@@ -450,13 +446,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
         Capacity: v1.ResourceList{
             v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
             v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
-            v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
             v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
         },
         Allocatable: v1.ResourceList{
             v1.ResourceCPU:    *resource.NewMilliQuantity(1800, resource.DecimalSI),
             v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
-            v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
             v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
         },
         Addresses: []v1.NodeAddress{
@@ -663,7 +657,6 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
     allocatable: v1.ResourceList{
         v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
         v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
-        v1.ResourceStorage: *resource.NewQuantity(200*mb, resource.BinarySI),
     },
 }

@@ -736,13 +729,11 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
         Capacity: v1.ResourceList{
             v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
             v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-            v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
             v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
         },
         Allocatable: v1.ResourceList{
             v1.ResourceCPU:    *resource.NewMilliQuantity(1800, resource.DecimalSI),
             v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
-            v1.ResourceStorage: *resource.NewQuantity(300*mb, resource.BinarySI),
             v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
         },
         Addresses: []v1.NodeAddress{
@@ -1152,13 +1143,11 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
         Capacity: v1.ResourceList{
             v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
             v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-            v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
             v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
         },
         Allocatable: v1.ResourceList{
             v1.ResourceCPU:    *resource.NewMilliQuantity(0, resource.DecimalSI),
             v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-            v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
             v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
         },
     },
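The test hunks all make the same adjustment: with the gate off by default, the expected Capacity and Allocatable lists no longer carry a storage entry. A hedged sketch of the complementary negative assertion these tests could add, assuming the node object the tests read back (the updatedNode name is illustrative, not from this diff):

    // With LocalStorageCapacityIsolation disabled (the default), the scratch
    // resource must be absent from the reported capacity.
    if _, ok := updatedNode.Status.Capacity[v1.ResourceStorageScratch]; ok {
        t.Errorf("scratch capacity reported while LocalStorageCapacityIsolation is disabled")
    }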