Fixing node statuses related to local storage capacity isolation.

- Wrapped all node statuses from local storage capacity isolation in a check on the LocalStorageCapacityIsolation alpha feature gate; until the gate is enabled, no storage statuses should be reported.
- Replaced every "storage" status with "storage.kubernetes.io/scratch"; the bare "storage" name should never be exposed as a status.
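
For orientation, a minimal sketch of the pattern this commit applies. The feature gate and the v1.ResourceStorageScratch name are taken from the diff below; the reportScratchCapacity helper is hypothetical, not part of this commit:

	package kubelet // illustrative; the commit edits existing kubelet files instead

	import (
		cadvisorapi2 "github.com/google/cadvisor/info/v2"
		"k8s.io/apimachinery/pkg/api/resource"
		utilfeature "k8s.io/apiserver/pkg/util/feature"
		"k8s.io/kubernetes/pkg/api/v1"
		"k8s.io/kubernetes/pkg/features"
	)

	// reportScratchCapacity shows the gating pattern: scratch capacity is
	// surfaced only while the LocalStorageCapacityIsolation alpha gate is on,
	// and only under the "storage.kubernetes.io/scratch" resource name.
	// It assumes node.Status.Capacity has already been initialized.
	func reportScratchCapacity(node *v1.Node, rootfs cadvisorapi2.FsInfo) {
		if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
			return // gate off: no storage statuses are exposed at all
		}
		node.Status.Capacity[v1.ResourceStorageScratch] =
			*resource.NewQuantity(int64(rootfs.Capacity), resource.BinarySI)
	}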
Cheng Xing 2017-06-20 15:53:54 -07:00
parent 9be331d5ea
commit de3bf36b61
3 changed files with 50 additions and 57 deletions


@@ -37,7 +37,7 @@ func CapacityFromMachineInfo(info *cadvisorapi.MachineInfo) v1.ResourceList {
 func StorageScratchCapacityFromFsInfo(info cadvisorapi2.FsInfo) v1.ResourceList {
 	c := v1.ResourceList{
-		v1.ResourceStorage: *resource.NewQuantity(
+		v1.ResourceStorageScratch: *resource.NewQuantity(
 			int64(info.Capacity),
 			resource.BinarySI),
 	}
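
A quick usage sketch of the renamed helper (the 500Mi figure is a sample value mirroring the tests below; assumes a node whose Capacity list is already initialized):

	// Fold scratch capacity from cadvisor's root filesystem info into the
	// node's capacity list, as setNodeStatusMachineInfo does in the next file.
	rootfs := cadvisorapi2.FsInfo{Capacity: 500 * 1024 * 1024} // sample: 500Mi
	for rName, rCap := range cadvisor.StorageScratchCapacityFromFsInfo(rootfs) {
		node.Status.Capacity[rName] = rCap
	}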


@@ -32,9 +32,11 @@ import (
 	"k8s.io/apimachinery/pkg/conversion"
 	"k8s.io/apimachinery/pkg/types"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/api/v1"
 	v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
 	"k8s.io/kubernetes/pkg/cloudprovider"
+	"k8s.io/kubernetes/pkg/features"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/events"
@@ -553,9 +555,10 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
 		node.Status.NodeInfo.BootID = info.BootID
 	}
+	if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
 		rootfs, err := kl.GetCachedRootFsInfo()
 		if err != nil {
-			node.Status.Capacity[v1.ResourceStorage] = resource.MustParse("0Gi")
+			node.Status.Capacity[v1.ResourceStorageScratch] = resource.MustParse("0Gi")
 		} else {
 			for rName, rCap := range cadvisor.StorageScratchCapacityFromFsInfo(rootfs) {
 				node.Status.Capacity[rName] = rCap
@@ -572,6 +575,7 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
 			}
 		}
 	}
+	}
 	// Set Allocatable.
 	if node.Status.Allocatable == nil {
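
With the gate enabled, the capacity list carries the scratch entry under its namespaced name; an illustration with sample quantities (not taken from this commit):

	// Illustrative only: a gated node's capacity now looks like this.
	capacity := v1.ResourceList{
		v1.ResourceCPU:            *resource.NewMilliQuantity(2000, resource.DecimalSI),
		v1.ResourceMemory:         *resource.NewQuantity(10e9, resource.BinarySI),
		v1.ResourceStorageScratch: resource.MustParse("500Mi"), // "storage.kubernetes.io/scratch"
	}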


@@ -210,13 +210,11 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 			Capacity: v1.ResourceList{
 				v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
 				v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-				v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
 				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
 			},
 			Allocatable: v1.ResourceList{
 				v1.ResourceCPU:    *resource.NewMilliQuantity(1800, resource.DecimalSI),
 				v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
-				v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
 				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
 			},
 			Addresses: []v1.NodeAddress{
@@ -365,13 +363,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 			Capacity: v1.ResourceList{
 				v1.ResourceCPU:    *resource.NewMilliQuantity(3000, resource.DecimalSI),
 				v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
-				v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
 				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
 			},
 			Allocatable: v1.ResourceList{
 				v1.ResourceCPU:    *resource.NewMilliQuantity(2800, resource.DecimalSI),
 				v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
-				v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
 				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
 			},
 		},
@@ -450,13 +446,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 			Capacity: v1.ResourceList{
 				v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
 				v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
-				v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
 				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
 			},
 			Allocatable: v1.ResourceList{
 				v1.ResourceCPU:    *resource.NewMilliQuantity(1800, resource.DecimalSI),
 				v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
-				v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
 				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
 			},
 			Addresses: []v1.NodeAddress{
@@ -663,7 +657,6 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 		allocatable: v1.ResourceList{
 			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
 			v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
-			v1.ResourceStorage: *resource.NewQuantity(200*mb, resource.BinarySI),
 		},
 	}
@@ -736,13 +729,11 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 			Capacity: v1.ResourceList{
 				v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
 				v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-				v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
 				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
 			},
 			Allocatable: v1.ResourceList{
 				v1.ResourceCPU:    *resource.NewMilliQuantity(1800, resource.DecimalSI),
 				v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
-				v1.ResourceStorage: *resource.NewQuantity(300*mb, resource.BinarySI),
 				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
 			},
 			Addresses: []v1.NodeAddress{
@@ -1152,13 +1143,11 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
 			Capacity: v1.ResourceList{
 				v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
 				v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-				v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
 				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
 			},
 			Allocatable: v1.ResourceList{
 				v1.ResourceCPU:    *resource.NewMilliQuantity(0, resource.DecimalSI),
 				v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-				v1.ResourceStorage: *resource.NewQuantity(500*mb, resource.BinarySI),
 				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
 			},
 		},