Update golang scientific notation using hack/update-gofmt.sh

David Zhu 2019-09-12 18:05:49 -07:00
parent 4dd1e3fa43
commit f15cc60957
4 changed files with 78 additions and 78 deletions
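The change itself is mechanical: starting with Go 1.13, gofmt canonicalizes the exponent letter in numeric literals to lower case, so running hack/update-gofmt.sh (which runs gofmt over the repository's Go files) rewrites every E exponent to e without changing any value. A minimal sketch of the behavior; the file and constant names below are illustrative, not taken from this commit:

// exponent_example.go (hypothetical file, not part of this commit)
package example

// Written with an upper-case exponent, as the old test code was:
const memoryCapacityBefore = 10E9

// After running gofmt from Go 1.13+, the literal is canonicalized to a
// lower-case exponent; the numeric value (10 billion) is unchanged:
const memoryCapacityAfter = 10e9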

View File

@@ -194,12 +194,12 @@ func TestUpdateNewNodeStatus(t *testing.T) {
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
@@ -215,7 +215,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
-MemoryCapacity: 10E9, // 10G
+MemoryCapacity: 10e9, // 10G
}
kubelet.machineInfo = machineInfo
@@ -271,13 +271,13 @@ func TestUpdateNewNodeStatus(t *testing.T) {
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(9900e6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
@@ -324,11 +324,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
@@ -377,12 +377,12 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(19900e6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
},
@@ -393,7 +393,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
-MemoryCapacity: 20E9,
+MemoryCapacity: 20e9,
}
kubelet.machineInfo = machineInfo
@@ -449,13 +449,13 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(19900e6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
@@ -546,11 +546,11 @@ func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
},
}
@@ -577,13 +577,13 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
-v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
+v1.ResourceEphemeralStorage: *resource.NewQuantity(10e9, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
+v1.ResourceEphemeralStorage: *resource.NewQuantity(20e9, resource.BinarySI),
},
}
// Since this test retroactively overrides the stub container manager,
@@ -599,7 +599,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
-MemoryCapacity: 10E9,
+MemoryCapacity: 10e9,
}
kubelet.machineInfo = machineInfo
@@ -648,15 +648,15 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
-v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
+v1.ResourceEphemeralStorage: *resource.NewQuantity(20e9, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(9900e6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
-v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceEphemeralStorage: *resource.NewQuantity(10e9, resource.BinarySI),
},
Addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
@@ -800,11 +800,11 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
@@ -821,7 +821,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
-MemoryCapacity: 20E9,
+MemoryCapacity: 20e9,
}
kubelet.machineInfo = machineInfo
@@ -878,13 +878,13 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(19900e6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
@@ -967,7 +967,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
// Update node status again when something is changed.
// Report node status even if it is still within the duration of nodeStatusReportFrequency.
clock.Step(10 * time.Second)
-var newMemoryCapacity int64 = 40E9
+var newMemoryCapacity int64 = 40e9
kubelet.machineInfo.MemoryCapacity = uint64(newMemoryCapacity)
assert.NoError(t, kubelet.updateNodeStatus())
@@ -1458,7 +1458,7 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
}
@@ -1474,7 +1474,7 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
-MemoryCapacity: 10E9, // 10G
+MemoryCapacity: 10e9, // 10G
}
kubelet.machineInfo = machineInfo
@@ -1484,13 +1484,13 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
},
@@ -1751,12 +1751,12 @@ func TestReconcileExtendedResource(t *testing.T) {
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
@@ -1765,12 +1765,12 @@ func TestReconcileExtendedResource(t *testing.T) {
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
@@ -1784,12 +1784,12 @@ func TestReconcileExtendedResource(t *testing.T) {
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
@@ -1798,12 +1798,12 @@ func TestReconcileExtendedResource(t *testing.T) {
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
@@ -1817,14 +1817,14 @@ func TestReconcileExtendedResource(t *testing.T) {
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(2), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(10), resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(2), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(10), resource.DecimalSI),
@@ -1835,14 +1835,14 @@ func TestReconcileExtendedResource(t *testing.T) {
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(0), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(0), resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(0), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(0), resource.DecimalSI),

View File

@@ -953,7 +953,7 @@ func TestReadyCondition(t *testing.T) {
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
-v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(100, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},

View File

@@ -72,7 +72,7 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
matchV1alpha1Expectations := gstruct.MatchAllKeys(gstruct.Keys{
"scrape_error": gstruct.Ignore(),
"node_cpu_usage_seconds_total": gstruct.MatchAllElements(nodeId, gstruct.Elements{
"": boundedSample(1, 1E6),
"": boundedSample(1, 1e6),
}),
"node_memory_working_set_bytes": gstruct.MatchAllElements(nodeId, gstruct.Elements{
"": boundedSample(10*volume.Mb, memoryLimit),

View File

@@ -89,8 +89,8 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"StartTime": recent(maxStartAge),
"CPU": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"UsageNanoCores": bounded(10000, 2E9),
"UsageCoreNanoSeconds": bounded(10000000, 1E15),
"UsageNanoCores": bounded(10000, 2e9),
"UsageCoreNanoSeconds": bounded(10000000, 1e15),
}),
"Memory": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
@@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"WorkingSetBytes": bounded(1*volume.Mb, memoryLimit),
// this now returns /sys/fs/cgroup/memory.stat total_rss
"RSSBytes": bounded(1*volume.Mb, memoryLimit),
"PageFaults": bounded(1000, 1E9),
"PageFaults": bounded(1000, 1e9),
"MajorPageFaults": bounded(0, 100000),
}),
"Accelerators": gomega.BeEmpty(),
@@ -161,7 +161,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"UsageBytes": bounded(100*volume.Kb, memoryLimit),
"WorkingSetBytes": bounded(100*volume.Kb, memoryLimit),
"RSSBytes": bounded(100*volume.Kb, memoryLimit),
"PageFaults": bounded(1000, 1E9),
"PageFaults": bounded(1000, 1e9),
"MajorPageFaults": bounded(0, 100000),
})
systemContainers["misc"] = miscContExpectations
@@ -176,8 +176,8 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"StartTime": recent(maxStartAge),
"CPU": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"UsageNanoCores": bounded(10000, 1E9),
"UsageCoreNanoSeconds": bounded(10000000, 1E11),
"UsageNanoCores": bounded(10000, 1e9),
"UsageCoreNanoSeconds": bounded(10000000, 1e11),
}),
"Memory": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
@@ -194,18 +194,18 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
"UsedBytes": bounded(volume.Kb, 10*volume.Mb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),
}),
"Logs": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
"UsedBytes": bounded(volume.Kb, 10*volume.Mb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),
}),
"UserDefinedMetrics": gomega.BeEmpty(),
}),
@@ -223,8 +223,8 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
}),
"CPU": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"UsageNanoCores": bounded(10000, 1E9),
"UsageCoreNanoSeconds": bounded(10000000, 1E11),
"UsageNanoCores": bounded(10000, 1e9),
"UsageCoreNanoSeconds": bounded(10000000, 1e11),
}),
"Memory": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
@@ -244,9 +244,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
"UsedBytes": bounded(volume.Kb, 1*volume.Mb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),
}),
}),
}),
@@ -255,9 +255,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
"UsedBytes": bounded(volume.Kb, 21*volume.Mb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),
}),
})
@@ -268,8 +268,8 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"SystemContainers": gstruct.MatchAllElements(summaryObjectID, systemContainers),
"CPU": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"UsageNanoCores": bounded(100E3, 2E9),
"UsageCoreNanoSeconds": bounded(1E9, 1E15),
"UsageNanoCores": bounded(100e3, 2e9),
"UsageCoreNanoSeconds": bounded(1e9, 1e15),
}),
"Memory": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
@@ -278,7 +278,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"WorkingSetBytes": bounded(10*volume.Mb, memoryLimit),
// this now returns /sys/fs/cgroup/memory.stat total_rss
"RSSBytes": bounded(1*volume.Kb, memoryLimit),
"PageFaults": bounded(1000, 1E9),
"PageFaults": bounded(1000, 1e9),
"MajorPageFaults": bounded(0, 100000),
}),
// TODO(#28407): Handle non-eth0 network interface names.
@@ -299,9 +299,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"CapacityBytes": fsCapacityBounds,
// we assume we are not running tests on machines < 10tb of disk
"UsedBytes": bounded(volume.Kb, 10*volume.Tb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),
}),
"Runtime": ptrMatchAllFields(gstruct.Fields{
"ImageFs": ptrMatchAllFields(gstruct.Fields{
@@ -310,15 +310,15 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"CapacityBytes": fsCapacityBounds,
// we assume we are not running tests on machines < 10tb of disk
"UsedBytes": bounded(volume.Kb, 10*volume.Tb),
"InodesFree": bounded(1E4, 1E8),
"Inodes": bounded(1E4, 1E8),
"InodesUsed": bounded(0, 1E8),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),
}),
}),
"Rlimit": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"MaxPID": bounded(0, 1E8),
"NumOfRunningProcesses": bounded(0, 1E8),
"MaxPID": bounded(0, 1e8),
"NumOfRunningProcesses": bounded(0, 1e8),
}),
}),
// Ignore extra pods since the tests run in parallel.