mirror of https://github.com/k3s-io/kubernetes.git
modify fsStats to fsStatsType to avoid confusion with cadvisor types
parent 9604b47c13
commit 0de1e62b30
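For context (not part of the commit): the change renames the kubelet eviction helper type fsStats to fsStatsType, and hasFsStats to hasFsStatsType, so the names no longer read like cAdvisor's filesystem stats types. Below is a minimal, self-contained sketch of the renamed pieces as they stand after this commit, condensed from the diff that follows; the package main wrapper and the example in main are illustrative assumptions, not code from the commit.

package main

import "fmt"

// fsStatsType defines the types of filesystem stats to collect.
type fsStatsType string

const (
    // fsStatsLocalVolumeSource identifies stats for pod local volume sources.
    fsStatsLocalVolumeSource fsStatsType = "localVolumeSource"
    // fsStatsLogs identifies stats for pod logs.
    fsStatsLogs fsStatsType = "logs"
    // fsStatsRoot identifies stats for pod container writable layers.
    fsStatsRoot fsStatsType = "root"
)

// hasFsStatsType reports whether item appears in inputs.
func hasFsStatsType(inputs []fsStatsType, item fsStatsType) bool {
    for _, input := range inputs {
        if input == item {
            return true
        }
    }
    return false
}

func main() {
    // With a dedicated imagefs, the nodefs rank func only measures logs and
    // local volumes (see buildResourceToRankFunc in the diff below).
    nodeFs := []fsStatsType{fsStatsLogs, fsStatsLocalVolumeSource}
    fmt.Println(hasFsStatsType(nodeFs, fsStatsRoot)) // false
    fmt.Println(hasFsStatsType(nodeFs, fsStatsLogs)) // true
}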
@@ -255,6 +255,7 @@ func memoryUsage(memStats *statsapi.MemoryStats) *resource.Quantity {
 }

 // localVolumeNames returns the set of volumes for the pod that are local
+// TODO: sumamry API should report what volumes consume local storage rather than hard-code here.
 func localVolumeNames(pod *api.Pod) []string {
     result := []string{}
     for _, volume := range pod.Spec.Volumes {
@@ -269,22 +270,23 @@ func localVolumeNames(pod *api.Pod) []string {
 }

 // podDiskUsage aggregates pod disk usage for the specified stats to measure.
-func podDiskUsage(podStats statsapi.PodStats, pod *api.Pod, statsToMeasure []fsStats) (api.ResourceList, error) {
+func podDiskUsage(podStats statsapi.PodStats, pod *api.Pod, statsToMeasure []fsStatsType) (api.ResourceList, error) {
     disk := resource.Quantity{Format: resource.BinarySI}
     for _, container := range podStats.Containers {
-        if hasFsStats(statsToMeasure, fsStatsRoot) {
+        if hasFsStatsType(statsToMeasure, fsStatsRoot) {
             disk.Add(*diskUsage(container.Rootfs))
         }
-        if hasFsStats(statsToMeasure, fsStatsLogs) {
+        if hasFsStatsType(statsToMeasure, fsStatsLogs) {
             disk.Add(*diskUsage(container.Logs))
         }
     }
-    if hasFsStats(statsToMeasure, fsStatsLocalVolumeSource) {
+    if hasFsStatsType(statsToMeasure, fsStatsLocalVolumeSource) {
         volumeNames := localVolumeNames(pod)
         for _, volumeName := range volumeNames {
             for _, volumeStats := range podStats.VolumeStats {
                 if volumeStats.Name == volumeName {
                     disk.Add(*diskUsage(&volumeStats.FsStats))
+                    break
                 }
             }
         }
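Aside (not part of the diff): a simplified, self-contained sketch of the aggregation pattern podDiskUsage uses above. resource.Quantity, the stats API types, and the local-volume branch are replaced here with plain integers and a stand-in struct; apart from fsStatsType, hasFsStatsType, and podDiskUsage, the names are illustrative.

package main

import "fmt"

type fsStatsType string

const (
    fsStatsLogs fsStatsType = "logs"
    fsStatsRoot fsStatsType = "root"
)

// containerDisk is a stand-in for the per-container stats used above: bytes
// used by the writable layer (rootfs) and by the container's logs.
type containerDisk struct {
    rootfsBytes uint64
    logsBytes   uint64
}

func hasFsStatsType(inputs []fsStatsType, item fsStatsType) bool {
    for _, input := range inputs {
        if input == item {
            return true
        }
    }
    return false
}

// podDiskUsage mirrors the shape of the helper above: only the filesystem
// categories listed in statsToMeasure contribute to the pod's total.
func podDiskUsage(containers []containerDisk, statsToMeasure []fsStatsType) uint64 {
    var total uint64
    for _, c := range containers {
        if hasFsStatsType(statsToMeasure, fsStatsRoot) {
            total += c.rootfsBytes
        }
        if hasFsStatsType(statsToMeasure, fsStatsLogs) {
            total += c.logsBytes
        }
    }
    return total
}

func main() {
    containers := []containerDisk{{rootfsBytes: 100, logsBytes: 10}, {rootfsBytes: 50, logsBytes: 5}}
    fmt.Println(podDiskUsage(containers, []fsStatsType{fsStatsLogs}))              // 15
    fmt.Println(podDiskUsage(containers, []fsStatsType{fsStatsRoot, fsStatsLogs})) // 165
}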
@@ -451,7 +453,7 @@ func memory(stats statsFunc) cmpFunc {
 }

 // disk compares pods by largest consumer of disk relative to request.
-func disk(stats statsFunc, fsStatsToMeasure []fsStats) cmpFunc {
+func disk(stats statsFunc, fsStatsToMeasure []fsStatsType) cmpFunc {
     return func(p1, p2 *api.Pod) int {
         p1Stats, found := stats(p1)
         // if we have no usage stats for p1, we want p2 first
@@ -489,7 +491,7 @@ func rankMemoryPressure(pods []*api.Pod, stats statsFunc) {
 }

 // rankDiskPressureFunc returns a rankFunc that measures the specified fs stats.
-func rankDiskPressureFunc(fsStatsToMeasure []fsStats) rankFunc {
+func rankDiskPressureFunc(fsStatsToMeasure []fsStatsType) rankFunc {
     return func(pods []*api.Pod, stats statsFunc) {
         orderedBy(qosComparator, disk(stats, fsStatsToMeasure)).Sort(pods)
     }
@@ -622,8 +624,8 @@ func nodeConditionsObservedSince(observedAt nodeConditionsObservedAt, period tim
     return results
 }

-// hgasFsStats returns true if the fsStat is in the input list
-func hasFsStats(inputs []fsStats, item fsStats) bool {
+// hasFsStatsType returns true if the fsStat is in the input list
+func hasFsStatsType(inputs []fsStatsType, item fsStatsType) bool {
     for _, input := range inputs {
         if input == item {
             return true
@@ -676,7 +678,7 @@ func isSoftEviction(thresholds []Threshold, starvedResource api.ResourceName) bo
     return true
 }

-// buildresourceToRankFunc returns ranking functions associated with resources
+// buildResourceToRankFunc returns ranking functions associated with resources
 func buildResourceToRankFunc(withImageFs bool) map[api.ResourceName]rankFunc {
     resourceToRankFunc := map[api.ResourceName]rankFunc{
         api.ResourceMemory: rankMemoryPressure,
@@ -684,12 +686,12 @@ func buildResourceToRankFunc(withImageFs bool) map[api.ResourceName]rankFunc {
     // usage of an imagefs is optional
     if withImageFs {
         // with an imagefs, nodefs pod rank func for eviction only includes logs and local volumes
-        resourceToRankFunc[resourceNodeFs] = rankDiskPressureFunc([]fsStats{fsStatsLogs, fsStatsLocalVolumeSource})
+        resourceToRankFunc[resourceNodeFs] = rankDiskPressureFunc([]fsStatsType{fsStatsLogs, fsStatsLocalVolumeSource})
         // with an imagefs, imagefs pod rank func for eviction only includes rootfs
-        resourceToRankFunc[resourceImageFs] = rankDiskPressureFunc([]fsStats{fsStatsRoot})
+        resourceToRankFunc[resourceImageFs] = rankDiskPressureFunc([]fsStatsType{fsStatsRoot})
     } else {
         // without an imagefs, nodefs pod rank func for eviction looks at all fs stats
-        resourceToRankFunc[resourceNodeFs] = rankDiskPressureFunc([]fsStats{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource})
+        resourceToRankFunc[resourceNodeFs] = rankDiskPressureFunc([]fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource})
     }
     return resourceToRankFunc
 }
@@ -311,7 +311,7 @@ func TestOrderedByDisk(t *testing.T) {
         return result, found
     }
     pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6}
-    orderedBy(disk(statsFn, []fsStats{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource})).Sort(pods)
+    orderedBy(disk(statsFn, []fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource})).Sort(pods)
     expected := []*api.Pod{pod6, pod5, pod4, pod3, pod2, pod1}
     for i := range expected {
         if pods[i] != expected[i] {
@@ -377,7 +377,7 @@ func TestOrderedByQoSDisk(t *testing.T) {
         return result, found
     }
     pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6}
-    orderedBy(qosComparator, disk(statsFn, []fsStats{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource})).Sort(pods)
+    orderedBy(qosComparator, disk(statsFn, []fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource})).Sort(pods)
     expected := []*api.Pod{pod2, pod1, pod4, pod3, pod6, pod5}
     for i := range expected {
         if pods[i] != expected[i] {
@@ -36,16 +36,16 @@ const (
     SignalImageFsAvailable Signal = "imagefs.available"
 )

-// fsStats defines the types of filesystem stats to collect.
-type fsStats string
+// fsStatsType defines the types of filesystem stats to collect.
+type fsStatsType string

 const (
     // fsStatsLocalVolumeSource identifies stats for pod local volume sources.
-    fsStatsLocalVolumeSource fsStats = "localVolumeSource"
+    fsStatsLocalVolumeSource fsStatsType = "localVolumeSource"
     // fsStatsLogs identifies stats for pod logs.
-    fsStatsLogs fsStats = "logs"
+    fsStatsLogs fsStatsType = "logs"
     // fsStatsRoot identifies stats for pod container writable layers.
-    fsStatsRoot fsStats = "root"
+    fsStatsRoot fsStatsType = "root"
 )

 // ThresholdOperator is the operator used to express a Threshold.
@@ -2965,7 +2965,7 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
     }

     for _, test := range tests {
-        fits, err := CheckNodeDiskPressurePredicate(test.pod, test.nodeInfo)
+        fits, err := CheckNodeDiskPressurePredicate(test.pod, nil, test.nodeInfo)
         if fits != test.fits {
             t.Errorf("%s: expected %v got %v", test.name, test.fits, fits)
         }