Merge pull request #51490 from NickrenREN/eviction-podLocalEphemeralStorageUsage

Automatic merge from submit-queue (batch tested with PRs 51628, 51637, 51490, 51279, 51302)

Fix pod local ephemeral storage usage calculation

We were using `podDiskUsage` to calculate pod local ephemeral storage usage, which is not correct: `podDiskUsage` also includes HostPath volumes, and HostPath is considered persistent storage.
This PR fixes that by adding a dedicated `podLocalEphemeralStorageUsage` helper that counts only local ephemeral sources.
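
To make the distinction concrete, here is a minimal, hypothetical sketch (not part of the diff) of the volume classification the fix relies on; the predicate mirrors the new `localEphemeralVolumeNames` helper introduced below:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// isLocalEphemeral mirrors the filter added in this PR: GitRepo, disk-backed
// EmptyDir, ConfigMap and DownwardAPI volumes consume node-local ephemeral
// storage. HostPath does not match, because it is treated as persistent
// storage; that is exactly the case the old podDiskUsage-based accounting
// miscounted.
func isLocalEphemeral(volume v1.Volume) bool {
	return volume.GitRepo != nil ||
		(volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) ||
		volume.ConfigMap != nil || volume.DownwardAPI != nil
}

func main() {
	volumes := []v1.Volume{
		{Name: "scratch", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
		{Name: "host-data", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/var/data"}}},
	}
	for _, vol := range volumes {
		// prints: scratch: ephemeral=true, host-data: ephemeral=false
		fmt.Printf("%s: ephemeral=%v\n", vol.Name, isLocalEphemeral(vol))
	}
}
```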
**Which issue this PR fixes**: fixes #51489

**Release note**:
```release-note
NONE
```

/assign @jingxu97 @vishh
cc @ddysher
Merged by Kubernetes Submit Queue on 2017-09-01 00:11:17 -07:00, committed via GitHub.
Commit aa50c0f54c: 3 changed files with 209 additions and 15 deletions.


@@ -520,13 +520,13 @@ func (m *managerImpl) podEphemeralStorageLimitEviction(podStats statsapi.PodStat
 	} else {
 		fsStatsSet = []fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}
 	}
-	podUsage, err := podDiskUsage(podStats, pod, fsStatsSet)
+	podEphemeralUsage, err := podLocalEphemeralStorageUsage(podStats, pod, fsStatsSet)
 	if err != nil {
 		glog.Errorf("eviction manager: error getting pod disk usage %v", err)
 		return false
 	}
-	podEphemeralStorageTotalUsage.Add(podUsage[resourceDisk])
+	podEphemeralStorageTotalUsage.Add(podEphemeralUsage[resourceDisk])
 	if podEphemeralStorageTotalUsage.Cmp(podLimits[v1.ResourceEphemeralStorage]) > 0 {
 		// the total usage of pod exceeds the total size limit of containers, evict the pod
 		return m.evictPod(pod, v1.ResourceEphemeralStorage, fmt.Sprintf("pod ephemeral local storage usage exceeds the total limit of containers %v", podLimits[v1.ResourceEphemeralStorage]))
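
For orientation, the check above compares the pod's aggregate ephemeral usage against the pod-level limit, which per the comment in the diff is the total of the containers' `ephemeral-storage` limits. A small worked sketch with made-up numbers, using only the resource API:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical pod: two containers with an ephemeral-storage limit of 1Gi
	// each, so the pod-level limit checked by podEphemeralStorageLimitEviction
	// is 2Gi in total.
	podLimit := resource.MustParse("2Gi")

	// Suppose container rootfs + logs + local ephemeral volumes add up to 2.5Gi.
	usage := resource.MustParse("2560Mi")

	// The same comparison the eviction manager performs: usage > limit, so evict.
	if usage.Cmp(podLimit) > 0 {
		fmt.Printf("evict: usage %s exceeds pod limit %s\n", usage.String(), podLimit.String())
	}
}
```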


@@ -394,8 +394,8 @@ func localVolumeNames(pod *v1.Pod) []string {
 	return result
 }
 
-// podDiskUsage aggregates pod disk usage and inode consumption for the specified stats to measure.
-func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsStatsType) (v1.ResourceList, error) {
+// containerUsage aggregates container disk usage and inode consumption for the specified stats to measure.
+func containerUsage(podStats statsapi.PodStats, statsToMeasure []fsStatsType) v1.ResourceList {
 	disk := resource.Quantity{Format: resource.BinarySI}
 	inodes := resource.Quantity{Format: resource.BinarySI}
 	for _, container := range podStats.Containers {
@@ -408,18 +408,46 @@ func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsSt
 			inodes.Add(*inodeUsage(container.Logs))
 		}
 	}
-	if hasFsStatsType(statsToMeasure, fsStatsLocalVolumeSource) {
-		volumeNames := localVolumeNames(pod)
-		for _, volumeName := range volumeNames {
-			for _, volumeStats := range podStats.VolumeStats {
-				if volumeStats.Name == volumeName {
-					disk.Add(*diskUsage(&volumeStats.FsStats))
-					inodes.Add(*inodeUsage(&volumeStats.FsStats))
-					break
-				}
+	return v1.ResourceList{
+		resourceDisk:   disk,
+		resourceInodes: inodes,
+	}
+}
+
+// podLocalVolumeUsage aggregates pod local volumes disk usage and inode consumption for the specified stats to measure.
+func podLocalVolumeUsage(volumeNames []string, podStats statsapi.PodStats) v1.ResourceList {
+	disk := resource.Quantity{Format: resource.BinarySI}
+	inodes := resource.Quantity{Format: resource.BinarySI}
+	for _, volumeName := range volumeNames {
+		for _, volumeStats := range podStats.VolumeStats {
+			if volumeStats.Name == volumeName {
+				disk.Add(*diskUsage(&volumeStats.FsStats))
+				inodes.Add(*inodeUsage(&volumeStats.FsStats))
+				break
+			}
 		}
 	}
+	return v1.ResourceList{
+		resourceDisk:   disk,
+		resourceInodes: inodes,
+	}
+}
+
+// podDiskUsage aggregates pod disk usage and inode consumption for the specified stats to measure.
+func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsStatsType) (v1.ResourceList, error) {
+	disk := resource.Quantity{Format: resource.BinarySI}
+	inodes := resource.Quantity{Format: resource.BinarySI}
+	containerUsageList := containerUsage(podStats, statsToMeasure)
+	disk.Add(containerUsageList[resourceDisk])
+	inodes.Add(containerUsageList[resourceInodes])
+	if hasFsStatsType(statsToMeasure, fsStatsLocalVolumeSource) {
+		volumeNames := localVolumeNames(pod)
+		podLocalVolumeUsageList := podLocalVolumeUsage(volumeNames, podStats)
+		disk.Add(podLocalVolumeUsageList[resourceDisk])
+		inodes.Add(podLocalVolumeUsageList[resourceInodes])
+	}
 	return v1.ResourceList{
 		resourceDisk:   disk,
 		resourceInodes: inodes,
@@ -444,6 +472,40 @@ func podMemoryUsage(podStats statsapi.PodStats) (v1.ResourceList, error) {
 	}, nil
 }
 
+// localEphemeralVolumeNames returns the set of ephemeral volumes for the pod that are local
+func localEphemeralVolumeNames(pod *v1.Pod) []string {
+	result := []string{}
+	for _, volume := range pod.Spec.Volumes {
+		if volume.GitRepo != nil ||
+			(volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) ||
+			volume.ConfigMap != nil || volume.DownwardAPI != nil {
+			result = append(result, volume.Name)
+		}
+	}
+	return result
+}
+
+// podLocalEphemeralStorageUsage aggregates pod local ephemeral storage usage and inode consumption for the specified stats to measure.
+func podLocalEphemeralStorageUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsStatsType) (v1.ResourceList, error) {
+	disk := resource.Quantity{Format: resource.BinarySI}
+	inodes := resource.Quantity{Format: resource.BinarySI}
+	containerUsageList := containerUsage(podStats, statsToMeasure)
+	disk.Add(containerUsageList[resourceDisk])
+	inodes.Add(containerUsageList[resourceInodes])
+	if hasFsStatsType(statsToMeasure, fsStatsLocalVolumeSource) {
+		volumeNames := localEphemeralVolumeNames(pod)
+		podLocalVolumeUsageList := podLocalVolumeUsage(volumeNames, podStats)
+		disk.Add(podLocalVolumeUsageList[resourceDisk])
+		inodes.Add(podLocalVolumeUsageList[resourceInodes])
+	}
+	return v1.ResourceList{
+		resourceDisk:   disk,
+		resourceInodes: inodes,
+	}, nil
+}
+
 // formatThreshold formats a threshold for logging.
 func formatThreshold(threshold evictionapi.Threshold) string {
 	return fmt.Sprintf("threshold(signal=%v, operator=%v, value=%v, gracePeriod=%v)", threshold.Signal, threshold.Operator, evictionapi.ThresholdValue(threshold.Value), threshold.GracePeriod)
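
The new helpers are unexported, so any demonstration has to live inside the eviction package. Below is a hedged test-style sketch (not from the PR; the stats values and the statsapi import path are assumptions) showing that a HostPath volume no longer contributes to the ephemeral total while an EmptyDir does:

```go
package eviction

import (
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" // assumed import path
)

// TestPodLocalEphemeralStorageUsageSketch is illustrative only: the pod mounts
// one emptyDir and one hostPath volume, each reporting 100Mi of usage, but only
// the emptyDir should be charged as local ephemeral storage.
func TestPodLocalEphemeralStorageUsageSketch(t *testing.T) {
	used := uint64(100 * 1024 * 1024) // pretend each volume holds 100Mi
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "sketch-pod"},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{Name: "scratch", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
				{Name: "host", VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/var/data"}}},
			},
		},
	}
	podStats := statsapi.PodStats{
		VolumeStats: []statsapi.VolumeStats{
			{Name: "scratch", FsStats: statsapi.FsStats{UsedBytes: &used}},
			{Name: "host", FsStats: statsapi.FsStats{UsedBytes: &used}},
		},
	}
	usage, err := podLocalEphemeralStorageUsage(podStats, pod, []fsStatsType{fsStatsLocalVolumeSource})
	if err != nil {
		t.Fatal(err)
	}
	disk := usage[resourceDisk]
	// Only the emptyDir counts; the old podDiskUsage would have returned 200Mi here.
	if disk.Value() != int64(used) {
		t.Errorf("expected %d bytes of ephemeral usage, got %d", used, disk.Value())
	}
}
```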


@@ -36,7 +36,7 @@ type podEvictSpec struct {
}
const (
-totalEvict = 4
+totalEvict = 7
)
// Eviction Policy is described here:
@@ -47,6 +47,9 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
f := framework.NewDefaultFramework("localstorage-eviction-test")
emptyDirVolumeName := "volume-emptydir-pod"
gitRepoVolumeName := "volume-gitrepo-pod"
configMapVolumeName := "volume-configmap-pod"
downwardAPIVolumeName := "volume-downwardapi-pod"
podTestSpecs := []podEvictSpec{
{evicted: true, // This pod should be evicted because of emptyDir (default storage type) usage violation
pod: v1.Pod{
@@ -226,10 +229,139 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
},
},
},
{evicted: true, // This pod should be evicted because of pod ephemeral storage usage violation
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "downward-api-container-hog-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "downward-api-container-hog-pod",
Command: []string{
"sh",
"-c",
"sleep 5; dd if=/dev/urandom of=target-file of=/cache/target-file bs=50000 count=1; while true; do sleep 5; done",
},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(
int64(40000),
resource.BinarySI),
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: downwardAPIVolumeName,
MountPath: "/cache",
},
},
},
},
Volumes: []v1.Volume{
{
Name: downwardAPIVolumeName,
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{},
},
},
},
},
},
},
{evicted: true, // This pod should be evicted because of pod ephemeral storage usage violation
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "configmap-container-hog-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "configmap-container-hog-pod",
Command: []string{
"sh",
"-c",
"sleep 5; dd if=/dev/urandom of=target-file of=/cache/target-file bs=50000 count=1; while true; do sleep 5; done",
},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(
int64(40000),
resource.BinarySI),
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: configMapVolumeName,
MountPath: "/cache",
},
},
},
},
Volumes: []v1.Volume{
{
Name: configMapVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: "my-cfgmap",
},
},
},
},
},
},
},
},
{evicted: true, // This pod should be evicted because of pod ephemeral storage usage violation
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "gitrepo-container-hog-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "gitrepo-container-hog-pod",
Command: []string{
"sh",
"-c",
"sleep 5; dd if=/dev/urandom of=target-file of=/cache/target-file bs=50000 count=1; while true; do sleep 5; done",
},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(
int64(40000),
resource.BinarySI),
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: gitRepoVolumeName,
MountPath: "/cache",
},
},
},
},
Volumes: []v1.Volume{
{
Name: gitRepoVolumeName,
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: "my-repo",
},
},
},
},
},
},
},
}
evictionTestTimeout := 10 * time.Minute
testCondition := "EmptyDir/ContainerContainerEphemeralStorage usage limit violation"
testCondition := "PodLocalEphemeralStorage/ContainerLocalEphemeralStorage usage limit violation"
Context(fmt.Sprintf("EmptyDirEviction when we run containers that should cause %s", testCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
initialConfig.FeatureGates += ", LocalStorageCapacityIsolation=true"
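// LocalStorageCapacityIsolation is an alpha feature gate at this point, so the
// test has to opt in explicitly; on a standalone kubelet the equivalent switch
// is the command-line flag --feature-gates=LocalStorageCapacityIsolation=true.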