Merge pull request #47327 from xingzhou/remove-error

Automatic merge from submit-queue (batch tested with PRs 47327, 48194)

Remove useless error

While working on https://github.com/kubernetes/kubernetes/pull/44898, I found a useless return value: `PodRequestsAndLimits` never returns a non-nil error, so this PR drops the error from its signature and removes the corresponding error handling at its call sites.
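
For context, a minimal sketch (not part of this commit) of what a caller looks like after the change. The helper `printPodResources` is hypothetical, and the import paths are assumptions based on the tree layout at the time of this commit:

```go
package example

import (
	"fmt"

	// Assumed import paths for the in-tree packages at the time of this commit.
	"k8s.io/kubernetes/pkg/api/v1"
	podresource "k8s.io/kubernetes/pkg/api/v1/resource"
)

// printPodResources is a hypothetical caller showing the simplified API:
// PodRequestsAndLimits now returns only the summed requests and limits,
// so the error check that callers previously carried is gone.
func printPodResources(pod *v1.Pod) {
	reqs, limits := podresource.PodRequestsAndLimits(pod)
	if cpu, found := reqs[v1.ResourceCPU]; found {
		fmt.Printf("summed CPU request: %dm\n", cpu.MilliValue())
	}
	fmt.Printf("requests: %v, limits: %v\n", reqs, limits)
}
```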

**Release note**:
```
None
```
Kubernetes Submit Queue 2017-07-05 14:21:36 -07:00 committed by GitHub
commit 67da2da32f
3 changed files with 4 additions and 14 deletions

View File

```diff
@@ -27,7 +27,7 @@ import (
 // PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
 // containers of the pod.
-func PodRequestsAndLimits(pod *v1.Pod) (reqs map[v1.ResourceName]resource.Quantity, limits map[v1.ResourceName]resource.Quantity, err error) {
+func PodRequestsAndLimits(pod *v1.Pod) (reqs map[v1.ResourceName]resource.Quantity, limits map[v1.ResourceName]resource.Quantity) {
 	reqs, limits = map[v1.ResourceName]resource.Quantity{}, map[v1.ResourceName]resource.Quantity{}
 	for _, container := range pod.Spec.Containers {
 		for name, quantity := range container.Resources.Requests {
```

View File

```diff
@@ -86,10 +86,7 @@ func MilliCPUToShares(milliCPU int64) int64 {
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
 func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
 	// sum requests and limits.
-	reqs, limits, err := resource.PodRequestsAndLimits(pod)
-	if err != nil {
-		return &ResourceConfig{}
-	}
+	reqs, limits := resource.PodRequestsAndLimits(pod)
 	cpuRequests := int64(0)
 	cpuLimits := int64(0)
```

View File

```diff
@@ -148,10 +148,7 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]
 			// we only care about the burstable qos tier
 			continue
 		}
-		req, _, err := resource.PodRequestsAndLimits(pod)
-		if err != nil {
-			return err
-		}
+		req, _ := resource.PodRequestsAndLimits(pod)
 		if request, found := req[v1.ResourceCPU]; found {
 			burstablePodCPURequest += request.MilliValue()
 		}
@@ -188,11 +185,7 @@ func (m *qosContainerManagerImpl) setMemoryReserve(configs map[v1.PodQOSClass]*C
 			// limits are not set for Best Effort pods
 			continue
 		}
-		req, _, err := resource.PodRequestsAndLimits(pod)
-		if err != nil {
-			glog.V(2).Infof("[Container Manager] Pod resource requests/limits could not be determined. Not setting QOS memory limts.")
-			return
-		}
+		req, _ := resource.PodRequestsAndLimits(pod)
 		if request, found := req[v1.ResourceMemory]; found {
 			podMemoryRequest += request.Value()
 		}
```