Merge pull request #57247 from dixudx/cm_return_err

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

cpumanager: Propagate error up instead of panicking

**What this PR does / why we need it**:
Replaces the panics in `NewManager` with errors returned to the caller, so that a misconfigured static CPU policy (a missing or zero CPU reservation) is reported as an ordinary error instead of causing a panic. A minimal sketch of the pattern appears after the release note below.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #57239

**Special notes for your reviewer**:
/assign @sjenning 
**Release note**:

```release-note
None
```
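
For context, a self-contained sketch of the pattern this PR applies. The types, field names, and `newManager` stand-in below are illustrative only and are not the actual kubelet code; only the error messages mirror the diff:

```go
package main

import "fmt"

// manager is a stand-in for the real cpumanager manager type.
type manager struct {
	reservedMilliCPU int64
}

// newManager mirrors the shape of the fixed NewManager: configuration
// problems are returned to the caller instead of panicking in the constructor.
func newManager(reservedMilliCPU *int64) (*manager, error) {
	if reservedMilliCPU == nil {
		// Before this PR: panic("[cpumanager] unable to determine reserved CPU resources for static policy")
		return nil, fmt.Errorf("[cpumanager] unable to determine reserved CPU resources for static policy")
	}
	if *reservedMilliCPU == 0 {
		// Before this PR: panic("[cpumanager] the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero")
		return nil, fmt.Errorf("[cpumanager] the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero")
	}
	return &manager{reservedMilliCPU: *reservedMilliCPU}, nil
}

func main() {
	// The caller can now report the problem and decide what to do next,
	// rather than the process dying from an unrecovered panic.
	if _, err := newManager(nil); err != nil {
		fmt.Println("failed to create cpu manager:", err)
	}
}
```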
Commit eddb00e7c6 by Kubernetes Submit Queue, 2017-12-18 10:18:09 -08:00, committed by GitHub.
2 changed files with 6 additions and 28 deletions

Changes to the cpumanager implementation (`NewManager`):

```diff
@@ -98,13 +98,7 @@ type manager struct {
 var _ Manager = &manager{}
 
 // NewManager creates new cpu manager based on provided policy
-func NewManager(
-	cpuPolicyName string,
-	reconcilePeriod time.Duration,
-	machineInfo *cadvisorapi.MachineInfo,
-	nodeAllocatableReservation v1.ResourceList,
-	stateFileDirecory string,
-) (Manager, error) {
+func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, stateFileDirecory string) (Manager, error) {
 	var policy Policy
 
 	switch policyName(cpuPolicyName) {
@@ -120,18 +114,16 @@ func NewManager(
 		glog.Infof("[cpumanager] detected CPU topology: %v", topo)
 		reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU]
 		if !ok {
-			// The static policy cannot initialize without this information. Panic!
-			panic("[cpumanager] unable to determine reserved CPU resources for static policy")
+			// The static policy cannot initialize without this information.
+			return nil, fmt.Errorf("[cpumanager] unable to determine reserved CPU resources for static policy")
 		}
 		if reservedCPUs.IsZero() {
-			// Panic!
-			//
 			// The static policy requires this to be nonzero. Zero CPU reservation
 			// would allow the shared pool to be completely exhausted. At that point
 			// either we would violate our guarantee of exclusivity or need to evict
 			// any pod that has at least one container that requires zero CPUs.
 			// See the comments in policy_static.go for more details.
-			panic("[cpumanager] the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero")
+			return nil, fmt.Errorf("[cpumanager] the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero")
 		}
 		// Take the ceiling of the reservation, since fractional CPUs cannot be
```
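The comment retained in the second hunk is the heart of the zero-reservation check: with nothing reserved, exclusively allocated containers could drain the shared pool entirely. A rough, self-contained illustration of that arithmetic follows; the helper and the 8-CPU node are hypothetical, a simplified model rather than the real allocator:

```go
package main

import "fmt"

// maxExclusiveCPUs is the most CPUs a static-policy node could hand out for
// exclusive use; the reservation is what keeps the shared pool from reaching
// zero. Simplified model, not the actual kubelet logic.
func maxExclusiveCPUs(totalCPUs, reservedCPUs int) int {
	return totalCPUs - reservedCPUs
}

func main() {
	total := 8 // hypothetical 8-CPU node

	// Zero reservation: every CPU could end up exclusively assigned,
	// leaving an empty shared pool. This is the case NewManager now rejects.
	fmt.Println("shared pool left:", total-maxExclusiveCPUs(total, 0)) // 0

	// Reserving one CPU guarantees at least one CPU stays in the shared pool.
	fmt.Println("shared pool left:", total-maxExclusiveCPUs(total, 1)) // 1
}
```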

Changes to the cpumanager tests (`TestCPUManagerGenerate`):

```diff
@@ -234,7 +234,6 @@ func TestCPUManagerGenerate(t *testing.T) {
 		cpuPolicyName              string
 		nodeAllocatableReservation v1.ResourceList
 		isTopologyBroken           bool
-		panicMsg                   string
 		expectedPolicy             string
 		expectedError              error
 		skipIfPermissionsError     bool
@@ -270,14 +269,14 @@ func TestCPUManagerGenerate(t *testing.T) {
 			description:                "static policy - broken reservation",
 			cpuPolicyName:              "static",
 			nodeAllocatableReservation: v1.ResourceList{},
-			panicMsg:                   "unable to determine reserved CPU resources for static policy",
+			expectedError:              fmt.Errorf("unable to determine reserved CPU resources for static policy"),
 			skipIfPermissionsError:     true,
 		},
 		{
 			description:                "static policy - no CPU resources",
 			cpuPolicyName:              "static",
 			nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI)},
-			panicMsg:                   "the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero",
+			expectedError:              fmt.Errorf("the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero"),
 			skipIfPermissionsError:     true,
 		},
 	}
@@ -319,19 +318,6 @@ func TestCPUManagerGenerate(t *testing.T) {
 				t.Errorf("cannot create state file: %s", err.Error())
 			}
 			defer os.RemoveAll(sDir)
-			defer func() {
-				if err := recover(); err != nil {
-					if testCase.panicMsg != "" {
-						if !strings.Contains(err.(string), testCase.panicMsg) {
-							t.Errorf("Unexpected panic message. Have: %q wants %q", err, testCase.panicMsg)
-						}
-					} else {
-						t.Errorf("Unexpected panic: %q", err)
-					}
-				} else if testCase.panicMsg != "" {
-					t.Error("Expected panic hasn't been raised")
-				}
-			}()
 
 			mgr, err := NewManager(testCase.cpuPolicyName, 5*time.Second, machineInfo, testCase.nodeAllocatableReservation, sDir)
 			if testCase.expectedError != nil {
```
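For reference, this is the assertion style the updated test moves to: a table-driven case compares the constructor's returned error against `expectedError` instead of recovering a panic in a deferred function. The sketch below is self-contained and uses a stand-in `newManager`, so the exact checks in the real test may differ:

```go
package sketch

import (
	"fmt"
	"strings"
	"testing"
)

// newManager is a stand-in for the real NewManager; it only models the two
// error cases exercised by the table below.
func newManager(hasReservation bool, reservedMilliCPU int64) (string, error) {
	if !hasReservation {
		return "", fmt.Errorf("unable to determine reserved CPU resources for static policy")
	}
	if reservedMilliCPU == 0 {
		return "", fmt.Errorf("the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero")
	}
	return "static", nil
}

func TestNewManagerErrors(t *testing.T) {
	testCases := []struct {
		description      string
		hasReservation   bool
		reservedMilliCPU int64
		expectedError    error
	}{
		{"broken reservation", false, 0, fmt.Errorf("unable to determine reserved CPU resources for static policy")},
		{"no CPU resources", true, 0, fmt.Errorf("the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero")},
		{"valid reservation", true, 1000, nil},
	}
	for _, tc := range testCases {
		_, err := newManager(tc.hasReservation, tc.reservedMilliCPU)
		if tc.expectedError != nil {
			// Match on the message, mirroring how an expected error would be
			// compared against the one NewManager now returns.
			if err == nil || !strings.Contains(err.Error(), tc.expectedError.Error()) {
				t.Errorf("%s: expected error containing %q, got %v", tc.description, tc.expectedError, err)
			}
			continue
		}
		if err != nil {
			t.Errorf("%s: unexpected error: %v", tc.description, err)
		}
	}
}
```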