From f82e3c430c7f481206b2d6b45d9b02424a97bbec Mon Sep 17 00:00:00 2001
From: Jie Shen
Date: Thu, 28 Jan 2021 09:13:40 +0800
Subject: [PATCH] Wrap all errors in pkg/scheduler

---
 pkg/scheduler/core/generic_scheduler_test.go                 | 2 +-
 pkg/scheduler/framework/plugins/noderesources/fit.go         | 2 +-
 .../framework/plugins/noderesources/least_allocated.go       | 2 +-
 .../framework/plugins/noderesources/most_allocated.go        | 4 ++--
 .../plugins/noderesources/requested_to_capacity_ratio.go     | 2 +-
 pkg/scheduler/framework/plugins/nodevolumelimits/csi.go      | 4 ++--
 pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go  | 4 ++--
 .../framework/plugins/selectorspread/selector_spread.go      | 4 ++--
 .../framework/plugins/volumebinding/volume_binding.go        | 2 +-
 pkg/scheduler/framework/plugins/volumezone/volume_zone.go    | 4 ++--
 10 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go
index 97f082bcbea..01aa4b63072 100644
--- a/pkg/scheduler/core/generic_scheduler_test.go
+++ b/pkg/scheduler/core/generic_scheduler_test.go
@@ -185,7 +185,7 @@ func (pl *falseMapPlugin) Name() string {
 }
 
 func (pl *falseMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ string) (int64, *framework.Status) {
-	return 0, framework.NewStatus(framework.Error, errPrioritize.Error())
+	return 0, framework.AsStatus(errPrioritize)
 }
 
 func (pl *falseMapPlugin) ScoreExtensions() framework.ScoreExtensions {
diff --git a/pkg/scheduler/framework/plugins/noderesources/fit.go b/pkg/scheduler/framework/plugins/noderesources/fit.go
index 8b79dc156af..f8d54a2ebb7 100644
--- a/pkg/scheduler/framework/plugins/noderesources/fit.go
+++ b/pkg/scheduler/framework/plugins/noderesources/fit.go
@@ -195,7 +195,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error
 func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
 	s, err := getPreFilterState(cycleState)
 	if err != nil {
-		return framework.NewStatus(framework.Error, err.Error())
+		return framework.AsStatus(err)
 	}
 
 	insufficientResources := fitsRequest(s, nodeInfo, f.ignoredResources, f.ignoredResourceGroups)
diff --git a/pkg/scheduler/framework/plugins/noderesources/least_allocated.go b/pkg/scheduler/framework/plugins/noderesources/least_allocated.go
index 32d77a7672c..f196f30299a 100644
--- a/pkg/scheduler/framework/plugins/noderesources/least_allocated.go
+++ b/pkg/scheduler/framework/plugins/noderesources/least_allocated.go
@@ -47,7 +47,7 @@ func (la *LeastAllocated) Name() string {
 func (la *LeastAllocated) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
 	nodeInfo, err := la.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
 	if err != nil {
-		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
+		return 0, framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", nodeName, err))
 	}
 
 	// la.score favors nodes with fewer requested resources.
diff --git a/pkg/scheduler/framework/plugins/noderesources/most_allocated.go b/pkg/scheduler/framework/plugins/noderesources/most_allocated.go
index 4df34e13017..7a7929dfd92 100644
--- a/pkg/scheduler/framework/plugins/noderesources/most_allocated.go
+++ b/pkg/scheduler/framework/plugins/noderesources/most_allocated.go
@@ -46,8 +46,8 @@ func (ma *MostAllocated) Name() string {
 // Score invoked at the Score extension point.
 func (ma *MostAllocated) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
 	nodeInfo, err := ma.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
-	if err != nil || nodeInfo.Node() == nil {
-		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v, node is nil: %v", nodeName, err, nodeInfo.Node() == nil))
+	if err != nil {
+		return 0, framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", nodeName, err))
 	}
 
 	// ma.score favors nodes with most requested resources.
diff --git a/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go b/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go
index b19485c4ebb..1bcae513931 100755
--- a/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go
+++ b/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio.go
@@ -111,7 +111,7 @@ func (pl *RequestedToCapacityRatio) Name() string {
 func (pl *RequestedToCapacityRatio) Score(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
 	nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
 	if err != nil {
-		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
+		return 0, framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", nodeName, err))
 	}
 	return pl.score(pod, nodeInfo)
 }
diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go
index b4a7b7a31a3..7c38478d677 100644
--- a/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go
+++ b/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go
@@ -87,7 +87,7 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
 
 	newVolumes := make(map[string]string)
 	if err := pl.filterAttachableVolumes(csiNode, pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
-		return framework.NewStatus(framework.Error, err.Error())
+		return framework.AsStatus(err)
 	}
 
 	// If the pod doesn't have any new CSI volumes, the predicate will always be true
@@ -104,7 +104,7 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
 
 	attachedVolumes := make(map[string]string)
 	for _, existingPod := range nodeInfo.Pods {
 		if err := pl.filterAttachableVolumes(csiNode, existingPod.Pod.Spec.Volumes, existingPod.Pod.Namespace, attachedVolumes); err != nil {
-			return framework.NewStatus(framework.Error, err.Error())
+			return framework.AsStatus(err)
 		}
 	}
diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go
index ac853bb14e3..c6f633b9c35 100644
--- a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go
+++ b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go
@@ -204,7 +204,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod
 
 	newVolumes := make(map[string]bool)
 	if err := pl.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
-		return framework.NewStatus(framework.Error, err.Error())
+		return framework.AsStatus(err)
 	}
 
 	// quick return
@@ -237,7 +237,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod
 	existingVolumes := make(map[string]bool)
 	for _, existingPod := range nodeInfo.Pods {
 		if err := pl.filterVolumes(existingPod.Pod.Spec.Volumes, existingPod.Pod.Namespace, existingVolumes); err != nil {
-			return framework.NewStatus(framework.Error, err.Error())
+			return framework.AsStatus(err)
 		}
 	}
 	numExistingVolumes := len(existingVolumes)
diff --git a/pkg/scheduler/framework/plugins/selectorspread/selector_spread.go b/pkg/scheduler/framework/plugins/selectorspread/selector_spread.go
index 987503ffa4e..0e36e58cfce 100644
--- a/pkg/scheduler/framework/plugins/selectorspread/selector_spread.go
+++ b/pkg/scheduler/framework/plugins/selectorspread/selector_spread.go
@@ -123,7 +123,7 @@ func (pl *SelectorSpread) NormalizeScore(ctx context.Context, state *framework.C
 		}
 		nodeInfo, err := pl.sharedLister.NodeInfos().Get(scores[i].Name)
 		if err != nil {
-			return framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", scores[i].Name, err))
+			return framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", scores[i].Name, err))
 		}
 		zoneID := utilnode.GetZoneKey(nodeInfo.Node())
 		if zoneID == "" {
@@ -154,7 +154,7 @@ func (pl *SelectorSpread) NormalizeScore(ctx context.Context, state *framework.C
 		if haveZones {
 			nodeInfo, err := pl.sharedLister.NodeInfos().Get(scores[i].Name)
 			if err != nil {
-				return framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", scores[i].Name, err))
+				return framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", scores[i].Name, err))
 			}
 
 			zoneID := utilnode.GetZoneKey(nodeInfo.Node())
diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
index 7b4e692800c..51cedc4b276 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
@@ -196,7 +196,7 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p
 
 	podVolumes, reasons, err := pl.Binder.FindPodVolumes(pod, state.boundClaims, state.claimsToBind, node)
 	if err != nil {
-		return framework.NewStatus(framework.Error, err.Error())
+		return framework.AsStatus(err)
 	}
 
 	if len(reasons) > 0 {
diff --git a/pkg/scheduler/framework/plugins/volumezone/volume_zone.go b/pkg/scheduler/framework/plugins/volumezone/volume_zone.go
index b834bb9080e..6f601db8fb7 100644
--- a/pkg/scheduler/framework/plugins/volumezone/volume_zone.go
+++ b/pkg/scheduler/framework/plugins/volumezone/volume_zone.go
@@ -112,7 +112,7 @@ func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *
 		}
 		pvc, err := pl.pvcLister.PersistentVolumeClaims(pod.Namespace).Get(pvcName)
 		if err != nil {
-			return framework.NewStatus(framework.Error, err.Error())
+			return framework.AsStatus(err)
 		}
 
 		if pvc == nil {
@@ -144,7 +144,7 @@ func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *
 
 		pv, err := pl.pvLister.Get(pvName)
 		if err != nil {
-			return framework.NewStatus(framework.Error, err.Error())
+			return framework.AsStatus(err)
 		}
 
 		if pv == nil {
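
Note on what the change buys: framework.AsStatus carries the error itself (retrievable via Status.AsError()), and switching fmt.Sprintf("...%v") to fmt.Errorf("...%w") keeps the underlying error in the chain, so callers can inspect the cause with errors.Is/errors.As instead of matching message strings. Below is a minimal standalone sketch of that difference using only the standard library; the sentinel name errPrioritize mirrors the test variable in generic_scheduler_test.go, while the node name and the main wrapper are illustrative only, not part of the patch.

package main

import (
	"errors"
	"fmt"
)

// errPrioritize stands in for a sentinel error a scheduler plugin might return.
var errPrioritize = errors.New("priority map encounters an error")

func main() {
	// New style: %w keeps errPrioritize in the error chain.
	wrapped := fmt.Errorf("getting node %q from Snapshot: %w", "node-1", errPrioritize)
	fmt.Println(errors.Is(wrapped, errPrioritize)) // true

	// Old style: %v (like err.Error()) flattens the cause into a plain
	// string, so callers can no longer detect it programmatically.
	flattened := fmt.Errorf("getting node %q from Snapshot: %v", "node-1", errPrioritize)
	fmt.Println(errors.Is(flattened, errPrioritize)) // false
}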