mirror of https://github.com/k3s-io/kubernetes.git
Fix staticcheck failures for pkg/scheduler/...
parent c9c01fb902
commit 345e528c86
@@ -47,11 +47,6 @@ pkg/registry/core/service/ipallocator
 pkg/registry/core/service/portallocator
 pkg/registry/core/service/storage
 pkg/registry/extensions/controller/storage
-pkg/scheduler
-pkg/scheduler/algorithm/predicates
-pkg/scheduler/algorithm/priorities
-pkg/scheduler/api/v1
-pkg/scheduler/internal/queue
 pkg/util/coverage
 pkg/util/ebtables
 pkg/util/ipconfig
@@ -62,7 +62,7 @@ func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
 	if meta1.podBestEffort != meta2.podBestEffort {
 		return fmt.Errorf("podBestEfforts are not equal")
 	}
-	if meta1.serviceAffinityInUse != meta1.serviceAffinityInUse {
+	if meta1.serviceAffinityInUse != meta2.serviceAffinityInUse {
 		return fmt.Errorf("serviceAffinityInUses are not equal")
 	}
 	if len(meta1.podPorts) != len(meta2.podPorts) {
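The only change in the hunk above replaces a comparison of meta1.serviceAffinityInUse with itself, a condition that can never be true, by the intended meta1-versus-meta2 comparison. A minimal sketch of the corrected pattern, using hypothetical names rather than the real predicateMetadata type:

package main

import "fmt"

// predicateMeta stands in for the scheduler's predicateMetadata struct.
type predicateMeta struct {
	serviceAffinityInUse bool
}

// equivalent mirrors the fixed check: each field of meta1 is compared
// against the corresponding field of meta2, never against itself.
func equivalent(meta1, meta2 predicateMeta) error {
	if meta1.serviceAffinityInUse != meta2.serviceAffinityInUse {
		return fmt.Errorf("serviceAffinityInUses are not equal")
	}
	return nil
}

func main() {
	fmt.Println(equivalent(predicateMeta{true}, predicateMeta{false}))
}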
@@ -1697,15 +1697,6 @@ var (
 	softSpread = v1.ScheduleAnyway
 )
 
-func newPairSet(kv ...string) topologyPairSet {
-	result := make(topologyPairSet)
-	for i := 0; i < len(kv); i += 2 {
-		pair := topologyPair{key: kv[i], value: kv[i+1]}
-		result[pair] = struct{}{}
-	}
-	return result
-}
-
 // sortCriticalPaths is only served for testing purpose.
 func (c *podSpreadCache) sortCriticalPaths() {
 	for _, paths := range c.tpKeyToCriticalPaths {
@@ -493,7 +493,4 @@ func BenchmarkTestCalculateEvenPodsSpreadPriority(b *testing.B) {
 	}
 }
 
-var (
-	hardSpread = v1.DoNotSchedule
-	softSpread = v1.ScheduleAnyway
-)
+var softSpread = v1.ScheduleAnyway
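The two hunks above, like the priority-variable hunk later in the diff, handle staticcheck's unused-code findings by deleting the dead declarations outright rather than suppressing the warning. A minimal sketch (hypothetical package and names) of why the helper and its last callers go together: an unexported declaration is only acceptable to the unused check while something in the package still references it.

package spread

// topologyPair and pairSet stand in for the real test types.
type topologyPair struct{ key, value string }
type pairSet map[topologyPair]struct{}

// newPairSet mirrors the helper deleted in the hunk above. With the
// reference from defaultPairs below it counts as used; remove that
// reference and staticcheck reports newPairSet as dead code, which is
// why the commit deletes the helper along with its callers.
func newPairSet(kv ...string) pairSet {
	result := make(pairSet)
	for i := 0; i < len(kv); i += 2 {
		result[topologyPair{key: kv[i], value: kv[i+1]}] = struct{}{}
	}
	return result
}

var defaultPairs = newPairSet("zone", "a", "zone", "b")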
@@ -145,7 +145,7 @@ type UtilizationShapePoint struct {
 // ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments.
 type ResourceSpec struct {
 	// Name of the resource to be managed by RequestedToCapacityRatio function.
-	Name apiv1.ResourceName `json:"name,casttype=ResourceName"`
+	Name apiv1.ResourceName `json:"name"`
 	// Weight of the resource.
 	Weight int64 `json:"weight,omitempty"`
 }
@@ -154,7 +154,7 @@ type ResourceSpec struct {
 // managed by an extender.
 type ExtenderManagedResource struct {
 	// Name is the extended resource name.
-	Name apiv1.ResourceName `json:"name,casttype=ResourceName"`
+	Name apiv1.ResourceName `json:"name"`
 	// IgnoredByScheduler indicates whether kube-scheduler should ignore this
 	// resource when applying predicates.
 	IgnoredByScheduler bool `json:"ignoredByScheduler,omitempty"`
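Both struct-tag hunks drop the casttype=ResourceName option from the json tag. encoding/json only acts on the field name plus the options it knows (omitempty, string) and silently ignores anything else, so the wire format does not change; staticcheck's struct-tag check objects to options the json encoder does not recognize, which is presumably what these hunks are silencing. A minimal sketch (hypothetical types, not the scheduler API) showing the marshaled output is identical either way:

package main

import (
	"encoding/json"
	"fmt"
)

// withExtra carries the pre-fix style of tag, plain the post-fix style.
type withExtra struct {
	Name string `json:"name,casttype=ResourceName"`
}

type plain struct {
	Name string `json:"name"`
}

func main() {
	a, _ := json.Marshal(withExtra{Name: "cpu"})
	b, _ := json.Marshal(plain{Name: "cpu"})
	fmt.Println(string(a))              // {"name":"cpu"}
	fmt.Println(string(a) == string(b)) // true: the unknown option never affected encoding
}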
@@ -35,7 +35,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/util"
 )
 
-var negPriority, lowPriority, midPriority, highPriority, veryHighPriority = int32(-100), int32(0), int32(100), int32(1000), int32(10000)
+var lowPriority, midPriority, highPriority = int32(0), int32(100), int32(1000)
 var mediumPriority = (lowPriority + highPriority) / 2
 var highPriorityPod, highPriNominatedPod, medPriorityPod, unschedulablePod = v1.Pod{
 	ObjectMeta: metav1.ObjectMeta{
@@ -352,6 +352,7 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 
 	waitPodExpireChan := make(chan struct{})
 	timeout := make(chan struct{})
+	errChan := make(chan error)
 	go func() {
 		for {
 			select {
@@ -361,7 +362,8 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 			}
 			pods, err := scache.List(labels.Everything())
 			if err != nil {
-				t.Fatalf("cache.List failed: %v", err)
+				errChan <- fmt.Errorf("cache.List failed: %v", err)
+				return
 			}
 			if len(pods) == 0 {
 				close(waitPodExpireChan)
@@ -372,6 +374,8 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 	}()
 	// waiting for the assumed pod to expire
 	select {
+	case err := <-errChan:
+		t.Fatal(err)
 	case <-waitPodExpireChan:
 	case <-time.After(wait.ForeverTestTimeout):
 		close(timeout)
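The last three hunks are one fix: t.Fatalf was being called from a polling goroutine, but the testing package requires FailNow (and therefore Fatal/Fatalf) to be called only from the goroutine running the test, which is what staticcheck flags here. The replacement pattern reports the error over a channel and lets the test goroutine do the failing. A minimal self-contained sketch of that pattern, with hypothetical names rather than the real scheduler test:

package phantom

import (
	"fmt"
	"testing"
	"time"
)

// doWork stands in for the cache listing the real test performs.
func doWork() error { return nil }

func TestBackgroundFailure(t *testing.T) {
	errChan := make(chan error)
	done := make(chan struct{})

	go func() {
		// Never call t.Fatal here: this goroutine is not the one running
		// the test. Hand the error back over the channel instead.
		if err := doWork(); err != nil {
			errChan <- fmt.Errorf("background work failed: %v", err)
			return
		}
		close(done)
	}()

	select {
	case err := <-errChan:
		t.Fatal(err) // safe: back on the test goroutine
	case <-done:
	case <-time.After(30 * time.Second):
		t.Fatal("timed out waiting for background work")
	}
}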