Fix golint errors in pkg/scheduler based on golint check

tossmilestone
2018-02-08 14:42:19 +08:00
parent a4e49d19f9
commit 3fdacfead5
51 changed files with 512 additions and 304 deletions

View File

@@ -28,7 +28,7 @@ import (
)
func TestImageLocalityPriority(t *testing.T) {
-test_40_250 := v1.PodSpec{
+test40250 := v1.PodSpec{
Containers: []v1.Container{
{
Image: "gcr.io/40",
@@ -39,7 +39,7 @@ func TestImageLocalityPriority(t *testing.T) {
},
}
-test_40_140 := v1.PodSpec{
+test40140 := v1.PodSpec{
Containers: []v1.Container{
{
Image: "gcr.io/40",
@@ -50,7 +50,7 @@ func TestImageLocalityPriority(t *testing.T) {
},
}
-test_min_max := v1.PodSpec{
+testMinMax := v1.PodSpec{
Containers: []v1.Container{
{
Image: "gcr.io/10",
@@ -61,7 +61,7 @@ func TestImageLocalityPriority(t *testing.T) {
},
}
-node_40_140_2000 := v1.NodeStatus{
+node401402000 := v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
@@ -87,7 +87,7 @@ func TestImageLocalityPriority(t *testing.T) {
},
}
-node_250_10 := v1.NodeStatus{
+node25010 := v1.NodeStatus{
Images: []v1.ContainerImage{
{
Names: []string{
@@ -122,8 +122,8 @@ func TestImageLocalityPriority(t *testing.T) {
// Node2
// Image: gcr.io/250 250MB
// Score: (250M-23M)/97.7M + 1 = 3
-pod: &v1.Pod{Spec: test_40_250},
-nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+pod: &v1.Pod{Spec: test40250},
+nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 3}},
test: "two images spread on two nodes, prefer the larger image one",
},
@@ -137,8 +137,8 @@ func TestImageLocalityPriority(t *testing.T) {
// Node2
// Image: not present
// Score: 0
-pod: &v1.Pod{Spec: test_40_140},
-nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+pod: &v1.Pod{Spec: test40140},
+nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 0}},
test: "two images on one node, prefer this node",
},
@@ -152,8 +152,8 @@ func TestImageLocalityPriority(t *testing.T) {
// Node2
// Image: gcr.io/10 10MB
// Score: 10 < min score = 0
-pod: &v1.Pod{Spec: test_min_max},
-nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+pod: &v1.Pod{Spec: testMinMax},
+nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
test: "if exceed limit, use limit",
},

View File

@@ -32,6 +32,7 @@ import (
"github.com/golang/glog"
)
+// InterPodAffinity contains information to calculate inter pod affinity.
type InterPodAffinity struct {
info predicates.NodeInfo
nodeLister algorithm.NodeLister
@@ -39,6 +40,7 @@ type InterPodAffinity struct {
hardPodAffinityWeight int32
}
+// NewInterPodAffinityPriority creates an InterPodAffinity.
func NewInterPodAffinityPriority(
info predicates.NodeInfo,
nodeLister algorithm.NodeLister,

View File

@@ -24,7 +24,7 @@ import (
var (
leastResourcePriority = &ResourceAllocationPriority{"LeastResourceAllocation", leastResourceScorer}
-// LeastRequestedPriority is a priority function that favors nodes with fewer requested resources.
+// LeastRequestedPriorityMap is a priority function that favors nodes with fewer requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
// prioritizes based on the minimum of the average of the fraction of requested to capacity.
//
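
The renamed comment describes the scorer that leastResourcePriority plugs into ResourceAllocationPriority: more unused capacity yields a higher score. A standalone sketch of that shape, assuming the usual (capacity - requested) * 10 / capacity formula per resource, averaged over CPU and memory (the Details line itself is not visible in this hunk):

```go
package main

import "fmt"

const maxPriority int64 = 10

// leastRequestedScore favors nodes with more unused capacity:
// score = (capacity - requested) * 10 / capacity, and 0 when requests exceed capacity.
func leastRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (capacity - requested) * maxPriority / capacity
}

func main() {
	// Per-resource scores averaged, matching the "(cpu(...) + memory(...)) / 2" shape.
	cpu := leastRequestedScore(3000, 4000)  // 2: a quarter of the CPU is still free
	mem := leastRequestedScore(5000, 10000) // 5: half of the memory is still free
	fmt.Println((cpu + mem) / 2)            // 3
}
```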

View File

@@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)
+// PriorityMetadataFactory is a factory to produce PriorityMetadata.
type PriorityMetadataFactory struct {
serviceLister algorithm.ServiceLister
controllerLister algorithm.ControllerLister
@@ -32,6 +33,7 @@ type PriorityMetadataFactory struct {
statefulSetLister algorithm.StatefulSetLister
}
+// NewPriorityMetadataFactory creates a PriorityMetadataFactory.
func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister, replicaSetLister algorithm.ReplicaSetLister, statefulSetLister algorithm.StatefulSetLister) algorithm.PriorityMetadataProducer {
factory := &PriorityMetadataFactory{
serviceLister: serviceLister,

View File

@@ -32,7 +32,7 @@ import (
func TestPriorityMetadata(t *testing.T) {
nonZeroReqs := &schedulercache.Resource{}
-nonZeroReqs.MilliCPU = priorityutil.DefaultMilliCpuRequest
+nonZeroReqs.MilliCPU = priorityutil.DefaultMilliCPURequest
nonZeroReqs.Memory = priorityutil.DefaultMemoryRequest
specifiedReqs := &schedulercache.Resource{}

View File

@@ -24,7 +24,7 @@ import (
var (
mostResourcePriority = &ResourceAllocationPriority{"MostResourceAllocation", mostResourceScorer}
-// MostRequestedPriority is a priority function that favors nodes with most requested resources.
+// MostRequestedPriorityMap is a priority function that favors nodes with most requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
// based on the maximum of the average of the fraction of requested to capacity.
// Details: (cpu(10 * sum(requested) / capacity) + memory(10 * sum(requested) / capacity)) / 2
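
The Details line above gives the formula behind mostResourceScorer. A standalone sketch of that arithmetic (not the scheduler's actual code), reproducing the Node2 calculation from TestMostRequested further down, where the memory request exceeds capacity and scores 0:

```go
package main

import "fmt"

const maxPriority int64 = 10

// mostRequestedScore follows the "10 * sum(requested) / capacity" shape from the
// Details line: the fuller the node, the higher the score.
func mostRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return requested * maxPriority / capacity
}

func main() {
	// Node2 from TestMostRequested below: CPU half used, memory request over capacity.
	cpu := mostRequestedScore(5000, 10000) // 5
	mem := mostRequestedScore(9000, 8000)  // 0: "9000 > 8000 return 0"
	fmt.Println((cpu + mem) / 2)           // 2: "(5 + 0) / 2 = 2"
}
```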

View File

@@ -83,7 +83,7 @@ func TestMostRequested(t *testing.T) {
},
},
}
-bigCpuAndMemory := v1.PodSpec{
+bigCPUAndMemory := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{
{
@@ -201,7 +201,7 @@ func TestMostRequested(t *testing.T) {
Memory Score: 9000 > 8000 return 0
Node2 Score: (5 + 0) / 2 = 2
*/
-pod: &v1.Pod{Spec: bigCpuAndMemory},
+pod: &v1.Pod{Spec: bigCPUAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 2}},
test: "resources requested with more than the node, pods scheduled with resources",

View File

@@ -26,7 +26,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)
-// CalculateNodeAffinityPriority prioritizes nodes according to node affinity scheduling preferences
+// CalculateNodeAffinityPriorityMap prioritizes nodes according to node affinity scheduling preferences
// indicated in PreferredDuringSchedulingIgnoredDuringExecution. Each time a node match a preferredSchedulingTerm,
// it will a get an add of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms
// the node satisfies and the more the preferredSchedulingTerm that is satisfied weights, the higher
@@ -74,4 +74,5 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
}, nil
}
+// CalculateNodeAffinityPriorityReduce is a reduce function for node affinity priority calculation.
var CalculateNodeAffinityPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, false)
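
The comments above describe a map step that sums the weights of every preferredSchedulingTerm a node satisfies, followed by a NormalizeReduce(MaxPriority, false) reduce step that scales those sums onto 0..MaxPriority. A standalone sketch of that map/reduce shape, with a simplified term type (exact key/value label matches) standing in for the real NodeSelectorTerm matching:

```go
package main

import "fmt"

const maxPriority = 10

// term is a simplified stand-in for a PreferredDuringSchedulingIgnoredDuringExecution
// entry: a weight plus a selector reduced to exact key/value label matches.
type term struct {
	weight int
	labels map[string]string
}

// affinityCount sums the weights of every term the node's labels satisfy,
// which is the map step the comment above describes.
func affinityCount(nodeLabels map[string]string, terms []term) int {
	count := 0
	for _, t := range terms {
		matches := true
		for k, v := range t.labels {
			if nodeLabels[k] != v {
				matches = false
				break
			}
		}
		if matches {
			count += t.weight
		}
	}
	return count
}

// normalize mirrors what a NormalizeReduce(maxPriority, false) step does:
// scale the raw sums so the best node lands on maxPriority.
func normalize(counts []int) []int {
	highest := 0
	for _, c := range counts {
		if c > highest {
			highest = c
		}
	}
	out := make([]int, len(counts))
	if highest == 0 {
		return out
	}
	for i, c := range counts {
		out[i] = c * maxPriority / highest
	}
	return out
}

func main() {
	terms := []term{
		{weight: 2, labels: map[string]string{"zone": "us-east-1a"}},
		{weight: 5, labels: map[string]string{"disk": "ssd"}},
	}
	a := affinityCount(map[string]string{"zone": "us-east-1a", "disk": "ssd"}, terms) // 7: both terms match
	b := affinityCount(map[string]string{"zone": "us-east-1b", "disk": "ssd"}, terms) // 5: only the disk term matches
	fmt.Println(normalize([]int{a, b}))                                               // [10 7]
}
```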

View File

@@ -26,11 +26,13 @@ import (
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)
+// NodeLabelPrioritizer contains information to calculate node label priority.
type NodeLabelPrioritizer struct {
label string
presence bool
}
+// NewNodeLabelPriority creates a NodeLabelPrioritizer.
func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
labelPrioritizer := &NodeLabelPrioritizer{
label: label,
@@ -39,7 +41,7 @@ func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFun
return labelPrioritizer.CalculateNodeLabelPriorityMap, nil
}
-// CalculateNodeLabelPriority checks whether a particular label exists on a node or not, regardless of its value.
+// CalculateNodeLabelPriorityMap checks whether a particular label exists on a node or not, regardless of its value.
// If presence is true, prioritizes nodes that have the specified label, regardless of value.
// If presence is false, prioritizes nodes that do not have the specified label.
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
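
The renamed comment states the whole rule: only the presence or absence of the label matters, never its value. A minimal standalone sketch of that rule, assuming the usual 0..10 priority range (the body of CalculateNodeLabelPriorityMap itself is not shown in this hunk):

```go
package main

import "fmt"

const maxPriority = 10

// nodeLabelScore mirrors the documented behavior: the label's value is ignored,
// only its presence (or absence, when presence is false) counts.
func nodeLabelScore(nodeLabels map[string]string, label string, presence bool) int {
	_, exists := nodeLabels[label]
	if exists == presence {
		return maxPriority
	}
	return 0
}

func main() {
	labels := map[string]string{"dedicated": "anything"}
	fmt.Println(nodeLabelScore(labels, "dedicated", true))  // 10: label present, presence wanted
	fmt.Println(nodeLabelScore(labels, "dedicated", false)) // 0:  label present, absence wanted
	fmt.Println(nodeLabelScore(labels, "gpu", false))       // 10: label absent, absence wanted
}
```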

View File

@@ -27,6 +27,8 @@ import (
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)
+// CalculateNodePreferAvoidPodsPriorityMap priorities nodes according to the node annotation
+// "scheduler.alpha.kubernetes.io/preferAvoidPods".
func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node()
if node == nil {
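
The new comment only names the annotation, so the following is a deliberately simplified sketch of the scoring shape: nodes whose preferAvoidPods annotation mentions the pod's controller score 0, all other nodes score the maximum. The real annotation carries a JSON AvoidPods structure rather than the comma-separated list of controller names assumed here:

```go
package main

import (
	"fmt"
	"strings"
)

const maxPriority = 10

// preferAvoidScore is a simplified stand-in for the real map function: the
// annotation is reduced to a comma-separated list of controller names, just to
// show the shape of the decision (avoid listed controllers, welcome the rest).
func preferAvoidScore(annotation, podController string) int {
	for _, avoided := range strings.Split(annotation, ",") {
		if strings.TrimSpace(avoided) == podController {
			return 0
		}
	}
	return maxPriority
}

func main() {
	fmt.Println(preferAvoidScore("rc-critical,rs-batch", "rs-batch")) // 0: the node asks to avoid this controller
	fmt.Println(preferAvoidScore("rc-critical", "rs-batch"))          // 10: no objection from this node
}
```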

View File

@@ -26,11 +26,14 @@ import (
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)
+// ResourceAllocationPriority contains information to calculate resource allocation priority.
type ResourceAllocationPriority struct {
Name string
scorer func(requested, allocable *schedulercache.Resource) int64
}
+// PriorityMap priorities nodes according to the resource allocations on the node.
+// It will use `scorer` function to calculate the score.
func (r *ResourceAllocationPriority) PriorityMap(
pod *v1.Pod,
meta interface{},
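
The new comments document the struct-plus-scorer pattern that the least- and most-requested priorities share: ResourceAllocationPriority carries a name and a scorer closure, and PriorityMap defers all math to that closure. A standalone sketch of the pattern with simplified types in place of *schedulercache.Resource and NodeInfo:

```go
package main

import "fmt"

// resource is a stripped-down stand-in for schedulercache.Resource.
type resource struct {
	milliCPU, memory int64
}

// resourceAllocationPriority mirrors the pattern documented above: a named
// priority that delegates the scoring math to a pluggable scorer function, so
// the least- and most-requested priorities differ only in the closure they pass in.
type resourceAllocationPriority struct {
	name   string
	scorer func(requested, allocatable resource) int64
}

func main() {
	// Plug in the most-requested arithmetic sketched earlier; the least-requested
	// variant would hand the same struct a different closure.
	most := resourceAllocationPriority{
		name: "MostResourceAllocation",
		scorer: func(req, alloc resource) int64 {
			cpu := req.milliCPU * 10 / alloc.milliCPU
			mem := req.memory * 10 / alloc.memory
			return (cpu + mem) / 2
		},
	}
	fmt.Println(most.name, most.scorer(resource{2000, 4000}, resource{4000, 8000})) // MostResourceAllocation 5
}
```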

View File

@@ -33,6 +33,7 @@ import (
// TODO: Any way to justify this weighting?
const zoneWeighting float64 = 2.0 / 3.0
+// SelectorSpread contains information to calculate selector spread priority.
type SelectorSpread struct {
serviceLister algorithm.ServiceLister
controllerLister algorithm.ControllerLister
@@ -40,6 +41,7 @@ type SelectorSpread struct {
statefulSetLister algorithm.StatefulSetLister
}
+// NewSelectorSpreadPriority creates a SelectorSpread.
func NewSelectorSpreadPriority(
serviceLister algorithm.ServiceLister,
controllerLister algorithm.ControllerLister,
@@ -125,16 +127,16 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
if result[i].Score > maxCountByNodeName {
maxCountByNodeName = result[i].Score
}
-zoneId := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
-if zoneId == "" {
+zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
+if zoneID == "" {
continue
}
-countsByZone[zoneId] += result[i].Score
+countsByZone[zoneID] += result[i].Score
}
-for zoneId := range countsByZone {
-if countsByZone[zoneId] > maxCountByZone {
-maxCountByZone = countsByZone[zoneId]
+for zoneID := range countsByZone {
+if countsByZone[zoneID] > maxCountByZone {
+maxCountByZone = countsByZone[zoneID]
}
}
@@ -152,11 +154,11 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
}
// If there is zone information present, incorporate it
if haveZones {
-zoneId := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
-if zoneId != "" {
+zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
+if zoneID != "" {
zoneScore := MaxPriorityFloat64
if maxCountByZone > 0 {
-zoneScore = MaxPriorityFloat64 * (float64(maxCountByZone-countsByZone[zoneId]) / maxCountByZoneFloat64)
+zoneScore = MaxPriorityFloat64 * (float64(maxCountByZone-countsByZone[zoneID]) / maxCountByZoneFloat64)
}
fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
}
@@ -171,12 +173,14 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
return nil
}
+// ServiceAntiAffinity contains information to calculate service anti-affinity priority.
type ServiceAntiAffinity struct {
podLister algorithm.PodLister
serviceLister algorithm.ServiceLister
label string
}
+// NewServiceAntiAffinityPriority creates a ServiceAntiAffinity.
func NewServiceAntiAffinityPriority(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, label string) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
antiAffinity := &ServiceAntiAffinity{
podLister: podLister,
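
The zoneId to zoneID renames in the hunks above sit inside the reduce step that blends per-node and per-zone spreading: each score is scaled against the maximum count seen, and when zone information exists the zone component contributes the 2/3 zoneWeighting defined earlier in this file. A standalone sketch of that blend with simplified inputs (not the actual CalculateSpreadPriorityReduce signature):

```go
package main

import "fmt"

const (
	maxPriorityFloat64 = 10.0
	zoneWeighting      = 2.0 / 3.0 // same 2/3 weighting as the hunk above
)

// spreadScore mirrors the reduce step shown above: fewer matching pods on a node
// (and in its zone) means a higher score, and when zone information is available
// the zone component contributes two thirds of the final value.
func spreadScore(countOnNode, maxCountByNode, countInZone, maxCountByZone int64, haveZones bool) float64 {
	fScore := maxPriorityFloat64
	if maxCountByNode > 0 {
		fScore = maxPriorityFloat64 * float64(maxCountByNode-countOnNode) / float64(maxCountByNode)
	}
	if haveZones {
		zoneScore := maxPriorityFloat64
		if maxCountByZone > 0 {
			zoneScore = maxPriorityFloat64 * float64(maxCountByZone-countInZone) / float64(maxCountByZone)
		}
		fScore = fScore*(1.0-zoneWeighting) + zoneWeighting*zoneScore
	}
	return fScore
}

func main() {
	// A node with no matching pods, in a zone that already holds 2 of the 3 matching pods.
	fmt.Printf("%.1f\n", spreadScore(0, 3, 2, 3, true)) // 5.6 = 10*(1/3) + (10/3)*(2/3)
	// The same node when no zone information is available: only the node count matters.
	fmt.Printf("%.1f\n", spreadScore(0, 3, 0, 0, false)) // 10.0
}
```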

View File

@@ -26,7 +26,10 @@ import "k8s.io/api/core/v1"
// consuming no resources whatsoever. We chose these values to be similar to the
// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary.
// As described in #11713, we use request instead of limit to deal with resource requirements.
-const DefaultMilliCpuRequest int64 = 100 // 0.1 core
+// DefaultMilliCPURequest defines default milli cpu request number.
+const DefaultMilliCPURequest int64 = 100 // 0.1 core
+// DefaultMemoryRequest defines default memory request size.
const DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB
// GetNonzeroRequests returns the default resource request if none is found or what is provided on the request
@@ -36,7 +39,7 @@ func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) {
var outMilliCPU, outMemory int64
// Override if un-set, but not if explicitly set to zero
if _, found := (*requests)[v1.ResourceCPU]; !found {
-outMilliCPU = DefaultMilliCpuRequest
+outMilliCPU = DefaultMilliCPURequest
} else {
outMilliCPU = requests.Cpu().MilliValue()
}
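
A standalone sketch of the override rule in GetNonzeroRequests, with a plain map standing in for v1.ResourceList: only a missing request falls back to the default, while an explicit value, including an explicit zero, is kept as-is:

```go
package main

import "fmt"

// Defaults from the hunk above: 0.1 core and 200MB.
const (
	defaultMilliCPURequest int64 = 100
	defaultMemoryRequest   int64 = 200 * 1024 * 1024
)

// nonzeroRequests mirrors GetNonzeroRequests with a plain map instead of
// v1.ResourceList: explicit values win, including an explicit zero; only a
// missing entry falls back to the default.
func nonzeroRequests(requests map[string]int64) (milliCPU, memory int64) {
	milliCPU, memory = defaultMilliCPURequest, defaultMemoryRequest
	if v, found := requests["cpu"]; found {
		milliCPU = v
	}
	if v, found := requests["memory"]; found {
		memory = v
	}
	return milliCPU, memory
}

func main() {
	fmt.Println(nonzeroRequests(map[string]int64{}))              // 100 209715200: both defaulted
	fmt.Println(nonzeroRequests(map[string]int64{"cpu": 0}))      // 0 209715200: explicit zero CPU is kept
	fmt.Println(nonzeroRequests(map[string]int64{"memory": 400})) // 100 400
}
```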

View File

@@ -35,7 +35,7 @@ func TestGetNonzeroRequests(t *testing.T) {
{
"cpu_and_memory_not_found",
v1.ResourceList{},
-DefaultMilliCpuRequest,
+DefaultMilliCPURequest,
DefaultMemoryRequest,
},
{
@@ -51,7 +51,7 @@ func TestGetNonzeroRequests(t *testing.T) {
v1.ResourceList{
v1.ResourceMemory: resource.MustParse("400Mi"),
},
-DefaultMilliCpuRequest,
+DefaultMilliCPURequest,
400 * 1024 * 1024,
},
{

View File

@@ -70,6 +70,7 @@ func NodesHaveSameTopologyKey(nodeA, nodeB *v1.Node, topologyKey string) bool {
return false
}
+// Topologies contains topologies information of nodes.
type Topologies struct {
DefaultKeys []string
}

View File

@@ -21,6 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
+// GetControllerRef gets pod's owner controller reference from a pod object.
func GetControllerRef(pod *v1.Pod) *metav1.OwnerReference {
if len(pod.OwnerReferences) == 0 {
return nil
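
The hunk shows only the empty-list guard of GetControllerRef; the rest of the function is not part of this diff. A standalone sketch of the documented behavior, assuming the function returns the owner reference flagged as the managing controller, with a simplified stand-in for metav1.OwnerReference:

```go
package main

import "fmt"

// ownerRef is a stripped-down stand-in for metav1.OwnerReference.
type ownerRef struct {
	kind, name string
	controller bool
}

// controllerRef mirrors the intent documented above: among a pod's owner
// references, return the one flagged as the managing controller, or nil if
// there is none.
func controllerRef(owners []ownerRef) *ownerRef {
	if len(owners) == 0 {
		return nil
	}
	for i := range owners {
		if owners[i].controller {
			return &owners[i]
		}
	}
	return nil
}

func main() {
	owners := []ownerRef{
		{kind: "ConfigMap", name: "settings", controller: false},
		{kind: "ReplicaSet", name: "web-6b7f", controller: true},
	}
	if ref := controllerRef(owners); ref != nil {
		fmt.Println(ref.kind, ref.name) // ReplicaSet web-6b7f
	}
}
```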