fix go vet errors from skipped tests

Mike Danese 2017-04-27 09:58:00 -07:00
parent 21617a60ae
commit 0c80a7aa40
4 changed files with 13 additions and 7 deletions


@@ -561,7 +561,7 @@ func TestSingleWatch(t *testing.T) {
 func TestMultiWatch(t *testing.T) {
 	// Disable this test as long as it demonstrates a problem.
 	// TODO: Reenable this test when we get #6059 resolved.
-	return
+	t.Skip()
 	const watcherCount = 50
 	rt.GOMAXPROCS(watcherCount)
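Note on this hunk: the bare `return` and `t.Skip()` both stop the test, but the `return` leaves the rest of the body as dead code, which go vet's unreachable check reports, and the test is recorded as passed rather than skipped. A minimal standalone sketch of the pattern (not taken from this commit):

    package example

    import "testing"

    func TestDisabled(t *testing.T) {
        // A bare `return` here would make everything below dead code
        // ("unreachable: unreachable code" under go vet) and report the
        // test as passed. t.Skip stops the test goroutine too, but marks
        // it skipped, and vet sees an ordinary call rather than a return.
        t.Skip("disabled until the underlying issue is resolved")

        t.Log("never reached")
    }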


@@ -161,7 +161,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
 	}
 	if atomic.LoadUint32(&numberPodsEvicted) != numOfEvictions {
-		t.Fatalf("fewer number of successful evictions than expected :", numberPodsEvicted)
+		t.Fatalf("fewer number of successful evictions than expected : %d", numberPodsEvicted)
 	}
 }
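Note on the eviction fix: the old Fatalf passed an argument but its format string had no verb, which go vet's printf check reports ("Fatalf call has arguments but no formatting directives"). A self-contained sketch of the corrected pattern (the constant and counter here are stand-ins):

    package example

    import "testing"

    func TestEvictionCount(t *testing.T) {
        const numOfEvictions = 4
        evicted := uint32(numOfEvictions) // stands in for the real atomic counter

        // Old: t.Fatalf("fewer number of successful evictions than expected :", evicted)
        //      -> flagged by go vet's printf check
        // New: the %d verb makes the format string and arguments agree.
        if evicted != numOfEvictions {
            t.Fatalf("fewer number of successful evictions than expected : %d", evicted)
        }
    }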


@@ -230,7 +230,10 @@ func machine_2_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostP
 		if node.Name == "machine2" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{node.Name, score})
+		result = append(result, schedulerapi.HostPriority{
+			Host:  node.Name,
+			Score: score,
+		})
 	}
 	return &result, nil
 }
@@ -242,7 +245,10 @@ func machine_3_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostP
 		if node.Name == "machine3" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{node.Name, score})
+		result = append(result, schedulerapi.HostPriority{
+			Host:  node.Name,
+			Score: score,
+		})
 	}
 	return &result, nil
 }
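Note on the two prioritizer hunks: both fix go vet's composites check ("composite literal uses unkeyed fields"). An unkeyed literal of a struct defined in another package breaks silently if that package reorders its fields; keyed fields are order-independent. A self-contained sketch with a local stand-in for schedulerapi.HostPriority:

    package main

    import "fmt"

    // HostPriority mirrors the shape of schedulerapi.HostPriority purely
    // for illustration; the real type lives in the scheduler API package.
    type HostPriority struct {
        Host  string
        Score int
    }

    func main() {
        // Unkeyed, as before the fix: HostPriority{"machine2", 10}.
        // vet flags that form when the struct comes from another package.
        // Keyed fields, as after the fix, are vet-clean:
        var result []HostPriority
        result = append(result, HostPriority{
            Host:  "machine2",
            Score: 10,
        })
        fmt.Println(result)
    }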
@@ -353,7 +359,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
 		Type:              v1.NodeReady,
 		Status:            v1.ConditionTrue,
 		Reason:            fmt.Sprintf("schedulable condition"),
-		LastHeartbeatTime: metav1.Time{time.Now()},
+		LastHeartbeatTime: metav1.Time{Time: time.Now()},
 	}
 	node := &v1.Node{
 		Spec: v1.NodeSpec{Unschedulable: false},


@@ -323,13 +323,13 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
 		Type:              v1.NodeReady,
 		Status:            v1.ConditionTrue,
 		Reason:            fmt.Sprintf("schedulable condition"),
-		LastHeartbeatTime: metav1.Time{time.Now()},
+		LastHeartbeatTime: metav1.Time{Time: time.Now()},
 	}
 	badCondition := v1.NodeCondition{
 		Type:              v1.NodeReady,
 		Status:            v1.ConditionUnknown,
 		Reason:            fmt.Sprintf("unschedulable condition"),
-		LastHeartbeatTime: metav1.Time{time.Now()},
+		LastHeartbeatTime: metav1.Time{Time: time.Now()},
 	}
 	// Create a new schedulable node, since we're first going to apply
 	// the unschedulable condition and verify that pods aren't scheduled.
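Note on the metav1.Time changes in the last two files: these are the same composites fix. metav1.Time is a struct from an imported package that embeds time.Time (so it can carry custom serialization), so the unkeyed metav1.Time{time.Now()} trips vet; keying the embedded field, whose name is Time, satisfies it. A local stand-in:

    package main

    import (
        "fmt"
        "time"
    )

    // Time mirrors metav1.Time for illustration: it embeds time.Time,
    // so the embedded field's name is "Time".
    type Time struct {
        time.Time
    }

    func main() {
        // Unkeyed Time{time.Now()} is what vet's composites check flags
        // when Time comes from an imported package; keying the embedded
        // field is the fix applied in this commit:
        hb := Time{Time: time.Now()}
        fmt.Println(hb)
    }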