Merge pull request #61920 from resouer/fix-61916

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add namespace name into e2e event verify function

**What this PR does / why we need it**:

The scheduler now emits the "Scheduled" event message with the pod identified as `ns/podname`, but the e2e tests still expect `podname` only.
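Concretely, the message changed shape as sketched below (the namespace, pod, and node names here are made up for illustration; only the two format strings come from the diff):

```go
package main

import "fmt"

func main() {
	// Hypothetical values for illustration only.
	ns, podName, nodeName := "e2e-tests-sched", "pod-0", "node-1"

	// What the scheduler used to emit, and what the e2e predicate still matched:
	before := fmt.Sprintf("Successfully assigned %v to %v", podName, nodeName)
	// What the scheduler emits now: the pod name is namespace-qualified.
	after := fmt.Sprintf("Successfully assigned %v/%v to %v", ns, podName, nodeName)

	fmt.Println(before) // Successfully assigned pod-0 to node-1
	fmt.Println(after)  // Successfully assigned e2e-tests-sched/pod-0 to node-1
}
```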

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #61916

**Special notes for your reviewer**:

cc @Random-Liu @bsalamat 
@kubernetes/sig-scheduling-pr-reviews 

**Release note**:

```release-note
NONE
```
Merged by Kubernetes Submit Queue on 2018-03-30 00:15:37 -07:00, committed by GitHub as commit f8d37cdaf1.
3 changed files with 14 additions and 14 deletions


```diff
@@ -91,7 +91,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 WaitForSchedulerAfterAction(f, func() error {
 err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false)
 return err
-}, rcName, false)
+}, ns, rcName, false)
 defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName)
 // the first replica pod is scheduled, and the second pod will be rejected.
 verifyResult(cs, 1, 1, ns)
@@ -157,7 +157,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 WaitForSchedulerAfterAction(f, func() error {
 err := framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false)
 return err
-}, affinityRCName, false)
+}, ns, affinityRCName, false)
 // and this new pod should be rejected since node label has been updated
 verifyReplicasResult(cs, replica, 1, ns, affinityRCName)
 })
@@ -222,7 +222,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 WaitForSchedulerAfterAction(f, func() error {
 _, err := cs.CoreV1().ReplicationControllers(ns).Create(rc)
 return err
-}, labelRCName, false)
+}, ns, labelRCName, false)
 // these two replicas should all be rejected since podAntiAffinity says it they anit-affinity with pod {"service": "S1"}
 verifyReplicasResult(cs, 0, replica, ns, labelRCName)
```


```diff
@@ -23,12 +23,12 @@ import (
 "k8s.io/api/core/v1"
 )
-func scheduleSuccessEvent(podName, nodeName string) func(*v1.Event) bool {
+func scheduleSuccessEvent(ns, podName, nodeName string) func(*v1.Event) bool {
 return func(e *v1.Event) bool {
 return e.Type == v1.EventTypeNormal &&
 e.Reason == "Scheduled" &&
 strings.HasPrefix(e.Name, podName) &&
-strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v to %v", podName, nodeName))
+strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v/%v to %v", ns, podName, nodeName))
 }
 }
```
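As a sanity check, the updated predicate can be exercised against a hand-built event. This is a sketch: the event values below are fabricated, while the matcher body is copied from the diff above.

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// scheduleSuccessEvent matches the post-change scheduler event, as in the diff above.
func scheduleSuccessEvent(ns, podName, nodeName string) func(*v1.Event) bool {
	return func(e *v1.Event) bool {
		return e.Type == v1.EventTypeNormal &&
			e.Reason == "Scheduled" &&
			strings.HasPrefix(e.Name, podName) &&
			strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v/%v to %v", ns, podName, nodeName))
	}
}

func main() {
	// A fabricated event in the new namespace-qualified format.
	ev := &v1.Event{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-0.153ab1c2d3e4f5a6"},
		Type:       v1.EventTypeNormal,
		Reason:     "Scheduled",
		Message:    "Successfully assigned e2e-tests-sched/pod-0 to node-1",
	}
	fmt.Println(scheduleSuccessEvent("e2e-tests-sched", "pod-0", "node-1")(ev)) // true
}
```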


```diff
@@ -147,7 +147,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 WaitForSchedulerAfterAction(f, createPausePodAction(f, pausePodConfig{
 Name: podName,
 Labels: map[string]string{"name": "additional"},
-}), podName, false)
+}), ns, podName, false)
 verifyResult(cs, podsNeededForSaturation, 1, ns)
 })
@@ -222,7 +222,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 },
 },
 }
-WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
+WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
 verifyResult(cs, podsNeededForSaturation, 1, ns)
 })
@@ -337,7 +337,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 },
 },
 }
-WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
+WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
 verifyResult(cs, len(fillerPods), 1, ns)
 })
@@ -362,7 +362,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 },
 }
-WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
+WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
 verifyResult(cs, 0, 1, ns)
 })
@@ -461,7 +461,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 },
 Labels: map[string]string{"name": "restricted"},
 }
-WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
+WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
 verifyResult(cs, 0, 1, ns)
 })
@@ -585,11 +585,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 NodeSelector: map[string]string{labelKey: labelValue},
 }
-WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podNameNoTolerations, false)
+WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podNameNoTolerations, false)
 verifyResult(cs, 0, 1, ns)
 By("Removing taint off the node")
-WaitForSchedulerAfterAction(f, removeTaintFromNodeAction(cs, nodeName, testTaint), podNameNoTolerations, true)
+WaitForSchedulerAfterAction(f, removeTaintFromNodeAction(cs, nodeName, testTaint), ns, podNameNoTolerations, true)
 verifyResult(cs, 1, 0, ns)
 })
@@ -736,10 +736,10 @@ func createPausePodAction(f *framework.Framework, conf pausePodConfig) common.Ac
 // WaitForSchedulerAfterAction performs the provided action and then waits for
 // scheduler to act on the given pod.
-func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, podName string, expectSuccess bool) {
+func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, ns, podName string, expectSuccess bool) {
 predicate := scheduleFailureEvent(podName)
 if expectSuccess {
-predicate = scheduleSuccessEvent(podName, "" /* any node */)
+predicate = scheduleSuccessEvent(ns, podName, "" /* any node */)
 }
 success, err := common.ObserveEventAfterAction(f, predicate, action)
 Expect(err).NotTo(HaveOccurred())
```
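For readers unfamiliar with the helper: `common.ObserveEventAfterAction` runs the action and reports whether an event matching the predicate is observed. Below is a loose, self-contained stand-in under that assumption; it polls the event list rather than watching events like the real framework helper, and it is not the actual implementation.

```go
package sched

import (
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// observeEventAfterAction is a simplified stand-in for common.ObserveEventAfterAction:
// run the action, then poll the namespace's events for one matching the predicate.
func observeEventAfterAction(cs kubernetes.Interface, ns string, predicate func(*v1.Event) bool, action func() error) (bool, error) {
	if err := action(); err != nil {
		return false, err
	}
	deadline := time.Now().Add(30 * time.Second)
	for time.Now().Before(deadline) {
		events, err := cs.CoreV1().Events(ns).List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range events.Items {
			if predicate(&events.Items[i]) {
				return true, nil // saw the scheduling event we were waiting for
			}
		}
		time.Sleep(2 * time.Second)
	}
	return false, nil // timed out without a matching event
}
```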