Mirror of https://github.com/k3s-io/kubernetes.git
Synced 2025-07-24 12:15:52 +00:00

Merge pull request #11108 from mesosphere/fix-10795

Fix races in mesos scheduler plugin test
Commit: c70d8d4c59

Godeps/Godeps.json (6 lines changed, generated)
@@ -496,15 +496,15 @@
 		},
 		{
 			"ImportPath": "github.com/stretchr/testify/assert",
-			"Rev": "7e4a149930b09fe4c2b134c50ce637457ba6e966"
+			"Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff"
 		},
 		{
 			"ImportPath": "github.com/stretchr/testify/mock",
-			"Rev": "7e4a149930b09fe4c2b134c50ce637457ba6e966"
+			"Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff"
 		},
 		{
 			"ImportPath": "github.com/stretchr/testify/require",
-			"Rev": "7e4a149930b09fe4c2b134c50ce637457ba6e966"
+			"Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff"
 		},
 		{
 			"ImportPath": "github.com/syndtr/gocapability/capability",

Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go (7 lines changed, generated, vendored)
@@ -84,6 +84,11 @@ func CallerInfo() []string {
 			return nil
 		}
 
+		// This is a huge edge case, but it will panic if this is the case, see #180
+		if file == "<autogenerated>" {
+			break
+		}
+
 		parts := strings.Split(file, "/")
 		dir := parts[len(parts)-2]
 		file = parts[len(parts)-1]
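The guard added above matters because strings.Split leaves "<autogenerated>" as a single-element slice, so the parts[len(parts)-2] lookup two lines later indexes out of range. A minimal runnable sketch of that failure mode, assuming (as the vendored comment says) that the Go runtime reports "<autogenerated>" as the file name for compiler-synthesized frames:

package main

import (
	"fmt"
	"strings"
)

func main() {
	defer func() {
		fmt.Println("recovered:", recover()) // runtime error: index out of range [-1]
	}()
	file := "<autogenerated>"
	parts := strings.Split(file, "/") // ["<autogenerated>"]: a single element, no "/" to split on
	_ = parts[len(parts)-2]           // parts[-1]: panics; the new guard breaks out of the loop first
}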

@@ -296,7 +301,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 	}
 
 	if !success {
-		Fail(t, "Expected not to be nil.", msgAndArgs...)
+		Fail(t, "Expected value not to be nil.", msgAndArgs...)
 	}
 
 	return success

Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go (22 lines changed, generated, vendored)
@@ -2,6 +2,7 @@ package assert
 
 import (
 	"errors"
+	"io"
 	"math"
 	"regexp"
 	"testing"

@@ -789,3 +790,24 @@ func TestRegexp(t *testing.T) {
 		True(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str))
 	}
 }
+
+func testAutogeneratedFunction() {
+	defer func() {
+		if err := recover(); err == nil {
+			panic("did not panic")
+		}
+		CallerInfo()
+	}()
+	t := struct {
+		io.Closer
+	}{}
+	var c io.Closer
+	c = t
+	c.Close()
+}
+
+func TestCallerInfoWithAutogeneratedFunctions(t *testing.T) {
+	NotPanics(t, func() {
+		testAutogeneratedFunction()
+	})
+}
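The new test drives CallerInfo through exactly the edge case guarded against above: calling Close on a struct that embeds a nil io.Closer dispatches through a compiler-generated wrapper method whose stack frame reports "<autogenerated>" as its file. A stripped-down sketch of the trick, relying only on standard-library behavior:

package main

import (
	"fmt"
	"io"
)

func main() {
	defer func() {
		fmt.Println("recovered:", recover()) // nil pointer dereference
	}()
	t := struct{ io.Closer }{} // the embedded io.Closer is left nil
	var c io.Closer = t
	c.Close() // goes through an <autogenerated> wrapper, which panics on the nil interface
}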

(mesos scheduler plugin test; file header not captured in the mirror)

@@ -370,7 +370,6 @@ func TestPlugin_New(t *testing.T) {
 // and play through the whole life cycle of the plugin while creating pods, deleting
 // and failing them.
 func TestPlugin_LifeCycle(t *testing.T) {
-	t.Skip("disabled due to flakiness; see #10795")
 	assert := &EventAssertions{*assert.New(t)}
 
 	// create a fake pod watch. We use that below to submit new pods to the scheduler

@@ -440,6 +439,8 @@ func TestPlugin_LifeCycle(t *testing.T) {
 	}
 	mockDriver.On("LaunchTasks", mAny("[]*mesosproto.OfferID"), mAny("[]*mesosproto.TaskInfo"), mAny("*mesosproto.Filters")).
 		Return(mesos.Status_DRIVER_RUNNING, nil).Run(launchTasksCalledFunc)
+	mockDriver.On("DeclineOffer", mAny("*mesosproto.OfferID"), mAny("*mesosproto.Filters")).
+		Return(mesos.Status_DRIVER_RUNNING, nil)
 
 	// elect master with mock driver
 	driverFactory := ha.DriverFactory(func() (bindings.SchedulerDriver, error) {
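The added DeclineOffer expectation matters because a testify mock fails hard when a method is called with no matching expectation registered, and whether the scheduler declines an unused offer depends on timing; stubbing it keeps any stray decline harmless. A minimal self-contained sketch of the On/Return pattern used above, where MockDriver is a stand-in and mAny in the real test is presumably a helper around mock.AnythingOfType:

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

type MockDriver struct{ mock.Mock }

func (m *MockDriver) DeclineOffer(offerID, filters string) (string, error) {
	args := m.Called(offerID, filters)
	return args.String(0), args.Error(1)
}

func main() {
	d := new(MockDriver)
	// without a registered expectation, calling DeclineOffer panics inside the mock
	d.On("DeclineOffer", mock.AnythingOfType("string"), mock.AnythingOfType("string")).
		Return("DRIVER_RUNNING", nil)

	status, err := d.DeclineOffer("offer-1", "default-filters")
	fmt.Println(status, err) // DRIVER_RUNNING <nil>
}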

@@ -497,14 +498,22 @@ func TestPlugin_LifeCycle(t *testing.T) {
 	}
 
 	// start another pod
-	podNum := 1
-	startPod := func(offers []*mesos.Offer) (*api.Pod, *mesos.TaskInfo) {
+	podNum := 2
+	startPod := func() (*api.Pod, *mesos.TaskInfo, *mesos.Offer) {
 		podNum = podNum + 1
 
-		// create pod and matching offer
+		// create pod
 		pod := NewTestPod(podNum)
 		podListWatch.Add(pod, true) // notify watchers
 
+		// wait for failedScheduling event because there is no offer
+		assert.EventWithReason(eventObserver, "failedScheduling", "failedScheduling event not received")
+
+		// supply a matching offer
+		offers := []*mesos.Offer{NewTestOffer(podNum)}
+		testScheduler.ResourceOffers(mockDriver, offers)
+
+		// and wait to get scheduled
 		assert.EventWithReason(eventObserver, "scheduled")
 
 		// wait for driver.launchTasks call
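This hunk is the heart of the race fix: instead of taking a shared, pre-created offer slice as an argument, startPod now creates the pod first, waits for the deterministic failedScheduling event (no offers exist yet), and only then supplies one freshly-made matching offer, so each pod can only ever be scheduled against its own offer. A minimal sketch of that sequencing idea, with stand-in channels in place of the scheduler, the event observer, and the offer plumbing:

package main

import (
	"fmt"
	"time"
)

func main() {
	events := make(chan string, 2)
	offers := make(chan string, 1)

	// stand-in scheduler: with no offer available it must fail first,
	// then it schedules against whatever offer arrives afterwards
	go func() {
		events <- "failedScheduling"
		events <- "scheduled on " + <-offers
	}()

	waitFor := func(reason string) {
		select {
		case e := <-events:
			fmt.Println(e)
		case <-time.After(5 * time.Second):
			panic("timed out waiting for " + reason)
		}
	}

	waitFor("failedScheduling") // known state: the pod exists, no offer yet
	offers <- "offer-3"         // only now supply the matching offer
	waitFor("scheduled")        // the pod can only have used offer-3
}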

@@ -512,15 +521,15 @@ func TestPlugin_LifeCycle(t *testing.T) {
 		case launchedTask := <-launchedTasks:
 			testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask, mesos.TaskState_TASK_STAGING))
 			testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask, mesos.TaskState_TASK_RUNNING))
-			return pod, launchedTask
+			return pod, launchedTask, offers[0]
 
 		case <-time.After(5 * time.Second):
 			t.Fatal("timed out waiting for launchTasks")
-			return nil, nil
+			return nil, nil, nil
 		}
 	}
 
-	pod, launchedTask := startPod(offers1)
+	pod, launchedTask, _ := startPod()
 
 	// mock drvier.KillTask, should be invoked when a pod is deleted
 	mockDriver.On("KillTask", mAny("*mesosproto.TaskID")).Return(mesos.Status_DRIVER_RUNNING, nil).Run(func(args mock.Arguments) {
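startPod now also returns the offer it used, and it bounds the wait for the mock's LaunchTasks callback with the usual select/time.After pattern so a lost event fails the test instead of hanging it. The pattern in isolation, with a buffered channel standing in for launchedTasks:

package main

import (
	"fmt"
	"time"
)

func main() {
	launchedTasks := make(chan string, 1)
	go func() { launchedTasks <- "task-3" }() // stand-in for the mock's Run callback

	select {
	case task := <-launchedTasks:
		fmt.Println("launched:", task)
	case <-time.After(5 * time.Second):
		fmt.Println("timed out waiting for launchTasks")
	}
}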

@@ -561,23 +570,23 @@ func TestPlugin_LifeCycle(t *testing.T) {
 	}
 
 	// 1. with pod deleted from the apiserver
-	pod, launchedTask = startPod(offers1)
+	pod, launchedTask, _ = startPod()
 	podListWatch.Delete(pod, false) // not notifying the watchers
 	failPodFromExecutor(launchedTask)
 
 	// 2. with pod still on the apiserver, not bound
-	pod, launchedTask = startPod(offers1)
+	pod, launchedTask, _ = startPod()
 	failPodFromExecutor(launchedTask)
 
 	// 3. with pod still on the apiserver, bound i.e. host!=""
-	pod, launchedTask = startPod(offers1)
-	pod.Spec.NodeName = *offers1[0].Hostname
+	pod, launchedTask, usedOffer := startPod()
+	pod.Spec.NodeName = *usedOffer.Hostname
 	podListWatch.Modify(pod, false) // not notifying the watchers
 	failPodFromExecutor(launchedTask)
 
 	// 4. with pod still on the apiserver, bound i.e. host!="", notified via ListWatch
-	pod, launchedTask = startPod(offers1)
-	pod.Spec.NodeName = *offers1[0].Hostname
+	pod, launchedTask, usedOffer = startPod()
+	pod.Spec.NodeName = *usedOffer.Hostname
 	podListWatch.Modify(pod, true) // notifying the watchers
 	time.Sleep(time.Second / 2)
 	failPodFromExecutor(launchedTask)
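Scenarios 3 and 4 bind the pod to the node it actually launched on via *usedOffer.Hostname; with the old shared offers1 slice, offers1[0] was not necessarily the offer the task consumed. The pointer dereference is the usual generated-protobuf shape, sketched here with a hypothetical trimmed Offer type:

package main

import "fmt"

// Offer is a hypothetical stand-in for the generated mesos.Offer struct,
// where optional scalar fields are pointers
type Offer struct {
	Hostname *string
}

// GetHostname mirrors the nil-safe getters protobuf codegen emits
func (o *Offer) GetHostname() string {
	if o != nil && o.Hostname != nil {
		return *o.Hostname
	}
	return ""
}

func main() {
	h := "node-3"
	usedOffer := &Offer{Hostname: &h}

	nodeName := *usedOffer.Hostname // direct deref, as in the test; panics if nil
	fmt.Println(nodeName, usedOffer.GetHostname())
}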