Merge pull request #28490 from wojtek-t/integration_namespace_3

Automatic merge from submit-queue

Migrate most of the remaining integration tests to run in a dedicated namespace (when possible).
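In practice the migration replaces the global `framework.DeleteAllEtcdKeys()` wipe with a per-test namespace. A minimal sketch of the resulting test skeleton, using only the helper names that appear in the diffs below:

```go
func TestSomething(t *testing.T) {
	// Start a test apiserver; RunAMaster returns the master and its httptest.Server.
	_, s := framework.RunAMaster(nil)
	defer s.Close()

	// Give the test its own namespace instead of wiping all of etcd,
	// so tests that only touch namespaced objects can share a cluster.
	ns := framework.CreateTestingNamespace("some-test", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	// ... create pods, RCs, PVCs, etc. in ns.Name rather than api.NamespaceDefault ...
}
```

Tests that still create non-namespaced objects (Nodes, PersistentVolumes) clean them up with the new `DeleteCollection` call and are annotated as unable to run in parallel.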
k8s-merge-robot 2016-07-05 08:35:51 -07:00 committed by GitHub
commit f9a2de7248
11 changed files with 315 additions and 213 deletions

View File

@@ -30,6 +30,7 @@ type NodeInterface interface {
 	Create(node *api.Node) (*api.Node, error)
 	List(opts api.ListOptions) (*api.NodeList, error)
 	Delete(name string) error
+	DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error
 	Update(*api.Node) (*api.Node, error)
 	UpdateStatus(*api.Node) (*api.Node, error)
 	Watch(opts api.ListOptions) (watch.Interface, error)
@@ -76,6 +77,16 @@ func (c *nodes) Delete(name string) error {
 	return c.r.Delete().Resource(c.resourceName()).Name(name).Do().Error()
 }
 
+// DeleteCollection deletes a collection of nodes.
+func (c *nodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
+	return c.r.Delete().
+		Resource(c.resourceName()).
+		VersionedParams(&listOptions, api.ParameterCodec).
+		Body(options).
+		Do().
+		Error()
+}
+
 // Update updates an existing node.
 func (c *nodes) Update(node *api.Node) (*api.Node, error) {
 	result := &api.Node{}
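The integration tests below use this new method to sweep cluster-scoped Nodes between tests. A minimal usage sketch, as seen later in the scheduler extender test (a nil `DeleteOptions` plus an empty `ListOptions` selects every object in the collection):

```go
// Remove every Node the test created; Nodes are cluster-scoped,
// so this cleanup cannot be limited to the test's namespace.
if err := restClient.Nodes().DeleteCollection(nil, api.ListOptions{}); err != nil {
	t.Fatalf("Failed to delete nodes: %v", err)
}
```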

View File

@@ -68,6 +68,11 @@ func (c *FakeNodes) Delete(name string) error {
 	return err
 }
 
+func (c *FakeNodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
+	_, err := c.Fake.Invokes(NewRootDeleteCollectionAction("nodes", listOptions), &api.NodeList{})
+	return err
+}
+
 func (c *FakeNodes) Watch(opts api.ListOptions) (watch.Interface, error) {
 	return c.Fake.InvokesWatch(NewRootWatchAction("nodes", opts))
 }

View File

@@ -185,12 +185,12 @@ func machine_3_Prioritizer(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.Hos
 }
 
 func TestSchedulerExtender(t *testing.T) {
-	// TODO: Limit the test to a single non-default namespace and clean this up at the end.
-	framework.DeleteAllEtcdKeys()
 	_, s := framework.RunAMaster(nil)
 	defer s.Close()
 
+	ns := framework.CreateTestingNamespace("scheduler-extender", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
 
 	restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 
 	extender1 := &Extender{
@@ -240,15 +240,19 @@ func TestSchedulerExtender(t *testing.T) {
 	}
 	eventBroadcaster := record.NewBroadcaster()
 	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
-	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
+	eventBroadcaster.StartRecordingToSink(restClient.Events(ns.Name))
 	scheduler.New(schedulerConfig).Run()
 
 	defer close(schedulerConfig.StopEverything)
 
-	DoTestPodScheduling(t, restClient)
+	DoTestPodScheduling(ns, t, restClient)
 }
 
-func DoTestPodScheduling(t *testing.T, restClient *client.Client) {
+func DoTestPodScheduling(ns *api.Namespace, t *testing.T, restClient *client.Client) {
+	// NOTE: This test cannot run in parallel, because it is creating and deleting
+	// non-namespaced objects (Nodes).
+	defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{})
+
 	goodCondition := api.NodeCondition{
 		Type:   api.NodeReady,
 		Status: api.ConditionTrue,
@@ -279,7 +283,7 @@ func DoTestPodScheduling(t *testing.T, restClient *client.Client) {
 		},
 	}
 
-	myPod, err := restClient.Pods(api.NamespaceDefault).Create(pod)
+	myPod, err := restClient.Pods(ns.Name).Create(pod)
 	if err != nil {
 		t.Fatalf("Failed to create pod: %v", err)
 	}
@@ -289,7 +293,7 @@ func DoTestPodScheduling(t *testing.T, restClient *client.Client) {
 		t.Fatalf("Failed to schedule pod: %v", err)
 	}
 
-	if myPod, err := restClient.Pods(api.NamespaceDefault).Get(myPod.Name); err != nil {
+	if myPod, err := restClient.Pods(ns.Name).Get(myPod.Name); err != nil {
 		t.Fatalf("Failed to get pod: %v", err)
 	} else if myPod.Spec.NodeName != "machine3" {
 		t.Fatalf("Failed to schedule using extender, expected machine3, got %v", myPod.Spec.NodeName)

View File

@@ -64,9 +64,6 @@ const (
 	// Rc manifest used to create pods for benchmarks.
 	// TODO: Convert this to a full path?
 	TestRCManifest = "benchmark-controller.json"
-
-	// Test Namspace, for pods and rcs.
-	TestNS = "test"
 )
 
 // MasterComponents is a control struct for all master components started via NewMasterComponents.
@@ -326,23 +323,27 @@ func StartRC(controller *api.ReplicationController, restClient *client.Client) (
 	return ScaleRC(created.Name, created.Namespace, controller.Spec.Replicas, restClient)
 }
 
-// StartPods check for numPods in TestNS. If they exist, it no-ops, otherwise it starts up
+// StartPods check for numPods in namespace. If they exist, it no-ops, otherwise it starts up
 // a temp rc, scales it to match numPods, then deletes the rc leaving behind the pods.
-func StartPods(numPods int, host string, restClient *client.Client) error {
+func StartPods(namespace string, numPods int, host string, restClient *client.Client) error {
 	start := time.Now()
 	defer func() {
 		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
 	}()
 	hostField := fields.OneTermEqualSelector(api.PodHostField, host)
 	options := api.ListOptions{FieldSelector: hostField}
-	pods, err := restClient.Pods(TestNS).List(options)
+	pods, err := restClient.Pods(namespace).List(options)
 	if err != nil || len(pods.Items) == numPods {
 		return err
 	}
 	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
-	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
+	// For the sake of simplicity, assume all pods in namespace have selectors matching TestRCManifest.
 	controller := RCFromManifest(TestRCManifest)
+
+	// Overwrite namespace
+	controller.ObjectMeta.Namespace = namespace
+	controller.Spec.Template.ObjectMeta.Namespace = namespace
+
 	// Make the rc unique to the given host.
 	controller.Spec.Replicas = int32(numPods)
 	controller.Spec.Template.Spec.NodeName = host
@@ -355,7 +356,7 @@ func StartPods(numPods int, host string, restClient *client.Client) error {
 	} else {
 		// Delete the rc, otherwise when we restart master components for the next benchmark
 		// the rc controller will race with the pods controller in the rc manager.
-		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
+		return restClient.ReplicationControllers(namespace).Delete(rc.Name)
 	}
 }
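A hypothetical call site for the new `StartPods` signature; the pod count and node name here are illustrative, and `ns` comes from `CreateTestingNamespace` as in the other tests in this PR:

```go
// Start 10 pods pinned to "node-1" in the test's dedicated namespace.
// StartPods no-ops if the pods already exist, mirroring the old behavior
// that was previously hard-wired to the shared TestNS namespace.
if err := framework.StartPods(ns.Name, 10, "node-1", restClient); err != nil {
	t.Fatalf("StartPods failed: %v", err)
}
```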

View File

@@ -20,7 +20,6 @@ package integration
 import (
 	"fmt"
-	"net/http"
 	"net/http/httptest"
 	"strconv"
 	"strings"
@@ -36,7 +35,6 @@
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	"k8s.io/kubernetes/pkg/controller/garbagecollector"
-	"k8s.io/kubernetes/pkg/master"
 	"k8s.io/kubernetes/pkg/registry/generic/registry"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -59,7 +57,7 @@ const oneValidOwnerPodName = "test.pod.3"
 const toBeDeletedRCName = "test.rc.1"
 const remainingRCName = "test.rc.2"
 
-func newPod(podName string, ownerReferences []v1.OwnerReference) *v1.Pod {
+func newPod(podName, podNamespace string, ownerReferences []v1.OwnerReference) *v1.Pod {
 	for i := 0; i < len(ownerReferences); i++ {
 		if len(ownerReferences[i].Kind) == 0 {
 			ownerReferences[i].Kind = "ReplicationController"
@@ -73,7 +71,7 @@ func newPod(podName string, ownerReferences []v1.OwnerReference) *v1.Pod {
 		},
 		ObjectMeta: v1.ObjectMeta{
 			Name:            podName,
-			Namespace:       framework.TestNS,
+			Namespace:       podNamespace,
 			OwnerReferences: ownerReferences,
 		},
 		Spec: v1.PodSpec{
@@ -87,14 +85,14 @@
 	}
 }
 
-func newOwnerRC(name string) *v1.ReplicationController {
+func newOwnerRC(name, namespace string) *v1.ReplicationController {
 	return &v1.ReplicationController{
 		TypeMeta: unversioned.TypeMeta{
 			Kind:       "ReplicationController",
 			APIVersion: "v1",
 		},
 		ObjectMeta: v1.ObjectMeta{
-			Namespace: framework.TestNS,
+			Namespace: namespace,
 			Name:      name,
 		},
 		Spec: v1.ReplicationControllerSpec{
@@ -116,22 +114,10 @@
 	}
 }
 
-func setup(t *testing.T) (*garbagecollector.GarbageCollector, clientset.Interface) {
-	// TODO: Limit the test to a single non-default namespace and clean this up at the end.
-	framework.DeleteAllEtcdKeys()
-
-	var m *master.Master
-	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-		m.Handler.ServeHTTP(w, req)
-	}))
-	// TODO: close the http server
-
+func setup(t *testing.T) (*httptest.Server, *garbagecollector.GarbageCollector, clientset.Interface) {
 	masterConfig := framework.NewIntegrationTestMasterConfig()
 	masterConfig.EnableCoreControllers = false
-	m, err := master.New(masterConfig)
-	if err != nil {
-		t.Fatalf("Error in bringing up the master: %v", err)
-	}
+	_, s := framework.RunAMaster(masterConfig)
 
 	clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
 	if err != nil {
@@ -146,23 +132,28 @@ func setup(t *testing.T) (*garbagecollector.GarbageCollector, clientset.Interfac
 	if err != nil {
 		t.Fatalf("Failed to create garbage collector")
 	}
-	return gc, clientSet
+	return s, gc, clientSet
 }
 
 // This test simulates the cascading deletion.
 func TestCascadingDeletion(t *testing.T) {
-	gc, clientSet := setup(t)
+	s, gc, clientSet := setup(t)
+	defer s.Close()
+
+	ns := framework.CreateTestingNamespace("gc-cascading-deletion", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
 	oldEnableGarbageCollector := registry.EnableGarbageCollector
 	registry.EnableGarbageCollector = true
 	defer func() { registry.EnableGarbageCollector = oldEnableGarbageCollector }()
-	rcClient := clientSet.Core().ReplicationControllers(framework.TestNS)
-	podClient := clientSet.Core().Pods(framework.TestNS)
+	rcClient := clientSet.Core().ReplicationControllers(ns.Name)
+	podClient := clientSet.Core().Pods(ns.Name)
 
-	toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName))
+	toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name))
 	if err != nil {
 		t.Fatalf("Failed to create replication controller: %v", err)
 	}
-	remainingRC, err := rcClient.Create(newOwnerRC(remainingRCName))
+	remainingRC, err := rcClient.Create(newOwnerRC(remainingRCName, ns.Name))
 	if err != nil {
 		t.Fatalf("Failed to create replication controller: %v", err)
 	}
@@ -176,14 +167,14 @@ func TestCascadingDeletion(t *testing.T) {
 	}
 	// this pod should be cascadingly deleted.
-	pod := newPod(garbageCollectedPodName, []v1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
+	pod := newPod(garbageCollectedPodName, ns.Name, []v1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
 	_, err = podClient.Create(pod)
 	if err != nil {
 		t.Fatalf("Failed to create Pod: %v", err)
 	}
 	// this pod shouldn't be cascadingly deleted, because it has a valid reference.
-	pod = newPod(oneValidOwnerPodName, []v1.OwnerReference{
+	pod = newPod(oneValidOwnerPodName, ns.Name, []v1.OwnerReference{
 		{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName},
 		{UID: remainingRC.ObjectMeta.UID, Name: remainingRCName},
 	})
@@ -193,7 +184,7 @@ func TestCascadingDeletion(t *testing.T) {
 	}
 	// this pod shouldn't be cascadingly deleted, because it doesn't have an owner.
-	pod = newPod(independentPodName, []v1.OwnerReference{})
+	pod = newPod(independentPodName, ns.Name, []v1.OwnerReference{})
 	_, err = podClient.Create(pod)
 	if err != nil {
 		t.Fatalf("Failed to create Pod: %v", err)
@@ -253,13 +244,18 @@
 // This test simulates the case where an object is created with an owner that
 // doesn't exist. It verifies the GC will delete such an object.
 func TestCreateWithNonExistentOwner(t *testing.T) {
-	gc, clientSet := setup(t)
+	s, gc, clientSet := setup(t)
+	defer s.Close()
+
+	ns := framework.CreateTestingNamespace("gc-non-existing-owner", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
 	oldEnableGarbageCollector := registry.EnableGarbageCollector
 	registry.EnableGarbageCollector = true
 	defer func() { registry.EnableGarbageCollector = oldEnableGarbageCollector }()
-	podClient := clientSet.Core().Pods(framework.TestNS)
+	podClient := clientSet.Core().Pods(ns.Name)
 
-	pod := newPod(garbageCollectedPodName, []v1.OwnerReference{{UID: "doesn't matter", Name: toBeDeletedRCName}})
+	pod := newPod(garbageCollectedPodName, ns.Name, []v1.OwnerReference{{UID: "doesn't matter", Name: toBeDeletedRCName}})
 	_, err := podClient.Create(pod)
 	if err != nil {
 		t.Fatalf("Failed to create Pod: %v", err)
@@ -288,13 +284,13 @@
 	}
 }
 
-func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix string, initialFinalizers []string, options *api.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
+func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options *api.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
 	defer wg.Done()
-	rcClient := clientSet.Core().ReplicationControllers(framework.TestNS)
-	podClient := clientSet.Core().Pods(framework.TestNS)
+	rcClient := clientSet.Core().ReplicationControllers(namespace)
+	podClient := clientSet.Core().Pods(namespace)
 	// create rc.
 	rcName := "test.rc." + nameSuffix
-	rc := newOwnerRC(rcName)
+	rc := newOwnerRC(rcName, namespace)
 	rc.ObjectMeta.Finalizers = initialFinalizers
 	rc, err := rcClient.Create(rc)
 	if err != nil {
@@ -305,7 +301,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
 	var podUIDs []types.UID
 	for j := 0; j < 3; j++ {
 		podName := "test.pod." + nameSuffix + "-" + strconv.Itoa(j)
-		pod := newPod(podName, []v1.OwnerReference{{UID: rc.ObjectMeta.UID, Name: rc.ObjectMeta.Name}})
+		pod := newPod(podName, namespace, []v1.OwnerReference{{UID: rc.ObjectMeta.UID, Name: rc.ObjectMeta.Name}})
 		_, err = podClient.Create(pod)
 		if err != nil {
 			t.Fatalf("Failed to create Pod: %v", err)
@@ -325,9 +321,9 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
 	}
 }
 
-func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
-	rcClient := clientSet.Core().ReplicationControllers(framework.TestNS)
-	podClient := clientSet.Core().Pods(framework.TestNS)
+func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
+	rcClient := clientSet.Core().ReplicationControllers(namespace)
+	podClient := clientSet.Core().Pods(namespace)
 	pods, err := podClient.List(api.ListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("Failed to list pods: %v", err)
@@ -353,7 +349,12 @@
 // e2e tests that put more stress.
 func TestStressingCascadingDeletion(t *testing.T) {
 	t.Logf("starts garbage collector stress test")
-	gc, clientSet := setup(t)
+	s, gc, clientSet := setup(t)
+	defer s.Close()
+
+	ns := framework.CreateTestingNamespace("gc-stressing-cascading-deletion", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
 	oldEnableGarbageCollector := registry.EnableGarbageCollector
 	registry.EnableGarbageCollector = true
 	defer func() { registry.EnableGarbageCollector = oldEnableGarbageCollector }()
@@ -367,13 +368,13 @@ func TestStressingCascadingDeletion(t *testing.T) {
 	rcUIDs := make(chan types.UID, collections*4)
 	for i := 0; i < collections; i++ {
 		// rc is created with empty finalizers, deleted with nil delete options, pods will be deleted
-		go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), []string{}, nil, &wg, rcUIDs)
+		go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, nil, &wg, rcUIDs)
 		// rc is created with the orphan finalizer, deleted with nil options, pods will remain.
-		go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), []string{api.FinalizerOrphan}, nil, &wg, rcUIDs)
+		go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{api.FinalizerOrphan}, nil, &wg, rcUIDs)
 		// rc is created with the orphan finalizer, deleted with DeleteOptions.OrphanDependents=false, pods will be deleted.
-		go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), []string{api.FinalizerOrphan}, getNonOrphanOptions(), &wg, rcUIDs)
+		go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{api.FinalizerOrphan}, getNonOrphanOptions(), &wg, rcUIDs)
 		// rc is created with empty finalizers, deleted with DeleteOptions.OrphanDependents=true, pods will remain.
-		go setupRCsPods(t, gc, clientSet, "collection4-"+strconv.Itoa(i), []string{}, getOrphanOptions(), &wg, rcUIDs)
+		go setupRCsPods(t, gc, clientSet, "collection4-"+strconv.Itoa(i), ns.Name, []string{}, getOrphanOptions(), &wg, rcUIDs)
 	}
 	wg.Wait()
 	t.Logf("all pods are created, all replications controllers are created then deleted")
@@ -390,14 +391,14 @@ func TestStressingCascadingDeletion(t *testing.T) {
 		podsInEachCollection := 3
 		// see the comments on the calls to setupRCsPods for details
 		remainingGroups := 2
-		return verifyRemainingObjects(t, clientSet, 0, collections*podsInEachCollection*remainingGroups)
+		return verifyRemainingObjects(t, clientSet, ns.Name, 0, collections*podsInEachCollection*remainingGroups)
 	}); err != nil {
 		t.Fatal(err)
 	}
 	t.Logf("number of remaining replication controllers and pods are as expected")
 
 	// verify the remaining pods all have "orphan" in their names.
-	podClient := clientSet.Core().Pods(framework.TestNS)
+	podClient := clientSet.Core().Pods(ns.Name)
 	pods, err := podClient.List(api.ListOptions{})
 	if err != nil {
 		t.Fatal(err)
@@ -420,14 +421,19 @@
 }
 
 func TestOrphaning(t *testing.T) {
-	gc, clientSet := setup(t)
+	s, gc, clientSet := setup(t)
+	defer s.Close()
+
+	ns := framework.CreateTestingNamespace("gc-orphaning", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
 	oldEnableGarbageCollector := registry.EnableGarbageCollector
 	registry.EnableGarbageCollector = true
 	defer func() { registry.EnableGarbageCollector = oldEnableGarbageCollector }()
-	podClient := clientSet.Core().Pods(framework.TestNS)
-	rcClient := clientSet.Core().ReplicationControllers(framework.TestNS)
+	podClient := clientSet.Core().Pods(ns.Name)
+	rcClient := clientSet.Core().ReplicationControllers(ns.Name)
 	// create the RC with the orphan finalizer set
-	toBeDeletedRC := newOwnerRC(toBeDeletedRCName)
+	toBeDeletedRC := newOwnerRC(toBeDeletedRCName, ns.Name)
 	toBeDeletedRC, err := rcClient.Create(toBeDeletedRC)
 	if err != nil {
 		t.Fatalf("Failed to create replication controller: %v", err)
@@ -438,7 +444,7 @@ func TestOrphaning(t *testing.T) {
 	podsNum := 3
 	for i := 0; i < podsNum; i++ {
 		podName := garbageCollectedPodName + strconv.Itoa(i)
-		pod := newPod(podName, []v1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
+		pod := newPod(podName, ns.Name, []v1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
 		_, err = podClient.Create(pod)
 		if err != nil {
 			t.Fatalf("Failed to create Pod: %v", err)

View File

@@ -64,7 +64,6 @@ func init() {
 	Pods = *pods
 	Workers = *workers
 	Tasks = *tasks
-	framework.DeleteAllEtcdKeys()
 }
 
 // getPods returns the cmd line -pods or b.N if -pods wasn't specified.
@@ -98,7 +97,7 @@ func getIterations(bN int) int {
 }
 
 // startPodsOnNodes creates numPods sharded across numNodes
-func startPodsOnNodes(numPods, numNodes int, restClient *client.Client) {
+func startPodsOnNodes(ns string, numPods, numNodes int, restClient *client.Client) {
 	podsPerNode := numPods / numNodes
 	if podsPerNode < 1 {
 		podsPerNode = 1
@@ -114,6 +113,9 @@ func BenchmarkPodList(b *testing.B) {
 	m := framework.NewMasterComponents(&framework.Config{nil, true, false, 250.0, 500})
 	defer m.Stop(true, true)
 
+	ns := framework.CreateTestingNamespace("benchmark-pod-list", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
 	numPods, numTasks, iter := getPods(b.N), getTasks(b.N), getIterations(b.N)
 	podsPerNode := numPods / numTasks
 	if podsPerNode < 1 {
@@ -122,7 +124,7 @@ func BenchmarkPodList(b *testing.B) {
 	glog.Infof("Starting benchmark: b.N %d, pods %d, workers %d, podsPerNode %d",
 		b.N, numPods, numTasks, podsPerNode)
 
-	startPodsOnNodes(numPods, numTasks, m.RestClient)
+	startPodsOnNodes(ns.Name, numPods, numTasks, m.RestClient)
 	// Stop the rc manager so it doesn't steal resources
 	m.Stop(false, true)
@@ -134,7 +136,7 @@ func BenchmarkPodList(b *testing.B) {
 			defer func() {
 				glog.V(3).Infof("Worker %d: Node %v listing pods took %v", id, host, time.Since(now))
 			}()
-			if pods, err := m.RestClient.Pods(framework.TestNS).List(
+			if pods, err := m.RestClient.Pods(ns.Name).List(
 				labels.Everything(),
 				fields.OneTermEqualSelector(client.PodHost, host)); err != nil {
 				return err
@@ -153,13 +155,16 @@ func BenchmarkPodListEtcd(b *testing.B) {
 	m := framework.NewMasterComponents(&framework.Config{nil, true, false, 250.0, 500})
 	defer m.Stop(true, true)
 
+	ns := framework.CreateTestingNamespace("benchmark-pod-list-etcd", s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
 	numPods, numTasks, iter := getPods(b.N), getTasks(b.N), getIterations(b.N)
 	podsPerNode := numPods / numTasks
 	if podsPerNode < 1 {
 		podsPerNode = 1
 	}
 
-	startPodsOnNodes(numPods, numTasks, m.RestClient)
+	startPodsOnNodes(ns.Name, numPods, numTasks, m.RestClient)
 	// Stop the rc manager so it doesn't steal resources
 	m.Stop(false, true)
@@ -173,7 +178,7 @@ func BenchmarkPodListEtcd(b *testing.B) {
 			defer func() {
 				glog.V(3).Infof("Worker %d: listing pods took %v", id, time.Since(now))
 			}()
-			pods, err := m.RestClient.Pods(framework.TestNS).List(labels.Everything(), fields.Everything())
+			pods, err := m.RestClient.Pods(ns.Name).List(labels.Everything(), fields.Everything())
 			if err != nil {
 				return err
 			}

View File

@ -104,23 +104,27 @@ func testSleep() {
} }
func TestPersistentVolumeRecycler(t *testing.T) { func TestPersistentVolumeRecycler(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
glog.V(2).Infof("TestPersistentVolumeRecycler started") glog.V(2).Infof("TestPersistentVolumeRecycler started")
_, s := framework.RunAMaster(nil) _, s := framework.RunAMaster(nil)
defer s.Close() defer s.Close()
testClient, ctrl, watchPV, watchPVC := createClients(t, s) ns := framework.CreateTestingNamespace("pv-recycler", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, ctrl, watchPV, watchPVC := createClients(ns, t, s)
defer watchPV.Stop() defer watchPV.Stop()
defer watchPVC.Stop() defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
ctrl.Run() ctrl.Run()
defer ctrl.Stop() defer ctrl.Stop()
// This PV will be claimed, released, and recycled. // This PV will be claimed, released, and recycled.
pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRecycle) pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRecycle)
pvc := createPVC("fake-pvc-recycler", "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}) pvc := createPVC("fake-pvc-recycler", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
_, err := testClient.PersistentVolumes().Create(pv) _, err := testClient.PersistentVolumes().Create(pv)
if err != nil { if err != nil {
@ -128,7 +132,7 @@ func TestPersistentVolumeRecycler(t *testing.T) {
} }
glog.V(2).Infof("TestPersistentVolumeRecycler pvc created") glog.V(2).Infof("TestPersistentVolumeRecycler pvc created")
_, err = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc) _, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil { if err != nil {
t.Errorf("Failed to create PersistentVolumeClaim: %v", err) t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
} }
@ -137,11 +141,11 @@ func TestPersistentVolumeRecycler(t *testing.T) {
// wait until the controller pairs the volume and claim // wait until the controller pairs the volume and claim
waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeBound) waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeBound)
glog.V(2).Infof("TestPersistentVolumeRecycler pv bound") glog.V(2).Infof("TestPersistentVolumeRecycler pv bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, watchPVC, api.ClaimBound) waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
glog.V(2).Infof("TestPersistentVolumeRecycler pvc bound") glog.V(2).Infof("TestPersistentVolumeRecycler pvc bound")
// deleting a claim releases the volume, after which it can be recycled // deleting a claim releases the volume, after which it can be recycled
if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name, nil); err != nil { if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
t.Errorf("error deleting claim %s", pvc.Name) t.Errorf("error deleting claim %s", pvc.Name)
} }
glog.V(2).Infof("TestPersistentVolumeRecycler pvc deleted") glog.V(2).Infof("TestPersistentVolumeRecycler pvc deleted")
@ -150,45 +154,48 @@ func TestPersistentVolumeRecycler(t *testing.T) {
glog.V(2).Infof("TestPersistentVolumeRecycler pv released") glog.V(2).Infof("TestPersistentVolumeRecycler pv released")
waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeAvailable) waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeAvailable)
glog.V(2).Infof("TestPersistentVolumeRecycler pv available") glog.V(2).Infof("TestPersistentVolumeRecycler pv available")
} }
func TestPersistentVolumeDeleter(t *testing.T) { func TestPersistentVolumeDeleter(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
glog.V(2).Infof("TestPersistentVolumeDeleter started") glog.V(2).Infof("TestPersistentVolumeDeleter started")
_, s := framework.RunAMaster(nil) _, s := framework.RunAMaster(nil)
defer s.Close() defer s.Close()
testClient, ctrl, watchPV, watchPVC := createClients(t, s) ns := framework.CreateTestingNamespace("pv-deleter", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, ctrl, watchPV, watchPVC := createClients(ns, t, s)
defer watchPV.Stop() defer watchPV.Stop()
defer watchPVC.Stop() defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
ctrl.Run() ctrl.Run()
defer ctrl.Stop() defer ctrl.Stop()
// This PV will be claimed, released, and deleted. // This PV will be claimed, released, and deleted.
pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimDelete) pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimDelete)
pvc := createPVC("fake-pvc-deleter", "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}) pvc := createPVC("fake-pvc-deleter", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
_, err := testClient.PersistentVolumes().Create(pv) _, err := testClient.PersistentVolumes().Create(pv)
if err != nil { if err != nil {
t.Errorf("Failed to create PersistentVolume: %v", err) t.Errorf("Failed to create PersistentVolume: %v", err)
} }
glog.V(2).Infof("TestPersistentVolumeDeleter pv created") glog.V(2).Infof("TestPersistentVolumeDeleter pv created")
_, err = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc) _, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil { if err != nil {
t.Errorf("Failed to create PersistentVolumeClaim: %v", err) t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
} }
glog.V(2).Infof("TestPersistentVolumeDeleter pvc created") glog.V(2).Infof("TestPersistentVolumeDeleter pvc created")
waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeBound) waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeBound)
glog.V(2).Infof("TestPersistentVolumeDeleter pv bound") glog.V(2).Infof("TestPersistentVolumeDeleter pv bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, watchPVC, api.ClaimBound) waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
glog.V(2).Infof("TestPersistentVolumeDeleter pvc bound") glog.V(2).Infof("TestPersistentVolumeDeleter pvc bound")
// deleting a claim releases the volume, after which it can be recycled // deleting a claim releases the volume, after which it can be recycled
if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name, nil); err != nil { if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
t.Errorf("error deleting claim %s", pvc.Name) t.Errorf("error deleting claim %s", pvc.Name)
} }
glog.V(2).Infof("TestPersistentVolumeDeleter pvc deleted") glog.V(2).Infof("TestPersistentVolumeDeleter pvc deleted")
@ -206,25 +213,28 @@ func TestPersistentVolumeDeleter(t *testing.T) {
} }
func TestPersistentVolumeBindRace(t *testing.T) { func TestPersistentVolumeBindRace(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
// Test a race binding many claims to a PV that is pre-bound to a specific // Test a race binding many claims to a PV that is pre-bound to a specific
// PVC. Only this specific PVC should get bound. // PVC. Only this specific PVC should get bound.
glog.V(2).Infof("TestPersistentVolumeBindRace started") glog.V(2).Infof("TestPersistentVolumeBindRace started")
_, s := framework.RunAMaster(nil) _, s := framework.RunAMaster(nil)
defer s.Close() defer s.Close()
testClient, ctrl, watchPV, watchPVC := createClients(t, s) ns := framework.CreateTestingNamespace("pv-bind-race", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, ctrl, watchPV, watchPVC := createClients(ns, t, s)
defer watchPV.Stop() defer watchPV.Stop()
defer watchPVC.Stop() defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
ctrl.Run() ctrl.Run()
defer ctrl.Stop() defer ctrl.Stop()
pv := createPV("fake-pv-race", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain) pv := createPV("fake-pv-race", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
pvc := createPVC("fake-pvc-race", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
pvc := createPVC("fake-pvc-race", "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
counter := 0 counter := 0
maxClaims := 100 maxClaims := 100
claims := []*api.PersistentVolumeClaim{} claims := []*api.PersistentVolumeClaim{}
@ -233,7 +243,7 @@ func TestPersistentVolumeBindRace(t *testing.T) {
clone, _ := conversion.NewCloner().DeepCopy(pvc) clone, _ := conversion.NewCloner().DeepCopy(pvc)
newPvc, _ := clone.(*api.PersistentVolumeClaim) newPvc, _ := clone.(*api.PersistentVolumeClaim)
newPvc.ObjectMeta = api.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)} newPvc.ObjectMeta = api.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
claim, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(newPvc) claim, err := testClient.PersistentVolumeClaims(ns.Name).Create(newPvc)
if err != nil { if err != nil {
t.Fatal("Error creating newPvc: %v", err) t.Fatal("Error creating newPvc: %v", err)
} }
@ -276,16 +286,20 @@ func TestPersistentVolumeBindRace(t *testing.T) {
// TestPersistentVolumeClaimLabelSelector test binding using label selectors // TestPersistentVolumeClaimLabelSelector test binding using label selectors
func TestPersistentVolumeClaimLabelSelector(t *testing.T) { func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
_, s := framework.RunAMaster(nil) _, s := framework.RunAMaster(nil)
defer s.Close() defer s.Close()
testClient, controller, watchPV, watchPVC := createClients(t, s) ns := framework.CreateTestingNamespace("pvc-label-selector", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, controller, watchPV, watchPVC := createClients(ns, t, s)
defer watchPV.Stop() defer watchPV.Stop()
defer watchPVC.Stop() defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
controller.Run() controller.Run()
defer controller.Stop() defer controller.Stop()
@ -296,7 +310,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
pv_true = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim) pv_true = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim) pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
pvc = createPVC("pvc-ls-1", "1G", modes) pvc = createPVC("pvc-ls-1", ns.Name, "1G", modes)
) )
pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "true"}) pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "true"})
@ -318,7 +332,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
}, },
} }
_, err = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc) _, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil { if err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim: %v", err) t.Fatalf("Failed to create PersistentVolumeClaim: %v", err)
} }
@ -326,7 +340,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound) waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
t.Log("volume bound") t.Log("volume bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, watchPVC, api.ClaimBound) waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
t.Log("claim bound") t.Log("claim bound")
pv, err := testClient.PersistentVolumes().Get("pv-false") pv, err := testClient.PersistentVolumes().Get("pv-false")
@ -351,16 +365,20 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
// TestPersistentVolumeClaimLabelSelectorMatchExpressions test binding using // TestPersistentVolumeClaimLabelSelectorMatchExpressions test binding using
// MatchExpressions label selectors // MatchExpressions label selectors
func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
_, s := framework.RunAMaster(nil) _, s := framework.RunAMaster(nil)
defer s.Close() defer s.Close()
testClient, controller, watchPV, watchPVC := createClients(t, s) ns := framework.CreateTestingNamespace("pvc-match-expresssions", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, controller, watchPV, watchPVC := createClients(ns, t, s)
defer watchPV.Stop() defer watchPV.Stop()
defer watchPVC.Stop() defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
controller.Run() controller.Run()
defer controller.Stop() defer controller.Stop()
@ -371,7 +389,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
pv_true = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim) pv_true = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim) pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
pvc = createPVC("pvc-ls-1", "1G", modes) pvc = createPVC("pvc-ls-1", ns.Name, "1G", modes)
) )
pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "valA", "bar": ""}) pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "valA", "bar": ""})
@ -412,7 +430,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
}, },
} }
_, err = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc) _, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil { if err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim: %v", err) t.Fatalf("Failed to create PersistentVolumeClaim: %v", err)
} }
@ -420,7 +438,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound) waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
t.Log("volume bound") t.Log("volume bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, watchPVC, api.ClaimBound) waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
t.Log("claim bound") t.Log("claim bound")
pv, err := testClient.PersistentVolumes().Get("pv-false") pv, err := testClient.PersistentVolumes().Get("pv-false")
@ -445,16 +463,20 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
// TestPersistentVolumeMultiPVs tests binding of one PVC to 100 PVs with // TestPersistentVolumeMultiPVs tests binding of one PVC to 100 PVs with
// different size. // different size.
func TestPersistentVolumeMultiPVs(t *testing.T) { func TestPersistentVolumeMultiPVs(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
_, s := framework.RunAMaster(nil) _, s := framework.RunAMaster(nil)
defer s.Close() defer s.Close()
testClient, controller, watchPV, watchPVC := createClients(t, s) ns := framework.CreateTestingNamespace("multi-pvs", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, controller, watchPV, watchPVC := createClients(ns, t, s)
defer watchPV.Stop() defer watchPV.Stop()
defer watchPVC.Stop() defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
controller.Run() controller.Run()
defer controller.Stop() defer controller.Stop()
@ -466,7 +488,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
[]api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain) []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
} }
pvc := createPVC("pvc-2", strconv.Itoa(maxPVs/2)+"G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}) pvc := createPVC("pvc-2", ns.Name, strconv.Itoa(maxPVs/2)+"G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
for i := 0; i < maxPVs; i++ { for i := 0; i < maxPVs; i++ {
_, err := testClient.PersistentVolumes().Create(pvs[i]) _, err := testClient.PersistentVolumes().Create(pvs[i])
@ -477,7 +499,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
} }
t.Log("volumes created") t.Log("volumes created")
_, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc) _, err := testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil { if err != nil {
t.Errorf("Failed to create PersistentVolumeClaim: %v", err) t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
} }
@ -486,7 +508,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
// wait until the binder pairs the claim with a volume // wait until the binder pairs the claim with a volume
waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound) waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
t.Log("volume bound") t.Log("volume bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, watchPVC, api.ClaimBound) waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
t.Log("claim bound") t.Log("claim bound")
// only one PV is bound // only one PV is bound
@ -517,7 +539,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
} }
// deleting a claim releases the volume // deleting a claim releases the volume
if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name, nil); err != nil { if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
t.Errorf("error deleting claim %s", pvc.Name) t.Errorf("error deleting claim %s", pvc.Name)
} }
t.Log("claim deleted") t.Log("claim deleted")
@ -529,16 +551,20 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
// TestPersistentVolumeMultiPVsPVCs tests binding of 100 PVC to 100 PVs. // TestPersistentVolumeMultiPVsPVCs tests binding of 100 PVC to 100 PVs.
// This test is configurable by KUBE_INTEGRATION_PV_* variables. // This test is configurable by KUBE_INTEGRATION_PV_* variables.
func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
_, s := framework.RunAMaster(nil) _, s := framework.RunAMaster(nil)
defer s.Close() defer s.Close()
testClient, binder, watchPV, watchPVC := createClients(t, s) ns := framework.CreateTestingNamespace("multi-pvs-pvcs", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, binder, watchPV, watchPVC := createClients(ns, t, s)
defer watchPV.Stop() defer watchPV.Stop()
defer watchPVC.Stop() defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
binder.Run() binder.Run()
defer binder.Stop() defer binder.Stop()
@ -549,7 +575,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
// This PV will be claimed, released, and deleted // This PV will be claimed, released, and deleted
pvs[i] = createPV("pv-"+strconv.Itoa(i), "/tmp/foo"+strconv.Itoa(i), "1G", pvs[i] = createPV("pv-"+strconv.Itoa(i), "/tmp/foo"+strconv.Itoa(i), "1G",
[]api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain) []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
pvcs[i] = createPVC("pvc-"+strconv.Itoa(i), "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}) pvcs[i] = createPVC("pvc-"+strconv.Itoa(i), ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
} }
// Create PVs first // Create PVs first
@ -573,7 +599,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
// Create the claims, again in a separate goroutine. // Create the claims, again in a separate goroutine.
go func() { go func() {
for i := 0; i < objCount; i++ { for i := 0; i < objCount; i++ {
_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvcs[i]) _, _ = testClient.PersistentVolumeClaims(ns.Name).Create(pvcs[i])
} }
}() }()
@ -595,7 +621,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
} }
glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name) glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)
pvc, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Get(pvcs[i].Name) pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name)
if err != nil { if err != nil {
t.Fatalf("Unexpected error getting pvc: %v", err) t.Fatalf("Unexpected error getting pvc: %v", err)
} }
@ -610,22 +636,27 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
// TestPersistentVolumeProvisionMultiPVCs tests provisioning of many PVCs. // TestPersistentVolumeProvisionMultiPVCs tests provisioning of many PVCs.
// This test is configurable by KUBE_INTEGRATION_PV_* variables. // This test is configurable by KUBE_INTEGRATION_PV_* variables.
func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
_, s := framework.RunAMaster(nil) _, s := framework.RunAMaster(nil)
defer s.Close() defer s.Close()
testClient, binder, watchPV, watchPVC := createClients(t, s) ns := framework.CreateTestingNamespace("provision-multi-pvs", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, binder, watchPV, watchPVC := createClients(ns, t, s)
defer watchPV.Stop() defer watchPV.Stop()
defer watchPVC.Stop() defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
binder.Run() binder.Run()
defer binder.Stop() defer binder.Stop()
objCount := getObjectCount() objCount := getObjectCount()
pvcs := make([]*api.PersistentVolumeClaim, objCount) pvcs := make([]*api.PersistentVolumeClaim, objCount)
for i := 0; i < objCount; i++ { for i := 0; i < objCount; i++ {
pvc := createPVC("pvc-provision-"+strconv.Itoa(i), "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}) pvc := createPVC("pvc-provision-"+strconv.Itoa(i), ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
pvc.Annotations = map[string]string{ pvc.Annotations = map[string]string{
"volume.alpha.kubernetes.io/storage-class": "", "volume.alpha.kubernetes.io/storage-class": "",
} }
@ -637,7 +668,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
// early. It gets stuck with >3000 claims. // early. It gets stuck with >3000 claims.
go func() { go func() {
for i := 0; i < objCount; i++ { for i := 0; i < objCount; i++ {
_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvcs[i]) _, _ = testClient.PersistentVolumeClaims(ns.Name).Create(pvcs[i])
} }
}() }()
@ -666,7 +697,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
// Delete the claims // Delete the claims
for i := 0; i < objCount; i++ { for i := 0; i < objCount; i++ {
_ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvcs[i].Name, nil) _ = testClient.PersistentVolumeClaims(ns.Name).Delete(pvcs[i].Name, nil)
} }
// Wait for the PVs to get deleted by listing remaining volumes // Wait for the PVs to get deleted by listing remaining volumes
@ -689,16 +720,20 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
// TestPersistentVolumeMultiPVsDiffAccessModes tests binding of one PVC to two // TestPersistentVolumeMultiPVsDiffAccessModes tests binding of one PVC to two
// PVs with different access modes. // PVs with different access modes.
func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
_, s := framework.RunAMaster(nil) _, s := framework.RunAMaster(nil)
defer s.Close() defer s.Close()
testClient, controller, watchPV, watchPVC := createClients(t, s) ns := framework.CreateTestingNamespace("multi-pvs-diff-access", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, controller, watchPV, watchPVC := createClients(ns, t, s)
defer watchPV.Stop() defer watchPV.Stop()
defer watchPVC.Stop() defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
controller.Run()
defer controller.Stop()
@@ -708,7 +743,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
pv_rwm := createPV("pv-rwm", "/tmp/bar", "10G",
[]api.PersistentVolumeAccessMode{api.ReadWriteMany}, api.PersistentVolumeReclaimRetain)
pvc := createPVC("pvc-rwm", "5G", []api.PersistentVolumeAccessMode{api.ReadWriteMany})
pvc := createPVC("pvc-rwm", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteMany})
_, err := testClient.PersistentVolumes().Create(pv_rwm)
if err != nil {
@@ -720,7 +755,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
}
t.Log("volumes created")
_, err = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)
_, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil {
t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
}
@@ -729,7 +764,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
// wait until the controller pairs the volume and claim
waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
t.Log("volume bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, watchPVC, api.ClaimBound)
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, api.ClaimBound)
t.Log("claim bound")
// only RWM PV is bound
@@ -752,7 +787,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
}
// deleting a claim releases the volume
if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name, nil); err != nil {
if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
t.Errorf("error deleting claim %s", pvc.Name)
}
t.Log("claim deleted")
@@ -782,9 +817,9 @@ func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w
}
}
func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName string, w watch.Interface, phase api.PersistentVolumeClaimPhase) {
func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase api.PersistentVolumeClaimPhase) {
// Check if the claim is already in requested phase
claim, err := client.Core().PersistentVolumeClaims(api.NamespaceDefault).Get(claimName)
claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName)
if err == nil && claim.Status.Phase == phase {
return
}
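The hunk shows only the fast path of waitForPersistentVolumeClaimPhase; the rest of the body is outside the diff context. A plausible sketch of the watch loop it falls back to, assuming the usual event-consumption shape rather than the verbatim upstream code:

for event := range w.ResultChan() {
	claim, ok := event.Object.(*api.PersistentVolumeClaim)
	if !ok {
		continue
	}
	// Return once the watched claim reaches the requested phase.
	if claim.Name == claimName && claim.Status.Phase == phase {
		return
	}
}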
@@ -831,11 +866,21 @@ func waitForAnyPersistentVolumeClaimPhase(w watch.Interface, phase api.Persisten
}
}
func createClients(t *testing.T, s *httptest.Server) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
// Use higher QPS and Burst; there is a test for race conditions which
// creates many objects, and the default values were too low.
binderClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000000, Burst: 1000000})
testClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000000, Burst: 1000000})
binderClient := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
QPS: 1000000,
Burst: 1000000,
})
testClient := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
QPS: 1000000,
Burst: 1000000,
})
host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */)
plugin := &volumetest.FakeVolumePlugin{
@@ -860,7 +905,7 @@ func createClients(t *testing.T, s *httptest.Server) (*clientset.Clientset, *per
if err != nil {
t.Fatalf("Failed to watch PersistentVolumes: %v", err)
}
watchPVC, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Watch(api.ListOptions{})
watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(api.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumeClaims: %v", err)
}
@@ -880,11 +925,11 @@ func createPV(name, path, cap string, mode []api.PersistentVolumeAccessMode, rec
}
}
func createPVC(name, cap string, mode []api.PersistentVolumeAccessMode) *api.PersistentVolumeClaim {
func createPVC(name, namespace, cap string, mode []api.PersistentVolumeAccessMode) *api.PersistentVolumeClaim {
return &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: api.NamespaceDefault,
Namespace: namespace,
},
Spec: api.PersistentVolumeClaimSpec{
Resources: api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse(cap)}},
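With the added namespace parameter, call sites now pass the testing namespace explicitly. A one-line usage sketch matching the new signature (the claim name is hypothetical):

pvc := createPVC("pvc-example", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})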

View File

@@ -54,9 +54,6 @@ func init() {
// quota_test.go:100: Took 4.196205966s to scale up without quota
// quota_test.go:115: Took 12.021640372s to scale up with quota
func TestQuota(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
initializationCh := make(chan struct{})
// Set up a master
var m *master.Master
@@ -81,6 +78,11 @@ func TestQuota(t *testing.T) {
}
close(initializationCh)
ns := framework.CreateTestingNamespace("quotaed", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
ns2 := framework.CreateTestingNamespace("non-quotaed", s, t)
defer framework.DeleteTestingNamespace(ns2, s, t)
controllerCh := make(chan struct{})
defer close(controllerCh)
@@ -102,12 +104,15 @@ func TestQuota(t *testing.T) {
go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(2, controllerCh)
startTime := time.Now()
scale(t, api.NamespaceDefault, clientset)
scale(t, ns2.Name, clientset)
endTime := time.Now()
t.Logf("Took %v to scale up without quota", endTime.Sub(startTime))
quota := &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{Name: "quota"},
ObjectMeta: api.ObjectMeta{
Name: "quota",
Namespace: ns.Name,
},
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourcePods: resource.MustParse("1000"),
@@ -128,7 +133,7 @@ func waitForQuota(t *testing.T, quota *api.ResourceQuota, clientset *clientset.C
t.Fatalf("unexpected error: %v", err)
}
if _, err := clientset.Core().ResourceQuotas("quotaed").Create(quota); err != nil {
if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil {
t.Fatalf("unexpected error: %v", err)
}
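The quota test now compares scaling in the two freshly created namespaces instead of polluting the default one. A sketch of the overall flow, with the calls that fall outside these hunks assumed from context:

scale(t, ns2.Name, clientset)     // baseline scale-up, no quota in "non-quotaed"
waitForQuota(t, quota, clientset) // create the quota in "quotaed" and wait for its status
scale(t, ns.Name, clientset)      // the same scale-up, now throttled by the quota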

View File

@@ -26,14 +26,12 @@ import (
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/http/httputil" "net/http/httputil"
"strings" "strings"
"testing" "testing"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
@@ -228,6 +226,15 @@ var (
}
}
}
`
podNamespace = `
{
"apiVersion": "` + testapi.Default.GroupVersion().String() + `",
"kind": "Namespace",
"metadata": {
"name": "pod-namespace"%s
}
}
`
jobNamespace = `
{
@@ -237,6 +244,15 @@ var (
"name": "job-namespace"%s
}
}
`
forbiddenNamespace = `
{
"apiVersion": "` + testapi.Default.GroupVersion().String() + `",
"kind": "Namespace",
"metadata": {
"name": "forbidden-namespace"%s
}
}
`
)
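Each namespace template carries a %s placeholder in its metadata, so a test can splice in extra fields or leave it empty. A hypothetical instantiation (fmt import assumed):

// Yields the JSON body for creating "pod-namespace" with no extra metadata.
body := fmt.Sprintf(podNamespace, "")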
@@ -292,16 +308,19 @@ func TestRBAC(t *testing.T) {
},
},
requests: []request{
// Create the namespace used later in the test
{superUser, "POST", "", "namespaces", "", "", podNamespace, http.StatusCreated},
{superUser, "GET", "", "pods", "", "", "", http.StatusOK},
{superUser, "GET", "", "pods", api.NamespaceDefault, "a", "", http.StatusNotFound},
{superUser, "GET", "", "pods", "pod-namespace", "a", "", http.StatusNotFound},
{superUser, "POST", "", "pods", api.NamespaceDefault, "", aPod, http.StatusCreated},
{superUser, "POST", "", "pods", "pod-namespace", "", aPod, http.StatusCreated},
{superUser, "GET", "", "pods", api.NamespaceDefault, "a", "", http.StatusOK},
{superUser, "GET", "", "pods", "pod-namespace", "a", "", http.StatusOK},
{"bob", "GET", "", "pods", "", "", "", http.StatusForbidden},
{"bob", "GET", "", "pods", api.NamespaceDefault, "a", "", http.StatusForbidden},
{"bob", "GET", "", "pods", "pod-namespace", "a", "", http.StatusForbidden},
{"pod-reader", "GET", "", "pods", "", "", "", http.StatusOK},
{"pod-reader", "POST", "", "pods", api.NamespaceDefault, "", aPod, http.StatusForbidden},
{"pod-reader", "POST", "", "pods", "pod-namespace", "", aPod, http.StatusForbidden},
},
},
{
@@ -330,21 +349,22 @@ func TestRBAC(t *testing.T) {
requests: []request{
// Create the namespace used later in the test
{superUser, "POST", "", "namespaces", "", "", jobNamespace, http.StatusCreated},
{superUser, "POST", "", "namespaces", "", "", forbiddenNamespace, http.StatusCreated},
{"user-with-no-permissions", "POST", "batch", "jobs", "job-namespace", "", aJob, http.StatusForbidden},
{"user-with-no-permissions", "GET", "batch", "jobs", "job-namespace", "pi", "", http.StatusForbidden},
// job-writer-namespace cannot write to the "default" namespace
// job-writer-namespace cannot write to the "forbidden-namespace"
{"job-writer-namespace", "GET", "batch", "jobs", "default", "", "", http.StatusForbidden},
{"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "", "", http.StatusForbidden},
{"job-writer-namespace", "GET", "batch", "jobs", "default", "pi", "", http.StatusForbidden},
{"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusForbidden},
{"job-writer-namespace", "POST", "batch", "jobs", "default", "", aJob, http.StatusForbidden},
{"job-writer-namespace", "POST", "batch", "jobs", "forbidden-namespace", "", aJob, http.StatusForbidden},
{"job-writer-namespace", "GET", "batch", "jobs", "default", "pi", "", http.StatusForbidden},
{"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusForbidden},
// job-writer can write to any namespace
{"job-writer", "GET", "batch", "jobs", "default", "", "", http.StatusOK},
{"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "", "", http.StatusOK},
{"job-writer", "GET", "batch", "jobs", "default", "pi", "", http.StatusNotFound},
{"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusNotFound},
{"job-writer", "POST", "batch", "jobs", "default", "", aJob, http.StatusCreated},
{"job-writer", "POST", "batch", "jobs", "forbidden-namespace", "", aJob, http.StatusCreated},
{"job-writer", "GET", "batch", "jobs", "default", "pi", "", http.StatusOK},
{"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusOK},
{"job-writer-namespace", "GET", "batch", "jobs", "job-namespace", "", "", http.StatusOK},
{"job-writer-namespace", "GET", "batch", "jobs", "job-namespace", "pi", "", http.StatusNotFound},
@@ -355,24 +375,13 @@ func TestRBAC(t *testing.T) {
}
for i, tc := range tests {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
var m *master.Master
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
m.Handler.ServeHTTP(w, r)
}))
defer s.Close()
// Create an API Server.
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.Authorizer = newRBACAuthorizer(t, superUser, masterConfig)
masterConfig.Authenticator = newFakeAuthenticator()
masterConfig.AuthorizerRBACSuperUser = superUser
m, err := master.New(masterConfig)
if err != nil {
t.Fatalf("case %d: error bringing up master: %v", i, err)
}
_, s := framework.RunAMaster(masterConfig)
defer s.Close()
// Bootstrap the API Server with the test case's initial roles.
if err := tc.bootstrapRoles.bootstrap(clientForUser(superUser), s.URL); err != nil {

View File

@@ -50,12 +50,12 @@ type nodeStateManager struct {
}
func TestUnschedulableNodes(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("unschedulable-nodes", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
@@ -65,12 +65,12 @@ func TestUnschedulableNodes(t *testing.T) {
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(restClient.Events(""))
eventBroadcaster.StartRecordingToSink(restClient.Events(ns.Name))
scheduler.New(schedulerConfig).Run()
defer close(schedulerConfig.StopEverything)
DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
DoTestUnschedulableNodes(t, restClient, ns, schedulerConfigFactory.NodeLister.Store)
}
func podScheduled(c *client.Client, podNamespace, podName string) wait.ConditionFunc {
@@ -119,7 +119,11 @@ func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n
return err
}
func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore cache.Store) {
func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, ns *api.Namespace, nodeStore cache.Store) {
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{})
goodCondition := api.NodeCondition{
Type: api.NodeReady,
Status: api.ConditionTrue,
@@ -246,7 +250,7 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(restClient)}},
},
}
myPod, err := restClient.Pods(api.NamespaceDefault).Create(pod)
myPod, err := restClient.Pods(ns.Name).Create(pod)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
@@ -277,7 +281,7 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
}
err = restClient.Pods(api.NamespaceDefault).Delete(myPod.Name, api.NewDeleteOptions(0))
err = restClient.Pods(ns.Name).Delete(myPod.Name, api.NewDeleteOptions(0))
if err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
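The table-driven logic elided between these hunks flips Node.Spec.Unschedulable and checks that pods only land on schedulable nodes. A sketch of that toggle, assuming the node object and client used in this test:

// Mark the node unschedulable; the scheduler should then refuse to place
// pods on it until the flag is cleared again.
node.Spec.Unschedulable = true
if _, err := restClient.Nodes().Update(node); err != nil {
	t.Fatalf("Failed to update node: %v", err)
}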
@@ -289,14 +293,14 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
}
func TestMultiScheduler(t *testing.T) {
// TODO: Limit the test to a single non-default namespace and clean this up at the end.
framework.DeleteAllEtcdKeys()
_, s := framework.RunAMaster(nil)
// TODO: Uncomment when fix #19254
// This seems to be a different issue - it still doesn't work.
// defer s.Close()
ns := framework.CreateTestingNamespace("multi-scheduler", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
/*
This integration test exercises the multi-scheduler feature in the following way:
1. create a default scheduler
@@ -319,6 +323,10 @@ func TestMultiScheduler(t *testing.T) {
// 1. create and start default-scheduler
restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{})
schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
@@ -326,7 +334,7 @@ func TestMultiScheduler(t *testing.T) {
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(restClient.Events(""))
eventBroadcaster.StartRecordingToSink(restClient.Events(ns.Name))
scheduler.New(schedulerConfig).Run()
// default-scheduler will be stopped later
@@ -344,21 +352,21 @@ func TestMultiScheduler(t *testing.T) {
// 3. create 3 pods for testing
podWithNoAnnotation := createPod(restClient, "pod-with-no-annotation", nil)
testPodNoAnnotation, err := restClient.Pods(api.NamespaceDefault).Create(podWithNoAnnotation)
testPodNoAnnotation, err := restClient.Pods(ns.Name).Create(podWithNoAnnotation)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"}
podWithAnnotationFitsDefault := createPod(restClient, "pod-with-annotation-fits-default", schedulerAnnotationFitsDefault)
testPodWithAnnotationFitsDefault, err := restClient.Pods(api.NamespaceDefault).Create(podWithAnnotationFitsDefault)
testPodWithAnnotationFitsDefault, err := restClient.Pods(ns.Name).Create(podWithAnnotationFitsDefault)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"}
podWithAnnotationFitsFoo := createPod(restClient, "pod-with-annotation-fits-foo", schedulerAnnotationFitsFoo)
testPodWithAnnotationFitsFoo, err := restClient.Pods(api.NamespaceDefault).Create(podWithAnnotationFitsFoo)
testPodWithAnnotationFitsFoo, err := restClient.Pods(ns.Name).Create(podWithAnnotationFitsFoo)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
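The three pods differ only in the scheduler.alpha.kubernetes.io/name annotation, which is what routes each pod to the default scheduler or to "foo-scheduler". A sketch of such a pod, reusing the container spec seen earlier in this diff (the pod name is hypothetical):

pod := &api.Pod{
	ObjectMeta: api.ObjectMeta{
		Name:        "pod-for-foo-scheduler",
		Namespace:   ns.Name,
		Annotations: map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"},
	},
	Spec: api.PodSpec{
		Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(restClient)}},
	},
}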
@@ -397,7 +405,7 @@ func TestMultiScheduler(t *testing.T) {
}
eventBroadcaster2 := record.NewBroadcaster()
schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(api.EventSource{Component: "foo-scheduler"})
eventBroadcaster2.StartRecordingToSink(restClient2.Events(""))
eventBroadcaster2.StartRecordingToSink(restClient2.Events(ns.Name))
scheduler.New(schedulerConfig2).Run()
defer close(schedulerConfig2.StopEverything)
@@ -412,11 +420,11 @@ func TestMultiScheduler(t *testing.T) {
}
// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
err = restClient.Pods(api.NamespaceDefault).Delete(testPodNoAnnotation.Name, api.NewDeleteOptions(0))
err = restClient.Pods(ns.Name).Delete(testPodNoAnnotation.Name, api.NewDeleteOptions(0))
if err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
err = restClient.Pods(api.NamespaceDefault).Delete(testPodWithAnnotationFitsDefault.Name, api.NewDeleteOptions(0))
err = restClient.Pods(ns.Name).Delete(testPodWithAnnotationFitsDefault.Name, api.NewDeleteOptions(0))
if err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
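api.NewDeleteOptions(0) requests deletion with a zero-second grace period, so the pods are gone quickly enough for the follow-up steps. Its effect is roughly equivalent to this sketch:

gracePeriod := int64(0)
opts := &api.DeleteOptions{GracePeriodSeconds: &gracePeriod}
// ...then pass opts to Delete instead of api.NewDeleteOptions(0).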
@@ -434,11 +442,11 @@ func TestMultiScheduler(t *testing.T) {
// - note: these two pods belong to default scheduler which no longer exists
podWithNoAnnotation2 := createPod("pod-with-no-annotation2", nil)
podWithAnnotationFitsDefault2 := createPod("pod-with-annotation-fits-default2", schedulerAnnotationFitsDefault)
testPodNoAnnotation2, err := restClient.Pods(api.NamespaceDefault).Create(podWithNoAnnotation2)
testPodNoAnnotation2, err := restClient.Pods(ns.Name).Create(podWithNoAnnotation2)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
testPodWithAnnotationFitsDefault2, err := restClient.Pods(api.NamespaceDefault).Create(podWithAnnotationFitsDefault2)
testPodWithAnnotationFitsDefault2, err := restClient.Pods(ns.Name).Create(podWithAnnotationFitsDefault2)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
@@ -471,14 +479,19 @@ func createPod(client *client.Client, name string, annotation map[string]string)
// This test verifies that the scheduler works well regardless of whether kubelet is allocatable aware or not.
func TestAllocatable(t *testing.T) {
framework.DeleteAllEtcdKeys()
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("allocatable", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
// 1. create and start default-scheduler
restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer restClient.Nodes().DeleteCollection(nil, api.ListOptions{})
schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
@@ -486,7 +499,7 @@ func TestAllocatable(t *testing.T) {
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(restClient.Events(""))
eventBroadcaster.StartRecordingToSink(restClient.Events(ns.Name))
scheduler.New(schedulerConfig).Run()
// default-scheduler will be stopped later
defer close(schedulerConfig.StopEverything)
@@ -528,7 +541,7 @@ func TestAllocatable(t *testing.T) {
},
}
testAllocPod, err := restClient.Pods(api.NamespaceDefault).Create(podResource)
testAllocPod, err := restClient.Pods(ns.Name).Create(podResource)
if err != nil {
t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
}
@@ -559,13 +572,13 @@ func TestAllocatable(t *testing.T) {
t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
}
if err := restClient.Pods(api.NamespaceDefault).Delete(podResource.Name, &api.DeleteOptions{}); err != nil {
if err := restClient.Pods(ns.Name).Delete(podResource.Name, &api.DeleteOptions{}); err != nil {
t.Fatalf("Failed to remove first resource pod: %v", err)
}
// 6. Make another pod with different name, same resource request
podResource.ObjectMeta.Name = "pod-test-allocatable2"
testAllocPod2, err := restClient.Pods(api.NamespaceDefault).Create(podResource)
testAllocPod2, err := restClient.Pods(ns.Name).Create(podResource)
if err != nil {
t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
}
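The portion of TestAllocatable elided between these hunks sets both Capacity and a smaller Allocatable on the test node; an allocatable-aware scheduler must fit pods against the latter. A sketch of that node status, with illustrative quantities rather than the upstream values:

node.Status.Capacity = api.ResourceList{
	api.ResourceCPU:    resource.MustParse("300m"),
	api.ResourceMemory: resource.MustParse("100M"),
}
node.Status.Allocatable = api.ResourceList{
	api.ResourceCPU:    resource.MustParse("150m"),
	api.ResourceMemory: resource.MustParse("50M"),
}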

View File

@@ -336,8 +336,6 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
// startServiceAccountTestServer returns a started server
// It is the responsibility of the caller to ensure the returned stopFunc is called
func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclient.Config, func()) {
framework.DeleteAllEtcdKeys()
// Listener
var m *master.Master
apiServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {