Merge pull request #63289 from k82cn/k8s_63287

Automatic merge from submit-queue (batch tested with PRs 63669, 63511, 63561, 63289). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Cleanup DaemonSet after each integration test.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #63287 

**Release note**:

```release-note
None
```
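
For reference, the cleanup wiring this PR adds to each DaemonSet integration test looks roughly like the following. This is a minimal sketch abridged from the diff below; `setup`, `newDaemonSet`, `forEachStrategy`, and `cleanupDaemonSets` are the helpers shown there, and node creation/validation are elided:

```go
func TestOneNodeDaemonLaunchesPod(t *testing.T) {
    forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
        server, closeFn, dc, informers, clientset := setup(t)
        defer closeFn()
        ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
        dsClient := clientset.AppsV1().DaemonSets(ns.Name)

        stopCh := make(chan struct{})
        informers.Start(stopCh)
        go dc.Run(5, stopCh)
        defer close(stopCh)

        ds := newDaemonSet("foo", ns.Name)
        ds.Spec.UpdateStrategy = *strategy
        if _, err := dsClient.Create(ds); err != nil {
            t.Fatalf("Failed to create DaemonSet: %v", err)
        }
        // Deferred immediately after creation, so the DaemonSet (and its
        // daemon pods) are removed even if the test fails part-way through.
        defer cleanupDaemonSets(t, clientset, ds)

        // ... create nodes and validate the DaemonSet, as in the full tests below ...
    })
}
```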
Kubernetes Submit Queue 2018-05-10 14:25:19 -07:00 committed by GitHub
commit c3afbe845c
2 changed files with 79 additions and 17 deletions


@@ -23,6 +23,7 @@ go_test(
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/client-go/informers:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",


@@ -27,6 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
+    "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
@@ -40,6 +41,8 @@ import (
     "k8s.io/kubernetes/test/integration/framework"
 )
 
+var zero = int64(0)
+
 func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) {
     masterConfig := framework.NewIntegrationTestMasterConfig()
     _, server, closeFn := framework.RunAMaster(masterConfig)
@@ -92,13 +95,55 @@ func newDaemonSet(name, namespace string) *apps.DaemonSet {
                     Labels: testLabels(),
                 },
                 Spec: v1.PodSpec{
                     Containers: []v1.Container{{Name: "foo", Image: "bar"}},
+                    TerminationGracePeriodSeconds: &zero,
                 },
             },
         },
     }
 }
+
+func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet) {
+    ds, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
+    if err != nil {
+        t.Errorf("Failed to get DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
+        return
+    }
+
+    // We set the nodeSelector to a random label. This label is nearly guaranteed
+    // to not be set on any node so the DaemonSetController will start deleting
+    // daemon pods. Once it's done deleting the daemon pods, it's safe to delete
+    // the DaemonSet.
+    ds.Spec.Template.Spec.NodeSelector = map[string]string{
+        string(uuid.NewUUID()): string(uuid.NewUUID()),
+    }
+    // force update to avoid version conflict
+    ds.ResourceVersion = ""
+
+    if ds, err = cs.AppsV1().DaemonSets(ds.Namespace).Update(ds); err != nil {
+        t.Errorf("Failed to update DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
+        return
+    }
+
+    // Wait for the daemon set controller to kill all the daemon pods.
+    if err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
+        updatedDS, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
+        if err != nil {
+            return false, nil
+        }
+        return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil
+    }); err != nil {
+        t.Errorf("Failed to kill the pods of DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
+        return
+    }
+
+    falseVar := false
+    deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+    if err := cs.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions); err != nil {
+        t.Errorf("Failed to delete DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
+    }
+}
 
 func newRollbackStrategy() *apps.DaemonSetUpdateStrategy {
     one := intstr.FromInt(1)
     return &apps.DaemonSetUpdateStrategy{
@@ -140,6 +185,7 @@ func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
                 },
             },
         },
+        TerminationGracePeriodSeconds: &zero,
     }
 }
@@ -257,8 +303,15 @@ func validateFailedPlacementEvent(eventClient corev1typed.EventInterface, t *testing.T) {
     }
 }
 
-func TestOneNodeDaemonLaunchesPod(t *testing.T) {
+func forEachStrategy(t *testing.T, tf func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy)) {
     for _, strategy := range updateStrategies() {
+        t.Run(fmt.Sprintf("%s (%v)", t.Name(), strategy),
+            func(tt *testing.T) { tf(tt, strategy) })
+    }
+}
+
+func TestOneNodeDaemonLaunchesPod(t *testing.T) {
+    forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
         server, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
         ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
@@ -268,9 +321,11 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
         podClient := clientset.CoreV1().Pods(ns.Name)
         nodeClient := clientset.CoreV1().Nodes()
         podInformer := informers.Core().V1().Pods().Informer()
+
         stopCh := make(chan struct{})
         informers.Start(stopCh)
         go dc.Run(5, stopCh)
+        defer close(stopCh)
 
         ds := newDaemonSet("foo", ns.Name)
         ds.Spec.UpdateStrategy = *strategy
@@ -278,6 +333,8 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
         if err != nil {
             t.Fatalf("Failed to create DaemonSet: %v", err)
         }
+        defer cleanupDaemonSets(t, clientset, ds)
+
         _, err = nodeClient.Create(newNode("single-node", nil))
         if err != nil {
             t.Fatalf("Failed to create node: %v", err)
@@ -285,13 +342,11 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
         validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
         validateDaemonSetStatus(dsClient, ds.Name, ds.Namespace, 1, t)
-
-        close(stopCh)
-    }
+    })
 }
 
 func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
-    for _, strategy := range updateStrategies() {
+    forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
         server, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
         ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
@@ -301,9 +356,11 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
         podClient := clientset.CoreV1().Pods(ns.Name)
         nodeClient := clientset.CoreV1().Nodes()
         podInformer := informers.Core().V1().Pods().Informer()
+
         stopCh := make(chan struct{})
         informers.Start(stopCh)
         go dc.Run(5, stopCh)
+        defer close(stopCh)
 
         ds := newDaemonSet("foo", ns.Name)
         ds.Spec.UpdateStrategy = *strategy
@@ -311,17 +368,17 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
         if err != nil {
             t.Fatalf("Failed to create DaemonSet: %v", err)
         }
+        defer cleanupDaemonSets(t, clientset, ds)
+
         addNodes(nodeClient, 0, 5, nil, t)
 
         validateDaemonSetPodsAndMarkReady(podClient, podInformer, 5, t)
         validateDaemonSetStatus(dsClient, ds.Name, ds.Namespace, 5, t)
-
-        close(stopCh)
-    }
+    })
 }
 
 func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
-    for _, strategy := range updateStrategies() {
+    forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
         server, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
         ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
@@ -331,9 +388,11 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
         podClient := clientset.CoreV1().Pods(ns.Name)
         nodeClient := clientset.CoreV1().Nodes()
         podInformer := informers.Core().V1().Pods().Informer()
+
         stopCh := make(chan struct{})
         informers.Start(stopCh)
         go dc.Run(5, stopCh)
+        defer close(stopCh)
 
         ds := newDaemonSet("foo", ns.Name)
         ds.Spec.UpdateStrategy = *strategy
@@ -341,6 +400,8 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
         if err != nil {
             t.Fatalf("Failed to create DaemonSet: %v", err)
         }
+        defer cleanupDaemonSets(t, clientset, ds)
+
         node := newNode("single-node", nil)
         node.Status.Conditions = []v1.NodeCondition{
             {Type: v1.NodeReady, Status: v1.ConditionFalse},
@@ -352,13 +413,11 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
         validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
         validateDaemonSetStatus(dsClient, ds.Name, ds.Namespace, 1, t)
-
-        close(stopCh)
-    }
+    })
 }
 
 func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
-    for _, strategy := range updateStrategies() {
+    forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
         server, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
         ns := framework.CreateTestingNamespace("insufficient-capacity", server, t)
@@ -367,9 +426,11 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
         dsClient := clientset.AppsV1().DaemonSets(ns.Name)
         nodeClient := clientset.CoreV1().Nodes()
         eventClient := clientset.CoreV1().Events(ns.Namespace)
+
         stopCh := make(chan struct{})
         informers.Start(stopCh)
         go dc.Run(5, stopCh)
+        defer close(stopCh)
 
         ds := newDaemonSet("foo", ns.Name)
         ds.Spec.Template.Spec = resourcePodSpec("node-with-limited-memory", "120M", "75m")
@@ -378,6 +439,8 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
         if err != nil {
             t.Fatalf("Failed to create DaemonSet: %v", err)
         }
+        defer cleanupDaemonSets(t, clientset, ds)
+
         node := newNode("node-with-limited-memory", nil)
         node.Status.Allocatable = allocatableResources("100M", "200m")
         _, err = nodeClient.Create(node)
@@ -386,7 +449,5 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
         }
 
         validateFailedPlacementEvent(eventClient, t)
-
-        close(stopCh)
-    }
+    })
 }