Merge pull request #76328 from pontiyaraja/limitRangeCNF

removed flaky watch code and added NewIndexerInformerWatcher for LimitRange test case
Kubernetes Prow Robot 2019-07-15 16:33:25 -07:00 committed by GitHub
commit d0dc9c84c0
2 changed files with 57 additions and 12 deletions
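
For readers unfamiliar with the helper this commit introduces: instead of opening a raw watch (the removed code even called Watch with empty ListOptions, ignoring the label selector it had just built), the test now constructs a label-filtered cache.ListWatch and hands it to watchtools.NewIndexerInformerWatcher, which drives an informer underneath and exposes its events as a watch.Interface. A minimal sketch of that pattern follows; the helper function, client variable, and namespace are illustrative assumptions, while the ListWatch wiring and the four-value NewIndexerInformerWatcher call mirror the diff (client-go of this era takes no context argument).

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// watchLimitRanges is a hypothetical helper (not part of the commit) showing
// the pattern the test adopts: a label-filtered ListWatch wrapped by an
// informer-backed watcher. The informer relists and re-establishes the watch
// after failures, so the caller only consumes events from the returned interface.
func watchLimitRanges(c kubernetes.Interface, ns, labelSelector string) watch.Interface {
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.LabelSelector = labelSelector
			return c.CoreV1().LimitRanges(ns).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.LabelSelector = labelSelector
			return c.CoreV1().LimitRanges(ns).Watch(options)
		},
	}
	// Same call shape as in the diff; the indexer and controller are unused here.
	_, _, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.LimitRange{})
	return w
}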


@@ -42,6 +42,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/gpu:go_default_library",


@@ -19,18 +19,24 @@ package scheduling
import (
"fmt"
"reflect"
"strconv"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@@ -40,21 +46,28 @@ const (
var _ = SIGDescribe("LimitRange", func() {
f := framework.NewDefaultFramework("limitrange")
/*
Release: v1.15
Testname: LimitRange, resources
Description: Create a LimitRange and verify its creation, then update the LimitRange and validate the update. Create Pods with resources and validate that the LimitRange defaults and limits are applied to the pod resources.
*/
//note: this test case can be promoted to conformance once its stability has been verified
ginkgo.It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
ginkgo.By("Creating a LimitRange")
min := getResourceList("50m", "100Mi", "100Gi")
max := getResourceList("500m", "500Mi", "500Gi")
defaultLimit := getResourceList("500m", "500Mi", "500Gi")
defaultRequest := getResourceList("100m", "200Mi", "200Gi")
maxLimitRequestRatio := v1.ResourceList{}
limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
value := strconv.Itoa(time.Now().Nanosecond()) + string(uuid.NewUUID())
limitRange := newLimitRange("limit-range", value, v1.LimitTypeContainer,
min, max,
defaultLimit, defaultRequest,
maxLimitRequestRatio)
ginkgo.By("Setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
framework.ExpectNoError(err, "failed to query for limitRanges")
@@ -63,8 +76,30 @@ var _ = SIGDescribe("LimitRange", func() {
LabelSelector: selector.String(),
ResourceVersion: limitRanges.ListMeta.ResourceVersion,
}
w, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(metav1.ListOptions{})
framework.ExpectNoError(err, "failed to set up watch")
listCompleted := make(chan bool, 1)
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = selector.String()
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
if err == nil {
select {
case listCompleted <- true:
e2elog.Logf("observed the limitRanges list")
return limitRanges, err
default:
e2elog.Logf("channel blocked")
}
}
return limitRanges, err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = selector.String()
return f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(options)
},
}
_, _, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.LimitRange{})
defer w.Stop()
ginkgo.By("Submitting a LimitRange")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
@@ -72,12 +107,17 @@ var _ = SIGDescribe("LimitRange", func() {
ginkgo.By("Verifying LimitRange creation was observed")
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
e2elog.Failf("Failed to observe pod creation: %v", event)
case <-listCompleted:
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
e2elog.Failf("Failed to observe limitRange creation : %v", event)
}
case <-time.After(framework.ServiceRespondingTimeout):
e2elog.Failf("Timeout while waiting for LimitRange creation")
}
case <-time.After(framework.ServiceRespondingTimeout):
e2elog.Failf("Timeout while waiting for LimitRange creation")
e2elog.Failf("Timeout while waiting for LimitRange list complete")
}
ginkgo.By("Fetching the LimitRange to ensure it has proper values")
@@ -165,7 +205,7 @@ var _ = SIGDescribe("LimitRange", func() {
framework.ExpectNoError(err)
ginkgo.By("Verifying the LimitRange was deleted")
err = wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
gomega.Expect(wait.Poll(time.Second*5, framework.ServiceRespondingTimeout, func() (bool, error) {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
@@ -190,7 +230,8 @@ var _ = SIGDescribe("LimitRange", func() {
return false, nil
})
}))
framework.ExpectNoError(err, "kubelet never observed the termination notice")
ginkgo.By("Creating a Pod with more than former max resources")
@@ -241,13 +282,16 @@ func getResourceList(cpu, memory string, ephemeralStorage string) v1.ResourceList
}
// newLimitRange returns a limit range with specified data
func newLimitRange(name string, limitType v1.LimitType,
func newLimitRange(name, value string, limitType v1.LimitType,
min, max,
defaultLimit, defaultRequest,
maxLimitRequestRatio v1.ResourceList) *v1.LimitRange {
return &v1.LimitRange{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"time": value,
},
},
Spec: v1.LimitRangeSpec{
Limits: []v1.LimitRangeItem{
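
Assembled from the hunks above for readability (assuming the test file's existing imports and the resource lists defined earlier in the It block): part of the change stamps each run's LimitRange with a unique "time" label and selects on that exact value, so watch events and list results can only come from the object this run created.

// Sketch assembled from the diff, not standalone code.
value := strconv.Itoa(time.Now().Nanosecond()) + string(uuid.NewUUID())
limitRange := newLimitRange("limit-range", value, v1.LimitTypeContainer,
	min, max, defaultLimit, defaultRequest, maxLimitRequestRatio)
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}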