Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 22:17:14 +00:00)

Commit 937d4df16a (parent 07e58a361b): use log func in test/e2e/apimachinery
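The change is mechanical: every call that went through the e2elog wrapper package (k8s.io/kubernetes/test/e2e/framework/log) now uses the same-named helper exported by the framework package directly, and the unused e2elog import plus its Bazel dependency are dropped. Below is a minimal sketch of the before/after pattern, assuming only the two framework packages shown in the diff; the package name and the checkConfig helper are illustrative and not part of the commit. It is not part of the diff that follows.

package apimachinery // illustrative package name, not from the commit

import (
	"k8s.io/kubernetes/test/e2e/framework"
	// e2elog "k8s.io/kubernetes/test/e2e/framework/log" // removed by this commit
)

// checkConfig is a hypothetical helper showing the call-site rewrite.
func checkConfig() {
	config, err := framework.LoadConfig()
	if err != nil {
		// before: e2elog.Failf("could not load config: %v", err)
		framework.Failf("could not load config: %v", err)
	}
	// before: e2elog.Logf("loaded config for host %s", config.Host)
	framework.Logf("loaded config for host %s", config.Host)
}

Because the framework package provides the same-named Logf/Failf helpers, only the import changes at each call site; the logging behavior is unchanged.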
@@ -83,7 +83,6 @@ go_library(
 "//test/e2e/apps:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/deployment:go_default_library",
-"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/metrics:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/ssh:go_default_library",
@@ -41,7 +41,6 @@ import (
 rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
 "k8s.io/kubernetes/test/e2e/framework"
 e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 imageutils "k8s.io/kubernetes/test/utils/image"
 samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
@@ -81,11 +80,11 @@ var _ = SIGDescribe("Aggregator", func() {
 if aggrclient == nil {
 config, err := framework.LoadConfig()
 if err != nil {
-e2elog.Failf("could not load config: %v", err)
+framework.Failf("could not load config: %v", err)
 }
 aggrclient, err = aggregatorclient.NewForConfig(config)
 if err != nil {
-e2elog.Failf("could not create aggregator client: %v", err)
+framework.Failf("could not create aggregator client: %v", err)
 }
 }
 })
@@ -375,16 +374,16 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 }, "Waited %s for the sample-apiserver to be ready to handle requests.")
 if err != nil {
 currentAPIServiceJSON, _ := json.Marshal(currentAPIService)
-e2elog.Logf("current APIService: %s", string(currentAPIServiceJSON))
+framework.Logf("current APIService: %s", string(currentAPIServiceJSON))

 currentPodsJSON, _ := json.Marshal(currentPods)
-e2elog.Logf("current pods: %s", string(currentPodsJSON))
+framework.Logf("current pods: %s", string(currentPodsJSON))

 if currentPods != nil {
 for _, pod := range currentPods.Items {
 for _, container := range pod.Spec.Containers {
 logs, err := e2epod.GetPodLogs(client, namespace, pod.Name, container.Name)
-e2elog.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
+framework.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
 }
 }
 }
@@ -402,7 +401,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 var statusCode int
 result.StatusCode(&statusCode)
 if statusCode != 201 {
-e2elog.Failf("Flunders client creation response was status %d, not 201", statusCode)
+framework.Failf("Flunders client creation response was status %d, not 201", statusCode)
 }

 pods, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
@@ -416,7 +415,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 err = json.Unmarshal(contents, &flundersList)
 validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.k8s.io/v1alpha1")
 if len(flundersList.Items) != 1 {
-e2elog.Failf("failed to get back the correct flunders list %v", flundersList)
+framework.Failf("failed to get back the correct flunders list %v", flundersList)
 }

 // kubectl delete flunder test-flunder -v 9
@@ -431,7 +430,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 err = json.Unmarshal(contents, &flundersList)
 validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.k8s.io/v1alpha1")
 if len(flundersList.Items) != 0 {
-e2elog.Failf("failed to get back the correct deleted flunders list %v", flundersList)
+framework.Failf("failed to get back the correct deleted flunders list %v", flundersList)
 }

 flunderName = generateFlunderName("dynamic-flunder")
@@ -443,7 +442,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 gvr := schema.GroupVersionResource{Group: "wardle.k8s.io", Version: "v1alpha1", Resource: "flunders"}
 _, ok := groupVersionResources[gvr]
 if !ok {
-e2elog.Failf("could not find group version resource for dynamic client and wardle/flunders (discovery error: %v, discovery results: %#v)", discoveryErr, groupVersionResources)
+framework.Failf("could not find group version resource for dynamic client and wardle/flunders (discovery error: %v, discovery results: %#v)", discoveryErr, groupVersionResources)
 }
 dynamicClient := f.DynamicClient.Resource(gvr).Namespace(namespace)

@@ -469,7 +468,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 unstructuredList, err := dynamicClient.List(metav1.ListOptions{})
 framework.ExpectNoError(err, "listing flunders using dynamic client")
 if len(unstructuredList.Items) != 1 {
-e2elog.Failf("failed to get back the correct flunders list %v from the dynamic client", unstructuredList)
+framework.Failf("failed to get back the correct flunders list %v from the dynamic client", unstructuredList)
 }

 // kubectl delete flunder test-flunder
@@ -480,19 +479,19 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 unstructuredList, err = dynamicClient.List(metav1.ListOptions{})
 framework.ExpectNoError(err, "listing flunders using dynamic client")
 if len(unstructuredList.Items) != 0 {
-e2elog.Failf("failed to get back the correct deleted flunders list %v from the dynamic client", unstructuredList)
+framework.Failf("failed to get back the correct deleted flunders list %v from the dynamic client", unstructuredList)
 }

 cleanTest(client, aggrclient, namespace)
 }

 // pollTimed will call Poll but time how long Poll actually took.
-// It will then e2elog.Logf the msg with the duration of the Poll.
+// It will then framework.Logf the msg with the duration of the Poll.
 // It is assumed that msg will contain one %s for the elapsed time.
 func pollTimed(interval, timeout time.Duration, condition wait.ConditionFunc, msg string) error {
 defer func(start time.Time, msg string) {
 elapsed := time.Since(start)
-e2elog.Logf(msg, elapsed)
+framework.Logf(msg, elapsed)
 }(time.Now(), msg)
 return wait.Poll(interval, timeout, condition)
 }
@@ -513,7 +512,7 @@ func validateErrorWithDebugInfo(f *framework.Framework, err error, pods *v1.PodL
 msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods)
 }

-e2elog.Failf(msg)
+framework.Failf(msg)
 }
 }
@@ -23,7 +23,7 @@ import (

 "k8s.io/client-go/util/cert"
 "k8s.io/client-go/util/keyutil"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+"k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/utils"
 )

@@ -38,27 +38,27 @@ type certContext struct {
 func setupServerCert(namespaceName, serviceName string) *certContext {
 certDir, err := ioutil.TempDir("", "test-e2e-server-cert")
 if err != nil {
-e2elog.Failf("Failed to create a temp dir for cert generation %v", err)
+framework.Failf("Failed to create a temp dir for cert generation %v", err)
 }
 defer os.RemoveAll(certDir)
 signingKey, err := utils.NewPrivateKey()
 if err != nil {
-e2elog.Failf("Failed to create CA private key %v", err)
+framework.Failf("Failed to create CA private key %v", err)
 }
 signingCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "e2e-server-cert-ca"}, signingKey)
 if err != nil {
-e2elog.Failf("Failed to create CA cert for apiserver %v", err)
+framework.Failf("Failed to create CA cert for apiserver %v", err)
 }
 caCertFile, err := ioutil.TempFile(certDir, "ca.crt")
 if err != nil {
-e2elog.Failf("Failed to create a temp file for ca cert generation %v", err)
+framework.Failf("Failed to create a temp file for ca cert generation %v", err)
 }
 if err := ioutil.WriteFile(caCertFile.Name(), utils.EncodeCertPEM(signingCert), 0644); err != nil {
-e2elog.Failf("Failed to write CA cert %v", err)
+framework.Failf("Failed to write CA cert %v", err)
 }
 key, err := utils.NewPrivateKey()
 if err != nil {
-e2elog.Failf("Failed to create private key for %v", err)
+framework.Failf("Failed to create private key for %v", err)
 }
 signedCert, err := utils.NewSignedCert(
 &cert.Config{
@@ -68,25 +68,25 @@ func setupServerCert(namespaceName, serviceName string) *certContext {
 key, signingCert, signingKey,
 )
 if err != nil {
-e2elog.Failf("Failed to create cert%v", err)
+framework.Failf("Failed to create cert%v", err)
 }
 certFile, err := ioutil.TempFile(certDir, "server.crt")
 if err != nil {
-e2elog.Failf("Failed to create a temp file for cert generation %v", err)
+framework.Failf("Failed to create a temp file for cert generation %v", err)
 }
 keyFile, err := ioutil.TempFile(certDir, "server.key")
 if err != nil {
-e2elog.Failf("Failed to create a temp file for key generation %v", err)
+framework.Failf("Failed to create a temp file for key generation %v", err)
 }
 if err = ioutil.WriteFile(certFile.Name(), utils.EncodeCertPEM(signedCert), 0600); err != nil {
-e2elog.Failf("Failed to write cert file %v", err)
+framework.Failf("Failed to write cert file %v", err)
 }
 privateKeyPEM, err := keyutil.MarshalPrivateKeyToPEM(key)
 if err != nil {
-e2elog.Failf("Failed to marshal key %v", err)
+framework.Failf("Failed to marshal key %v", err)
 }
 if err = ioutil.WriteFile(keyFile.Name(), privateKeyPEM, 0644); err != nil {
-e2elog.Failf("Failed to write key file %v", err)
+framework.Failf("Failed to write key file %v", err)
 }
 return &certContext{
 cert: utils.EncodeCertPEM(signedCert),
@@ -35,7 +35,6 @@ import (
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 "k8s.io/client-go/util/workqueue"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 func shouldCheckRemainingItem() bool {
@@ -69,9 +68,9 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 if err == nil {
 return
 }
-e2elog.Logf("Got an error creating template %d: %v", i, err)
+framework.Logf("Got an error creating template %d: %v", i, err)
 }
-e2elog.Fail("Unable to create template %d, exiting", i)
+framework.Failf("Unable to create template %d, exiting", i)
 })
 })

@@ -88,7 +87,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
 list, err := client.List(opts)
 framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
-e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
+framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
 gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))

 if len(lastRV) == 0 {
@@ -143,7 +142,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 gomega.Expect(int(*list.GetRemainingItemCount()) + len(list.Items)).To(gomega.BeNumerically("==", numberOfTotalResources))
 }
 }
-e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)
+framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)

 ginkgo.By("retrieving the second page until the token expires")
 opts.Continue = firstToken
@@ -151,13 +150,13 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
 _, err := client.List(opts)
 if err == nil {
-e2elog.Logf("Token %s has not expired yet", firstToken)
+framework.Logf("Token %s has not expired yet", firstToken)
 return false, nil
 }
 if err != nil && !errors.IsResourceExpired(err) {
 return false, err
 }
-e2elog.Logf("got error %s", err)
+framework.Logf("got error %s", err)
 status, ok := err.(errors.APIStatus)
 if !ok {
 return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err))
@@ -166,7 +165,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 if len(inconsistentToken) == 0 {
 return false, fmt.Errorf("expect non empty continue token")
 }
-e2elog.Logf("Retrieved inconsistent continue %s", inconsistentToken)
+framework.Logf("Retrieved inconsistent continue %s", inconsistentToken)
 return true, nil
 })

@@ -205,7 +204,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 gomega.Expect(int(*list.GetRemainingItemCount()) + len(list.Items) + found).To(gomega.BeNumerically("==", numberOfTotalResources))
 }
 }
-e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
+framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
 gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))
 framework.ExpectEqual(list.ResourceVersion, lastRV)
 for _, item := range list.Items {
@@ -35,7 +35,6 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/utils/crd"
 imageutils "k8s.io/kubernetes/test/utils/image"
 "k8s.io/utils/pointer"
@@ -238,7 +237,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa
 },
 })
 if err != nil && errors.IsAlreadyExists(err) {
-e2elog.Logf("role binding %s already exists", roleBindingCRDName)
+framework.Logf("role binding %s already exists", roleBindingCRDName)
 } else {
 framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
 }
@@ -506,7 +505,7 @@ func waitWebhookConversionReady(f *framework.Framework, crd *apiextensionsv1.Cus
 if err != nil {
 // tolerate clusters that do not set --enable-aggregator-routing and have to wait for kube-proxy
 // to program the service network, during which conversion requests return errors
-e2elog.Logf("error waiting for conversion to succeed during setup: %v", err)
+framework.Logf("error waiting for conversion to succeed during setup: %v", err)
 return false, nil
 }
@@ -40,7 +40,6 @@ import (
 "k8s.io/client-go/rest"
 openapiutil "k8s.io/kube-openapi/pkg/util"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/utils/crd"
 "sigs.k8s.io/yaml"
 )
@@ -65,7 +64,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 framework.ConformanceIt("works for CRD with validation schema", func() {
 crd, err := setupCRD(f, schemaFoo, "foo", "v1")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-foo")
@@ -74,59 +73,59 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("client-side validation (kubectl create and apply) allows request with known and required properties")
 validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
 if _, err := framework.RunKubectlInput(validCR, ns, "create", "-f", "-"); err != nil {
-e2elog.Failf("failed to create valid CR %s: %v", validCR, err)
+framework.Failf("failed to create valid CR %s: %v", validCR, err)
 }
 if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
-e2elog.Failf("failed to delete valid CR: %v", err)
+framework.Failf("failed to delete valid CR: %v", err)
 }
 if _, err := framework.RunKubectlInput(validCR, ns, "apply", "-f", "-"); err != nil {
-e2elog.Failf("failed to apply valid CR %s: %v", validCR, err)
+framework.Failf("failed to apply valid CR %s: %v", validCR, err)
 }
 if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
-e2elog.Failf("failed to delete valid CR: %v", err)
+framework.Failf("failed to delete valid CR: %v", err)
 }

 ginkgo.By("client-side validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema")
 unknownCR := fmt.Sprintf(`{%s,"spec":{"foo":true}}`, meta)
 if _, err := framework.RunKubectlInput(unknownCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) {
-e2elog.Failf("unexpected no error when creating CR with unknown field: %v", err)
+framework.Failf("unexpected no error when creating CR with unknown field: %v", err)
 }
 if _, err := framework.RunKubectlInput(unknownCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) {
-e2elog.Failf("unexpected no error when applying CR with unknown field: %v", err)
+framework.Failf("unexpected no error when applying CR with unknown field: %v", err)
 }

 ginkgo.By("client-side validation (kubectl create and apply) rejects request without required properties")
 noRequireCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"age":"10"}]}}`, meta)
 if _, err := framework.RunKubectlInput(noRequireCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) {
-e2elog.Failf("unexpected no error when creating CR without required field: %v", err)
+framework.Failf("unexpected no error when creating CR without required field: %v", err)
 }
 if _, err := framework.RunKubectlInput(noRequireCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) {
-e2elog.Failf("unexpected no error when applying CR without required field: %v", err)
+framework.Failf("unexpected no error when applying CR without required field: %v", err)
 }

 ginkgo.By("kubectl explain works to explain CR properties")
 if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*Foo CRD for Testing.*FIELDS:.*apiVersion.*<string>.*APIVersion defines.*spec.*<Object>.*Specification of Foo`); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 ginkgo.By("kubectl explain works to explain CR properties recursively")
 if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".metadata", `(?s)DESCRIPTION:.*Standard object's metadata.*FIELDS:.*creationTimestamp.*<string>.*CreationTimestamp is a timestamp`); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".spec", `(?s)DESCRIPTION:.*Specification of Foo.*FIELDS:.*bars.*<\[\]Object>.*List of Bars and their specs`); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".spec.bars", `(?s)RESOURCE:.*bars.*<\[\]Object>.*DESCRIPTION:.*List of Bars and their specs.*FIELDS:.*bazs.*<\[\]string>.*List of Bazs.*name.*<string>.*Name of Bar`); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 ginkgo.By("kubectl explain works to return error when explain is called on property that doesn't exist")
 if _, err := framework.RunKubectl("explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) {
-e2elog.Failf("unexpected no error when explaining property that doesn't exist: %v", err)
+framework.Failf("unexpected no error when explaining property that doesn't exist: %v", err)
 }

 if err := cleanupCRD(f, crd); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 })
@@ -140,7 +139,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 framework.ConformanceIt("works for CRD without validation schema", func() {
 crd, err := setupCRD(f, nil, "empty", "v1")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
@@ -149,25 +148,25 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("client-side validation (kubectl create and apply) allows request with any unknown properties")
 randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
 if _, err := framework.RunKubectlInput(randomCR, ns, "create", "-f", "-"); err != nil {
-e2elog.Failf("failed to create random CR %s for CRD without schema: %v", randomCR, err)
+framework.Failf("failed to create random CR %s for CRD without schema: %v", randomCR, err)
 }
 if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
-e2elog.Failf("failed to delete random CR: %v", err)
+framework.Failf("failed to delete random CR: %v", err)
 }
 if _, err := framework.RunKubectlInput(randomCR, ns, "apply", "-f", "-"); err != nil {
-e2elog.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
+framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
 }
 if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
-e2elog.Failf("failed to delete random CR: %v", err)
+framework.Failf("failed to delete random CR: %v", err)
 }

 ginkgo.By("kubectl explain works to explain CR without validation schema")
 if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*<empty>`); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 if err := cleanupCRD(f, crd); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 })
@@ -181,7 +180,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 framework.ConformanceIt("works for CRD preserving unknown fields at the schema root", func() {
 crd, err := setupCRDAndVerifySchema(f, schemaPreserveRoot, nil, "unknown-at-root", "v1")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
@@ -190,25 +189,25 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("client-side validation (kubectl create and apply) allows request with any unknown properties")
 randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
 if _, err := framework.RunKubectlInput(randomCR, ns, "create", "-f", "-"); err != nil {
-e2elog.Failf("failed to create random CR %s for CRD that allows unknown properties at the root: %v", randomCR, err)
+framework.Failf("failed to create random CR %s for CRD that allows unknown properties at the root: %v", randomCR, err)
 }
 if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
-e2elog.Failf("failed to delete random CR: %v", err)
+framework.Failf("failed to delete random CR: %v", err)
 }
 if _, err := framework.RunKubectlInput(randomCR, ns, "apply", "-f", "-"); err != nil {
-e2elog.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
+framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
 }
 if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
-e2elog.Failf("failed to delete random CR: %v", err)
+framework.Failf("failed to delete random CR: %v", err)
 }

 ginkgo.By("kubectl explain works to explain CR")
 if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, fmt.Sprintf(`(?s)KIND:.*%s`, crd.Crd.Spec.Names.Kind)); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 if err := cleanupCRD(f, crd); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 })
@@ -223,7 +222,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 framework.ConformanceIt("works for CRD preserving unknown fields in an embedded object", func() {
 crd, err := setupCRDAndVerifySchema(f, schemaPreserveNested, nil, "unknown-in-nested", "v1")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
@@ -232,25 +231,25 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("client-side validation (kubectl create and apply) allows request with any unknown properties")
 randomCR := fmt.Sprintf(`{%s,"spec":{"b":[{"c":"d"}]}}`, meta)
 if _, err := framework.RunKubectlInput(randomCR, ns, "create", "-f", "-"); err != nil {
-e2elog.Failf("failed to create random CR %s for CRD that allows unknown properties in a nested object: %v", randomCR, err)
+framework.Failf("failed to create random CR %s for CRD that allows unknown properties in a nested object: %v", randomCR, err)
 }
 if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
-e2elog.Failf("failed to delete random CR: %v", err)
+framework.Failf("failed to delete random CR: %v", err)
 }
 if _, err := framework.RunKubectlInput(randomCR, ns, "apply", "-f", "-"); err != nil {
-e2elog.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
+framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
 }
 if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
-e2elog.Failf("failed to delete random CR: %v", err)
+framework.Failf("failed to delete random CR: %v", err)
 }

 ginkgo.By("kubectl explain works to explain CR")
 if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*preserve-unknown-properties in nested field for Testing`); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 if err := cleanupCRD(f, crd); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 })
@@ -264,26 +263,26 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("CRs in different groups (two CRDs) show up in OpenAPI documentation")
 crdFoo, err := setupCRD(f, schemaFoo, "foo", "v1")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 crdWaldo, err := setupCRD(f, schemaWaldo, "waldo", "v1beta1")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if crdFoo.Crd.Spec.Group == crdWaldo.Crd.Spec.Group {
-e2elog.Failf("unexpected: CRDs should be of different group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
+framework.Failf("unexpected: CRDs should be of different group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
 }
 if err := waitForDefinition(f.ClientSet, definitionName(crdWaldo, "v1beta1"), schemaWaldo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v1"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := cleanupCRD(f, crdFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := cleanupCRD(f, crdWaldo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 })
@@ -297,41 +296,41 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation")
 crdMultiVer, err := setupCRD(f, schemaFoo, "multi-ver", "v2", "v3")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v3"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v2"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := cleanupCRD(f, crdMultiVer); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 ginkgo.By("CRs in the same group but different versions (two CRDs) show up in OpenAPI documentation")
 crdFoo, err := setupCRD(f, schemaFoo, "common-group", "v4")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 crdWaldo, err := setupCRD(f, schemaWaldo, "common-group", "v5")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if crdFoo.Crd.Spec.Group != crdWaldo.Crd.Spec.Group {
-e2elog.Failf("unexpected: CRDs should be of the same group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
+framework.Failf("unexpected: CRDs should be of the same group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
 }
 if err := waitForDefinition(f.ClientSet, definitionName(crdWaldo, "v5"), schemaWaldo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v4"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := cleanupCRD(f, crdFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := cleanupCRD(f, crdWaldo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 })
@@ -345,26 +344,26 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation")
 crdFoo, err := setupCRD(f, schemaFoo, "common-group", "v6")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 crdWaldo, err := setupCRD(f, schemaWaldo, "common-group", "v6")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if crdFoo.Crd.Spec.Group != crdWaldo.Crd.Spec.Group {
-e2elog.Failf("unexpected: CRDs should be of the same group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
+framework.Failf("unexpected: CRDs should be of the same group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
 }
 if err := waitForDefinition(f.ClientSet, definitionName(crdWaldo, "v6"), schemaWaldo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v6"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := cleanupCRD(f, crdFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := cleanupCRD(f, crdWaldo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 })
@@ -379,13 +378,13 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("set up a multi version CRD")
 crdMultiVer, err := setupCRD(f, schemaFoo, "multi-ver", "v2", "v3")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v3"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v2"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 ginkgo.By("rename a version")
@@ -395,27 +394,27 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ]`)
 crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(crdMultiVer.Crd.Name, types.JSONPatchType, patch)
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 ginkgo.By("check the new version name is served")
 if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v4"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 ginkgo.By("check the old version name is removed")
 if err := waitForDefinitionCleanup(f.ClientSet, definitionName(crdMultiVer, "v3")); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 ginkgo.By("check the other version is not changed")
 if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v2"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 // TestCrd.Versions is different from TestCrd.Crd.Versions, we have to manually
 // update the name there. Used by cleanupCRD
 crdMultiVer.Crd.Spec.Versions[1].Name = "v4"
 if err := cleanupCRD(f, crdMultiVer); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 })
@@ -430,38 +429,38 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
 ginkgo.By("set up a multi version CRD")
 crd, err := setupCRD(f, schemaFoo, "multi-to-single-ver", "v5", "v6alpha1")
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 // just double check. setupCRD() checked this for us already
 if err := waitForDefinition(f.ClientSet, definitionName(crd, "v6alpha1"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 if err := waitForDefinition(f.ClientSet, definitionName(crd, "v5"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 ginkgo.By("mark a version not serverd")
 crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(crd.Crd.Name, metav1.GetOptions{})
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 crd.Crd.Spec.Versions[1].Served = false
 crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(crd.Crd)
 if err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 ginkgo.By("check the unserved version gets removed")
 if err := waitForDefinitionCleanup(f.ClientSet, definitionName(crd, "v6alpha1")); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 ginkgo.By("check the other version is not changed")
 if err := waitForDefinition(f.ClientSet, definitionName(crd, "v5"), schemaFoo); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }

 if err := cleanupCRD(f, crd); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 })
 })
@@ -30,7 +30,6 @@ import (
 "k8s.io/apimachinery/pkg/watch"
 "k8s.io/client-go/dynamic"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 "github.com/onsi/ginkgo"
 )
@@ -55,24 +54,24 @@ var _ = SIGDescribe("CustomResourceDefinition Watch [Privileged:ClusterAdmin]",

 config, err := framework.LoadConfig()
 if err != nil {
-e2elog.Failf("failed to load config: %v", err)
+framework.Failf("failed to load config: %v", err)
 }

 apiExtensionClient, err := clientset.NewForConfig(config)
 if err != nil {
-e2elog.Failf("failed to initialize apiExtensionClient: %v", err)
+framework.Failf("failed to initialize apiExtensionClient: %v", err)
 }

 noxuDefinition := fixtures.NewNoxuV1CustomResourceDefinition(apiextensionsv1.ClusterScoped)
 noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
 if err != nil {
-e2elog.Failf("failed to create CustomResourceDefinition: %v", err)
+framework.Failf("failed to create CustomResourceDefinition: %v", err)
 }

 defer func() {
 err = fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient)
 if err != nil {
-e2elog.Failf("failed to delete CustomResourceDefinition: %v", err)
+framework.Failf("failed to delete CustomResourceDefinition: %v", err)
 }
 }()
@@ -32,7 +32,6 @@ import (
 "k8s.io/client-go/dynamic"
 "k8s.io/client-go/util/retry"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin]", func() {
@@ -116,7 +115,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
 }
 framework.ExpectNotEqual(expected, nil)
 if !equality.Semantic.DeepEqual(actual.Spec, expected.Spec) {
-e2elog.Failf("Expected CustomResourceDefinition in list with name %s to match crd created with same name, but got different specs:\n%s",
+framework.Failf("Expected CustomResourceDefinition in list with name %s to match crd created with same name, but got different specs:\n%s",
 actual.Name, diff.ObjectReflectDiff(expected.Spec, actual.Spec))
 }
 }
@@ -161,7 +160,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
 framework.ExpectNoError(err, "getting CustomResourceDefinition status")
 status := unstructuredToCRD(u)
 if !equality.Semantic.DeepEqual(status.Spec, crd.Spec) {
-e2elog.Failf("Expected CustomResourceDefinition Spec to match status sub-resource Spec, but got:\n%s", diff.ObjectReflectDiff(status.Spec, crd.Spec))
+framework.Failf("Expected CustomResourceDefinition Spec to match status sub-resource Spec, but got:\n%s", diff.ObjectReflectDiff(status.Spec, crd.Spec))
 }
 status.Status.Conditions = append(status.Status.Conditions, updateCondition)
 updated, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(status)
@@ -267,5 +266,5 @@ func expectCondition(conditions []v1.CustomResourceDefinitionCondition, expected
 return
 }
 }
-e2elog.Failf("Condition %#v not found in conditions %#v", expected, conditions)
+framework.Failf("Condition %#v not found in conditions %#v", expected, conditions)
 }
@@ -20,7 +20,6 @@ import (
 utilversion "k8s.io/apimachinery/pkg/util/version"
 "k8s.io/apiserver/pkg/endpoints/discovery"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/utils/crd"

 "github.com/onsi/ginkgo"
@@ -50,7 +49,7 @@ var _ = SIGDescribe("Discovery", func() {
 spec := testcrd.Crd.Spec
 resources, err := testcrd.APIExtensionClient.Discovery().ServerResourcesForGroupVersion(spec.Group + "/" + spec.Versions[0].Name)
 if err != nil {
-e2elog.Failf("failed to find the discovery doc for %v: %v", resources, err)
+framework.Failf("failed to find the discovery doc for %v: %v", resources, err)
 }
 found := false
 var storageVersion string
@@ -69,12 +68,12 @@ var _ = SIGDescribe("Discovery", func() {
 if r.Name == spec.Names.Plural {
 found = true
 if r.StorageVersionHash != expected {
-e2elog.Failf("expected storageVersionHash of %s/%s/%s to be %s, got %s", r.Group, r.Version, r.Name, expected, r.StorageVersionHash)
+framework.Failf("expected storageVersionHash of %s/%s/%s to be %s, got %s", r.Group, r.Version, r.Name, expected, r.StorageVersionHash)
 }
 }
 }
 if !found {
-e2elog.Failf("didn't find resource %s in the discovery doc", spec.Names.Plural)
+framework.Failf("didn't find resource %s in the discovery doc", spec.Names.Plural)
 }
 })
 })
@@ -25,7 +25,6 @@ import (
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 "k8s.io/kubernetes/test/e2e/apps"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -102,7 +101,7 @@ func masterExec(cmd string) {
 framework.ExpectNoError(err, "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
 if result.Code != 0 {
 e2essh.LogResult(result)
-e2elog.Failf("master exec command returned non-zero")
+framework.Failf("master exec command returned non-zero")
 }
 }

@@ -116,7 +115,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
 options := metav1.ListOptions{LabelSelector: rcSelector.String()}
 pods, err := podClient.List(options)
 if err != nil {
-e2elog.Logf("apiserver returned error, as expected before recovery: %v", err)
+framework.Logf("apiserver returned error, as expected before recovery: %v", err)
 return false, nil
 }
 if len(pods.Items) == 0 {
@@ -126,7 +125,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
 err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
 framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
 }
-e2elog.Logf("apiserver has recovered")
+framework.Logf("apiserver has recovered")
 return true, nil
 }))
@@ -38,7 +38,6 @@ import (
 "k8s.io/apiserver/pkg/storage/names"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"

 "github.com/onsi/ginkgo"
@@ -244,14 +243,14 @@ func gatherMetrics(f *framework.Framework) {
 var summary framework.TestDataSummary
 grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, false, false, true, false, false)
 if err != nil {
-e2elog.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
+framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
 } else {
 received, err := grabber.Grab()
 if err != nil {
-e2elog.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
+framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
 } else {
 summary = (*e2emetrics.ComponentCollection)(&received)
-e2elog.Logf(summary.PrintHumanReadable())
+framework.Logf(summary.PrintHumanReadable())
 }
 }
 }
@@ -317,7 +316,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 ginkgo.By("create the rc")
 rc, err := rcClient.Create(rc)
 if err != nil {
-e2elog.Failf("Failed to create replication controller: %v", err)
+framework.Failf("Failed to create replication controller: %v", err)
 }
 // wait for rc to create some pods
 if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
@@ -335,13 +334,13 @@ var _ = SIGDescribe("Garbage collector", func() {
 return false, nil

 }); err != nil {
-e2elog.Failf("failed to wait for the rc to create some pods: %v", err)
+framework.Failf("failed to wait for the rc to create some pods: %v", err)
 }
 ginkgo.By("delete the rc")
 deleteOptions := getBackgroundOptions()
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
 if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
-e2elog.Failf("failed to delete the rc: %v", err)
+framework.Failf("failed to delete the rc: %v", err)
 }
 ginkgo.By("wait for all pods to be garbage collected")
 // wait for the RCs and Pods to reach the expected numbers.
@@ -349,12 +348,12 @@ var _ = SIGDescribe("Garbage collector", func() {
 objects := map[string]int{"ReplicationControllers": 0, "Pods": 0}
 return verifyRemainingObjects(f, objects)
 }); err != nil {
-e2elog.Failf("failed to wait for all pods to be deleted: %v", err)
+framework.Failf("failed to wait for all pods to be deleted: %v", err)
 remainingPods, err := podClient.List(metav1.ListOptions{})
 if err != nil {
-e2elog.Failf("failed to list pods post mortem: %v", err)
+framework.Failf("failed to list pods post mortem: %v", err)
 } else {
-e2elog.Failf("remaining pods are: %#v", remainingPods)
+framework.Failf("remaining pods are: %#v", remainingPods)
 }
 }
 gatherMetrics(f)
@@ -375,7 +374,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 ginkgo.By("create the rc")
 rc, err := rcClient.Create(rc)
 if err != nil {
-e2elog.Failf("Failed to create replication controller: %v", err)
+framework.Failf("Failed to create replication controller: %v", err)
 }
 // wait for rc to create pods
 if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
@@ -389,13 +388,13 @@ var _ = SIGDescribe("Garbage collector", func() {
 return false, nil

 }); err != nil {
-e2elog.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
+framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
 }
 ginkgo.By("delete the rc")
 deleteOptions := getOrphanOptions()
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
 if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
-e2elog.Failf("failed to delete the rc: %v", err)
+framework.Failf("failed to delete the rc: %v", err)
 }
 ginkgo.By("wait for the rc to be deleted")
 // Orphaning the 100 pods takes 100 PATCH operations. The default qps of
@@ -415,16 +414,16 @@ var _ = SIGDescribe("Garbage collector", func() {
 }
 return true, nil
 }); err != nil {
-e2elog.Failf("%v", err)
+framework.Failf("%v", err)
 }
 ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
 time.Sleep(30 * time.Second)
 pods, err := podClient.List(metav1.ListOptions{})
 if err != nil {
-e2elog.Failf("Failed to list pods: %v", err)
+framework.Failf("Failed to list pods: %v", err)
 }
 if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
-e2elog.Failf("expect %d pods, got %d pods", e, a)
+framework.Failf("expect %d pods, got %d pods", e, a)
 }
 gatherMetrics(f)
 })
@@ -441,7 +440,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 ginkgo.By("create the rc")
 rc, err := rcClient.Create(rc)
 if err != nil {
-e2elog.Failf("Failed to create replication controller: %v", err)
+framework.Failf("Failed to create replication controller: %v", err)
 }
 // wait for rc to create some pods
 if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
@@ -454,22 +453,22 @@ var _ = SIGDescribe("Garbage collector", func() {
 }
 return false, nil
 }); err != nil {
-e2elog.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
+framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
 }
 ginkgo.By("delete the rc")
 deleteOptions := &metav1.DeleteOptions{}
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
 if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
-e2elog.Failf("failed to delete the rc: %v", err)
+framework.Failf("failed to delete the rc: %v", err)
 }
 ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
 time.Sleep(30 * time.Second)
 pods, err := podClient.List(metav1.ListOptions{})
 if err != nil {
-e2elog.Failf("Failed to list pods: %v", err)
+framework.Failf("Failed to list pods: %v", err)
 }
 if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
-e2elog.Failf("expect %d pods, got %d pods", e, a)
+framework.Failf("expect %d pods, got %d pods", e, a)
 }
 gatherMetrics(f)
 })
@@ -489,7 +488,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 ginkgo.By("create the deployment")
 createdDeployment, err := deployClient.Create(deployment)
 if err != nil {
-e2elog.Failf("Failed to create deployment: %v", err)
+framework.Failf("Failed to create deployment: %v", err)
 }
 // wait for deployment to create some rs
 ginkgo.By("Wait for the Deployment to create new ReplicaSet")
@@ -502,14 +501,14 @@ var _ = SIGDescribe("Garbage collector", func() {

 })
 if err != nil {
-e2elog.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
+framework.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
 }

 ginkgo.By("delete the deployment")
 deleteOptions := getBackgroundOptions()
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
 if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
-e2elog.Failf("failed to delete the deployment: %v", err)
+framework.Failf("failed to delete the deployment: %v", err)
 }
 ginkgo.By("wait for all rs to be garbage collected")
 err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
@@ -526,7 +525,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 errList = append(errList, fmt.Errorf("remaining rs are: %#v", remainingRSs))
 }
 aggregatedError := utilerrors.NewAggregate(errList)
-e2elog.Failf("Failed to wait for all rs to be garbage collected: %v", aggregatedError)
+framework.Failf("Failed to wait for all rs to be garbage collected: %v", aggregatedError)

 }
@@ -548,7 +547,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 ginkgo.By("create the deployment")
 createdDeployment, err := deployClient.Create(deployment)
 if err != nil {
-e2elog.Failf("Failed to create deployment: %v", err)
+framework.Failf("Failed to create deployment: %v", err)
 }
 // wait for deployment to create some rs
 ginkgo.By("Wait for the Deployment to create new ReplicaSet")
@@ -561,21 +560,21 @@ var _ = SIGDescribe("Garbage collector", func() {

 })
 if err != nil {
-e2elog.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
+framework.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
 }

 ginkgo.By("delete the deployment")
 deleteOptions := getOrphanOptions()
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
 if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
-e2elog.Failf("failed to delete the deployment: %v", err)
+framework.Failf("failed to delete the deployment: %v", err)
 }
 ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the rs")
 time.Sleep(30 * time.Second)
 objects := map[string]int{"Deployments": 0, "ReplicaSets": 1, "Pods": 2}
 ok, err := verifyRemainingObjects(f, objects)
 if err != nil {
-e2elog.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err)
+framework.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err)
 }
 if !ok {
 errList := make([]error, 0)
@@ -592,15 +591,15 @@ var _ = SIGDescribe("Garbage collector", func() {
 errList = append(errList, fmt.Errorf("remaining deployment's post mortem: %#v", remainingDSs))
 }
 aggregatedError := utilerrors.NewAggregate(errList)
-e2elog.Failf("Failed to verify remaining deployments, rs, and pods: %v", aggregatedError)
+framework.Failf("Failed to verify remaining deployments, rs, and pods: %v", aggregatedError)
 }
 rs, err := clientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
 if err != nil {
-e2elog.Failf("Failed to list ReplicaSet %v", err)
+framework.Failf("Failed to list ReplicaSet %v", err)
 }
 for _, replicaSet := range rs.Items {
 if metav1.GetControllerOf(&replicaSet.ObjectMeta) != nil {
-e2elog.Failf("Found ReplicaSet with non nil ownerRef %v", replicaSet)
+framework.Failf("Found ReplicaSet with non nil ownerRef %v", replicaSet)
 }
 }
@@ -622,7 +621,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 ginkgo.By("create the rc")
 rc, err := rcClient.Create(rc)
 if err != nil {
-e2elog.Failf("Failed to create replication controller: %v", err)
+framework.Failf("Failed to create replication controller: %v", err)
 }
 // wait for rc to create pods
 if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
@@ -635,13 +634,13 @@ var _ = SIGDescribe("Garbage collector", func() {
 }
 return false, nil
 }); err != nil {
-e2elog.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
+framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
 }
 ginkgo.By("delete the rc")
 deleteOptions := getForegroundOptions()
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
 if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
-e2elog.Failf("failed to delete the rc: %v", err)
+framework.Failf("failed to delete the rc: %v", err)
 }
 ginkgo.By("wait for the rc to be deleted")
 // default client QPS is 20, deleting each pod requires 2 requests, so 30s should be enough
@@ -653,15 +652,15 @@ var _ = SIGDescribe("Garbage collector", func() {
 _, err := rcClient.Get(rc.Name, metav1.GetOptions{})
 if err == nil {
 pods, _ := podClient.List(metav1.ListOptions{})
-e2elog.Logf("%d pods remaining", len(pods.Items))
+framework.Logf("%d pods remaining", len(pods.Items))
 count := 0
 for _, pod := range pods.Items {
 if pod.ObjectMeta.DeletionTimestamp == nil {
 count++
 }
 }
-e2elog.Logf("%d pods has nil DeletionTimestamp", count)
-e2elog.Logf("")
+framework.Logf("%d pods has nil DeletionTimestamp", count)
+framework.Logf("")
 return false, nil
 }
 if errors.IsNotFound(err) {
@ -671,22 +670,22 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
}); err != nil {
|
||||
pods, err2 := podClient.List(metav1.ListOptions{})
|
||||
if err2 != nil {
|
||||
e2elog.Failf("%v", err2)
|
||||
framework.Failf("%v", err2)
|
||||
}
|
||||
e2elog.Logf("%d remaining pods are:", len(pods.Items))
|
||||
e2elog.Logf("The ObjectMeta of the remaining pods are:")
|
||||
framework.Logf("%d remaining pods are:", len(pods.Items))
|
||||
framework.Logf("The ObjectMeta of the remaining pods are:")
|
||||
for _, pod := range pods.Items {
|
||||
e2elog.Logf("%#v", pod.ObjectMeta)
|
||||
framework.Logf("%#v", pod.ObjectMeta)
|
||||
}
|
||||
e2elog.Failf("failed to delete the rc: %v", err)
|
||||
framework.Failf("failed to delete the rc: %v", err)
|
||||
}
|
||||
// There shouldn't be any pods
|
||||
pods, err := podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
e2elog.Failf("%v", err)
|
||||
framework.Failf("%v", err)
|
||||
}
|
||||
if len(pods.Items) != 0 {
|
||||
e2elog.Failf("expected no pods, got %#v", pods)
|
||||
framework.Failf("expected no pods, got %#v", pods)
|
||||
}
|
||||
gatherMetrics(f)
|
||||
})
|
||||
@ -709,7 +708,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
ginkgo.By("create the rc1")
|
||||
rc1, err := rcClient.Create(rc1)
|
||||
if err != nil {
|
||||
e2elog.Failf("Failed to create replication controller: %v", err)
|
||||
framework.Failf("Failed to create replication controller: %v", err)
|
||||
}
|
||||
rc2Name := "simpletest-rc-to-stay"
|
||||
uniqLabelsStay := getUniqLabel("gctest_s", "valid_and_pending_owners_s")
|
||||
@ -717,7 +716,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
ginkgo.By("create the rc2")
|
||||
rc2, err = rcClient.Create(rc2)
|
||||
if err != nil {
|
||||
e2elog.Failf("Failed to create replication controller: %v", err)
|
||||
framework.Failf("Failed to create replication controller: %v", err)
|
||||
}
|
||||
// wait for rc1 to be stable
|
||||
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
@ -730,7 +729,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
e2elog.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
|
||||
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
|
||||
}
|
||||
ginkgo.By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
|
||||
pods, err := podClient.List(metav1.ListOptions{})
|
||||
@ -746,7 +745,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
deleteOptions := getForegroundOptions()
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc1.UID))
|
||||
if err := rcClient.Delete(rc1.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
e2elog.Failf("failed to delete the rc: %v", err)
|
||||
framework.Failf("failed to delete the rc: %v", err)
|
||||
}
|
||||
ginkgo.By("wait for the rc to be deleted")
|
||||
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
|
||||
@ -755,15 +754,15 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
_, err := rcClient.Get(rc1.Name, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
pods, _ := podClient.List(metav1.ListOptions{})
|
||||
e2elog.Logf("%d pods remaining", len(pods.Items))
|
||||
framework.Logf("%d pods remaining", len(pods.Items))
|
||||
count := 0
|
||||
for _, pod := range pods.Items {
|
||||
if pod.ObjectMeta.DeletionTimestamp == nil {
|
||||
count++
|
||||
}
|
||||
}
|
||||
e2elog.Logf("%d pods has nil DeletionTimestamp", count)
|
||||
e2elog.Logf("")
|
||||
framework.Logf("%d pods has nil DeletionTimestamp", count)
|
||||
framework.Logf("")
|
||||
return false, nil
|
||||
}
|
||||
if errors.IsNotFound(err) {
|
||||
@ -773,30 +772,30 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
}); err != nil {
|
||||
pods, err2 := podClient.List(metav1.ListOptions{})
|
||||
if err2 != nil {
|
||||
e2elog.Failf("%v", err2)
|
||||
framework.Failf("%v", err2)
|
||||
}
|
||||
e2elog.Logf("%d remaining pods are:", len(pods.Items))
|
||||
e2elog.Logf("ObjectMeta of remaining pods are:")
|
||||
framework.Logf("%d remaining pods are:", len(pods.Items))
|
||||
framework.Logf("ObjectMeta of remaining pods are:")
|
||||
for _, pod := range pods.Items {
|
||||
e2elog.Logf("%#v", pod.ObjectMeta)
|
||||
framework.Logf("%#v", pod.ObjectMeta)
|
||||
}
|
||||
e2elog.Failf("failed to delete rc %s, err: %v", rc1Name, err)
|
||||
framework.Failf("failed to delete rc %s, err: %v", rc1Name, err)
|
||||
}
|
||||
// half of the pods should still exist,
|
||||
pods, err = podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
e2elog.Failf("%v", err)
|
||||
framework.Failf("%v", err)
|
||||
}
|
||||
if len(pods.Items) != halfReplicas {
|
||||
e2elog.Failf("expected %d pods, got %d", halfReplicas, len(pods.Items))
|
||||
framework.Failf("expected %d pods, got %d", halfReplicas, len(pods.Items))
|
||||
}
|
||||
for _, pod := range pods.Items {
|
||||
if pod.ObjectMeta.DeletionTimestamp != nil {
|
||||
e2elog.Failf("expected pod DeletionTimestamp to be nil, got %#v", pod.ObjectMeta)
|
||||
framework.Failf("expected pod DeletionTimestamp to be nil, got %#v", pod.ObjectMeta)
|
||||
}
|
||||
// they should only have 1 ownerReference left
|
||||
if len(pod.ObjectMeta.OwnerReferences) != 1 {
|
||||
e2elog.Failf("expected pod to only have 1 owner, got %#v", pod.ObjectMeta.OwnerReferences)
|
||||
framework.Failf("expected pod to only have 1 owner, got %#v", pod.ObjectMeta.OwnerReferences)
|
||||
}
|
||||
}
|
||||
gatherMetrics(f)
|
||||
@ -830,15 +829,15 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
patch1 := addRefPatch(pod3.Name, pod3.UID)
|
||||
pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1)
|
||||
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
|
||||
e2elog.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
|
||||
framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
|
||||
patch2 := addRefPatch(pod1.Name, pod1.UID)
|
||||
pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2)
|
||||
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
|
||||
e2elog.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
|
||||
framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
|
||||
patch3 := addRefPatch(pod2.Name, pod2.UID)
|
||||
pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3)
|
||||
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
|
||||
e2elog.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
|
||||
framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
|
||||
// delete one pod, should result in the deletion of all pods
|
||||
deleteOptions := getForegroundOptions()
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
|
||||
@ -858,20 +857,20 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
e2elog.Logf("pods are %#v", pods.Items)
|
||||
e2elog.Failf("failed to wait for all pods to be deleted: %v", err)
|
||||
framework.Logf("pods are %#v", pods.Items)
|
||||
framework.Failf("failed to wait for all pods to be deleted: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.It("should support cascading deletion of custom resources", func() {
|
||||
config, err := framework.LoadConfig()
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load config: %v", err)
|
||||
framework.Failf("failed to load config: %v", err)
|
||||
}
|
||||
|
||||
apiExtensionClient, err := apiextensionsclientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to initialize apiExtensionClient: %v", err)
|
||||
framework.Failf("failed to initialize apiExtensionClient: %v", err)
|
||||
}
|
||||
|
||||
// Create a random custom resource definition and ensure it's available for
|
||||
@ -880,12 +879,12 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
defer func() {
|
||||
err = apiextensionstestserver.DeleteV1CustomResourceDefinition(definition, apiExtensionClient)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
e2elog.Failf("failed to delete CustomResourceDefinition: %v", err)
|
||||
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
|
||||
}
|
||||
}()
|
||||
definition, err = apiextensionstestserver.CreateNewV1CustomResourceDefinition(definition, apiExtensionClient, f.DynamicClient)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create CustomResourceDefinition: %v", err)
|
||||
framework.Failf("failed to create CustomResourceDefinition: %v", err)
|
||||
}
|
||||
framework.ExpectEqual(len(definition.Spec.Versions), 1, "custom resource definition should have one version")
|
||||
version := definition.Spec.Versions[0]
|
||||
@ -909,9 +908,9 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
}
|
||||
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create owner resource %q: %v", ownerName, err)
|
||||
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
|
||||
}
|
||||
e2elog.Logf("created owner resource %q", ownerName)
|
||||
framework.Logf("created owner resource %q", ownerName)
|
||||
|
||||
// Create a custom dependent resource.
|
||||
dependentName := names.SimpleNameGenerator.GenerateName("dependent")
|
||||
@ -934,15 +933,15 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
}
|
||||
persistedDependent, err := resourceClient.Create(dependent, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create dependent resource %q: %v", dependentName, err)
|
||||
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
|
||||
}
|
||||
e2elog.Logf("created dependent resource %q", dependentName)
|
||||
framework.Logf("created dependent resource %q", dependentName)
|
||||
|
||||
// Delete the owner.
|
||||
background := metav1.DeletePropagationBackground
|
||||
err = resourceClient.Delete(ownerName, &metav1.DeleteOptions{PropagationPolicy: &background})
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete owner resource %q: %v", ownerName, err)
|
||||
framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
|
||||
}
|
||||
|
||||
// Ensure the dependent is deleted.
|
||||
@ -950,18 +949,18 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
_, err := resourceClient.Get(dependentName, metav1.GetOptions{})
|
||||
return errors.IsNotFound(err), nil
|
||||
}); err != nil {
|
||||
e2elog.Logf("owner: %#v", persistedOwner)
|
||||
e2elog.Logf("dependent: %#v", persistedDependent)
|
||||
e2elog.Failf("failed waiting for dependent resource %q to be deleted", dependentName)
|
||||
framework.Logf("owner: %#v", persistedOwner)
|
||||
framework.Logf("dependent: %#v", persistedDependent)
|
||||
framework.Failf("failed waiting for dependent resource %q to be deleted", dependentName)
|
||||
}
|
||||
|
||||
// Ensure the owner is deleted.
|
||||
_, err = resourceClient.Get(ownerName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
e2elog.Failf("expected owner resource %q to be deleted", ownerName)
|
||||
framework.Failf("expected owner resource %q to be deleted", ownerName)
|
||||
} else {
|
||||
if !errors.IsNotFound(err) {
|
||||
e2elog.Failf("unexpected error getting owner resource %q: %v", ownerName, err)
|
||||
framework.Failf("unexpected error getting owner resource %q: %v", ownerName, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
@ -969,12 +968,12 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
ginkgo.It("should support orphan deletion of custom resources", func() {
|
||||
config, err := framework.LoadConfig()
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to load config: %v", err)
|
||||
framework.Failf("failed to load config: %v", err)
|
||||
}
|
||||
|
||||
apiExtensionClient, err := apiextensionsclientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to initialize apiExtensionClient: %v", err)
|
||||
framework.Failf("failed to initialize apiExtensionClient: %v", err)
|
||||
}
|
||||
|
||||
// Create a random custom resource definition and ensure it's available for
|
||||
@ -983,12 +982,12 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
defer func() {
|
||||
err = apiextensionstestserver.DeleteV1CustomResourceDefinition(definition, apiExtensionClient)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
e2elog.Failf("failed to delete CustomResourceDefinition: %v", err)
|
||||
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
|
||||
}
|
||||
}()
|
||||
definition, err = apiextensionstestserver.CreateNewV1CustomResourceDefinition(definition, apiExtensionClient, f.DynamicClient)
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create CustomResourceDefinition: %v", err)
|
||||
framework.Failf("failed to create CustomResourceDefinition: %v", err)
|
||||
}
|
||||
framework.ExpectEqual(len(definition.Spec.Versions), 1, "custom resource definition should have one version")
|
||||
version := definition.Spec.Versions[0]
|
||||
@ -1012,9 +1011,9 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
}
|
||||
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create owner resource %q: %v", ownerName, err)
|
||||
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
|
||||
}
|
||||
e2elog.Logf("created owner resource %q", ownerName)
|
||||
framework.Logf("created owner resource %q", ownerName)
|
||||
|
||||
// Create a custom dependent resource.
|
||||
dependentName := names.SimpleNameGenerator.GenerateName("dependent")
|
||||
@ -1037,14 +1036,14 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
}
|
||||
_, err = resourceClient.Create(dependent, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to create dependent resource %q: %v", dependentName, err)
|
||||
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
|
||||
}
|
||||
e2elog.Logf("created dependent resource %q", dependentName)
|
||||
framework.Logf("created dependent resource %q", dependentName)
|
||||
|
||||
// Delete the owner and orphan the dependent.
|
||||
err = resourceClient.Delete(ownerName, getOrphanOptions())
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to delete owner resource %q: %v", ownerName, err)
|
||||
framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
|
||||
}
|
||||
|
||||
ginkgo.By("wait for the owner to be deleted")
|
||||
@ -1058,7 +1057,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
e2elog.Failf("timeout in waiting for the owner to be deleted: %v", err)
|
||||
framework.Failf("timeout in waiting for the owner to be deleted: %v", err)
|
||||
}
|
||||
|
||||
// Wait 30s and ensure the dependent is not deleted.
|
||||
@ -1067,7 +1066,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
_, err := resourceClient.Get(dependentName, metav1.GetOptions{})
|
||||
return false, err
|
||||
}); err != nil && err != wait.ErrWaitTimeout {
|
||||
e2elog.Failf("failed to ensure the dependent is not deleted: %v", err)
|
||||
framework.Failf("failed to ensure the dependent is not deleted: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
@ -1088,12 +1087,12 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
return len(jobs.Items) > 0, nil
|
||||
})
|
||||
if err != nil {
|
||||
e2elog.Failf("Failed to wait for the CronJob to create some Jobs: %v", err)
|
||||
framework.Failf("Failed to wait for the CronJob to create some Jobs: %v", err)
|
||||
}
|
||||
|
||||
ginkgo.By("Delete the cronjob")
|
||||
if err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Delete(cronJob.Name, getBackgroundOptions()); err != nil {
|
||||
e2elog.Failf("Failed to delete the CronJob: %v", err)
|
||||
framework.Failf("Failed to delete the CronJob: %v", err)
|
||||
}
|
||||
ginkgo.By("Verify if cronjob does not leave jobs nor pods behind")
|
||||
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
@ -1101,7 +1100,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
return verifyRemainingObjects(f, objects)
|
||||
})
|
||||
if err != nil {
|
||||
e2elog.Failf("Failed to wait for all jobs and pods to be deleted: %v", err)
|
||||
framework.Failf("Failed to wait for all jobs and pods to be deleted: %v", err)
|
||||
}
|
||||
|
||||
gatherMetrics(f)
|
||||
|
@ -30,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

"github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -70,10 +69,10 @@ func observeCreation(w watch.Interface) {
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
e2elog.Failf("Failed to observe the creation: %v", event)
framework.Failf("Failed to observe the creation: %v", event)
}
case <-time.After(30 * time.Second):
e2elog.Failf("Timeout while waiting for observing the creation")
framework.Failf("Timeout while waiting for observing the creation")
}
}

@ -94,7 +93,7 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool)
}
}
if !updated {
e2elog.Failf("Failed to observe pod update")
framework.Failf("Failed to observe pod update")
}
return
}
@ -113,7 +112,7 @@ var _ = SIGDescribe("Generated clientset", func() {
options := metav1.ListOptions{LabelSelector: selector}
pods, err := podClient.List(options)
if err != nil {
e2elog.Failf("Failed to query for pods: %v", err)
framework.Failf("Failed to query for pods: %v", err)
}
framework.ExpectEqual(len(pods.Items), 0)
options = metav1.ListOptions{
@ -122,13 +121,13 @@ var _ = SIGDescribe("Generated clientset", func() {
}
w, err := podClient.Watch(options)
if err != nil {
e2elog.Failf("Failed to set up watch: %v", err)
framework.Failf("Failed to set up watch: %v", err)
}

ginkgo.By("creating the pod")
pod, err = podClient.Create(pod)
if err != nil {
e2elog.Failf("Failed to create pod: %v", err)
framework.Failf("Failed to create pod: %v", err)
}

ginkgo.By("verifying the pod is in kubernetes")
@ -138,7 +137,7 @@ var _ = SIGDescribe("Generated clientset", func() {
}
pods, err = podClient.List(options)
if err != nil {
e2elog.Failf("Failed to query for pods: %v", err)
framework.Failf("Failed to query for pods: %v", err)
}
framework.ExpectEqual(len(pods.Items), 1)

@ -152,7 +151,7 @@ var _ = SIGDescribe("Generated clientset", func() {
ginkgo.By("deleting the pod gracefully")
gracePeriod := int64(31)
if err := podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil {
e2elog.Failf("Failed to delete pod: %v", err)
framework.Failf("Failed to delete pod: %v", err)
}

ginkgo.By("verifying the deletionTimestamp and deletionGracePeriodSeconds of the pod is set")
@ -229,7 +228,7 @@ var _ = SIGDescribe("Generated clientset", func() {
options := metav1.ListOptions{LabelSelector: selector}
cronJobs, err := cronJobClient.List(options)
if err != nil {
e2elog.Failf("Failed to query for cronJobs: %v", err)
framework.Failf("Failed to query for cronJobs: %v", err)
}
framework.ExpectEqual(len(cronJobs.Items), 0)
options = metav1.ListOptions{
@ -238,13 +237,13 @@ var _ = SIGDescribe("Generated clientset", func() {
}
w, err := cronJobClient.Watch(options)
if err != nil {
e2elog.Failf("Failed to set up watch: %v", err)
framework.Failf("Failed to set up watch: %v", err)
}

ginkgo.By("creating the cronJob")
cronJob, err = cronJobClient.Create(cronJob)
if err != nil {
e2elog.Failf("Failed to create cronJob: %v", err)
framework.Failf("Failed to create cronJob: %v", err)
}

ginkgo.By("verifying the cronJob is in kubernetes")
@ -254,7 +253,7 @@ var _ = SIGDescribe("Generated clientset", func() {
}
cronJobs, err = cronJobClient.List(options)
if err != nil {
e2elog.Failf("Failed to query for cronJobs: %v", err)
framework.Failf("Failed to query for cronJobs: %v", err)
}
framework.ExpectEqual(len(cronJobs.Items), 1)

@ -265,13 +264,13 @@ var _ = SIGDescribe("Generated clientset", func() {
// Use DeletePropagationBackground so the CronJob is really gone when the call returns.
propagationPolicy := metav1.DeletePropagationBackground
if err := cronJobClient.Delete(cronJob.Name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
e2elog.Failf("Failed to delete cronJob: %v", err)
framework.Failf("Failed to delete cronJob: %v", err)
}

options = metav1.ListOptions{LabelSelector: selector}
cronJobs, err = cronJobClient.List(options)
if err != nil {
e2elog.Failf("Failed to list cronJobs to verify deletion: %v", err)
framework.Failf("Failed to list cronJobs to verify deletion: %v", err)
}
framework.ExpectEqual(len(cronJobs.Items), 0)
})

@ -28,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"

@ -75,7 +74,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
}
}
if cnt > maxAllowedAfterDel {
e2elog.Logf("Remaining namespaces : %v", cnt)
framework.Logf("Remaining namespaces : %v", cnt)
return false, nil
}
return true, nil

@ -33,7 +33,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"

@ -1688,7 +1687,7 @@ func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.R
// verify that the quota shows the expected used resource values
for k, v := range used {
if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) {
e2elog.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
framework.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
return false, nil
}
}

@ -35,7 +35,6 @@ import (
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/pkg/printers"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@ -53,7 +52,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
c := f.ClientSet

podName := "pod-1"
e2elog.Logf("Creating pod %s", podName)
framework.Logf("Creating pod %s", podName)

_, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, ns)
@ -61,7 +60,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
table := &metav1beta1.Table{}
err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
framework.ExpectNoError(err, "failed to get pod %s in Table form in namespace: %s", podName, ns)
e2elog.Logf("Table: %#v", table)
framework.Logf("Table: %#v", table)

gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">", 2))
framework.ExpectEqual(len(table.Rows), 1)
@ -72,7 +71,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
out := printTable(table)
gomega.Expect(out).To(gomega.MatchRegexp("^NAME\\s"))
gomega.Expect(out).To(gomega.MatchRegexp("\npod-1\\s"))
e2elog.Logf("Table:\n%s", out)
framework.Logf("Table:\n%s", out)
})

ginkgo.It("should return chunks of table results for list calls", func() {
@ -98,9 +97,9 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
if err == nil {
return
}
e2elog.Logf("Got an error creating template %d: %v", i, err)
framework.Logf("Got an error creating template %d: %v", i, err)
}
e2elog.Fail("Unable to create template %d, exiting", i)
framework.Failf("Unable to create template %d, exiting", i)
})

pagedTable := &metav1beta1.Table{}
@ -131,7 +130,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
table := &metav1beta1.Table{}
err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
framework.ExpectNoError(err, "failed to get nodes in Table form across all namespaces")
e2elog.Logf("Table: %#v", table)
framework.Logf("Table: %#v", table)

gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">=", 2))
gomega.Expect(len(table.Rows)).To(gomega.BeNumerically(">=", 1))
@ -142,7 +141,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {

out := printTable(table)
gomega.Expect(out).To(gomega.MatchRegexp("^NAME\\s"))
e2elog.Logf("Table:\n%s", out)
framework.Logf("Table:\n%s", out)
})

/*

@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

"github.com/onsi/ginkgo"
)
@ -216,7 +215,7 @@ var _ = SIGDescribe("Watchers", func() {
expectEvent(testWatchBroken, watch.Added, testConfigMap)
lastEvent, ok := waitForEvent(testWatchBroken, watch.Modified, nil, 1*time.Minute)
if !ok {
e2elog.Failf("Timed out waiting for second watch notification")
framework.Failf("Timed out waiting for second watch notification")
}
testWatchBroken.Stop()

@ -229,7 +228,7 @@ var _ = SIGDescribe("Watchers", func() {
ginkgo.By("creating a new watch on configmaps from the last resource version observed by the first watch")
lastEventConfigMap, ok := lastEvent.Object.(*v1.ConfigMap)
if !ok {
e2elog.Failf("Expected last notification to refer to a configmap but got: %v", lastEvent)
framework.Failf("Expected last notification to refer to a configmap but got: %v", lastEvent)
}
testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue)
framework.ExpectNoError(err, "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)
@ -352,7 +351,7 @@ var _ = SIGDescribe("Watchers", func() {
for _, wc := range wcs[1:] {
e := waitForNextConfigMapEvent(wc)
if resourceVersion != e.ResourceVersion {
e2elog.Failf("resource version mismatch, expected %s but got %s", resourceVersion, e.ResourceVersion)
framework.Failf("resource version mismatch, expected %s but got %s", resourceVersion, e.ResourceVersion)
}
}
}
@ -396,13 +395,13 @@ func setConfigMapData(cm *v1.ConfigMap, key, value string) {

func expectEvent(w watch.Interface, eventType watch.EventType, object runtime.Object) {
if event, ok := waitForEvent(w, eventType, object, 1*time.Minute); !ok {
e2elog.Failf("Timed out waiting for expected watch notification: %v", event)
framework.Failf("Timed out waiting for expected watch notification: %v", event)
}
}

func expectNoEvent(w watch.Interface, eventType watch.EventType, object runtime.Object) {
if event, ok := waitForEvent(w, eventType, object, 10*time.Second); ok {
e2elog.Failf("Unexpected watch notification observed: %v", event)
framework.Failf("Unexpected watch notification observed: %v", event)
}
}

@ -413,9 +412,9 @@ func waitForEvent(w watch.Interface, expectType watch.EventType, expectObject ru
select {
case actual, ok := <-w.ResultChan():
if ok {
e2elog.Logf("Got : %v %v", actual.Type, actual.Object)
framework.Logf("Got : %v %v", actual.Type, actual.Object)
} else {
e2elog.Failf("Watch closed unexpectedly")
framework.Failf("Watch closed unexpectedly")
}
if expectType == actual.Type && (expectObject == nil || apiequality.Semantic.DeepEqual(expectObject, actual.Object)) {
return actual, true
@ -436,9 +435,9 @@ func waitForNextConfigMapEvent(watch watch.Interface) *v1.ConfigMap {
if configMap, ok := event.Object.(*v1.ConfigMap); ok {
return configMap
}
e2elog.Failf("expected config map")
framework.Failf("expected config map")
case <-time.After(10 * time.Second):
e2elog.Failf("timed out waiting for watch event")
framework.Failf("timed out waiting for watch event")
}
return nil // should never happen
}
@ -486,7 +485,7 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa
framework.ExpectNoError(err, "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns)
existing = append(existing[:idx], existing[idx+1:]...)
default:
e2elog.Failf("Unsupported event operation: %d", op)
framework.Failf("Unsupported event operation: %d", op)
}
select {
case <-stopc:

@ -42,7 +42,6 @@ import (
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -742,7 +741,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) {
},
})
if err != nil && errors.IsAlreadyExists(err) {
e2elog.Logf("role binding %s already exists", roleBindingName)
framework.Logf("role binding %s already exists", roleBindingName)
} else {
framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
}
@ -1004,7 +1003,7 @@ func testMutatingConfigMapWebhook(f *framework.Framework) {
"mutation-stage-2": "yes",
}
if !reflect.DeepEqual(expectedConfigMapData, mutatedConfigMap.Data) {
e2elog.Failf("\nexpected %#v\n, got %#v\n", expectedConfigMapData, mutatedConfigMap.Data)
framework.Failf("\nexpected %#v\n, got %#v\n", expectedConfigMapData, mutatedConfigMap.Data)
}
}

@ -1065,13 +1064,13 @@ func testMutatingPodWebhook(f *framework.Framework) {
mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).To(gomega.BeNil())
if len(mutatedPod.Spec.InitContainers) != 1 {
e2elog.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers)
framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers)
}
if got, expected := mutatedPod.Spec.InitContainers[0].Name, "webhook-added-init-container"; got != expected {
e2elog.Failf("expect the init container name to be %q, got %q", expected, got)
framework.Failf("expect the init container name to be %q, got %q", expected, got)
}
if got, expected := mutatedPod.Spec.InitContainers[0].TerminationMessagePolicy, v1.TerminationMessageReadFile; got != expected {
e2elog.Failf("expect the init terminationMessagePolicy to be default to %q, got %q", expected, got)
framework.Failf("expect the init terminationMessagePolicy to be default to %q, got %q", expected, got)
}
}

@ -1100,11 +1099,11 @@ func testWebhook(f *framework.Framework) {
framework.ExpectError(err, "create pod %s in namespace %s should have been denied by webhook", pod.Name, f.Namespace.Name)
expectedErrMsg1 := "the pod contains unwanted container name"
if !strings.Contains(err.Error(), expectedErrMsg1) {
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
}
expectedErrMsg2 := "the pod contains unwanted label"
if !strings.Contains(err.Error(), expectedErrMsg2) {
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg2, err.Error())
framework.Failf("expect error contains %q, got %q", expectedErrMsg2, err.Error())
}

ginkgo.By("create a pod that causes the webhook to hang")
@ -1115,15 +1114,15 @@ func testWebhook(f *framework.Framework) {
framework.ExpectError(err, "create pod %s in namespace %s should have caused webhook to hang", pod.Name, f.Namespace.Name)
// ensure the error is webhook-related, not client-side
if !strings.Contains(err.Error(), "webhook") {
e2elog.Failf("expect error %q, got %q", "webhook", err.Error())
framework.Failf("expect error %q, got %q", "webhook", err.Error())
}
// ensure the error is a timeout
if !strings.Contains(err.Error(), "deadline") {
e2elog.Failf("expect error %q, got %q", "deadline", err.Error())
framework.Failf("expect error %q, got %q", "deadline", err.Error())
}
// ensure the pod was not actually created
if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}); !errors.IsNotFound(err) {
e2elog.Failf("expect notfound error looking for rejected pod, got %v", err)
framework.Failf("expect notfound error looking for rejected pod, got %v", err)
}

ginkgo.By("create a configmap that should be denied by the webhook")
@ -1133,7 +1132,7 @@ func testWebhook(f *framework.Framework) {
framework.ExpectError(err, "create configmap %s in namespace %s should have been denied by the webhook", configmap.Name, f.Namespace.Name)
expectedErrMsg := "the configmap contains unwanted key and value"
if !strings.Contains(err.Error(), expectedErrMsg) {
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}

ginkgo.By("create a configmap that should be admitted by the webhook")
@ -1159,7 +1158,7 @@ func testWebhook(f *framework.Framework) {
_, err = updateConfigMap(client, f.Namespace.Name, allowedConfigMapName, toNonCompliantFn)
framework.ExpectError(err, "update (PUT) admitted configmap %s in namespace %s to a non-compliant one should be rejected by webhook", allowedConfigMapName, f.Namespace.Name)
if !strings.Contains(err.Error(), expectedErrMsg) {
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}

ginkgo.By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook")
@ -1167,7 +1166,7 @@ func testWebhook(f *framework.Framework) {
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(allowedConfigMapName, types.StrategicMergePatchType, []byte(patch))
framework.ExpectError(err, "update admitted configmap %s in namespace %s by strategic merge patch to a non-compliant one should be rejected by webhook. Patch: %+v", allowedConfigMapName, f.Namespace.Name, patch)
if !strings.Contains(err.Error(), expectedErrMsg) {
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}

ginkgo.By("create a namespace that bypass the webhook")
@ -1200,7 +1199,7 @@ func testBlockingConfigmapDeletion(f *framework.Framework) {
framework.ExpectError(err, "deleting configmap %s in namespace: %s should be denied", configmap.Name, f.Namespace.Name)
expectedErrMsg1 := "the configmap cannot be deleted because it contains unwanted key and value"
if !strings.Contains(err.Error(), expectedErrMsg1) {
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
}

ginkgo.By("remove the offending key and value from the configmap data")
@ -1233,7 +1232,7 @@ func testAttachingPodWebhook(f *framework.Framework) {
_, err = framework.NewKubectlCommand("attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
framework.ExpectError(err, "'kubectl attach' the pod, should be denied by the webhook")
if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
e2elog.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
}
}

@ -1329,7 +1328,7 @@ func testFailClosedWebhook(f *framework.Framework) {
_, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap)
framework.ExpectError(err, "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName)
if !errors.IsInternalError(err) {
e2elog.Failf("expect an internal error, got %#v", err)
framework.Failf("expect an internal error, got %#v", err)
}
}

@ -1510,7 +1509,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
if mutatedValidatingWebhookConfiguration.ObjectMeta.Labels != nil && mutatedValidatingWebhookConfiguration.ObjectMeta.Labels[addedLabelKey] == addedLabelValue {
e2elog.Failf("expected %s not to be mutated by mutating webhooks but it was", configName)
framework.Failf("expected %s not to be mutated by mutating webhooks but it was", configName)
}

err = waitWebhookConfigurationReady(f)
@ -1566,7 +1565,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
if mutatedMutatingWebhookConfiguration.ObjectMeta.Labels != nil && mutatedMutatingWebhookConfiguration.ObjectMeta.Labels[addedLabelKey] == addedLabelValue {
e2elog.Failf("expected %s not to be mutated by mutating webhooks but it was", configName)
framework.Failf("expected %s not to be mutated by mutating webhooks but it was", configName)
}

err = waitWebhookConfigurationReady(f)
@ -1888,7 +1887,7 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1.Cust
framework.ExpectError(err, "create custom resource %s in namespace %s should be denied by webhook", crInstanceName, f.Namespace.Name)
expectedErrMsg := "the custom resource contains unwanted data"
if !strings.Contains(err.Error(), expectedErrMsg) {
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
}

@ -1924,7 +1923,7 @@ func testBlockingCustomResourceUpdateDeletion(f *framework.Framework, crd *apiex

expectedErrMsg := "the custom resource contains unwanted data"
if !strings.Contains(err.Error(), expectedErrMsg) {
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}

ginkgo.By("Deleting the custom resource should be denied")
@ -1932,7 +1931,7 @@ func testBlockingCustomResourceUpdateDeletion(f *framework.Framework, crd *apiex
framework.ExpectError(err, "deleting custom resource %s in namespace: %s should be denied", crInstanceName, f.Namespace.Name)
expectedErrMsg1 := "the custom resource cannot be deleted because it contains unwanted key and value"
if !strings.Contains(err.Error(), expectedErrMsg1) {
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
}

ginkgo.By("Remove the offending key and value from the custom resource data")
@ -1978,7 +1977,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension
expectedCRData["mutation-stage-2"] = "yes"
}
if !reflect.DeepEqual(expectedCRData, mutatedCR.Object["data"]) {
e2elog.Failf("\nexpected %#v\n, got %#v\n", expectedCRData, mutatedCR.Object["data"])
framework.Failf("\nexpected %#v\n, got %#v\n", expectedCRData, mutatedCR.Object["data"])
}
}

@ -2038,10 +2037,10 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd.
"mutation-stage-2": "yes",
}
if !reflect.DeepEqual(expectedCRData, mutatedCR.Object["data"]) {
e2elog.Failf("\nexpected %#v\n, got %#v\n", expectedCRData, mutatedCR.Object["data"])
framework.Failf("\nexpected %#v\n, got %#v\n", expectedCRData, mutatedCR.Object["data"])
}
if !reflect.DeepEqual("test", mutatedCR.Object["dummy"]) {
e2elog.Failf("\nexpected %#v\n, got %#v\n", "test", mutatedCR.Object["dummy"])
framework.Failf("\nexpected %#v\n, got %#v\n", "test", mutatedCR.Object["dummy"])
}
}

@ -2122,12 +2121,12 @@ func testCRDDenyWebhook(f *framework.Framework) {
// Creating a custom resource definition for use by assorted tests.
config, err := framework.LoadConfig()
if err != nil {
e2elog.Failf("failed to load config: %v", err)
framework.Failf("failed to load config: %v", err)
return
}
apiExtensionClient, err := crdclientset.NewForConfig(config)
if err != nil {
e2elog.Failf("failed to initialize apiExtensionClient: %v", err)
framework.Failf("failed to initialize apiExtensionClient: %v", err)
return
}
crd := &apiextensionsv1.CustomResourceDefinition{
@ -2158,7 +2157,7 @@ func testCRDDenyWebhook(f *framework.Framework) {
framework.ExpectError(err, "create custom resource definition %s should be denied by webhook", crd.Name)
expectedErrMsg := "the crd contains unwanted label"
if !strings.Contains(err.Error(), expectedErrMsg) {
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
}

@ -2241,7 +2240,7 @@ func testSlowWebhookTimeoutFailEarly(f *framework.Framework) {
isTimeoutError := strings.Contains(err.Error(), `context deadline exceeded`) || strings.Contains(err.Error(), `timeout`)
isErrorQueryingWebhook := strings.Contains(err.Error(), `/always-allow-delay-5s?timeout=1s`)
if !isTimeoutError || !isErrorQueryingWebhook {
e2elog.Failf("expect an HTTP/dial timeout error querying the slow webhook, got: %q", err.Error())
framework.Failf("expect an HTTP/dial timeout error querying the slow webhook, got: %q", err.Error())
}
}

@ -2307,7 +2306,7 @@ func createValidatingWebhookConfiguration(f *framework.Framework, config *admiss
if webhook.ObjectSelector != nil && webhook.ObjectSelector.MatchLabels[f.UniqueName] == "true" {
continue
}
e2elog.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName)
framework.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName)
}
return f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(config)
}
@ -2322,7 +2321,7 @@ func createMutatingWebhookConfiguration(f *framework.Framework, config *admissio
if webhook.ObjectSelector != nil && webhook.ObjectSelector.MatchLabels[f.UniqueName] == "true" {
continue
}
e2elog.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName)
framework.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName)
}
return f.ClientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(config)
}