[e2e] move Failf from e2e/framework to e2e/framework/log

This commit is contained in:
SataQiu 2019-06-12 18:59:02 +08:00
parent 56b40066d5
commit f90e228e79
13 changed files with 231 additions and 210 deletions

View File

@ -80,11 +80,11 @@ var _ = SIGDescribe("Aggregator", func() {
if aggrclient == nil {
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("could not load config: %v", err)
e2elog.Failf("could not load config: %v", err)
}
aggrclient, err = aggregatorclient.NewForConfig(config)
if err != nil {
framework.Failf("could not create aggregator client: %v", err)
e2elog.Failf("could not create aggregator client: %v", err)
}
}
})
@ -401,7 +401,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
var statusCode int
result.StatusCode(&statusCode)
if statusCode != 201 {
framework.Failf("Flunders client creation response was status %d, not 201", statusCode)
e2elog.Failf("Flunders client creation response was status %d, not 201", statusCode)
}
pods, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
@ -415,7 +415,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
err = json.Unmarshal(contents, &flundersList)
validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.k8s.io/v1alpha1")
if len(flundersList.Items) != 1 {
framework.Failf("failed to get back the correct flunders list %v", flundersList)
e2elog.Failf("failed to get back the correct flunders list %v", flundersList)
}
// kubectl delete flunder test-flunder -v 9
@ -430,7 +430,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
err = json.Unmarshal(contents, &flundersList)
validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.k8s.io/v1alpha1")
if len(flundersList.Items) != 0 {
framework.Failf("failed to get back the correct deleted flunders list %v", flundersList)
e2elog.Failf("failed to get back the correct deleted flunders list %v", flundersList)
}
flunderName = generateFlunderName("dynamic-flunder")
@ -442,7 +442,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
gvr := schema.GroupVersionResource{Group: "wardle.k8s.io", Version: "v1alpha1", Resource: "flunders"}
_, ok := groupVersionResources[gvr]
if !ok {
framework.Failf("could not find group version resource for dynamic client and wardle/flunders (discovery error: %v, discovery results: %#v)", discoveryErr, groupVersionResources)
e2elog.Failf("could not find group version resource for dynamic client and wardle/flunders (discovery error: %v, discovery results: %#v)", discoveryErr, groupVersionResources)
}
dynamicClient := f.DynamicClient.Resource(gvr).Namespace(namespace)
@ -468,7 +468,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
unstructuredList, err := dynamicClient.List(metav1.ListOptions{})
framework.ExpectNoError(err, "listing flunders using dynamic client")
if len(unstructuredList.Items) != 1 {
framework.Failf("failed to get back the correct flunders list %v from the dynamic client", unstructuredList)
e2elog.Failf("failed to get back the correct flunders list %v from the dynamic client", unstructuredList)
}
// kubectl delete flunder test-flunder
@ -479,7 +479,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
unstructuredList, err = dynamicClient.List(metav1.ListOptions{})
framework.ExpectNoError(err, "listing flunders using dynamic client")
if len(unstructuredList.Items) != 0 {
framework.Failf("failed to get back the correct deleted flunders list %v from the dynamic client", unstructuredList)
e2elog.Failf("failed to get back the correct deleted flunders list %v from the dynamic client", unstructuredList)
}
cleanTest(client, aggrclient, namespace)
@ -512,7 +512,7 @@ func validateErrorWithDebugInfo(f *framework.Framework, err error, pods *v1.PodL
msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods)
}
framework.Failf(msg)
e2elog.Failf(msg)
}
}

View File

@ -23,7 +23,7 @@ import (
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/utils"
)
@ -38,27 +38,27 @@ type certContext struct {
func setupServerCert(namespaceName, serviceName string) *certContext {
certDir, err := ioutil.TempDir("", "test-e2e-server-cert")
if err != nil {
framework.Failf("Failed to create a temp dir for cert generation %v", err)
e2elog.Failf("Failed to create a temp dir for cert generation %v", err)
}
defer os.RemoveAll(certDir)
signingKey, err := utils.NewPrivateKey()
if err != nil {
framework.Failf("Failed to create CA private key %v", err)
e2elog.Failf("Failed to create CA private key %v", err)
}
signingCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "e2e-server-cert-ca"}, signingKey)
if err != nil {
framework.Failf("Failed to create CA cert for apiserver %v", err)
e2elog.Failf("Failed to create CA cert for apiserver %v", err)
}
caCertFile, err := ioutil.TempFile(certDir, "ca.crt")
if err != nil {
framework.Failf("Failed to create a temp file for ca cert generation %v", err)
e2elog.Failf("Failed to create a temp file for ca cert generation %v", err)
}
if err := ioutil.WriteFile(caCertFile.Name(), utils.EncodeCertPEM(signingCert), 0644); err != nil {
framework.Failf("Failed to write CA cert %v", err)
e2elog.Failf("Failed to write CA cert %v", err)
}
key, err := utils.NewPrivateKey()
if err != nil {
framework.Failf("Failed to create private key for %v", err)
e2elog.Failf("Failed to create private key for %v", err)
}
signedCert, err := utils.NewSignedCert(
&cert.Config{
@ -68,25 +68,25 @@ func setupServerCert(namespaceName, serviceName string) *certContext {
key, signingCert, signingKey,
)
if err != nil {
framework.Failf("Failed to create cert%v", err)
e2elog.Failf("Failed to create cert%v", err)
}
certFile, err := ioutil.TempFile(certDir, "server.crt")
if err != nil {
framework.Failf("Failed to create a temp file for cert generation %v", err)
e2elog.Failf("Failed to create a temp file for cert generation %v", err)
}
keyFile, err := ioutil.TempFile(certDir, "server.key")
if err != nil {
framework.Failf("Failed to create a temp file for key generation %v", err)
e2elog.Failf("Failed to create a temp file for key generation %v", err)
}
if err = ioutil.WriteFile(certFile.Name(), utils.EncodeCertPEM(signedCert), 0600); err != nil {
framework.Failf("Failed to write cert file %v", err)
e2elog.Failf("Failed to write cert file %v", err)
}
privateKeyPEM, err := keyutil.MarshalPrivateKeyToPEM(key)
if err != nil {
framework.Failf("Failed to marshal key %v", err)
e2elog.Failf("Failed to marshal key %v", err)
}
if err = ioutil.WriteFile(keyFile.Name(), privateKeyPEM, 0644); err != nil {
framework.Failf("Failed to write key file %v", err)
e2elog.Failf("Failed to write key file %v", err)
}
return &certContext{
cert: utils.EncodeCertPEM(signedCert),

View File

@ -40,6 +40,7 @@ import (
"k8s.io/client-go/rest"
openapiutil "k8s.io/kube-openapi/pkg/util"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/utils/crd"
"sigs.k8s.io/yaml"
)
@ -59,7 +60,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI", func() {
ginkgo.It("works for CRD with validation schema", func() {
crd, err := setupCRD(f, schemaFoo, "foo", "v1")
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-foo")
@ -68,66 +69,66 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI", func() {
ginkgo.By("client-side validation (kubectl create and apply) allows request with known and required properties")
validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
if _, err := framework.RunKubectlInput(validCR, ns, "create", "-f", "-"); err != nil {
framework.Failf("failed to create valid CR %s: %v", validCR, err)
e2elog.Failf("failed to create valid CR %s: %v", validCR, err)
}
if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
framework.Failf("failed to delete valid CR: %v", err)
e2elog.Failf("failed to delete valid CR: %v", err)
}
if _, err := framework.RunKubectlInput(validCR, ns, "apply", "-f", "-"); err != nil {
framework.Failf("failed to apply valid CR %s: %v", validCR, err)
e2elog.Failf("failed to apply valid CR %s: %v", validCR, err)
}
if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-foo"); err != nil {
framework.Failf("failed to delete valid CR: %v", err)
e2elog.Failf("failed to delete valid CR: %v", err)
}
ginkgo.By("client-side validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema")
unknownCR := fmt.Sprintf(`{%s,"spec":{"foo":true}}`, meta)
if _, err := framework.RunKubectlInput(unknownCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) {
framework.Failf("unexpected no error when creating CR with unknown field: %v", err)
e2elog.Failf("unexpected no error when creating CR with unknown field: %v", err)
}
if _, err := framework.RunKubectlInput(unknownCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `unknown field "foo"`) {
framework.Failf("unexpected no error when applying CR with unknown field: %v", err)
e2elog.Failf("unexpected no error when applying CR with unknown field: %v", err)
}
ginkgo.By("client-side validation (kubectl create and apply) rejects request without required properties")
noRequireCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"age":"10"}]}}`, meta)
if _, err := framework.RunKubectlInput(noRequireCR, ns, "create", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) {
framework.Failf("unexpected no error when creating CR without required field: %v", err)
e2elog.Failf("unexpected no error when creating CR without required field: %v", err)
}
if _, err := framework.RunKubectlInput(noRequireCR, ns, "apply", "-f", "-"); err == nil || !strings.Contains(err.Error(), `missing required field "name"`) {
framework.Failf("unexpected no error when applying CR without required field: %v", err)
e2elog.Failf("unexpected no error when applying CR without required field: %v", err)
}
ginkgo.By("kubectl explain works to explain CR properties")
if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*Foo CRD for Testing.*FIELDS:.*apiVersion.*<string>.*APIVersion defines.*spec.*<Object>.*Specification of Foo`); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
ginkgo.By("kubectl explain works to explain CR properties recursively")
if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".metadata", `(?s)DESCRIPTION:.*Standard object's metadata.*FIELDS:.*creationTimestamp.*<string>.*CreationTimestamp is a timestamp`); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".spec", `(?s)DESCRIPTION:.*Specification of Foo.*FIELDS:.*bars.*<\[\]Object>.*List of Bars and their specs`); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural+".spec.bars", `(?s)RESOURCE:.*bars.*<\[\]Object>.*DESCRIPTION:.*List of Bars and their specs.*FIELDS:.*bazs.*<\[\]string>.*List of Bazs.*name.*<string>.*Name of Bar`); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
ginkgo.By("kubectl explain works to return error when explain is called on property that doesn't exist")
if _, err := framework.RunKubectl("explain", crd.Crd.Spec.Names.Plural+".spec.bars2"); err == nil || !strings.Contains(err.Error(), `field "bars2" does not exist`) {
framework.Failf("unexpected no error when explaining property that doesn't exist: %v", err)
e2elog.Failf("unexpected no error when explaining property that doesn't exist: %v", err)
}
if err := cleanupCRD(f, crd); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
})
ginkgo.It("works for CRD without validation schema", func() {
crd, err := setupCRD(f, nil, "empty", "v1")
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
@ -136,25 +137,25 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI", func() {
ginkgo.By("client-side validation (kubectl create and apply) allows request with any unknown properties")
randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
if _, err := framework.RunKubectlInput(randomCR, ns, "create", "-f", "-"); err != nil {
framework.Failf("failed to create random CR %s for CRD without schema: %v", randomCR, err)
e2elog.Failf("failed to create random CR %s for CRD without schema: %v", randomCR, err)
}
if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
framework.Failf("failed to delete random CR: %v", err)
e2elog.Failf("failed to delete random CR: %v", err)
}
if _, err := framework.RunKubectlInput(randomCR, ns, "apply", "-f", "-"); err != nil {
framework.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
e2elog.Failf("failed to apply random CR %s for CRD without schema: %v", randomCR, err)
}
if _, err := framework.RunKubectl(ns, "delete", crd.Crd.Spec.Names.Plural, "test-cr"); err != nil {
framework.Failf("failed to delete random CR: %v", err)
e2elog.Failf("failed to delete random CR: %v", err)
}
ginkgo.By("kubectl explain works to explain CR without validation schema")
if err := verifyKubectlExplain(crd.Crd.Spec.Names.Plural, `(?s)DESCRIPTION:.*<empty>`); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := cleanupCRD(f, crd); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
})
@ -162,26 +163,26 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI", func() {
ginkgo.By("CRs in different groups (two CRDs) show up in OpenAPI documentation")
crdFoo, err := setupCRD(f, schemaFoo, "foo", "v1")
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
crdWaldo, err := setupCRD(f, schemaWaldo, "waldo", "v1beta1")
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if crdFoo.Crd.Spec.Group == crdWaldo.Crd.Spec.Group {
framework.Failf("unexpected: CRDs should be of different group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
e2elog.Failf("unexpected: CRDs should be of different group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
}
if err := waitForDefinition(f.ClientSet, definitionName(crdWaldo, "v1beta1"), schemaWaldo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v1"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := cleanupCRD(f, crdFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := cleanupCRD(f, crdWaldo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
})
@ -189,41 +190,41 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI", func() {
ginkgo.By("CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation")
crdMultiVer, err := setupCRD(f, schemaFoo, "multi-ver", "v2", "v3")
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v3"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v2"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := cleanupCRD(f, crdMultiVer); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
ginkgo.By("CRs in the same group but different versions (two CRDs) show up in OpenAPI documentation")
crdFoo, err := setupCRD(f, schemaFoo, "common-group", "v4")
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
crdWaldo, err := setupCRD(f, schemaWaldo, "common-group", "v5")
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if crdFoo.Crd.Spec.Group != crdWaldo.Crd.Spec.Group {
framework.Failf("unexpected: CRDs should be of the same group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
e2elog.Failf("unexpected: CRDs should be of the same group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
}
if err := waitForDefinition(f.ClientSet, definitionName(crdWaldo, "v5"), schemaWaldo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v4"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := cleanupCRD(f, crdFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := cleanupCRD(f, crdWaldo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
})
@ -231,26 +232,26 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI", func() {
ginkgo.By("CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation")
crdFoo, err := setupCRD(f, schemaFoo, "common-group", "v6")
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
crdWaldo, err := setupCRD(f, schemaWaldo, "common-group", "v6")
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if crdFoo.Crd.Spec.Group != crdWaldo.Crd.Spec.Group {
framework.Failf("unexpected: CRDs should be of the same group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
e2elog.Failf("unexpected: CRDs should be of the same group %v, %v", crdFoo.Crd.Spec.Group, crdWaldo.Crd.Spec.Group)
}
if err := waitForDefinition(f.ClientSet, definitionName(crdWaldo, "v6"), schemaWaldo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := waitForDefinition(f.ClientSet, definitionName(crdFoo, "v6"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := cleanupCRD(f, crdFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := cleanupCRD(f, crdWaldo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
})
@ -258,40 +259,40 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI", func() {
ginkgo.By("set up a multi version CRD")
crdMultiVer, err := setupCRD(f, schemaFoo, "multi-ver", "v2", "v3")
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v3"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v2"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
ginkgo.By("rename a version")
patch := []byte(`{"spec":{"versions":[{"name":"v2","served":true,"storage":true},{"name":"v4","served":true,"storage":false}]}}`)
crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(crdMultiVer.Crd.Name, types.MergePatchType, patch)
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
ginkgo.By("check the new version name is served")
if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v4"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
ginkgo.By("check the old version name is removed")
if err := waitForDefinitionCleanup(f.ClientSet, definitionName(crdMultiVer, "v3")); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
ginkgo.By("check the other version is not changed")
if err := waitForDefinition(f.ClientSet, definitionName(crdMultiVer, "v2"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
// TestCrd.Versions is different from TestCrd.Crd.Versions, we have to manually
// update the name there. Used by cleanupCRD
crdMultiVer.Crd.Spec.Versions[1].Name = "v4"
if err := cleanupCRD(f, crdMultiVer); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
})
@ -299,38 +300,38 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI", func() {
ginkgo.By("set up a multi version CRD")
crd, err := setupCRD(f, schemaFoo, "multi-to-single-ver", "v5", "v6alpha1")
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
// just double check. setupCRD() checked this for us already
if err := waitForDefinition(f.ClientSet, definitionName(crd, "v6alpha1"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := waitForDefinition(f.ClientSet, definitionName(crd, "v5"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
ginkgo.By("mark a version not serverd")
crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crd.Crd.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
crd.Crd.Spec.Versions[1].Served = false
crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd.Crd)
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
ginkgo.By("check the unserved version gets removed")
if err := waitForDefinitionCleanup(f.ClientSet, definitionName(crd, "v6alpha1")); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
ginkgo.By("check the other version is not changed")
if err := waitForDefinition(f.ClientSet, definitionName(crd, "v5"), schemaFoo); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if err := cleanupCRD(f, crd); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
})
})

View File

@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
)
@ -54,24 +55,24 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("failed to load config: %v", err)
e2elog.Failf("failed to load config: %v", err)
}
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
e2elog.Failf("failed to initialize apiExtensionClient: %v", err)
}
noxuDefinition := fixtures.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
e2elog.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = fixtures.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
e2elog.Failf("failed to delete CustomResourceDefinition: %v", err)
}
}()

View File

@ -22,6 +22,7 @@ import (
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
)
@ -42,12 +43,12 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("failed to load config: %v", err)
e2elog.Failf("failed to load config: %v", err)
}
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
e2elog.Failf("failed to initialize apiExtensionClient: %v", err)
}
randomDefinition := fixtures.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
@ -55,13 +56,13 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
//create CRD and waits for the resource to be recognized and available.
randomDefinition, err = fixtures.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
e2elog.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = fixtures.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
e2elog.Failf("failed to delete CustomResourceDefinition: %v", err)
}
}()
})

View File

@ -20,6 +20,7 @@ import (
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apiserver/pkg/endpoints/discovery"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/utils/crd"
"github.com/onsi/ginkgo"
@ -49,7 +50,7 @@ var _ = SIGDescribe("Discovery", func() {
spec := testcrd.Crd.Spec
resources, err := testcrd.APIExtensionClient.Discovery().ServerResourcesForGroupVersion(spec.Group + "/" + spec.Versions[0].Name)
if err != nil {
framework.Failf("failed to find the discovery doc for %v: %v", resources, err)
e2elog.Failf("failed to find the discovery doc for %v: %v", resources, err)
}
found := false
var storageVersion string
@ -68,12 +69,12 @@ var _ = SIGDescribe("Discovery", func() {
if r.Name == spec.Names.Plural {
found = true
if r.StorageVersionHash != expected {
framework.Failf("expected storageVersionHash of %s/%s/%s to be %s, got %s", r.Group, r.Version, r.Name, expected, r.StorageVersionHash)
e2elog.Failf("expected storageVersionHash of %s/%s/%s to be %s, got %s", r.Group, r.Version, r.Name, expected, r.StorageVersionHash)
}
}
}
if !found {
framework.Failf("didn't find resource %s in the discovery doc", spec.Names.Plural)
e2elog.Failf("didn't find resource %s in the discovery doc", spec.Names.Plural)
}
})
})

View File

@ -101,7 +101,7 @@ func masterExec(cmd string) {
framework.ExpectNoError(err, "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
if result.Code != 0 {
e2essh.LogResult(result)
framework.Failf("master exec command returned non-zero")
e2elog.Failf("master exec command returned non-zero")
}
}

View File

@ -317,7 +317,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("create the rc")
rc, err := rcClient.Create(rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
e2elog.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create some pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
@ -335,13 +335,13 @@ var _ = SIGDescribe("Garbage collector", func() {
return false, nil
}); err != nil {
framework.Failf("failed to wait for the rc to create some pods: %v", err)
e2elog.Failf("failed to wait for the rc to create some pods: %v", err)
}
ginkgo.By("delete the rc")
deleteOptions := getBackgroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
e2elog.Failf("failed to delete the rc: %v", err)
}
ginkgo.By("wait for all pods to be garbage collected")
// wait for the RCs and Pods to reach the expected numbers.
@ -349,12 +349,12 @@ var _ = SIGDescribe("Garbage collector", func() {
objects := map[string]int{"ReplicationControllers": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
}); err != nil {
framework.Failf("failed to wait for all pods to be deleted: %v", err)
e2elog.Failf("failed to wait for all pods to be deleted: %v", err)
remainingPods, err := podClient.List(metav1.ListOptions{})
if err != nil {
framework.Failf("failed to list pods post mortem: %v", err)
e2elog.Failf("failed to list pods post mortem: %v", err)
} else {
framework.Failf("remaining pods are: %#v", remainingPods)
e2elog.Failf("remaining pods are: %#v", remainingPods)
}
}
gatherMetrics(f)
@ -375,7 +375,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("create the rc")
rc, err := rcClient.Create(rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
e2elog.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
@ -389,13 +389,13 @@ var _ = SIGDescribe("Garbage collector", func() {
return false, nil
}); err != nil {
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
e2elog.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
}
ginkgo.By("delete the rc")
deleteOptions := getOrphanOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
e2elog.Failf("failed to delete the rc: %v", err)
}
ginkgo.By("wait for the rc to be deleted")
// Orphaning the 100 pods takes 100 PATCH operations. The default qps of
@ -415,16 +415,16 @@ var _ = SIGDescribe("Garbage collector", func() {
}
return true, nil
}); err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
time.Sleep(30 * time.Second)
pods, err := podClient.List(metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to list pods: %v", err)
e2elog.Failf("Failed to list pods: %v", err)
}
if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
framework.Failf("expect %d pods, got %d pods", e, a)
e2elog.Failf("expect %d pods, got %d pods", e, a)
}
gatherMetrics(f)
})
@ -441,7 +441,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("create the rc")
rc, err := rcClient.Create(rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
e2elog.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create some pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
@ -454,22 +454,22 @@ var _ = SIGDescribe("Garbage collector", func() {
}
return false, nil
}); err != nil {
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
e2elog.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
}
ginkgo.By("delete the rc")
deleteOptions := &metav1.DeleteOptions{}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
e2elog.Failf("failed to delete the rc: %v", err)
}
ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
time.Sleep(30 * time.Second)
pods, err := podClient.List(metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to list pods: %v", err)
e2elog.Failf("Failed to list pods: %v", err)
}
if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
framework.Failf("expect %d pods, got %d pods", e, a)
e2elog.Failf("expect %d pods, got %d pods", e, a)
}
gatherMetrics(f)
})
@ -489,7 +489,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("create the deployment")
createdDeployment, err := deployClient.Create(deployment)
if err != nil {
framework.Failf("Failed to create deployment: %v", err)
e2elog.Failf("Failed to create deployment: %v", err)
}
// wait for deployment to create some rs
ginkgo.By("Wait for the Deployment to create new ReplicaSet")
@ -502,14 +502,14 @@ var _ = SIGDescribe("Garbage collector", func() {
})
if err != nil {
framework.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
e2elog.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
}
ginkgo.By("delete the deployment")
deleteOptions := getBackgroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the deployment: %v", err)
e2elog.Failf("failed to delete the deployment: %v", err)
}
ginkgo.By("wait for all rs to be garbage collected")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
@ -526,7 +526,7 @@ var _ = SIGDescribe("Garbage collector", func() {
errList = append(errList, fmt.Errorf("remaining rs are: %#v", remainingRSs))
}
aggregatedError := utilerrors.NewAggregate(errList)
framework.Failf("Failed to wait for all rs to be garbage collected: %v", aggregatedError)
e2elog.Failf("Failed to wait for all rs to be garbage collected: %v", aggregatedError)
}
@ -548,7 +548,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("create the deployment")
createdDeployment, err := deployClient.Create(deployment)
if err != nil {
framework.Failf("Failed to create deployment: %v", err)
e2elog.Failf("Failed to create deployment: %v", err)
}
// wait for deployment to create some rs
ginkgo.By("Wait for the Deployment to create new ReplicaSet")
@ -561,21 +561,21 @@ var _ = SIGDescribe("Garbage collector", func() {
})
if err != nil {
framework.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
e2elog.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
}
ginkgo.By("delete the deployment")
deleteOptions := getOrphanOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the deployment: %v", err)
e2elog.Failf("failed to delete the deployment: %v", err)
}
ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the rs")
time.Sleep(30 * time.Second)
objects := map[string]int{"Deployments": 0, "ReplicaSets": 1, "Pods": 2}
ok, err := verifyRemainingObjects(f, objects)
if err != nil {
framework.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err)
e2elog.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err)
}
if !ok {
errList := make([]error, 0)
@ -592,15 +592,15 @@ var _ = SIGDescribe("Garbage collector", func() {
errList = append(errList, fmt.Errorf("remaining deployment's post mortem: %#v", remainingDSs))
}
aggregatedError := utilerrors.NewAggregate(errList)
framework.Failf("Failed to verify remaining deployments, rs, and pods: %v", aggregatedError)
e2elog.Failf("Failed to verify remaining deployments, rs, and pods: %v", aggregatedError)
}
rs, err := clientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to list ReplicaSet %v", err)
e2elog.Failf("Failed to list ReplicaSet %v", err)
}
for _, replicaSet := range rs.Items {
if metav1.GetControllerOf(&replicaSet.ObjectMeta) != nil {
framework.Failf("Found ReplicaSet with non nil ownerRef %v", replicaSet)
e2elog.Failf("Found ReplicaSet with non nil ownerRef %v", replicaSet)
}
}
@ -622,7 +622,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("create the rc")
rc, err := rcClient.Create(rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
e2elog.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
@ -635,13 +635,13 @@ var _ = SIGDescribe("Garbage collector", func() {
}
return false, nil
}); err != nil {
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
e2elog.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
}
ginkgo.By("delete the rc")
deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
e2elog.Failf("failed to delete the rc: %v", err)
}
ginkgo.By("wait for the rc to be deleted")
// default client QPS is 20, deleting each pod requires 2 requests, so 30s should be enough
@ -671,22 +671,22 @@ var _ = SIGDescribe("Garbage collector", func() {
}); err != nil {
pods, err2 := podClient.List(metav1.ListOptions{})
if err2 != nil {
framework.Failf("%v", err2)
e2elog.Failf("%v", err2)
}
e2elog.Logf("%d remaining pods are:", len(pods.Items))
e2elog.Logf("The ObjectMeta of the remaining pods are:")
for _, pod := range pods.Items {
e2elog.Logf("%#v", pod.ObjectMeta)
}
framework.Failf("failed to delete the rc: %v", err)
e2elog.Failf("failed to delete the rc: %v", err)
}
// There shouldn't be any pods
pods, err := podClient.List(metav1.ListOptions{})
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if len(pods.Items) != 0 {
framework.Failf("expected no pods, got %#v", pods)
e2elog.Failf("expected no pods, got %#v", pods)
}
gatherMetrics(f)
})
@ -709,7 +709,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("create the rc1")
rc1, err := rcClient.Create(rc1)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
e2elog.Failf("Failed to create replication controller: %v", err)
}
rc2Name := "simpletest-rc-to-stay"
uniqLabelsStay := getUniqLabel("gctest_s", "valid_and_pending_owners_s")
@ -717,7 +717,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("create the rc2")
rc2, err = rcClient.Create(rc2)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
e2elog.Failf("Failed to create replication controller: %v", err)
}
// wait for rc1 to be stable
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
@ -730,7 +730,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
return false, nil
}); err != nil {
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
e2elog.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
}
ginkgo.By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
pods, err := podClient.List(metav1.ListOptions{})
@ -746,7 +746,7 @@ var _ = SIGDescribe("Garbage collector", func() {
deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc1.UID))
if err := rcClient.Delete(rc1.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
e2elog.Failf("failed to delete the rc: %v", err)
}
ginkgo.By("wait for the rc to be deleted")
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
@ -773,30 +773,30 @@ var _ = SIGDescribe("Garbage collector", func() {
}); err != nil {
pods, err2 := podClient.List(metav1.ListOptions{})
if err2 != nil {
framework.Failf("%v", err2)
e2elog.Failf("%v", err2)
}
e2elog.Logf("%d remaining pods are:", len(pods.Items))
e2elog.Logf("ObjectMeta of remaining pods are:")
for _, pod := range pods.Items {
e2elog.Logf("%#v", pod.ObjectMeta)
}
framework.Failf("failed to delete rc %s, err: %v", rc1Name, err)
e2elog.Failf("failed to delete rc %s, err: %v", rc1Name, err)
}
// half of the pods should still exist,
pods, err = podClient.List(metav1.ListOptions{})
if err != nil {
framework.Failf("%v", err)
e2elog.Failf("%v", err)
}
if len(pods.Items) != halfReplicas {
framework.Failf("expected %d pods, got %d", halfReplicas, len(pods.Items))
e2elog.Failf("expected %d pods, got %d", halfReplicas, len(pods.Items))
}
for _, pod := range pods.Items {
if pod.ObjectMeta.DeletionTimestamp != nil {
framework.Failf("expected pod DeletionTimestamp to be nil, got %#v", pod.ObjectMeta)
e2elog.Failf("expected pod DeletionTimestamp to be nil, got %#v", pod.ObjectMeta)
}
// they should only have 1 ownerReference left
if len(pod.ObjectMeta.OwnerReferences) != 1 {
framework.Failf("expected pod to only have 1 owner, got %#v", pod.ObjectMeta.OwnerReferences)
e2elog.Failf("expected pod to only have 1 owner, got %#v", pod.ObjectMeta.OwnerReferences)
}
}
gatherMetrics(f)
@ -859,19 +859,19 @@ var _ = SIGDescribe("Garbage collector", func() {
return false, nil
}); err != nil {
e2elog.Logf("pods are %#v", pods.Items)
framework.Failf("failed to wait for all pods to be deleted: %v", err)
e2elog.Failf("failed to wait for all pods to be deleted: %v", err)
}
})
ginkgo.It("should support cascading deletion of custom resources", func() {
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("failed to load config: %v", err)
e2elog.Failf("failed to load config: %v", err)
}
apiExtensionClient, err := apiextensionsclientset.NewForConfig(config)
if err != nil {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
e2elog.Failf("failed to initialize apiExtensionClient: %v", err)
}
// Create a random custom resource definition and ensure it's available for
@ -880,12 +880,12 @@ var _ = SIGDescribe("Garbage collector", func() {
defer func() {
err = apiextensionstestserver.DeleteCustomResourceDefinition(definition, apiExtensionClient)
if err != nil && !errors.IsNotFound(err) {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
e2elog.Failf("failed to delete CustomResourceDefinition: %v", err)
}
}()
definition, err = apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
e2elog.Failf("failed to create CustomResourceDefinition: %v", err)
}
// Get a client for the custom resource.
@ -907,7 +907,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
e2elog.Failf("failed to create owner resource %q: %v", ownerName, err)
}
e2elog.Logf("created owner resource %q", ownerName)
@ -932,7 +932,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
persistedDependent, err := resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
e2elog.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
e2elog.Logf("created dependent resource %q", dependentName)
@ -940,7 +940,7 @@ var _ = SIGDescribe("Garbage collector", func() {
background := metav1.DeletePropagationBackground
err = resourceClient.Delete(ownerName, &metav1.DeleteOptions{PropagationPolicy: &background})
if err != nil {
framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
e2elog.Failf("failed to delete owner resource %q: %v", ownerName, err)
}
// Ensure the dependent is deleted.
@ -950,16 +950,16 @@ var _ = SIGDescribe("Garbage collector", func() {
}); err != nil {
e2elog.Logf("owner: %#v", persistedOwner)
e2elog.Logf("dependent: %#v", persistedDependent)
framework.Failf("failed waiting for dependent resource %q to be deleted", dependentName)
e2elog.Failf("failed waiting for dependent resource %q to be deleted", dependentName)
}
// Ensure the owner is deleted.
_, err = resourceClient.Get(ownerName, metav1.GetOptions{})
if err == nil {
framework.Failf("expected owner resource %q to be deleted", ownerName)
e2elog.Failf("expected owner resource %q to be deleted", ownerName)
} else {
if !errors.IsNotFound(err) {
framework.Failf("unexpected error getting owner resource %q: %v", ownerName, err)
e2elog.Failf("unexpected error getting owner resource %q: %v", ownerName, err)
}
}
})
@ -967,12 +967,12 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.It("should support orphan deletion of custom resources", func() {
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("failed to load config: %v", err)
e2elog.Failf("failed to load config: %v", err)
}
apiExtensionClient, err := apiextensionsclientset.NewForConfig(config)
if err != nil {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
e2elog.Failf("failed to initialize apiExtensionClient: %v", err)
}
// Create a random custom resource definition and ensure it's available for
@ -981,12 +981,12 @@ var _ = SIGDescribe("Garbage collector", func() {
defer func() {
err = apiextensionstestserver.DeleteCustomResourceDefinition(definition, apiExtensionClient)
if err != nil && !errors.IsNotFound(err) {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
e2elog.Failf("failed to delete CustomResourceDefinition: %v", err)
}
}()
definition, err = apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
e2elog.Failf("failed to create CustomResourceDefinition: %v", err)
}
// Get a client for the custom resource.
@ -1008,7 +1008,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
e2elog.Failf("failed to create owner resource %q: %v", ownerName, err)
}
e2elog.Logf("created owner resource %q", ownerName)
@ -1033,14 +1033,14 @@ var _ = SIGDescribe("Garbage collector", func() {
}
_, err = resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
e2elog.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
e2elog.Logf("created dependent resource %q", dependentName)
// Delete the owner and orphan the dependent.
err = resourceClient.Delete(ownerName, getOrphanOptions())
if err != nil {
framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
e2elog.Failf("failed to delete owner resource %q: %v", ownerName, err)
}
ginkgo.By("wait for the owner to be deleted")
@ -1054,7 +1054,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
return true, nil
}); err != nil {
framework.Failf("timeout in waiting for the owner to be deleted: %v", err)
e2elog.Failf("timeout in waiting for the owner to be deleted: %v", err)
}
// Wait 30s and ensure the dependent is not deleted.
@ -1063,7 +1063,7 @@ var _ = SIGDescribe("Garbage collector", func() {
_, err := resourceClient.Get(dependentName, metav1.GetOptions{})
return false, err
}); err != nil && err != wait.ErrWaitTimeout {
framework.Failf("failed to ensure the dependent is not deleted: %v", err)
e2elog.Failf("failed to ensure the dependent is not deleted: %v", err)
}
})
@ -1084,12 +1084,12 @@ var _ = SIGDescribe("Garbage collector", func() {
return len(jobs.Items) > 0, nil
})
if err != nil {
framework.Failf("Failed to wait for the CronJob to create some Jobs: %v", err)
e2elog.Failf("Failed to wait for the CronJob to create some Jobs: %v", err)
}
ginkgo.By("Delete the cronjob")
if err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Delete(cronJob.Name, getBackgroundOptions()); err != nil {
framework.Failf("Failed to delete the CronJob: %v", err)
e2elog.Failf("Failed to delete the CronJob: %v", err)
}
ginkgo.By("Verify if cronjob does not leave jobs nor pods behind")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
@ -1097,7 +1097,7 @@ var _ = SIGDescribe("Garbage collector", func() {
return verifyRemainingObjects(f, objects)
})
if err != nil {
framework.Failf("Failed to wait for all jobs and pods to be deleted: %v", err)
e2elog.Failf("Failed to wait for all jobs and pods to be deleted: %v", err)
}
gatherMetrics(f)

View File

@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -70,10 +71,10 @@ func observeCreation(w watch.Interface) {
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
framework.Failf("Failed to observe the creation: %v", event)
e2elog.Failf("Failed to observe the creation: %v", event)
}
case <-time.After(30 * time.Second):
framework.Failf("Timeout while waiting for observing the creation")
e2elog.Failf("Timeout while waiting for observing the creation")
}
}
@ -94,7 +95,7 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool)
}
}
if !updated {
framework.Failf("Failed to observe pod update")
e2elog.Failf("Failed to observe pod update")
}
return
}
@ -113,7 +114,7 @@ var _ = SIGDescribe("Generated clientset", func() {
options := metav1.ListOptions{LabelSelector: selector}
pods, err := podClient.List(options)
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
e2elog.Failf("Failed to query for pods: %v", err)
}
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
@ -122,13 +123,13 @@ var _ = SIGDescribe("Generated clientset", func() {
}
w, err := podClient.Watch(options)
if err != nil {
framework.Failf("Failed to set up watch: %v", err)
e2elog.Failf("Failed to set up watch: %v", err)
}
ginkgo.By("creating the pod")
pod, err = podClient.Create(pod)
if err != nil {
framework.Failf("Failed to create pod: %v", err)
e2elog.Failf("Failed to create pod: %v", err)
}
ginkgo.By("verifying the pod is in kubernetes")
@ -138,7 +139,7 @@ var _ = SIGDescribe("Generated clientset", func() {
}
pods, err = podClient.List(options)
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
e2elog.Failf("Failed to query for pods: %v", err)
}
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
@ -152,7 +153,7 @@ var _ = SIGDescribe("Generated clientset", func() {
ginkgo.By("deleting the pod gracefully")
gracePeriod := int64(31)
if err := podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil {
framework.Failf("Failed to delete pod: %v", err)
e2elog.Failf("Failed to delete pod: %v", err)
}
ginkgo.By("verifying the deletionTimestamp and deletionGracePeriodSeconds of the pod is set")
@ -229,7 +230,7 @@ var _ = SIGDescribe("Generated clientset", func() {
options := metav1.ListOptions{LabelSelector: selector}
cronJobs, err := cronJobClient.List(options)
if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err)
e2elog.Failf("Failed to query for cronJobs: %v", err)
}
gomega.Expect(len(cronJobs.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
@ -238,13 +239,13 @@ var _ = SIGDescribe("Generated clientset", func() {
}
w, err := cronJobClient.Watch(options)
if err != nil {
framework.Failf("Failed to set up watch: %v", err)
e2elog.Failf("Failed to set up watch: %v", err)
}
ginkgo.By("creating the cronJob")
cronJob, err = cronJobClient.Create(cronJob)
if err != nil {
framework.Failf("Failed to create cronJob: %v", err)
e2elog.Failf("Failed to create cronJob: %v", err)
}
ginkgo.By("verifying the cronJob is in kubernetes")
@ -254,7 +255,7 @@ var _ = SIGDescribe("Generated clientset", func() {
}
cronJobs, err = cronJobClient.List(options)
if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err)
e2elog.Failf("Failed to query for cronJobs: %v", err)
}
gomega.Expect(len(cronJobs.Items)).To(gomega.Equal(1))
@ -265,13 +266,13 @@ var _ = SIGDescribe("Generated clientset", func() {
// Use DeletePropagationBackground so the CronJob is really gone when the call returns.
propagationPolicy := metav1.DeletePropagationBackground
if err := cronJobClient.Delete(cronJob.Name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
framework.Failf("Failed to delete cronJob: %v", err)
e2elog.Failf("Failed to delete cronJob: %v", err)
}
options = metav1.ListOptions{LabelSelector: selector}
cronJobs, err = cronJobClient.List(options)
if err != nil {
framework.Failf("Failed to list cronJobs to verify deletion: %v", err)
e2elog.Failf("Failed to list cronJobs to verify deletion: %v", err)
}
gomega.Expect(len(cronJobs.Items)).To(gomega.Equal(0))
})

View File

@ -216,7 +216,7 @@ var _ = SIGDescribe("Watchers", func() {
expectEvent(testWatchBroken, watch.Added, testConfigMap)
lastEvent, ok := waitForEvent(testWatchBroken, watch.Modified, nil, 1*time.Minute)
if !ok {
framework.Failf("Timed out waiting for second watch notification")
e2elog.Failf("Timed out waiting for second watch notification")
}
testWatchBroken.Stop()
@ -229,7 +229,7 @@ var _ = SIGDescribe("Watchers", func() {
ginkgo.By("creating a new watch on configmaps from the last resource version observed by the first watch")
lastEventConfigMap, ok := lastEvent.Object.(*v1.ConfigMap)
if !ok {
framework.Failf("Expected last notification to refer to a configmap but got: %v", lastEvent)
e2elog.Failf("Expected last notification to refer to a configmap but got: %v", lastEvent)
}
testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue)
framework.ExpectNoError(err, "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)
@ -352,7 +352,7 @@ var _ = SIGDescribe("Watchers", func() {
for _, wc := range wcs[1:] {
e := waitForNextConfigMapEvent(wc)
if resourceVersion != e.ResourceVersion {
framework.Failf("resource version mismatch, expected %s but got %s", resourceVersion, e.ResourceVersion)
e2elog.Failf("resource version mismatch, expected %s but got %s", resourceVersion, e.ResourceVersion)
}
}
}
@ -396,13 +396,13 @@ func setConfigMapData(cm *v1.ConfigMap, key, value string) {
func expectEvent(w watch.Interface, eventType watch.EventType, object runtime.Object) {
if event, ok := waitForEvent(w, eventType, object, 1*time.Minute); !ok {
framework.Failf("Timed out waiting for expected watch notification: %v", event)
e2elog.Failf("Timed out waiting for expected watch notification: %v", event)
}
}
func expectNoEvent(w watch.Interface, eventType watch.EventType, object runtime.Object) {
if event, ok := waitForEvent(w, eventType, object, 10*time.Second); ok {
framework.Failf("Unexpected watch notification observed: %v", event)
e2elog.Failf("Unexpected watch notification observed: %v", event)
}
}
@ -415,7 +415,7 @@ func waitForEvent(w watch.Interface, expectType watch.EventType, expectObject ru
if ok {
e2elog.Logf("Got : %v %v", actual.Type, actual.Object)
} else {
framework.Failf("Watch closed unexpectedly")
e2elog.Failf("Watch closed unexpectedly")
}
if expectType == actual.Type && (expectObject == nil || apiequality.Semantic.DeepEqual(expectObject, actual.Object)) {
return actual, true
@ -436,9 +436,9 @@ func waitForNextConfigMapEvent(watch watch.Interface) *v1.ConfigMap {
if configMap, ok := event.Object.(*v1.ConfigMap); ok {
return configMap
}
framework.Failf("expected config map")
e2elog.Failf("expected config map")
case <-time.After(10 * time.Second):
framework.Failf("timed out waiting for watch event")
e2elog.Failf("timed out waiting for watch event")
}
return nil // should never happen
}
@ -486,7 +486,7 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa
framework.ExpectNoError(err, "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns)
existing = append(existing[:idx], existing[idx+1:]...)
default:
framework.Failf("Unsupported event operation: %d", op)
e2elog.Failf("Unsupported event operation: %d", op)
}
select {
case <-stopc:

View File

@ -615,7 +615,7 @@ func testMutatingConfigMapWebhook(f *framework.Framework) {
"mutation-stage-2": "yes",
}
if !reflect.DeepEqual(expectedConfigMapData, mutatedConfigMap.Data) {
framework.Failf("\nexpected %#v\n, got %#v\n", expectedConfigMapData, mutatedConfigMap.Data)
e2elog.Failf("\nexpected %#v\n, got %#v\n", expectedConfigMapData, mutatedConfigMap.Data)
}
}
@ -668,13 +668,13 @@ func testMutatingPodWebhook(f *framework.Framework) {
mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(configMap)
gomega.Expect(err).To(gomega.BeNil())
if len(mutatedPod.Spec.InitContainers) != 1 {
framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers)
e2elog.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers)
}
if got, expected := mutatedPod.Spec.InitContainers[0].Name, "webhook-added-init-container"; got != expected {
framework.Failf("expect the init container name to be %q, got %q", expected, got)
e2elog.Failf("expect the init container name to be %q, got %q", expected, got)
}
if got, expected := mutatedPod.Spec.InitContainers[0].TerminationMessagePolicy, v1.TerminationMessageReadFile; got != expected {
framework.Failf("expect the init terminationMessagePolicy to be default to %q, got %q", expected, got)
e2elog.Failf("expect the init terminationMessagePolicy to be default to %q, got %q", expected, got)
}
}
@ -703,11 +703,11 @@ func testWebhook(f *framework.Framework) {
framework.ExpectError(err, "create pod %s in namespace %s should have been denied by webhook", pod.Name, f.Namespace.Name)
expectedErrMsg1 := "the pod contains unwanted container name"
if !strings.Contains(err.Error(), expectedErrMsg1) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
}
expectedErrMsg2 := "the pod contains unwanted label"
if !strings.Contains(err.Error(), expectedErrMsg2) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg2, err.Error())
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg2, err.Error())
}
ginkgo.By("create a pod that causes the webhook to hang")
@ -718,7 +718,7 @@ func testWebhook(f *framework.Framework) {
framework.ExpectError(err, "create pod %s in namespace %s should have caused webhook to hang", pod.Name, f.Namespace.Name)
expectedTimeoutErr := "request did not complete within"
if !strings.Contains(err.Error(), expectedTimeoutErr) {
framework.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error())
e2elog.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error())
}
ginkgo.By("create a configmap that should be denied by the webhook")
@ -728,7 +728,7 @@ func testWebhook(f *framework.Framework) {
framework.ExpectError(err, "create configmap %s in namespace %s should have been denied by the webhook", configmap.Name, f.Namespace.Name)
expectedErrMsg := "the configmap contains unwanted key and value"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
ginkgo.By("create a configmap that should be admitted by the webhook")
@ -754,7 +754,7 @@ func testWebhook(f *framework.Framework) {
_, err = updateConfigMap(client, f.Namespace.Name, allowedConfigMapName, toNonCompliantFn)
framework.ExpectError(err, "update (PUT) admitted configmap %s in namespace %s to a non-compliant one should be rejected by webhook", allowedConfigMapName, f.Namespace.Name)
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
ginkgo.By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook")
@ -762,7 +762,7 @@ func testWebhook(f *framework.Framework) {
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(allowedConfigMapName, types.StrategicMergePatchType, []byte(patch))
framework.ExpectError(err, "update admitted configmap %s in namespace %s by strategic merge patch to a non-compliant one should be rejected by webhook. Patch: %+v", allowedConfigMapName, f.Namespace.Name, patch)
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
ginkgo.By("create a namespace that bypass the webhook")
@ -794,7 +794,7 @@ func testBlockingConfigmapDeletion(f *framework.Framework) {
framework.ExpectError(err, "deleting configmap %s in namespace: %s should be denied", configmap.Name, f.Namespace.Name)
expectedErrMsg1 := "the configmap cannot be deleted because it contains unwanted key and value"
if !strings.Contains(err.Error(), expectedErrMsg1) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
}
ginkgo.By("remove the offending key and value from the configmap data")
@ -827,7 +827,7 @@ func testAttachingPodWebhook(f *framework.Framework) {
_, err = framework.NewKubectlCommand("attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
framework.ExpectError(err, "'kubectl attach' the pod, should be denied by the webhook")
if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
e2elog.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
}
}
@ -917,7 +917,7 @@ func testFailClosedWebhook(f *framework.Framework) {
_, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap)
framework.ExpectError(err, "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName)
if !errors.IsInternalError(err) {
framework.Failf("expect an internal error, got %#v", err)
e2elog.Failf("expect an internal error, got %#v", err)
}
}
@ -1073,7 +1073,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework) {
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", dummyValidatingWebhookConfigName, namespace)
if mutatedValidatingWebhookConfiguration.ObjectMeta.Labels != nil && mutatedValidatingWebhookConfiguration.ObjectMeta.Labels[addedLabelKey] == addedLabelValue {
framework.Failf("expected %s not to be mutated by mutating webhooks but it was", dummyValidatingWebhookConfigName)
e2elog.Failf("expected %s not to be mutated by mutating webhooks but it was", dummyValidatingWebhookConfigName)
}
// The webhook configuration is honored in 10s.
@ -1121,7 +1121,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework) {
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", dummyMutatingWebhookConfigName, namespace)
if mutatedMutatingWebhookConfiguration.ObjectMeta.Labels != nil && mutatedMutatingWebhookConfiguration.ObjectMeta.Labels[addedLabelKey] == addedLabelValue {
framework.Failf("expected %s not to be mutated by mutating webhooks but it was", dummyMutatingWebhookConfigName)
e2elog.Failf("expected %s not to be mutated by mutating webhooks but it was", dummyMutatingWebhookConfigName)
}
// The webhook configuration is honored in 10s.
@ -1411,7 +1411,7 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1
framework.ExpectError(err, "create custom resource %s in namespace %s should be denied by webhook", crInstanceName, f.Namespace.Name)
expectedErrMsg := "the custom resource contains unwanted data"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
}
@ -1439,7 +1439,7 @@ func testBlockingCustomResourceDeletion(f *framework.Framework, crd *apiextensio
framework.ExpectError(err, "deleting custom resource %s in namespace: %s should be denied", crInstanceName, f.Namespace.Name)
expectedErrMsg1 := "the custom resource cannot be deleted because it contains unwanted key and value"
if !strings.Contains(err.Error(), expectedErrMsg1) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
}
ginkgo.By("Remove the offending key and value from the custom resource data")
@ -1485,7 +1485,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension
expectedCRData["mutation-stage-2"] = "yes"
}
if !reflect.DeepEqual(expectedCRData, mutatedCR.Object["data"]) {
framework.Failf("\nexpected %#v\n, got %#v\n", expectedCRData, mutatedCR.Object["data"])
e2elog.Failf("\nexpected %#v\n, got %#v\n", expectedCRData, mutatedCR.Object["data"])
}
}
@ -1583,12 +1583,12 @@ func testCRDDenyWebhook(f *framework.Framework) {
// Creating a custom resource definition for use by assorted tests.
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("failed to load config: %v", err)
e2elog.Failf("failed to load config: %v", err)
return
}
apiExtensionClient, err := crdclientset.NewForConfig(config)
if err != nil {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
e2elog.Failf("failed to initialize apiExtensionClient: %v", err)
return
}
crd := &apiextensionsv1beta1.CustomResourceDefinition{
@ -1616,7 +1616,7 @@ func testCRDDenyWebhook(f *framework.Framework) {
framework.ExpectError(err, "create custom resource definition %s should be denied by webhook", crd.Name)
expectedErrMsg := "the crd contains unwanted label"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
}
@ -1688,7 +1688,7 @@ func testSlowWebhookTimeoutFailEarly(f *framework.Framework) {
framework.ExpectError(err, "create configmap in namespace %s should have timed-out reaching slow webhook", f.Namespace.Name)
expectedErrMsg := `/always-allow-delay-5s?timeout=1s: context deadline exceeded`
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
e2elog.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
}

View File

@ -6,6 +6,7 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/log",
visibility = ["//visibility:public"],
deps = [
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)

View File

@ -21,6 +21,8 @@ import (
"time"
"github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)
func nowStamp() string {
@ -35,3 +37,16 @@ func log(level string, format string, args ...interface{}) {
// Logf logs the given message at INFO level, prefixed with a timestamp
// (see log/nowStamp). Arguments are formatted per fmt.Sprintf rules.
func Logf(format string, args ...interface{}) {
	log("INFO", format, args...)
}
// Failf logs the failure message and aborts the current ginkgo test via
// ginkgowrapper.Fail. The offset of 1 makes the reported call site be
// Failf's caller rather than this wrapper itself.
func Failf(format string, args ...interface{}) {
	FailfWithOffset(1, format, args...)
}
// FailfWithOffset calls "Fail" and logs the error at "offset" levels above its caller
// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f").
// The message is formatted once with fmt.Sprintf, then both logged and passed to
// ginkgowrapper.Fail with a timestamp prefix.
func FailfWithOffset(offset int, format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	// Log through an explicit %s verb: msg is already fully formatted, and
	// passing it as the format string would re-interpret any literal '%' it
	// contains (e.g. from quoted error text), yielding %!v(MISSING) noise.
	log("INFO", "%s", msg)
	ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}