e2e: use log functions of core framework on pv, testfiles and volume sub packages

SataQiu 2019-11-27 14:18:18 +08:00
parent 4a8205b6fd
commit 4325e8a452
8 changed files with 31 additions and 37 deletions
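
The change is mechanical: call sites that imported the dedicated log sub-package now use the same helpers exposed directly by the core framework package. A minimal before/after sketch of the pattern, mirroring the DeletePersistentVolume hunk below:

    // Before: logging went through the e2elog alias.
    import e2elog "k8s.io/kubernetes/test/e2e/framework/log"

    e2elog.Logf("Deleting PersistentVolume %q", pvName)

    // After: the core framework package provides the same function.
    import "k8s.io/kubernetes/test/e2e/framework"

    framework.Logf("Deleting PersistentVolume %q", pvName)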

View File

@@ -68,7 +68,6 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/auth:go_default_library",
 "//test/e2e/framework/kubectl:go_default_library",
-"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/metrics:go_default_library",
 "//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",

View File

@@ -39,7 +39,6 @@ import (
 commontest "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/manifest"
@@ -86,7 +85,7 @@ func RunE2ETests(t *testing.T) {
 logs.InitLogs()
 defer logs.FlushLogs()
-gomega.RegisterFailHandler(e2elog.Fail)
+gomega.RegisterFailHandler(framework.Fail)
 // Disable skipped tests unless they are explicitly requested.
 if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
 config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
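
The handler swap above works because gomega accepts any function matching its fail-handler signature, func(message string, callerSkip ...int), and framework.Fail satisfies it just as e2elog.Fail did. A minimal sketch of that contract (myFail is a hypothetical handler, not part of this commit):

    package main

    import "github.com/onsi/gomega"

    // myFail matches gomega's fail-handler signature; once registered,
    // every failed assertion is routed through it.
    func myFail(message string, callerSkip ...int) {
        panic(message) // a real handler reports to the test runner instead
    }

    func main() {
        gomega.RegisterFailHandler(myFail)
        gomega.Expect(1).To(gomega.Equal(2)) // fails, triggering myFail
    }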

View File

@@ -16,7 +16,6 @@ go_library(
 "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//test/e2e/framework:go_default_library",
-"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
 ],

View File

@@ -31,7 +31,6 @@ import (
 storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
 "k8s.io/kubernetes/pkg/volume/util"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
@@ -143,7 +142,7 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
 errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
 }
 } else {
-e2elog.Logf("pvc is nil")
+framework.Logf("pvc is nil")
 }
 if pv != nil {
 err := DeletePersistentVolume(c, pv.Name)
@@ -151,7 +150,7 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
 errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
 }
 } else {
-e2elog.Logf("pv is nil")
+framework.Logf("pv is nil")
 }
 return errs
 }
@@ -185,7 +184,7 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa
 // DeletePersistentVolume deletes the PV.
 func DeletePersistentVolume(c clientset.Interface, pvName string) error {
 if c != nil && len(pvName) > 0 {
-e2elog.Logf("Deleting PersistentVolume %q", pvName)
+framework.Logf("Deleting PersistentVolume %q", pvName)
 err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
 if err != nil && !apierrs.IsNotFound(err) {
 return fmt.Errorf("PV Delete API error: %v", err)
@@ -197,7 +196,7 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error {
 // DeletePersistentVolumeClaim deletes the Claim.
 func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error {
 if c != nil && len(pvcName) > 0 {
-e2elog.Logf("Deleting PersistentVolumeClaim %q", pvcName)
+framework.Logf("Deleting PersistentVolumeClaim %q", pvcName)
 err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
 if err != nil && !apierrs.IsNotFound(err) {
 return fmt.Errorf("PVC Delete API error: %v", err)
@@ -211,14 +210,14 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
 // phase value to expect for the pv bound to the to-be-deleted claim.
 func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
 pvname := pvc.Spec.VolumeName
-e2elog.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
+framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
 err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
 if err != nil {
 return err
 }
 // Wait for the PV's phase to return to be `expectPVPhase`
-e2elog.Logf("Waiting for reclaim process to complete.")
+framework.Logf("Waiting for reclaim process to complete.")
 err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, PVReclaimingTimeout)
 if err != nil {
 return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
@@ -243,7 +242,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
 }
 }
-e2elog.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
+framework.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
 return nil
 }
@@ -360,7 +359,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
 if preBind {
 preBindMsg = " pre-bound"
 }
-e2elog.Logf("Creating a PV followed by a%s PVC", preBindMsg)
+framework.Logf("Creating a PV followed by a%s PVC", preBindMsg)
 // make the pv and pvc definitions
 pv := MakePersistentVolume(pvConfig)
@@ -433,7 +432,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
 // WaitOnPVandPVC waits for the pv and pvc to bind to each other.
 func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
 // Wait for newly created PVC to bind to the PV
-e2elog.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
+framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
 err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, ClaimBindingTimeout)
 if err != nil {
 return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
@@ -489,8 +488,8 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
 for pvName := range pvols {
 err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, PVBindingTimeout)
 if err != nil && len(pvols) > len(claims) {
-e2elog.Logf("WARN: pv %v is not bound after max wait", pvName)
-e2elog.Logf(" This may be ok since there are more pvs than pvcs")
+framework.Logf("WARN: pv %v is not bound after max wait", pvName)
+framework.Logf(" This may be ok since there are more pvs than pvcs")
 continue
 }
 if err != nil {
@@ -604,7 +603,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
 }
 if cfg.VolumeMode != nil && *cfg.VolumeMode == "" {
-e2elog.Logf("Warning: Making PVC: VolumeMode specified as invalid empty string, treating as nil")
+framework.Logf("Warning: Making PVC: VolumeMode specified as invalid empty string, treating as nil")
 cfg.VolumeMode = nil
 }
@@ -634,10 +633,10 @@ func createPDWithRetry(zone string) (string, error) {
 for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
 newDiskName, err = createPD(zone)
 if err != nil {
-e2elog.Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
+framework.Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
 continue
 }
-e2elog.Logf("Successfully created a new PD: %q.", newDiskName)
+framework.Logf("Successfully created a new PD: %q.", newDiskName)
 return newDiskName, nil
 }
 return "", err
@@ -659,10 +658,10 @@ func DeletePDWithRetry(diskName string) error {
 for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
 err = deletePD(diskName)
 if err != nil {
-e2elog.Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err)
+framework.Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err)
 continue
 }
-e2elog.Logf("Successfully deleted PD %q.", diskName)
+framework.Logf("Successfully deleted PD %q.", diskName)
 return nil
 }
 return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
@@ -710,18 +709,18 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
 // WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
 func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
-e2elog.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
+framework.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
 pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
 if err != nil {
-e2elog.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
+framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
 continue
 }
 if pv.Status.Phase == phase {
-e2elog.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
+framework.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
 return nil
 }
-e2elog.Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
+framework.Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
 }
 return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
 }
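
A typical call site for the wait helper above, sketched with the names visible in this file (client and pvName are assumed to be in scope):

    // Poll every framework.Poll until the PV is Bound or the binding
    // timeout expires, failing the test on timeout.
    if err := WaitForPersistentVolumePhase(v1.VolumeBound, client, pvName, framework.Poll, PVBindingTimeout); err != nil {
        framework.Failf("PV %s never reached phase %s: %v", pvName, v1.VolumeBound, err)
    }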
@@ -737,22 +736,22 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c
 if len(pvcNames) == 0 {
 return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0")
 }
-e2elog.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
+framework.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
 phaseFoundInAllClaims := true
 for _, pvcName := range pvcNames {
 pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
 if err != nil {
-e2elog.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
+framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
 continue
 }
 if pvc.Status.Phase == phase {
-e2elog.Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
+framework.Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
 if matchAny {
 return nil
 }
 } else {
-e2elog.Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
+framework.Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
 phaseFoundInAllClaims = false
 }
 }
@@ -808,7 +807,7 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
 if len(scName) == 0 {
 return "", fmt.Errorf("No default storage class found")
 }
-e2elog.Logf("Default storage class: %q", scName)
+framework.Logf("Default storage class: %q", scName)
 return scName, nil
 }
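
Taken together, the helpers in this file support a create, bind, then delete-and-verify flow. A sketch of that sequence (the full CreatePVPVC signature is assumed from its use here; c, ns, and the config structs are in scope):

    pv, pvc, err := CreatePVPVC(c, pvConfig, pvcConfig, ns, false /* preBind */)
    if err != nil {
        framework.Failf("PV/PVC creation failed: %v", err)
    }
    if err := WaitOnPVandPVC(c, ns, pv, pvc); err != nil {
        framework.Failf("PV/PVC never bound: %v", err)
    }
    if err := DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased); err != nil {
        framework.Failf("reclaim verification failed: %v", err)
    }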

View File

@@ -5,7 +5,7 @@ go_library(
 srcs = ["testfiles.go"],
 importpath = "k8s.io/kubernetes/test/e2e/framework/testfiles",
 visibility = ["//visibility:public"],
-deps = ["//test/e2e/framework/log:go_default_library"],
+deps = ["//test/e2e/framework:go_default_library"],
 )
 filegroup(

View File

@@ -34,7 +34,7 @@ import (
 "sort"
 "strings"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+"k8s.io/kubernetes/test/e2e/framework"
 )
 var filesources []FileSource
@@ -73,7 +73,7 @@ type FileSource interface {
 func ReadOrDie(filePath string) []byte {
 data, err := Read(filePath)
 if err != nil {
-e2elog.Fail(err.Error(), 1)
+framework.Fail(err.Error(), 1)
 }
 return data
 }
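
With this change, a failed read in ReadOrDie aborts the spec through framework.Fail, the same handler registered with gomega in e2e.go. Hypothetical usage from a test (the manifest path is an example):

    // Load a manifest bundled with the e2e sources; a read failure fails
    // the current spec via the registered fail handler.
    data := testfiles.ReadOrDie("test/e2e/testing-manifests/sample.yaml")
    framework.Logf("read %d bytes", len(data))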
@@ -110,7 +110,7 @@ func Exists(filePath string) bool {
 for _, filesource := range filesources {
 data, err := filesource.ReadTestFile(filePath)
 if err != nil {
-e2elog.Fail(fmt.Sprintf("fatal error looking for test file %s: %s", filePath, err), 1)
+framework.Fail(fmt.Sprintf("fatal error looking for test file %s: %s", filePath, err), 1)
 }
 if data != nil {
 return true

View File

@@ -13,7 +13,6 @@ go_library(
 "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//test/e2e/framework:go_default_library",
-"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/pv:go_default_library",
 "//test/e2e/storage/utils:go_default_library",

View File

@@ -51,7 +51,6 @@ import (
 "k8s.io/apimachinery/pkg/labels"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -419,7 +418,7 @@ func CleanUpVolumeServerWithSecret(f *framework.Framework, serverPod *v1.Pod, se
 }
 }
-e2elog.Logf("Deleting server pod %q...", serverPod.Name)
+framework.Logf("Deleting server pod %q...", serverPod.Name)
 err := e2epod.DeletePodWithWait(cs, serverPod)
 if err != nil {
 framework.Logf("Server pod delete failed: %v", err)