Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 11:21:47 +00:00)
Merge pull request #41373 from msau42/e2e-pvutil
Automatic merge from submit-queue (batch tested with PRs 38957, 41819, 41851, 40667, 41373)

Move pvutil.go from e2e package to framework package

**What this PR does / why we need it**: This PR moves pvutil.go into the e2e/framework package. I am working on a PV upgrade test and would like to use some of the wrapper functions in pvutil.go. However, the upgrade test lives in the upgrade package, not the e2e package, and it cannot import the e2e package because that would create a circular dependency. Moving pvutil.go out of e2e breaks that circular dependency. This is a refactoring/rename only; no logic has been modified.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*:

**Special notes for your reviewer**:

**Release note**:

NONE
This commit is contained in:
commit 9cbaff9e0f
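For context, here is a minimal sketch (not part of this commit) of how a test outside the e2e package — such as the planned PV upgrade test — could consume the framework helpers once they are exported as shown in the diff below. The package name, namespace, and NFS server address are illustrative assumptions; only the framework identifiers come from this change.

```go
package upgrades // hypothetical consumer package; anything other than e2e can now import these helpers

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// exercisePV is an illustrative helper: it builds a PV/PVC pair from the
// exported framework helpers, waits for them to bind, and then cleans up.
func exercisePV(c clientset.Interface, ns, nfsServerIP string) {
	pvConfig := framework.PersistentVolumeConfig{
		NamePrefix: "upgrade-nfs-", // assumed prefix, for illustration only
		PVSource: v1.PersistentVolumeSource{
			NFS: &v1.NFSVolumeSource{
				Server: nfsServerIP,
				Path:   "/exports",
			},
		},
		Prebind: nil,
	}

	// CreatePVPVC and WaitOnPVandPVC were previously private to the e2e package.
	pv, pvc := framework.CreatePVPVC(c, pvConfig, ns, false)
	framework.WaitOnPVandPVC(c, ns, pv, pvc)

	// PVPVCCleanup deletes the claim first, then the volume.
	framework.PVPVCCleanup(c, ns, pv, pvc)
}
```

Because these helpers assert with gomega's Expect internally, the sketch assumes it runs inside a Ginkgo spec, as the existing e2e tests do.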
@@ -79,7 +79,6 @@ go_library(
"proxy.go",
"pv_reclaimpolicy.go",
"pvc_label_selector.go",
"pvutil.go",
"rc.go",
"reboot.go",
"replica_set.go",
@@ -25,6 +25,7 @@ go_library(
"nodes_util.go",
"perf_util.go",
"pods.go",
"pv_util.go",
"resource_usage_gatherer.go",
"service_util.go",
"statefulset_utils.go",
@@ -69,6 +70,7 @@ go_library(
"//pkg/util/labels:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//test/e2e/perftype:go_default_library",
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e
package framework

import (
"fmt"
@@ -30,7 +30,6 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/test/e2e/framework"
)

// Map of all PVs used in the multi pv-pvc tests. The key is the PV's name, which is
@@ -38,7 +37,7 @@ import (
// in the PV's name and if it is present. We must always Get the pv object before
// referencing any of its values, eg its ClaimRef.
type pvval struct{}
type pvmap map[string]pvval
type PVMap map[string]pvval

// Map of all PVCs used in the multi pv-pvc tests. The key is "namespace/pvc.Name". The
// value is {} (empty struct) since we're only interested in the PVC's name and if it is
@@ -47,7 +46,7 @@ type pvmap map[string]pvval
// Note: It's unsafe to add keys to a map in a loop. Their insertion in the map is
// unpredictable and can result in the same key being iterated over again.
type pvcval struct{}
type pvcmap map[types.NamespacedName]pvcval
type PVCMap map[types.NamespacedName]pvcval

// Configuration for a persistent volume. To create PVs for varying storage options (NFS, ceph, glusterFS, etc.)
// define the pvSource as below. prebind holds a pre-bound PVC if there is one.
@@ -56,50 +55,37 @@ type pvcmap map[types.NamespacedName]pvcval
// ...
// },
// }
type persistentVolumeConfig struct {
pvSource v1.PersistentVolumeSource
prebind *v1.PersistentVolumeClaim
reclaimPolicy v1.PersistentVolumeReclaimPolicy
namePrefix string
}

// Delete the nfs-server pod. Only done once per KubeDescription().
func nfsServerPodCleanup(c clientset.Interface, config VolumeTestConfig) {
defer GinkgoRecover()

podClient := c.Core().Pods(config.namespace)

if config.serverImage != "" {
podName := config.prefix + "-server"
err := podClient.Delete(podName, nil)
Expect(err).NotTo(HaveOccurred())
}
type PersistentVolumeConfig struct {
PVSource v1.PersistentVolumeSource
Prebind *v1.PersistentVolumeClaim
ReclaimPolicy v1.PersistentVolumeReclaimPolicy
NamePrefix string
}

// Clean up a pv and pvc in a single pv/pvc test case.
func pvPvcCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
deletePersistentVolumeClaim(c, pvc.Name, ns)
deletePersistentVolume(c, pv.Name)
func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
DeletePersistentVolumeClaim(c, pvc.Name, ns)
DeletePersistentVolume(c, pv.Name)
}

// Clean up pvs and pvcs in multi-pv-pvc test cases. All entries found in the pv and
// claims maps are deleted.
func pvPvcMapCleanup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap) {
func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap) {
for pvcKey := range claims {
deletePersistentVolumeClaim(c, pvcKey.Name, ns)
DeletePersistentVolumeClaim(c, pvcKey.Name, ns)
delete(claims, pvcKey)
}

for pvKey := range pvols {
deletePersistentVolume(c, pvKey)
DeletePersistentVolume(c, pvKey)
delete(pvols, pvKey)
}
}

// Delete the PV.
func deletePersistentVolume(c clientset.Interface, pvName string) {
func DeletePersistentVolume(c clientset.Interface, pvName string) {
if c != nil && len(pvName) > 0 {
framework.Logf("Deleting PersistentVolume %v", pvName)
Logf("Deleting PersistentVolume %v", pvName)
err := c.Core().PersistentVolumes().Delete(pvName, nil)
if err != nil && !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
@@ -108,9 +94,9 @@ func deletePersistentVolume(c clientset.Interface, pvName string) {
}

// Delete the Claim
func deletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) {
func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) {
if c != nil && len(pvcName) > 0 {
framework.Logf("Deleting PersistentVolumeClaim %v", pvcName)
Logf("Deleting PersistentVolumeClaim %v", pvcName)
err := c.Core().PersistentVolumeClaims(ns).Delete(pvcName, nil)
if err != nil && !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
@@ -121,19 +107,19 @@ func deletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
// Delete the PVC and wait for the PV to enter its expected phase. Validate that the PV
// has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
// phase value to expect for the pv bound to the to-be-deleted claim.
func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) {
func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) {

pvname := pvc.Spec.VolumeName
framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
deletePersistentVolumeClaim(c, pvc.Name, ns)
Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
DeletePersistentVolumeClaim(c, pvc.Name, ns)

// Check that the PVC is really deleted.
pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
Expect(apierrs.IsNotFound(err)).To(BeTrue())

// Wait for the PV's phase to return to be `expectPVPhase`
framework.Logf("Waiting for reclaim process to complete.")
err = framework.WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, 1*time.Second, 300*time.Second)
Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, 1*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())

// examine the pv's ClaimRef and UID and compare to expected values
@@ -149,7 +135,7 @@ func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
Expect(cr.UID).NotTo(BeEmpty())
}

framework.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
}

// Wraps deletePVCandValidatePV() by calling the function in a loop over the PV map. Only
@@ -157,7 +143,7 @@ func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
// Bound).
// Note: if there are more claims than pvs then some of the remaining claims will bind to
// the just-made-available pvs.
func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap, expectPVPhase v1.PersistentVolumePhase) {
func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) {

var boundPVs, deletedPVCs int

@@ -175,7 +161,7 @@ func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap,
Expect(found).To(BeTrue())
pvc, err := c.Core().PersistentVolumeClaims(ns).Get(cr.Name, metav1.GetOptions{})
Expect(apierrs.IsNotFound(err)).To(BeFalse())
deletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase)
DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase)
delete(claims, pvcKey)
deletedPVCs++
}
@@ -192,7 +178,7 @@ func createPV(c clientset.Interface, pv *v1.PersistentVolume) *v1.PersistentVolu
}

// create the PVC resource. Fails test on error.
func createPVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {

pvc, err := c.Core().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())
@@ -205,22 +191,22 @@ func createPVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim)
// Note: in the pre-bind case the real PVC name, which is generated, is not
// known until after the PVC is instantiated. This is why the pvc is created
// before the pv.
func createPVCPV(c clientset.Interface, pvConfig persistentVolumeConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {

var preBindMsg string

// make the pvc definition first
pvc := makePersistentVolumeClaim(ns)
pvc := MakePersistentVolumeClaim(ns)
if preBind {
preBindMsg = " pre-bound"
pvConfig.prebind = pvc
pvConfig.Prebind = pvc
}
// make the pv spec
pv := makePersistentVolume(pvConfig)

By(fmt.Sprintf("Creating a PVC followed by a%s PV", preBindMsg))
// instantiate the pvc
pvc = createPVC(c, ns, pvc)
pvc = CreatePVC(c, ns, pvc)

// instantiate the pv, handle pre-binding by ClaimRef if needed
if preBind {
@@ -238,17 +224,17 @@ func createPVCPV(c clientset.Interface, pvConfig persistentVolumeConfig, ns stri
// Note: in the pre-bind case the real PV name, which is generated, is not
// known until after the PV is instantiated. This is why the pv is created
// before the pvc.
func createPVPVC(c clientset.Interface, pvConfig persistentVolumeConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {

preBindMsg := ""
if preBind {
preBindMsg = " pre-bound"
}
framework.Logf("Creating a PV followed by a%s PVC", preBindMsg)
Logf("Creating a PV followed by a%s PVC", preBindMsg)

// make the pv and pvc definitions
pv := makePersistentVolume(pvConfig)
pvc := makePersistentVolumeClaim(ns)
pvc := MakePersistentVolumeClaim(ns)

// instantiate the pv
pv = createPV(c, pv)
@@ -256,7 +242,7 @@ func createPVPVC(c clientset.Interface, pvConfig persistentVolumeConfig, ns stri
if preBind {
pvc.Spec.VolumeName = pv.Name
}
pvc = createPVC(c, ns, pvc)
pvc = CreatePVC(c, ns, pvc)

return pv, pvc
}
@@ -264,13 +250,13 @@ func createPVPVC(c clientset.Interface, pvConfig persistentVolumeConfig, ns stri
// Create the desired number of PVs and PVCs and return them in separate maps. If the
// number of PVs != the number of PVCs then the min of those two counts is the number of
// PVs expected to bind.
func createPVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConfig persistentVolumeConfig) (pvmap, pvcmap) {
func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConfig PersistentVolumeConfig) (PVMap, PVCMap) {

var i int
var pv *v1.PersistentVolume
var pvc *v1.PersistentVolumeClaim
pvMap := make(pvmap, numpvs)
pvcMap := make(pvcmap, numpvcs)
pvMap := make(PVMap, numpvs)
pvcMap := make(PVCMap, numpvcs)

var extraPVs, extraPVCs int
extraPVs = numpvs - numpvcs
@@ -282,7 +268,7 @@ func createPVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf

// create pvs and pvcs
for i = 0; i < pvsToCreate; i++ {
pv, pvc = createPVPVC(c, pvConfig, ns, false)
pv, pvc = CreatePVPVC(c, pvConfig, ns, false)
pvMap[pv.Name] = pvval{}
pvcMap[makePvcKey(ns, pvc.Name)] = pvcval{}
}
@@ -294,8 +280,8 @@ func createPVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
pvMap[pv.Name] = pvval{}
}
for i = 0; i < extraPVCs; i++ {
pvc = makePersistentVolumeClaim(ns)
pvc = createPVC(c, ns, pvc)
pvc = MakePersistentVolumeClaim(ns)
pvc = CreatePVC(c, ns, pvc)
pvcMap[makePvcKey(ns, pvc.Name)] = pvcval{}
}

@@ -303,16 +289,16 @@ func createPVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
}

// Wait for the pv and pvc to bind to each other.
func waitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {

// Wait for newly created PVC to bind to the PV
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())

// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
err = framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())

// Re-get the pv and pvc objects
@@ -337,7 +323,7 @@ func waitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
// to situations where the maximum wait times are reached several times in succession,
// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
// small.
func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pvcmap, testExpected bool) {
func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, testExpected bool) {

var actualBinds int
expectedBinds := len(pvols)
@@ -346,10 +332,10 @@ func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pv
}

for pvName := range pvols {
err := framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, 3*time.Second, 180*time.Second)
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, 3*time.Second, 180*time.Second)
if err != nil && len(pvols) > len(claims) {
framework.Logf("WARN: pv %v is not bound after max wait", pvName)
framework.Logf(" This may be ok since there are more pvs than pvcs")
Logf("WARN: pv %v is not bound after max wait", pvName)
Logf(" This may be ok since there are more pvs than pvcs")
continue
}
Expect(err).NotTo(HaveOccurred())
@@ -363,7 +349,7 @@ func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pv
_, found := claims[pvcKey]
Expect(found).To(BeTrue())

err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
err = WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
Expect(err).NotTo(HaveOccurred())
actualBinds++
}
@@ -378,19 +364,19 @@ func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pv
func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) {

By("Pod should terminate with exitcode 0 (success)")
err := framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
err := WaitForPodSuccessInNamespace(c, pod.Name, ns)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Pod %v succeeded ", pod.Name)
Logf("Pod %v succeeded ", pod.Name)
}

// Deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func deletePodWithWait(f *framework.Framework, c clientset.Interface, pod *v1.Pod) {
func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) {

if pod == nil {
return
}
framework.Logf("Deleting pod %v", pod.Name)
Logf("Deleting pod %v", pod.Name)
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, nil)
if err != nil {
if apierrs.IsNotFound(err) {
@@ -403,34 +389,34 @@ func deletePodWithWait(f *framework.Framework, c clientset.Interface, pod *v1.Po
err = f.WaitForPodTerminated(pod.Name, "")
Expect(err).To(HaveOccurred())
if !apierrs.IsNotFound(err) {
framework.Logf("Error! Expected IsNotFound error deleting pod %q, instead got: %v", pod.Name, err)
Logf("Error! Expected IsNotFound error deleting pod %q, instead got: %v", pod.Name, err)
Expect(apierrs.IsNotFound(err)).To(BeTrue())
}
framework.Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
}
// Create the test pod, wait for (hopefully) success, and then delete the pod.
func createWaitAndDeletePod(f *framework.Framework, c clientset.Interface, ns string, claimName string) {
func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, claimName string) {

framework.Logf("Creating nfs test pod")
Logf("Creating nfs test pod")

// Make pod spec
pod := makeWritePod(ns, claimName)
pod := MakeWritePod(ns, claimName)

// Instantiate pod (Create)
runPod, err := c.Core().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(runPod).NotTo(BeNil())

defer deletePodWithWait(f, c, runPod)
defer DeletePodWithWait(f, c, runPod)

// Wait for the test pod to complete its lifecycle
testPodSuccessOrFail(c, ns, runPod)
}

// Sanity check for GCE testing. Verify the persistent disk attached to the node.
func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
gceCloud, err := getGCECloud()
func VerifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
gceCloud, err := GetGCECloud()
Expect(err).NotTo(HaveOccurred())
isAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
Expect(err).NotTo(HaveOccurred())
@@ -450,33 +436,33 @@ func makePvcKey(ns, name string) types.NamespacedName {
// (instantiated) and thus the PV's ClaimRef cannot be completely filled-in in
// this func. Therefore, the ClaimRef's name is added later in
// createPVCPV.
func makePersistentVolume(pvConfig persistentVolumeConfig) *v1.PersistentVolume {
func makePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume {
// Specs are expected to match this test's PersistentVolumeClaim

var claimRef *v1.ObjectReference
// If the reclaimPolicy is not provided, assume Retain
if pvConfig.reclaimPolicy == "" {
pvConfig.reclaimPolicy = v1.PersistentVolumeReclaimRetain
if pvConfig.ReclaimPolicy == "" {
pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRetain
}
if pvConfig.prebind != nil {
if pvConfig.Prebind != nil {
claimRef = &v1.ObjectReference{
Name: pvConfig.prebind.Name,
Namespace: pvConfig.prebind.Namespace,
Name: pvConfig.Prebind.Name,
Namespace: pvConfig.Prebind.Namespace,
}
}
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.namePrefix,
GenerateName: pvConfig.NamePrefix,
Annotations: map[string]string{
volumehelper.VolumeGidAnnotationKey: "777",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: pvConfig.reclaimPolicy,
PersistentVolumeReclaimPolicy: pvConfig.ReclaimPolicy,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
PersistentVolumeSource: pvConfig.pvSource,
PersistentVolumeSource: pvConfig.PVSource,
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
@@ -489,9 +475,9 @@ func makePersistentVolume(pvConfig persistentVolumeConfig) *v1.PersistentVolume

// Returns a PVC definition based on the namespace.
// Note: if this PVC is intended to be pre-bound to a PV, whose name is not
// known until the PV is instantiated, then the func createPVPVC will add
// known until the PV is instantiated, then the func CreatePVPVC will add
// pvc.Spec.VolumeName to this claim.
func makePersistentVolumeClaim(ns string) *v1.PersistentVolumeClaim {
func MakePersistentVolumeClaim(ns string) *v1.PersistentVolumeClaim {
// Specs are expected to match this test's PersistentVolume

return &v1.PersistentVolumeClaim{
@@ -519,13 +505,13 @@ func makePersistentVolumeClaim(ns string) *v1.PersistentVolumeClaim {

// Returns a pod definition based on the namespace. The pod references the PVC's
// name.
func makeWritePod(ns string, pvcName string) *v1.Pod {
return makePod(ns, pvcName, "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')")
func MakeWritePod(ns string, pvcName string) *v1.Pod {
return MakePod(ns, pvcName, "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')")
}

// Returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func makePod(ns string, pvcName string, command ...string) *v1.Pod {
func MakePod(ns string, pvcName string, command ...string) *v1.Pod {

if len(command) == 0 {
command = []string{"while true; do sleep 1; done"}
@@ -574,13 +560,13 @@ func makePod(ns string, pvcName string, command ...string) *v1.Pod {
}

// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
func createClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
clientPod := makePod(ns, pvc.Name)
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
clientPod := MakePod(ns, pvc.Name)
clientPod, err := c.Core().Pods(ns).Create(clientPod)
Expect(err).NotTo(HaveOccurred())

// Verify the pod is running before returning it
err = framework.WaitForPodRunningInNamespace(c, clientPod)
err = WaitForPodRunningInNamespace(c, clientPod)
Expect(err).NotTo(HaveOccurred())
clientPod, err = c.Core().Pods(ns).Get(clientPod.Name, metav1.GetOptions{})
Expect(apierrs.IsNotFound(err)).To(BeFalse())
@@ -4594,6 +4594,14 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
return string(logs), err
}

func GetGCECloud() (*gcecloud.GCECloud, error) {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
}
return gceCloud, nil
}

// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
@@ -4604,9 +4612,9 @@ func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
}

func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
project := TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
@@ -5094,9 +5102,9 @@ func (p *E2ETestNodePreparer) CleanupNodes() error {
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
func CleanupGCEResources(loadBalancerName string) (retErr error) {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
if err := gceCloud.DeleteFirewall(loadBalancerName); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
@ -415,8 +415,8 @@ var _ = framework.KubeDescribe("kubelet", func() {
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
deletePodWithWait(f, c, pod)
|
||||
deletePodWithWait(f, c, nfsServerPod)
|
||||
framework.DeletePodWithWait(f, c, pod)
|
||||
framework.DeletePodWithWait(f, c, nfsServerPod)
|
||||
})
|
||||
|
||||
// execute It blocks from above table of tests
|
||||
@ -427,11 +427,11 @@ var _ = framework.KubeDescribe("kubelet", func() {
|
||||
pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd)
|
||||
|
||||
By("Delete the NFS server pod")
|
||||
deletePodWithWait(f, c, nfsServerPod)
|
||||
framework.DeletePodWithWait(f, c, nfsServerPod)
|
||||
nfsServerPod = nil
|
||||
|
||||
By("Delete the pod mounted to the NFS volume")
|
||||
deletePodWithWait(f, c, pod)
|
||||
framework.DeletePodWithWait(f, c, pod)
|
||||
// pod object is now stale, but is intentionally not nil
|
||||
|
||||
By("Check if host running deleted pod has been cleaned up -- expect not")
|
||||
|
@@ -589,7 +589,7 @@ func createPD() (string, error) {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(uuid.NewUUID()))

gceCloud, err := getGCECloud()
gceCloud, err := framework.GetGCECloud()
if err != nil {
return "", err
}
@@ -624,7 +624,7 @@ func createPD() (string, error) {

func deletePD(pdName string) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
gceCloud, err := getGCECloud()
gceCloud, err := framework.GetGCECloud()
if err != nil {
return err
}
@@ -663,7 +663,7 @@ func deletePD(pdName string) error {

func detachPD(nodeName types.NodeName, pdName string) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
gceCloud, err := getGCECloud()
gceCloud, err := framework.GetGCECloud()
if err != nil {
return err
}
@@ -771,7 +771,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
func waitForPDDetach(diskName string, nodeName types.NodeName) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
gceCloud, err := getGCECloud()
gceCloud, err := framework.GetGCECloud()
if err != nil {
return err
}
@@ -798,16 +798,6 @@ func waitForPDDetach(diskName string, nodeName types.NodeName) error {
return nil
}

func getGCECloud() (*gcecloud.GCECloud, error) {
gceCloud, ok := framework.TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)

if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", framework.TestContext.CloudConfig.Provider)
}

return gceCloud, nil
}

func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
for _, host := range hosts {
framework.Logf("Detaching GCE PD %q from node %q.", diskName, host)
@ -54,7 +54,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Disruptive][Flaky]",
|
||||
c clientset.Interface
|
||||
ns string
|
||||
nfsServerPod *v1.Pod
|
||||
nfsPVconfig persistentVolumeConfig
|
||||
nfsPVconfig framework.PersistentVolumeConfig
|
||||
nfsServerIP, clientNodeIP string
|
||||
clientNode *v1.Node
|
||||
)
|
||||
@ -72,9 +72,9 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Disruptive][Flaky]",
|
||||
framework.Logf("[BeforeEach] Configuring PersistentVolume")
|
||||
nfsServerIP = nfsServerPod.Status.PodIP
|
||||
Expect(nfsServerIP).NotTo(BeEmpty())
|
||||
nfsPVconfig = persistentVolumeConfig{
|
||||
namePrefix: "nfs-",
|
||||
pvSource: v1.PersistentVolumeSource{
|
||||
nfsPVconfig = framework.PersistentVolumeConfig{
|
||||
NamePrefix: "nfs-",
|
||||
PVSource: v1.PersistentVolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{
|
||||
Server: nfsServerIP,
|
||||
Path: "/exports",
|
||||
@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Disruptive][Flaky]",
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
deletePodWithWait(f, c, nfsServerPod)
|
||||
framework.DeletePodWithWait(f, c, nfsServerPod)
|
||||
})
|
||||
|
||||
Context("when kubelet restarts", func() {
|
||||
@ -175,7 +175,7 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew
|
||||
|
||||
By("Restarting the kubelet.")
|
||||
kubeletCommand(kStop, c, clientPod)
|
||||
deletePodWithWait(f, c, clientPod)
|
||||
framework.DeletePodWithWait(f, c, clientPod)
|
||||
kubeletCommand(kStart, c, clientPod)
|
||||
|
||||
By("Expecting the volume mount not to be found.")
|
||||
@ -187,9 +187,9 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew
|
||||
}
|
||||
|
||||
// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed by the test
|
||||
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig persistentVolumeConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
|
||||
pv, pvc := createPVPVC(c, pvConfig, ns, false)
|
||||
pod := makePod(ns, pvc.Name)
|
||||
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
|
||||
pv, pvc := framework.CreatePVPVC(c, pvConfig, ns, false)
|
||||
pod := framework.MakePod(ns, pvc.Name)
|
||||
pod.Spec.NodeName = nodeName
|
||||
framework.Logf("Creating nfs client Pod %s on node %s", pod.Name, nodeName)
|
||||
pod, err := c.Core().Pods(ns).Create(pod)
|
||||
@ -208,9 +208,9 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig persis
|
||||
|
||||
// tearDownTestCase destroy resources created by initTestCase.
|
||||
func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, pod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
|
||||
deletePodWithWait(f, c, pod)
|
||||
deletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
deletePersistentVolume(c, pv.Name)
|
||||
framework.DeletePodWithWait(f, c, pod)
|
||||
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
framework.DeletePersistentVolume(c, pv.Name)
|
||||
}
|
||||
|
||||
// kubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod.
|
||||
|
@ -38,7 +38,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
|
||||
pv *v1.PersistentVolume
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
clientPod *v1.Pod
|
||||
pvConfig persistentVolumeConfig
|
||||
pvConfig framework.PersistentVolumeConfig
|
||||
vsp *vsphere.VSphere
|
||||
err error
|
||||
node types.NodeName
|
||||
@ -69,23 +69,23 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
|
||||
if volumePath == "" {
|
||||
volumePath, err = createVSphereVolume(vsp, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
pvConfig = persistentVolumeConfig{
|
||||
namePrefix: "vspherepv-",
|
||||
pvSource: v1.PersistentVolumeSource{
|
||||
pvConfig = framework.PersistentVolumeConfig{
|
||||
NamePrefix: "vspherepv-",
|
||||
PVSource: v1.PersistentVolumeSource{
|
||||
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
|
||||
VolumePath: volumePath,
|
||||
FSType: "ext4",
|
||||
},
|
||||
},
|
||||
prebind: nil,
|
||||
Prebind: nil,
|
||||
}
|
||||
}
|
||||
By("Creating the PV and PVC")
|
||||
pv, pvc = createPVPVC(c, pvConfig, ns, false)
|
||||
waitOnPVandPVC(c, ns, pv, pvc)
|
||||
pv, pvc = framework.CreatePVPVC(c, pvConfig, ns, false)
|
||||
framework.WaitOnPVandPVC(c, ns, pv, pvc)
|
||||
|
||||
By("Creating the Client Pod")
|
||||
clientPod = createClientPod(c, ns, pvc)
|
||||
clientPod = framework.CreateClientPod(c, ns, pvc)
|
||||
node := types.NodeName(clientPod.Spec.NodeName)
|
||||
|
||||
By("Verify disk should be attached to the node")
|
||||
@ -100,15 +100,15 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
|
||||
if clientPod != nil {
|
||||
clientPod, err = c.CoreV1().Pods(ns).Get(clientPod.Name, metav1.GetOptions{})
|
||||
if !apierrs.IsNotFound(err) {
|
||||
deletePodWithWait(f, c, clientPod)
|
||||
framework.DeletePodWithWait(f, c, clientPod)
|
||||
}
|
||||
}
|
||||
|
||||
if pv != nil {
|
||||
deletePersistentVolume(c, pv.Name)
|
||||
framework.DeletePersistentVolume(c, pv.Name)
|
||||
}
|
||||
if pvc != nil {
|
||||
deletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
}
|
||||
}
|
||||
})
|
||||
@ -136,7 +136,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
|
||||
|
||||
It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() {
|
||||
By("Deleting the Claim")
|
||||
deletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
|
||||
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
|
||||
if !apierrs.IsNotFound(err) {
|
||||
@ -144,7 +144,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
|
||||
}
|
||||
pvc = nil
|
||||
By("Deleting the Pod")
|
||||
deletePodWithWait(f, c, clientPod)
|
||||
framework.DeletePodWithWait(f, c, clientPod)
|
||||
|
||||
})
|
||||
|
||||
@ -157,13 +157,13 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
|
||||
*/
|
||||
It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() {
|
||||
By("Deleting the Persistent Volume")
|
||||
deletePersistentVolume(c, pv.Name)
|
||||
framework.DeletePersistentVolume(c, pv.Name)
|
||||
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
|
||||
if !apierrs.IsNotFound(err) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
pv = nil
|
||||
By("Deleting the pod")
|
||||
deletePodWithWait(f, c, clientPod)
|
||||
framework.DeletePodWithWait(f, c, clientPod)
|
||||
})
|
||||
})
|
||||
|
@@ -35,16 +35,16 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *

// 1. verify that the PV and PVC have bound correctly
By("Validating the PV-PVC binding")
waitOnPVandPVC(c, ns, pv, pvc)
framework.WaitOnPVandPVC(c, ns, pv, pvc)

// 2. create the nfs writer pod, test if the write was successful,
// then delete the pod and verify that it was deleted
By("Checking pod has write access to PersistentVolume")
createWaitAndDeletePod(f, c, ns, pvc.Name)
framework.CreateWaitAndDeletePod(f, c, ns, pvc.Name)

// 3. delete the PVC, wait for PV to become "Released"
By("Deleting the PVC to invoke the reclaim policy.")
deletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased)
framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased)
}

// Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
@@ -52,7 +52,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
// Note: the PV is deleted in the AfterEach, not here.
// Note: this func is serialized, we wait for each pod to be deleted before creating the
// next pod. Adding concurrency is a TODO item.
func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols pvmap, claims pvcmap, expectPhase v1.PersistentVolumePhase) {
func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols framework.PVMap, claims framework.PVCMap, expectPhase v1.PersistentVolumePhase) {

// 1. verify each PV permits write access to a client pod
By("Checking pod has write access to PersistentVolumes")
@@ -66,22 +66,22 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
_, found := pvols[pvc.Spec.VolumeName]
Expect(found).To(BeTrue())
// TODO: currently a serialized test of each PV
createWaitAndDeletePod(f, c, pvcKey.Namespace, pvcKey.Name)
framework.CreateWaitAndDeletePod(f, c, pvcKey.Namespace, pvcKey.Name)
}

// 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
By("Deleting PVCs to invoke recycler")
deletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase)
framework.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase)
}

// Creates a PV, PVC, and ClientPod that will run until killed by test or clean up.
func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig persistentVolumeConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig framework.PersistentVolumeConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
By("Creating the PV and PVC")
pv, pvc := createPVPVC(c, pvConfig, ns, isPrebound)
waitOnPVandPVC(c, ns, pv, pvc)
pv, pvc := framework.CreatePVPVC(c, pvConfig, ns, isPrebound)
framework.WaitOnPVandPVC(c, ns, pv, pvc)

By("Creating the Client Pod")
clientPod := createClientPod(c, ns, pvc)
clientPod := framework.CreateClientPod(c, ns, pvc)
return clientPod, pv, pvc
}
@ -119,7 +119,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
var (
|
||||
nfsServerPod *v1.Pod
|
||||
serverIP string
|
||||
pvConfig persistentVolumeConfig
|
||||
pvConfig framework.PersistentVolumeConfig
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
@ -127,9 +127,9 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
nfsServerPod = initNFSserverPod(c, ns)
|
||||
serverIP = nfsServerPod.Status.PodIP
|
||||
framework.Logf("[BeforeEach] Configuring PersistentVolume")
|
||||
pvConfig = persistentVolumeConfig{
|
||||
namePrefix: "nfs-",
|
||||
pvSource: v1.PersistentVolumeSource{
|
||||
pvConfig = framework.PersistentVolumeConfig{
|
||||
NamePrefix: "nfs-",
|
||||
PVSource: v1.PersistentVolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{
|
||||
Server: serverIP,
|
||||
Path: "/exports",
|
||||
@ -140,7 +140,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
deletePodWithWait(f, c, nfsServerPod)
|
||||
framework.DeletePodWithWait(f, c, nfsServerPod)
|
||||
})
|
||||
|
||||
Context("with Single PV - PVC pairs", func() {
|
||||
@ -151,7 +151,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
// Note: this is the only code where the pv is deleted.
|
||||
AfterEach(func() {
|
||||
framework.Logf("AfterEach: Cleaning up test resources.")
|
||||
pvPvcCleanup(c, ns, pv, pvc)
|
||||
framework.PVPVCCleanup(c, ns, pv, pvc)
|
||||
})
|
||||
|
||||
// Individual tests follow:
|
||||
@ -160,7 +160,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
// contains the claim. Verify that the PV and PVC bind correctly, and
|
||||
// that the pod can write to the nfs volume.
|
||||
It("should create a non-pre-bound PV and PVC: test write access ", func() {
|
||||
pv, pvc = createPVPVC(c, pvConfig, ns, false)
|
||||
pv, pvc = framework.CreatePVPVC(c, pvConfig, ns, false)
|
||||
completeTest(f, c, ns, pv, pvc)
|
||||
})
|
||||
|
||||
@ -168,7 +168,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
// pod that contains the claim. Verify that the PV and PVC bind
|
||||
// correctly, and that the pod can write to the nfs volume.
|
||||
It("create a PVC and non-pre-bound PV: test write access", func() {
|
||||
pv, pvc = createPVCPV(c, pvConfig, ns, false)
|
||||
pv, pvc = framework.CreatePVCPV(c, pvConfig, ns, false)
|
||||
completeTest(f, c, ns, pv, pvc)
|
||||
})
|
||||
|
||||
@ -176,7 +176,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
// and a pod that contains the claim. Verify that the PV and PVC bind
|
||||
// correctly, and that the pod can write to the nfs volume.
|
||||
It("create a PVC and a pre-bound PV: test write access", func() {
|
||||
pv, pvc = createPVCPV(c, pvConfig, ns, true)
|
||||
pv, pvc = framework.CreatePVCPV(c, pvConfig, ns, true)
|
||||
completeTest(f, c, ns, pv, pvc)
|
||||
})
|
||||
|
||||
@ -184,7 +184,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
// and a pod that contains the claim. Verify that the PV and PVC bind
|
||||
// correctly, and that the pod can write to the nfs volume.
|
||||
It("create a PV and a pre-bound PVC: test write access", func() {
|
||||
pv, pvc = createPVPVC(c, pvConfig, ns, true)
|
||||
pv, pvc = framework.CreatePVPVC(c, pvConfig, ns, true)
|
||||
completeTest(f, c, ns, pv, pvc)
|
||||
})
|
||||
})
|
||||
@ -204,20 +204,20 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
const maxNumPVs = 10
|
||||
const maxNumPVCs = 10
|
||||
// create the pv and pvc maps to be reused in the It blocks
|
||||
pvols := make(pvmap, maxNumPVs)
|
||||
claims := make(pvcmap, maxNumPVCs)
|
||||
pvols := make(framework.PVMap, maxNumPVs)
|
||||
claims := make(framework.PVCMap, maxNumPVCs)
|
||||
|
||||
AfterEach(func() {
|
||||
framework.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
|
||||
pvPvcMapCleanup(c, ns, pvols, claims)
|
||||
framework.PVPVCMapCleanup(c, ns, pvols, claims)
|
||||
})
|
||||
|
||||
// Create 2 PVs and 4 PVCs.
|
||||
// Note: PVs are created before claims and no pre-binding
|
||||
It("should create 2 PVs and 4 PVCs: test write access", func() {
|
||||
numPVs, numPVCs := 2, 4
|
||||
pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, pvConfig)
|
||||
waitAndVerifyBinds(c, ns, pvols, claims, true)
|
||||
pvols, claims = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig)
|
||||
framework.WaitAndVerifyBinds(c, ns, pvols, claims, true)
|
||||
completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)
|
||||
})
|
||||
|
||||
@ -225,8 +225,8 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
// Note: PVs are created before claims and no pre-binding
|
||||
It("should create 3 PVs and 3 PVCs: test write access", func() {
|
||||
numPVs, numPVCs := 3, 3
|
||||
pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, pvConfig)
|
||||
waitAndVerifyBinds(c, ns, pvols, claims, true)
|
||||
pvols, claims = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig)
|
||||
framework.WaitAndVerifyBinds(c, ns, pvols, claims, true)
|
||||
completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)
|
||||
})
|
||||
|
||||
@ -234,8 +234,8 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
// Note: PVs are created before claims and no pre-binding.
|
||||
It("should create 4 PVs and 2 PVCs: test write access", func() {
|
||||
numPVs, numPVCs := 4, 2
|
||||
pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, pvConfig)
|
||||
waitAndVerifyBinds(c, ns, pvols, claims, true)
|
||||
pvols, claims = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig)
|
||||
framework.WaitAndVerifyBinds(c, ns, pvols, claims, true)
|
||||
completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)
|
||||
})
|
||||
})
|
||||
@ -248,14 +248,14 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
var pvc *v1.PersistentVolumeClaim
|
||||
|
||||
BeforeEach(func() {
|
||||
pvConfig.reclaimPolicy = v1.PersistentVolumeReclaimRecycle
|
||||
pv, pvc = createPVPVC(c, pvConfig, ns, false)
|
||||
waitOnPVandPVC(c, ns, pv, pvc)
|
||||
pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle
|
||||
pv, pvc = framework.CreatePVPVC(c, pvConfig, ns, false)
|
||||
framework.WaitOnPVandPVC(c, ns, pv, pvc)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
framework.Logf("AfterEach: Cleaning up test resources.")
|
||||
pvPvcCleanup(c, ns, pv, pvc)
|
||||
framework.PVPVCCleanup(c, ns, pv, pvc)
|
||||
})
|
||||
|
||||
// This It() tests a scenario where a PV is written to by a Pod, recycled, then the volume checked
|
||||
@ -263,24 +263,24 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
// (and test) succeed.
|
||||
It("should test that a PV becomes Available and is clean after the PVC is deleted. [Volume][Serial][Flaky]", func() {
|
||||
By("Writing to the volume.")
|
||||
pod := makeWritePod(ns, pvc.Name)
|
||||
pod := framework.MakeWritePod(ns, pvc.Name)
|
||||
pod, err := c.Core().Pods(ns).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
deletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable)
|
||||
framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable)
|
||||
|
||||
By("Re-mounting the volume.")
|
||||
pvc = makePersistentVolumeClaim(ns)
|
||||
pvc = createPVC(c, ns, pvc)
|
||||
pvc = framework.MakePersistentVolumeClaim(ns)
|
||||
pvc = framework.CreatePVC(c, ns, pvc)
|
||||
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// If a file is detected in /mnt, fail the pod and do not restart it.
|
||||
By("Verifying the mount has been cleaned.")
|
||||
mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
|
||||
pod = makePod(ns, pvc.Name, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
|
||||
pod = framework.MakePod(ns, pvc.Name, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
|
||||
|
||||
pod, err = c.Core().Pods(ns).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -303,7 +303,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
pv *v1.PersistentVolume
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
clientPod *v1.Pod
|
||||
pvConfig persistentVolumeConfig
|
||||
pvConfig framework.PersistentVolumeConfig
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
@ -312,16 +312,16 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
if diskName == "" {
|
||||
diskName, err = createPDWithRetry()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
pvConfig = persistentVolumeConfig{
|
||||
namePrefix: "gce-",
|
||||
pvSource: v1.PersistentVolumeSource{
|
||||
pvConfig = framework.PersistentVolumeConfig{
|
||||
NamePrefix: "gce-",
|
||||
PVSource: v1.PersistentVolumeSource{
|
||||
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
|
||||
PDName: diskName,
|
||||
FSType: "ext3",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
prebind: nil,
|
||||
Prebind: nil,
|
||||
}
|
||||
}
|
||||
clientPod, pv, pvc = initializeGCETestSpec(c, ns, pvConfig, false)
|
||||
@ -331,8 +331,8 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
AfterEach(func() {
|
||||
framework.Logf("AfterEach: Cleaning up test resources")
|
||||
if c != nil {
|
||||
deletePodWithWait(f, c, clientPod)
|
||||
pvPvcCleanup(c, ns, pv, pvc)
|
||||
framework.DeletePodWithWait(f, c, clientPod)
|
||||
framework.PVPVCCleanup(c, ns, pv, pvc)
|
||||
clientPod = nil
|
||||
pvc = nil
|
||||
pv = nil
|
||||
@ -351,11 +351,11 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() {
|
||||
|
||||
By("Deleting the Claim")
|
||||
deletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
verifyGCEDiskAttached(diskName, node)
|
||||
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
framework.VerifyGCEDiskAttached(diskName, node)
|
||||
|
||||
By("Deleting the Pod")
|
||||
deletePodWithWait(f, c, clientPod)
|
||||
framework.DeletePodWithWait(f, c, clientPod)
|
||||
|
||||
By("Verifying Persistent Disk detach")
|
||||
err = waitForPDDetach(diskName, node)
|
||||
@ -367,11 +367,11 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
|
||||
It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() {
|
||||
|
||||
By("Deleting the Persistent Volume")
|
||||
deletePersistentVolume(c, pv.Name)
|
||||
verifyGCEDiskAttached(diskName, node)
|
||||
framework.DeletePersistentVolume(c, pv.Name)
|
||||
framework.VerifyGCEDiskAttached(diskName, node)
|
||||
|
||||
By("Deleting the client pod")
|
||||
deletePodWithWait(f, c, clientPod)
|
||||
framework.DeletePodWithWait(f, c, clientPod)
|
||||
|
||||
By("Verifying Persistent Disk detaches")
|
||||
err = waitForPDDetach(diskName, node)
|
||||
|
@ -119,14 +119,14 @@ var _ = framework.KubeDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func
|
||||
writeContentToVSpherePV(c, pvc, volumeFileContent)
|
||||
|
||||
By("Delete PVC")
|
||||
deletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
pvc = nil
|
||||
|
||||
By("Verify PV is retained")
|
||||
framework.Logf("Waiting for PV %v to become Released", pv.Name)
|
||||
err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
deletePersistentVolume(c, pv.Name)
|
||||
framework.DeletePersistentVolume(c, pv.Name)
|
||||
|
||||
By("Creating the PV for same volume path")
|
||||
pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil)
|
||||
@ -139,7 +139,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("wait for the pv and pvc to bind")
|
||||
waitOnPVandPVC(c, ns, pv, pvc)
|
||||
framework.WaitOnPVandPVC(c, ns, pv, pvc)
|
||||
verifyContentOfVSpherePV(c, pvc, volumeFileContent)
|
||||
|
||||
})
|
||||
@ -173,10 +173,10 @@ func testCleanupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset
|
||||
vsp.DeleteVolume(volumePath)
|
||||
}
|
||||
if pv != nil {
|
||||
deletePersistentVolume(c, pv.Name)
|
||||
framework.DeletePersistentVolume(c, pv.Name)
|
||||
}
|
||||
if pvc != nil {
|
||||
deletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
}
|
||||
}
|
||||
|
||||
@ -185,10 +185,10 @@ func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolu
|
||||
var err error
|
||||
|
||||
By("wait for the pv and pvc to bind")
|
||||
waitOnPVandPVC(c, ns, pv, pvc)
|
||||
framework.WaitOnPVandPVC(c, ns, pv, pvc)
|
||||
|
||||
By("delete pvc")
|
||||
deletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
|
||||
if !apierrs.IsNotFound(err) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
@ -81,14 +81,14 @@ var _ = framework.KubeDescribe("PersistentVolumes [Feature:LabelSelector]", func
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("wait for the pvc_ssd to bind with pv_ssd")
|
||||
waitOnPVandPVC(c, ns, pv_ssd, pvc_ssd)
|
||||
framework.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd)
|
||||
|
||||
By("Verify status of pvc_vvol is pending")
|
||||
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("delete pvc_ssd")
|
||||
deletePersistentVolumeClaim(c, pvc_ssd.Name, ns)
|
||||
framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns)
|
||||
|
||||
By("verify pv_ssd is deleted")
|
||||
err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second)
|
||||
@ -96,7 +96,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Feature:LabelSelector]", func
|
||||
volumePath = ""
|
||||
|
||||
By("delete pvc_vvol")
|
||||
deletePersistentVolumeClaim(c, pvc_vvol.Name, ns)
|
||||
framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns)
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -139,12 +139,12 @@ func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, volume
|
||||
vsp.DeleteVolume(volumePath)
|
||||
}
|
||||
if pvc_ssd != nil {
|
||||
deletePersistentVolumeClaim(c, pvc_ssd.Name, ns)
|
||||
framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns)
|
||||
}
|
||||
if pvc_vvol != nil {
|
||||
deletePersistentVolumeClaim(c, pvc_vvol.Name, ns)
|
||||
framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns)
|
||||
}
|
||||
if pv_ssd != nil {
|
||||
deletePersistentVolume(c, pv_ssd.Name)
|
||||
framework.DeletePersistentVolume(c, pv_ssd.Name)
|
||||
}
|
||||
}
|
||||
|
@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() {
|
||||
allZones := sets.NewString() // all zones in the project
|
||||
managedZones := sets.NewString() // subset of allZones
|
||||
|
||||
gceCloud, err := getGCECloud()
|
||||
gceCloud, err := framework.GetGCECloud()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Get all k8s managed zones
|
||||
|
@@ -21,6 +21,7 @@ import (
"time"

"fmt"

. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -84,24 +85,24 @@ func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeNam
// function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {
var (
pvConfig persistentVolumeConfig
pvConfig framework.PersistentVolumeConfig
pv *v1.PersistentVolume
claimRef *v1.ObjectReference
)
pvConfig = persistentVolumeConfig{
namePrefix: "vspherepv-",
pvSource: v1.PersistentVolumeSource{
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
PVSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
prebind: nil,
Prebind: nil,
}

pv = &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.namePrefix,
GenerateName: pvConfig.NamePrefix,
Annotations: map[string]string{
volumehelper.VolumeGidAnnotationKey: "777",
},
@@ -111,7 +112,7 @@ func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPo
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
PersistentVolumeSource: pvConfig.pvSource,
PersistentVolumeSource: pvConfig.PVSource,
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},