Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-19 01:40:13 +00:00
The recently introduced failure handling in ExpectNoError depends on error wrapping: if an error prefix gets added with `fmt.Errorf("foo: %v", err)`, then ExpectNoError cannot detect that the root cause is an assertion failure, so it adds another useless "unexpected error" prefix and does not dump the additional failure information (currently the backtrace inside the E2E framework).

Instead of manually deciding case by case where %w is needed, all error wrapping was updated automatically with

    sed -i "s/fmt.Errorf\(.*\): '*\(%s\|%v\)'*\",\(.* err)\)/fmt.Errorf\1: %w\",\3/" $(git grep -l 'fmt.Errorf' test/e2e*)

This may be unnecessary in some cases, but it's not wrong.
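The distinction matters because Go's standard errors.Is/errors.As helpers only traverse error chains built with %w. A minimal, self-contained sketch of the effect (failureError is a hypothetical stand-in for the framework's internal assertion-failure type, not the real one):

	package main

	import (
		"errors"
		"fmt"
	)

	// failureError is a hypothetical stand-in for the framework's
	// internal assertion-failure error type.
	type failureError struct{ msg string }

	func (e failureError) Error() string { return e.msg }

	func main() {
		root := failureError{msg: "assertion failed"}

		wrapped := fmt.Errorf("foo: %w", root)   // %w keeps the error chain intact
		flattened := fmt.Errorf("foo: %v", root) // %v keeps only the message text

		var target failureError
		fmt.Println(errors.As(wrapped, &target))   // true: root cause still detectable
		fmt.Println(errors.As(flattened, &target)) // false: the chain was broken
	}

With %v, the consumer sees only an opaque string, which is why ExpectNoError has to fall back to its generic "unexpected error" prefix.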
236 lines · 8.7 KiB · Go
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"context"
	"fmt"
	"path"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
)

var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expand [Slow]", func() {
	var (
		c                 clientset.Interface
		ns                string
		err               error
		pvc               *v1.PersistentVolumeClaim
		resizableSc       *storagev1.StorageClass
		nodeName          string
		nodeKeyValueLabel map[string]string
		nodeLabelValue    string
		nodeKey           string
		node              *v1.Node
	)

	f := framework.NewDefaultFramework("mounted-flexvolume-expand")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
	ginkgo.BeforeEach(func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("aws", "gce", "local")
		e2eskipper.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
		e2eskipper.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
		e2eskipper.SkipUnlessSSHKeyPresent()
		c = f.ClientSet
		ns = f.Namespace.Name
		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
		var err error

		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
		framework.ExpectNoError(err)
		nodeName = node.Name

		nodeKey = "mounted_flexvolume_expand_" + ns
		nodeLabelValue = ns
		nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue}
		e2enode.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
		ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, nodeKey)

		test := testsuites.StorageClassTest{
			Name:                 "flexvolume-resize",
			Timeouts:             f.Timeouts,
			ClaimSize:            "2Gi",
			AllowVolumeExpansion: true,
			Provisioner:          "flex-expand",
		}

		resizableSc, err = c.StorageV1().StorageClasses().Create(ctx, newStorageClass(test, ns, "resizing"), metav1.CreateOptions{})
		if err != nil {
			fmt.Printf("storage class creation error: %v\n", err)
		}
		framework.ExpectNoError(err, "Error creating resizable storage class: %v", err)
		if !*resizableSc.AllowVolumeExpansion {
			framework.Failf("Class %s does not allow volume expansion", resizableSc.Name)
		}

		pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
			StorageClassName: &(resizableSc.Name),
			ClaimSize:        "2Gi",
		}, ns)
		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Error creating pvc: %v", err)
		ginkgo.DeferCleanup(func(ctx context.Context) {
			framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
			if errs := e2epv.PVPVCCleanup(ctx, c, ns, nil, pvc); len(errs) > 0 {
				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
			}
		})
	})

	ginkgo.It("should be resizable when mounted", func(ctx context.Context) {
		e2eskipper.SkipUnlessSSHKeyPresent()

		driver := "dummy-attachable"

		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver))
		installFlex(ctx, c, node, "k8s", driver, path.Join(driverDir, driver))
		ginkgo.By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver))
		installFlex(ctx, c, nil, "k8s", driver, path.Join(driverDir, driver))

		pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
			PVSource: v1.PersistentVolumeSource{
				FlexVolume: &v1.FlexPersistentVolumeSource{
					Driver: "k8s/" + driver,
				}},
			NamePrefix:       "pv-",
			StorageClassName: resizableSc.Name,
			VolumeMode:       pvc.Spec.VolumeMode,
		})

		_, err = e2epv.CreatePV(ctx, c, f.Timeouts, pv)
		framework.ExpectNoError(err, "Error creating pv %v", err)

		ginkgo.By("Waiting for PVC to be in bound phase")
		pvcClaims := []*v1.PersistentVolumeClaim{pvc}
		var pvs []*v1.PersistentVolume

		pvs, err = e2epv.WaitForPVClaimBoundPhase(ctx, c, pvcClaims, framework.ClaimProvisionTimeout)
		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
		framework.ExpectEqual(len(pvs), 1)

		var pod *v1.Pod
		ginkgo.By("Creating pod")
		pod, err = createNginxPod(ctx, c, ns, nodeKeyValueLabel, pvcClaims)
		framework.ExpectNoError(err, "Failed to create pod %v", err)
		ginkgo.DeferCleanup(e2epod.DeletePodWithWait, c, pod)

		ginkgo.By("Waiting for pod to go to 'running' state")
		err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name)
		framework.ExpectNoError(err, "Pod didn't go to 'running' state %v", err)

		ginkgo.By("Expanding current pvc")
		newSize := resource.MustParse("6Gi")
		newPVC, err := testsuites.ExpandPVCSize(ctx, pvc, newSize, c)
		framework.ExpectNoError(err, "While updating pvc for more size")
		pvc = newPVC
		gomega.Expect(pvc).NotTo(gomega.BeNil())

		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
		if pvcSize.Cmp(newSize) != 0 {
			framework.Failf("error updating pvc size %q", pvc.Name)
		}

		ginkgo.By("Waiting for cloudprovider resize to finish")
		err = testsuites.WaitForControllerVolumeResize(ctx, pvc, c, totalResizeWaitPeriod)
		framework.ExpectNoError(err, "While waiting for pvc resize to finish")

		ginkgo.By("Waiting for file system resize to finish")
		pvc, err = testsuites.WaitForFSResize(ctx, pvc, c)
		framework.ExpectNoError(err, "while waiting for fs resize to finish")

		pvcConditions := pvc.Status.Conditions
		framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
	})
})

// createNginxPod creates an nginx pod.
func createNginxPod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
	pod := makeNginxPod(namespace, nodeSelector, pvclaims)
	pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("pod Create API error: %w", err)
	}
	// Waiting for pod to be running
	err = e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
	if err != nil {
		return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
	}
	// get fresh pod info
	pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
	if err != nil {
		return pod, fmt.Errorf("pod Get API error: %w", err)
	}
	return pod, nil
}

// makeNginxPod returns a pod definition based on the namespace using nginx image
func makeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
	podSpec := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-tester-",
			Namespace:    ns,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "write-pod",
					Image: imageutils.GetE2EImage(imageutils.Nginx),
					Ports: []v1.ContainerPort{
						{
							Name:          "http-server",
							ContainerPort: 80,
						},
					},
				},
			},
		},
	}
	var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
	var volumes = make([]v1.Volume, len(pvclaims))
	for index, pvclaim := range pvclaims {
		volumename := fmt.Sprintf("volume%v", index+1)
		volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
		volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
	}
	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
	podSpec.Spec.Volumes = volumes
	if nodeSelector != nil {
		podSpec.Spec.NodeSelector = nodeSelector
	}
	return podSpec
}