mirror of https://github.com/k3s-io/kubernetes.git

Merge pull request #126326 from manishym/group_snapshot_e2e: Add end-to-end tests for Volume Group Snapshot

Commit d598a3ec0f
@@ -402,6 +402,10 @@ var (
	// TODO: document the feature (owning SIG, when to use this feature for a test)
	VolumeSnapshotDataSource = framework.WithFeature(framework.ValidFeatures.Add("VolumeSnapshotDataSource"))

	// Owner: sig-storage
	// Volume group snapshot tests
	VolumeGroupSnapshotDataSource = framework.WithFeature(framework.ValidFeatures.Add("volumegroupsnapshot"))

	// TODO: document the feature (owning SIG, when to use this feature for a test)
	VolumeSourceXFS = framework.WithFeature(framework.ValidFeatures.Add("VolumeSourceXFS"))
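For context, a minimal, hypothetical sketch of how such a feature tag is attached to a spec; the framework turns the tag into a Ginkgo label so runs can include or exclude group-snapshot tests (the function and test names below are illustrative, not part of this diff):

func declareGroupSnapshotSpecs(f *framework.Framework) {
	// f.It records feature tags such as feature.VolumeGroupSnapshotDataSource
	// as Ginkgo labels on the spec, so CI label filters can select or skip it.
	f.It("creates and restores a volume group snapshot", feature.VolumeGroupSnapshotDataSource, func(ctx context.Context) {
		// test body elided
	})
}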
@@ -158,6 +158,7 @@ func InitHostPathCSIDriver() storageframework.TestDriver {
		storageframework.CapReadWriteOncePod:               true,
		storageframework.CapMultiplePVsSameID:              true,
		storageframework.CapFSResizeFromSourceNotSupported: true,
		storageframework.CapVolumeGroupSnapshot:            true,

		// This is needed for the
		// testsuites/volumelimits.go `should support volume limits`
@@ -223,6 +224,12 @@ func (h *hostpathCSIDriver) GetVolumeAttributesClass(_ context.Context, config *
		},
	}, config.Framework.Namespace.Name, "e2e-vac-hostpath")
}

func (h *hostpathCSIDriver) GetVolumeGroupSnapshotClass(ctx context.Context, config *storageframework.PerTestConfig, parameters map[string]string) *unstructured.Unstructured {
	snapshotter := config.GetUniqueDriverName()
	ns := config.Framework.Namespace.Name

	return utils.GenerateVolumeGroupSnapshotClassSpec(snapshotter, parameters, ns)
}

func (h *hostpathCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
	// Create secondary namespace which will be used for creating driver
@@ -131,6 +131,12 @@ type SnapshottableTestDriver interface {
	GetSnapshotClass(ctx context.Context, config *PerTestConfig, parameters map[string]string) *unstructured.Unstructured
}

// VolumeGroupSnapshottableTestDriver represents an interface for a TestDriver that supports volume group snapshots.
type VolumeGroupSnapshottableTestDriver interface {
	TestDriver
	// GetVolumeGroupSnapshotClass returns a VolumeGroupSnapshotClass to create a group snapshot.
	GetVolumeGroupSnapshotClass(ctx context.Context, config *PerTestConfig, parameters map[string]string) *unstructured.Unstructured
}

// VolumeAttributesClassTestDriver represents an interface for a TestDriver that supports
// creating and modifying volumes via VolumeAttributesClass objects
type VolumeAttributesClassTestDriver interface {
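A suite discovers this optional interface at runtime with a type assertion, the same pattern used for the other optional driver interfaces here; a rough sketch, assuming a driver variable of type storageframework.TestDriver and a prepared config:

if gsDriver, ok := driver.(storageframework.VolumeGroupSnapshottableTestDriver); ok {
	// Driver supports group snapshots; fetch an unstructured class object to snapshot with.
	gsclass := gsDriver.GetVolumeGroupSnapshotClass(ctx, config, nil)
	_ = gsclass
}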
@@ -159,13 +165,14 @@ type Capability string

// Constants related to capabilities and behavior of the driver.
const (
	CapPersistence         Capability = "persistence"        // data is persisted across pod restarts
	CapBlock               Capability = "block"              // raw block mode
	CapFsGroup             Capability = "fsGroup"            // volume ownership via fsGroup
	CapVolumeMountGroup    Capability = "volumeMountGroup"   // Driver has the VolumeMountGroup CSI node capability. Because this is a FSGroup feature, the fsGroup capability must also be set to true.
	CapExec                Capability = "exec"               // exec a file in the volume
	CapSnapshotDataSource  Capability = "snapshotDataSource" // support populate data from snapshot
	CapVolumeGroupSnapshot Capability = "groupSnapshot"      // support group snapshot
	CapPVCDataSource       Capability = "pvcDataSource"      // support populate data from pvc

	// multiple pods on a node can use the same volume concurrently;
	// for CSI, see:
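For a third-party driver, opting into the new suite is a matter of declaring this capability in its DriverInfo; a hypothetical sketch (the driver name and field selection are illustrative):

driverInfo := storageframework.DriverInfo{
	Name: "example.csi.vendor.io",
	Capabilities: map[storageframework.Capability]bool{
		storageframework.CapSnapshotDataSource:  true,
		storageframework.CapVolumeGroupSnapshot: true, // enables the volumegroupsnapshottable suite
	},
}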
@@ -60,6 +60,8 @@ var (
	DynamicCreatedSnapshot TestSnapshotType = "DynamicSnapshot"
	// PreprovisionedCreatedSnapshot represents a snapshot type for pre-provisioned snapshot
	PreprovisionedCreatedSnapshot TestSnapshotType = "PreprovisionedSnapshot"
	// VolumeGroupSnapshot represents a snapshot type for a volume group snapshot
	VolumeGroupSnapshot TestSnapshotType = "VolumeGroupSnapshot"
)

// TestSnapshotDeletionPolicy represents the deletion policy of the snapshot class
@@ -318,6 +320,14 @@ var (
		SnapshotDeletionPolicy: DeleteSnapshot,
		VolType:                DynamicPV,
	}

	// VolumeGroupSnapshotDelete is the TestPattern for "VolumeGroupSnapshot"
	VolumeGroupSnapshotDelete = TestPattern{
		Name:                   " (delete policy)",
		SnapshotType:           VolumeGroupSnapshot,
		SnapshotDeletionPolicy: DeleteSnapshot,
		VolType:                DynamicPV,
	}

	// PreprovisionedSnapshotDelete is TestPattern for "Pre-provisioned snapshot"
	PreprovisionedSnapshotDelete = TestPattern{
		Name: "Pre-provisioned Snapshot (delete policy)",
test/e2e/storage/framework/volume_group_snapshot_resource.go (new file, 126 lines)
@@ -0,0 +1,126 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo/v2"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

func getVolumeGroupSnapshot(labels map[string]interface{}, ns, snapshotClassName string) *unstructured.Unstructured {
	snapshot := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "VolumeGroupSnapshot",
			"apiVersion": utils.VolumeGroupSnapshotAPIVersion,
			"metadata": map[string]interface{}{
				"generateName": "group-snapshot-",
				"namespace":    ns,
			},
			"spec": map[string]interface{}{
				"volumeGroupSnapshotClassName": snapshotClassName,
				"source": map[string]interface{}{
					"selector": map[string]interface{}{
						"matchLabels": labels,
					},
				},
			},
		},
	}

	return snapshot
}

// VolumeGroupSnapshotResource represents a volume group snapshot class, a volume group snapshot and its bound content for a specific test case.
type VolumeGroupSnapshotResource struct {
	Config  *PerTestConfig
	Pattern TestPattern

	Vgs        *unstructured.Unstructured
	Vgscontent *unstructured.Unstructured
	Vgsclass   *unstructured.Unstructured
}

// CreateVolumeGroupSnapshot creates a VolumeGroupSnapshotClass with the given SnapshotDeletionPolicy and a VolumeGroupSnapshot
// from that VolumeGroupSnapshotClass using a dynamic client.
// It returns the unstructured VolumeGroupSnapshotClass and VolumeGroupSnapshot objects.
func CreateVolumeGroupSnapshot(ctx context.Context, sDriver VolumeGroupSnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, groupName string, pvcNamespace string, timeouts *framework.TimeoutContext, parameters map[string]string) (*unstructured.Unstructured, *unstructured.Unstructured) {
	defer ginkgo.GinkgoRecover()
	var err error
	if pattern.SnapshotType != VolumeGroupSnapshot {
		err = fmt.Errorf("SnapshotType must be set to VolumeGroupSnapshot")
		framework.ExpectNoError(err, "SnapshotType must be VolumeGroupSnapshot")
	}
	dc := config.Framework.DynamicClient

	ginkgo.By("creating a VolumeGroupSnapshotClass")
	gsclass := sDriver.GetVolumeGroupSnapshotClass(ctx, config, parameters)
	if gsclass == nil {
		framework.Failf("Failed to get volume group snapshot class based on test config")
	}
	gsclass.Object["deletionPolicy"] = pattern.SnapshotDeletionPolicy.String()

	gsclass, err = dc.Resource(utils.VolumeGroupSnapshotClassGVR).Create(ctx, gsclass, metav1.CreateOptions{})
	framework.ExpectNoError(err, "Failed to create volume group snapshot class")
	gsclass, err = dc.Resource(utils.VolumeGroupSnapshotClassGVR).Get(ctx, gsclass.GetName(), metav1.GetOptions{})
	framework.ExpectNoError(err, "Failed to get volume group snapshot class")

	ginkgo.By("creating a dynamic VolumeGroupSnapshot")
	// Prepare a dynamically provisioned volume group snapshot with certain data
	volumeGroupSnapshot := getVolumeGroupSnapshot(map[string]interface{}{
		"group": groupName,
	}, pvcNamespace, gsclass.GetName())

	volumeGroupSnapshot, err = dc.Resource(utils.VolumeGroupSnapshotGVR).Namespace(volumeGroupSnapshot.GetNamespace()).Create(ctx, volumeGroupSnapshot, metav1.CreateOptions{})
	framework.ExpectNoError(err, "Failed to create volume group snapshot")
	ginkgo.By("Waiting for group snapshot to be ready")
	err = utils.WaitForVolumeGroupSnapshotReady(ctx, dc, volumeGroupSnapshot.GetNamespace(), volumeGroupSnapshot.GetName(), framework.Poll, timeouts.SnapshotCreate*10)
	framework.ExpectNoError(err, "Group snapshot is not ready to use within the timeout")
	ginkgo.By("Getting group snapshot and content")
	volumeGroupSnapshot, err = dc.Resource(utils.VolumeGroupSnapshotGVR).Namespace(volumeGroupSnapshot.GetNamespace()).Get(ctx, volumeGroupSnapshot.GetName(), metav1.GetOptions{})
	framework.ExpectNoError(err, "Failed to get volume group snapshot after creation")

	return gsclass, volumeGroupSnapshot
}

// CleanupResource deletes the VolumeGroupSnapshotClass using a dynamic client.
func (r *VolumeGroupSnapshotResource) CleanupResource(ctx context.Context, timeouts *framework.TimeoutContext) error {
	defer ginkgo.GinkgoRecover()
	dc := r.Config.Framework.DynamicClient
	err := dc.Resource(utils.VolumeGroupSnapshotClassGVR).Delete(ctx, r.Vgsclass.GetName(), metav1.DeleteOptions{})
	framework.ExpectNoError(err, "Failed to delete volume group snapshot class")
	return nil
}

// CreateVolumeGroupSnapshotResource creates a VolumeGroupSnapshotResource object with the given parameters.
func CreateVolumeGroupSnapshotResource(ctx context.Context, sDriver VolumeGroupSnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, groupName string, pvcNamespace string, timeouts *framework.TimeoutContext, parameters map[string]string) *VolumeGroupSnapshotResource {
	vgsclass, snapshot := CreateVolumeGroupSnapshot(ctx, sDriver, config, pattern, groupName, pvcNamespace, timeouts, parameters)
	vgs := &VolumeGroupSnapshotResource{
		Config:     config,
		Pattern:    pattern,
		Vgs:        snapshot,
		Vgsclass:   vgsclass,
		Vgscontent: nil,
	}
	return vgs
}
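Putting the resource helper to work, a test typically drives the whole flow with one call and defers cleanup; a hedged usage sketch (variable names such as gsDriver, config, and pattern are illustrative and assumed to be in scope):

vgs := storageframework.CreateVolumeGroupSnapshotResource(ctx, gsDriver, config, pattern,
	"test-group", f.Namespace.Name, f.Timeouts,
	map[string]string{"deletionPolicy": "Delete"})
// CleanupResource takes a context as its first argument, so DeferCleanup can invoke it.
ginkgo.DeferCleanup(vgs.CleanupResource, f.Timeouts)
framework.Logf("group snapshot %s is ready", vgs.Vgs.GetName())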
@@ -68,6 +68,7 @@ var BaseSuites = []func() storageframework.TestSuite{
	InitTopologyTestSuite,
	InitVolumeStressTestSuite,
	InitFsGroupChangePolicyTestSuite,
	InitVolumeGroupSnapshottableTestSuite,
	func() storageframework.TestSuite {
		return InitCustomEphemeralTestSuite(GenericEphemeralTestPatterns())
	},
@@ -79,6 +80,7 @@ var CSISuites = append(BaseSuites,
		return InitCustomEphemeralTestSuite(CSIEphemeralTestPatterns())
	},
	InitSnapshottableTestSuite,
	InitVolumeGroupSnapshottableTestSuite,
	InitSnapshottableStressTestSuite,
	InitVolumePerformanceTestSuite,
	InitPvcDeletionPerformanceTestSuite,
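For reference, a sketch of how these suite lists are consumed when wiring a driver to the generic test machinery, loosely mirroring the pattern in test/e2e/storage/csi_volumes.go (treat the exact variable and helper names as assumptions):

var _ = utils.SIGDescribe("CSI Volumes", func() {
	for _, initDriver := range csiTestDrivers {
		curDriver := initDriver()
		ginkgo.Context(storageframework.GetDriverNameWithFeatureTags(curDriver), func() {
			// DefineTestSuites instantiates every suite in CSISuites, including
			// the new volumegroupsnapshottable suite, against this driver.
			storageframework.DefineTestSuites(curDriver, testsuites.CSISuites)
		})
	}
})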
test/e2e/storage/testsuites/volume_group_snapshottable.go (new file, 221 lines)
@@ -0,0 +1,221 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	admissionapi "k8s.io/pod-security-admission/api"
)

type volumeGroupSnapshottableTest struct {
	config      *storageframework.PerTestConfig
	pods        []*v1.Pod
	volumeGroup [3][]*storageframework.VolumeResource
	snapshots   []*storageframework.VolumeGroupSnapshotResource
	numPods     int
	numVolumes  int
}

type VolumeGroupSnapshottableTestSuite struct {
	tsInfo storageframework.TestSuiteInfo
}

func InitVolumeGroupSnapshottableTestSuite() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		storageframework.VolumeGroupSnapshotDelete,
	}
	return InitCustomGroupSnapshottableTestSuite(patterns)
}

func InitCustomGroupSnapshottableTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &VolumeGroupSnapshottableTestSuite{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "volumegroupsnapshottable",
			TestPatterns: patterns,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Mi",
			},
			TestTags: []interface{}{feature.VolumeGroupSnapshotDataSource},
		},
	}
}

func (s *VolumeGroupSnapshottableTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	// Check preconditions.
	dInfo := driver.GetDriverInfo()
	_, ok := driver.(storageframework.VolumeGroupSnapshottableTestDriver)
	if !dInfo.Capabilities[storageframework.CapVolumeGroupSnapshot] || !ok {
		e2eskipper.Skipf("Driver %q does not support group snapshots - skipping", dInfo.Name)
	}
}

func (s *VolumeGroupSnapshottableTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return s.tsInfo
}

func (s *VolumeGroupSnapshottableTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	labelKey := "group"
	labelValue := "test-group"
	f := framework.NewDefaultFramework("volumegroupsnapshottable")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	ginkgo.Describe("VolumeGroupSnapshottable", func() {

		ginkgo.Context("", func() {
			var (
				snapshottableDriver storageframework.VolumeGroupSnapshottableTestDriver
				cs                  clientset.Interface
				groupTest           *volumeGroupSnapshottableTest
			)
			init := func(ctx context.Context) {
				snapshottableDriver = driver.(storageframework.VolumeGroupSnapshottableTestDriver)
				cs = f.ClientSet
				config := driver.PrepareTest(ctx, f)

				groupTest = &volumeGroupSnapshottableTest{
					config:      config,
					volumeGroup: [3][]*storageframework.VolumeResource{},
					snapshots:   []*storageframework.VolumeGroupSnapshotResource{},
					pods:        []*v1.Pod{},
					numPods:     1,
					numVolumes:  3,
				}
			}

			createGroupLabel := func(ctx context.Context, pvc *v1.PersistentVolumeClaim, labelKey, labelValue string) {
				if pvc.Labels == nil {
					pvc.Labels = map[string]string{}
				}
				pvc.Labels[labelKey] = labelValue
				_, err := cs.CoreV1().PersistentVolumeClaims(pvc.GetNamespace()).Update(ctx, pvc, metav1.UpdateOptions{})
				framework.ExpectNoError(err, "failed to update PVC %s", pvc.Name)
			}

			createPodsAndVolumes := func(ctx context.Context) {
				for i := 0; i < groupTest.numPods; i++ {
					framework.Logf("Creating resources for pod %d/%d", i, groupTest.numPods-1)
					for j := 0; j < groupTest.numVolumes; j++ {
						volume := storageframework.CreateVolumeResource(ctx, driver, groupTest.config, pattern, s.GetTestSuiteInfo().SupportedSizeRange)
						groupTest.volumeGroup[i] = append(groupTest.volumeGroup[i], volume)
						createGroupLabel(ctx, volume.Pvc, labelKey, labelValue)
					}
					pvcs := []*v1.PersistentVolumeClaim{}
					for _, volume := range groupTest.volumeGroup[i] {
						pvcs = append(pvcs, volume.Pvc)
					}
					// Create a pod with multiple volumes
					podConfig := e2epod.Config{
						NS:           f.Namespace.Name,
						PVCs:         pvcs,
						SeLinuxLabel: e2epv.SELinuxLabel,
					}
					pod, err := e2epod.MakeSecPod(&podConfig)
					framework.ExpectNoError(err, "failed to create pod")
					groupTest.pods = append(groupTest.pods, pod)
				}
				for i, pod := range groupTest.pods {
					pod, err := cs.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
					if err != nil {
						framework.Failf("Failed to create pod-%d [%+v]. Error: %v", i, pod, err)
					}
					if err = e2epod.WaitForPodRunningInNamespace(ctx, cs, pod); err != nil {
						framework.Failf("Failed to wait for pod-%d [%+v] to turn into running status. Error: %v", i, pod, err)
					}
				}
			}

			cleanup := func(ctx context.Context) {
				for _, pod := range groupTest.pods {
					framework.Logf("Deleting pod %s", pod.Name)
					err := e2epod.DeletePodWithWait(ctx, cs, pod)
					framework.ExpectNoError(err, "failed to delete pod %s", pod.Name)
				}
				for _, group := range groupTest.volumeGroup {
					for _, volume := range group {
						framework.Logf("Deleting volume %s", volume.Pvc.Name)
						err := volume.CleanupResource(ctx)
						framework.ExpectNoError(err, "failed to delete volume %s", volume.Pvc.Name)
					}
				}
			}

			ginkgo.It("should create snapshots for multiple volumes in a pod", func(ctx context.Context) {
				init(ctx)
				createPodsAndVolumes(ctx)
				ginkgo.DeferCleanup(cleanup)

				snapshot := storageframework.CreateVolumeGroupSnapshotResource(ctx, snapshottableDriver, groupTest.config, pattern, labelValue, groupTest.volumeGroup[0][0].Pvc.GetNamespace(), f.Timeouts, map[string]string{"deletionPolicy": pattern.SnapshotDeletionPolicy.String()})
				groupTest.snapshots = append(groupTest.snapshots, snapshot)
				ginkgo.By("verifying the snapshots in the group are ready to use")
				status := snapshot.Vgs.Object["status"]
				err := framework.Gomega().Expect(status).NotTo(gomega.BeNil())
				framework.ExpectNoError(err, "failed to get status of group snapshot")
				volumes := status.(map[string]interface{})["pvcVolumeSnapshotRefList"]
				err = framework.Gomega().Expect(volumes).NotTo(gomega.BeNil())
				framework.ExpectNoError(err, "failed to get volume snapshot list")
				volumeList := volumes.([]interface{})
				err = framework.Gomega().Expect(len(volumeList)).To(gomega.Equal(groupTest.numVolumes))
				framework.ExpectNoError(err, "unexpected number of snapshots in the group")
				claimSize := groupTest.volumeGroup[0][0].Pvc.Spec.Resources.Requests.Storage().String()
				for _, volume := range volumeList {
					// Create a PVC from the snapshot
					pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
						StorageClassName: &groupTest.volumeGroup[0][0].Sc.Name,
						ClaimSize:        claimSize,
					}, f.Namespace.Name)

					group := "snapshot.storage.k8s.io"

					pvc.Spec.DataSource = &v1.TypedLocalObjectReference{
						APIGroup: &group,
						Kind:     "VolumeSnapshot",
						Name:     volume.(map[string]interface{})["volumeSnapshotRef"].(map[string]interface{})["name"].(string),
					}

					volSrc := v1.VolumeSource{
						Ephemeral: &v1.EphemeralVolumeSource{
							VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
								Spec: pvc.Spec,
							},
						},
					}
					pvc, err := cs.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(ctx, pvc, metav1.CreateOptions{})
					framework.ExpectNoError(err, "failed to create PVC from snapshot")
					pod := StartInPodWithVolumeSource(ctx, cs, volSrc, pvc.Namespace, "snapshot-pod", "sleep 300", groupTest.config.ClientNodeSelection)
					ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, pod)
					framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow), "Pod did not start in expected time")
				}
			})
		})
	})
}
test/e2e/storage/utils/volume_group_snapshot.go (new file, 102 lines)
@@ -0,0 +1,102 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/storage/names"
	"k8s.io/client-go/dynamic"
	"k8s.io/kubernetes/test/e2e/framework"
)

const (
	// VolumeGroupSnapshotAPIGroup is the group snapshot API group
	VolumeGroupSnapshotAPIGroup = "groupsnapshot.storage.k8s.io"
	// VolumeGroupSnapshotAPIVersion is the group snapshot API version
	VolumeGroupSnapshotAPIVersion = "groupsnapshot.storage.k8s.io/v1alpha1"
)

var (
	// VolumeGroupSnapshotGVR is the GroupVersionResource for volumegroupsnapshots
	VolumeGroupSnapshotGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1alpha1", Resource: "volumegroupsnapshots"}
	// VolumeGroupSnapshotClassGVR is the GroupVersionResource for volumegroupsnapshotclasses
	VolumeGroupSnapshotClassGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1alpha1", Resource: "volumegroupsnapshotclasses"}
)

// WaitForVolumeGroupSnapshotReady waits for a VolumeGroupSnapshot to be ready to use or until timeout occurs, whichever comes first.
func WaitForVolumeGroupSnapshotReady(ctx context.Context, c dynamic.Interface, ns string, volumeGroupSnapshotName string, poll, timeout time.Duration) error {
	framework.Logf("Waiting up to %v for VolumeGroupSnapshot %s to become ready", timeout, volumeGroupSnapshotName)

	if successful := WaitUntil(poll, timeout, func() bool {
		volumeGroupSnapshot, err := c.Resource(VolumeGroupSnapshotGVR).Namespace(ns).Get(ctx, volumeGroupSnapshotName, metav1.GetOptions{})
		if err != nil {
			framework.Logf("Failed to get group snapshot %q, retrying in %v. Error: %v", volumeGroupSnapshotName, poll, err)
			return false
		}

		status := volumeGroupSnapshot.Object["status"]
		if status == nil {
			framework.Logf("VolumeGroupSnapshot %s found but is not ready.", volumeGroupSnapshotName)
			return false
		}
		value := status.(map[string]interface{})
		if value["readyToUse"] == true {
			framework.Logf("VolumeGroupSnapshot %s found and is ready", volumeGroupSnapshotName)
			return true
		}

		framework.Logf("VolumeGroupSnapshot %s found but is not ready.", volumeGroupSnapshotName)
		return false
	}); successful {
		return nil
	}

	return fmt.Errorf("VolumeGroupSnapshot %s is not ready within %v", volumeGroupSnapshotName, timeout)
}

// GenerateVolumeGroupSnapshotClassSpec constructs a new unstructured VolumeGroupSnapshotClass
// for the given driver, defaulting the deletion policy to "Delete" when none is supplied.
func GenerateVolumeGroupSnapshotClassSpec(
	snapshotter string,
	parameters map[string]string,
	ns string,
) *unstructured.Unstructured {
	deletionPolicy, ok := parameters["deletionPolicy"]
	if !ok {
		deletionPolicy = "Delete"
	}
	volumeGroupSnapshotClass := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "VolumeGroupSnapshotClass",
			"apiVersion": VolumeGroupSnapshotAPIVersion,
			"metadata": map[string]interface{}{
				// Class names must be unique, so derive one from the namespace name with a random suffix.
				"name": names.SimpleNameGenerator.GenerateName(ns),
			},
			"driver":         snapshotter,
			"parameters":     parameters,
			"deletionPolicy": deletionPolicy,
		},
	}

	return volumeGroupSnapshotClass
}
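To connect the helper above to the manifests that follow, the unstructured object GenerateVolumeGroupSnapshotClassSpec builds corresponds to roughly this manifest (the driver name and the random name suffix are hypothetical):

apiVersion: groupsnapshot.storage.k8s.io/v1alpha1
kind: VolumeGroupSnapshotClass
metadata:
  name: e2e-ns-snapshot-abc12
driver: hostpath.csi.k8s.io
deletionPolicy: Delete
parameters:
  deletionPolicy: Delete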
@@ -0,0 +1,94 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/814"
    controller-gen.kubebuilder.io/version: v0.15.0
  name: volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io
spec:
  group: groupsnapshot.storage.k8s.io
  names:
    kind: VolumeGroupSnapshotClass
    listKind: VolumeGroupSnapshotClassList
    plural: volumegroupsnapshotclasses
    shortNames:
    - vgsclass
    - vgsclasses
    singular: volumegroupsnapshotclass
  scope: Cluster
  versions:
  - additionalPrinterColumns:
    - jsonPath: .driver
      name: Driver
      type: string
    - description: Determines whether a VolumeGroupSnapshotContent created through
        the VolumeGroupSnapshotClass should be deleted when its bound VolumeGroupSnapshot
        is deleted.
      jsonPath: .deletionPolicy
      name: DeletionPolicy
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
    name: v1alpha1
    schema:
      openAPIV3Schema:
        description: |-
          VolumeGroupSnapshotClass specifies parameters that an underlying storage system
          uses when creating a volume group snapshot. A specific VolumeGroupSnapshotClass
          is used by specifying its name in a VolumeGroupSnapshot object.
          VolumeGroupSnapshotClasses are non-namespaced.
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          deletionPolicy:
            description: |-
              DeletionPolicy determines whether a VolumeGroupSnapshotContent created
              through the VolumeGroupSnapshotClass should be deleted when its bound
              VolumeGroupSnapshot is deleted.
              Supported values are "Retain" and "Delete".
              "Retain" means that the VolumeGroupSnapshotContent and its physical group
              snapshot on underlying storage system are kept.
              "Delete" means that the VolumeGroupSnapshotContent and its physical group
              snapshot on underlying storage system are deleted.
              Required.
            enum:
            - Delete
            - Retain
            type: string
          driver:
            description: |-
              Driver is the name of the storage driver expected to handle this VolumeGroupSnapshotClass.
              Required.
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          parameters:
            additionalProperties:
              type: string
            description: |-
              Parameters is a key-value map with storage driver specific parameters for
              creating group snapshots.
              These values are opaque to Kubernetes and are passed directly to the driver.
            type: object
        required:
        - deletionPolicy
        - driver
        type: object
    served: true
    storage: true
    subresources: {}
@@ -0,0 +1,335 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068"
    controller-gen.kubebuilder.io/version: v0.15.0
  name: volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io
spec:
  group: groupsnapshot.storage.k8s.io
  names:
    kind: VolumeGroupSnapshotContent
    listKind: VolumeGroupSnapshotContentList
    plural: volumegroupsnapshotcontents
    shortNames:
    - vgsc
    - vgscs
    singular: volumegroupsnapshotcontent
  scope: Cluster
  versions:
  - additionalPrinterColumns:
    - description: Indicates if all the individual snapshots in the group are ready
        to be used to restore a group of volumes.
      jsonPath: .status.readyToUse
      name: ReadyToUse
      type: boolean
    - description: Determines whether this VolumeGroupSnapshotContent and its physical
        group snapshot on the underlying storage system should be deleted when its
        bound VolumeGroupSnapshot is deleted.
      jsonPath: .spec.deletionPolicy
      name: DeletionPolicy
      type: string
    - description: Name of the CSI driver used to create the physical group snapshot
        on the underlying storage system.
      jsonPath: .spec.driver
      name: Driver
      type: string
    - description: Name of the VolumeGroupSnapshotClass from which this group snapshot
        was (or will be) created.
      jsonPath: .spec.volumeGroupSnapshotClassName
      name: VolumeGroupSnapshotClass
      type: string
    - description: Namespace of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent
        object is bound.
      jsonPath: .spec.volumeGroupSnapshotRef.namespace
      name: VolumeGroupSnapshotNamespace
      type: string
    - description: Name of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent
        object is bound.
      jsonPath: .spec.volumeGroupSnapshotRef.name
      name: VolumeGroupSnapshot
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
    name: v1alpha1
    schema:
      openAPIV3Schema:
        description: |-
          VolumeGroupSnapshotContent represents the actual "on-disk" group snapshot object
          in the underlying storage system
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: |-
              Spec defines properties of a VolumeGroupSnapshotContent created by the underlying storage system.
              Required.
            properties:
              deletionPolicy:
                description: |-
                  DeletionPolicy determines whether this VolumeGroupSnapshotContent and the
                  physical group snapshot on the underlying storage system should be deleted
                  when the bound VolumeGroupSnapshot is deleted.
                  Supported values are "Retain" and "Delete".
                  "Retain" means that the VolumeGroupSnapshotContent and its physical group
                  snapshot on underlying storage system are kept.
                  "Delete" means that the VolumeGroupSnapshotContent and its physical group
                  snapshot on underlying storage system are deleted.
                  For dynamically provisioned group snapshots, this field will automatically
                  be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field
                  defined in the corresponding VolumeGroupSnapshotClass.
                  For pre-existing snapshots, users MUST specify this field when creating the
                  VolumeGroupSnapshotContent object.
                  Required.
                enum:
                - Delete
                - Retain
                type: string
              driver:
                description: |-
                  Driver is the name of the CSI driver used to create the physical group snapshot on
                  the underlying storage system.
                  This MUST be the same as the name returned by the CSI GetPluginName() call for
                  that driver.
                  Required.
                type: string
              source:
                description: |-
                  Source specifies whether the snapshot is (or should be) dynamically provisioned
                  or already exists, and just requires a Kubernetes object representation.
                  This field is immutable after creation.
                  Required.
                properties:
                  groupSnapshotHandles:
                    description: |-
                      GroupSnapshotHandles specifies the CSI "group_snapshot_id" of a pre-existing
                      group snapshot and a list of CSI "snapshot_id" of pre-existing snapshots
                      on the underlying storage system for which a Kubernetes object
                      representation was (or should be) created.
                      This field is immutable.
                    properties:
                      volumeGroupSnapshotHandle:
                        description: |-
                          VolumeGroupSnapshotHandle specifies the CSI "group_snapshot_id" of a pre-existing
                          group snapshot on the underlying storage system for which a Kubernetes object
                          representation was (or should be) created.
                          This field is immutable.
                          Required.
                        type: string
                      volumeSnapshotHandles:
                        description: |-
                          VolumeSnapshotHandles is a list of CSI "snapshot_id" of pre-existing
                          snapshots on the underlying storage system for which Kubernetes object
                          representations were (or should be) created.
                          This field is immutable.
                          Required.
                        items:
                          type: string
                        type: array
                    required:
                    - volumeGroupSnapshotHandle
                    - volumeSnapshotHandles
                    type: object
                    x-kubernetes-validations:
                    - message: groupSnapshotHandles is immutable
                      rule: self == oldSelf
                  volumeHandles:
                    description: |-
                      VolumeHandles is a list of volume handles on the backend to be snapshotted
                      together. It is specified for dynamic provisioning of the VolumeGroupSnapshot.
                      This field is immutable.
                    items:
                      type: string
                    type: array
                    x-kubernetes-validations:
                    - message: volumeHandles is immutable
                      rule: self == oldSelf
                type: object
                x-kubernetes-validations:
                - message: volumeHandles is required once set
                  rule: '!has(oldSelf.volumeHandles) || has(self.volumeHandles)'
                - message: groupSnapshotHandles is required once set
                  rule: '!has(oldSelf.groupSnapshotHandles) || has(self.groupSnapshotHandles)'
                - message: exactly one of volumeHandles and groupSnapshotHandles must
                    be set
                  rule: (has(self.volumeHandles) && !has(self.groupSnapshotHandles))
                    || (!has(self.volumeHandles) && has(self.groupSnapshotHandles))
              volumeGroupSnapshotClassName:
                description: |-
                  VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass from
                  which this group snapshot was (or will be) created.
                  Note that after provisioning, the VolumeGroupSnapshotClass may be deleted or
                  recreated with a different set of values, and as such, should not be referenced
                  post-snapshot creation.
                  For dynamic provisioning, this field must be set.
                  This field may be unset for pre-provisioned snapshots.
                type: string
              volumeGroupSnapshotRef:
                description: |-
                  VolumeGroupSnapshotRef specifies the VolumeGroupSnapshot object to which this
                  VolumeGroupSnapshotContent object is bound.
                  VolumeGroupSnapshot.Spec.VolumeGroupSnapshotContentName field must reference to
                  this VolumeGroupSnapshotContent's name for the bidirectional binding to be valid.
                  For a pre-existing VolumeGroupSnapshotContent object, name and namespace of the
                  VolumeGroupSnapshot object MUST be provided for binding to happen.
                  This field is immutable after creation.
                  Required.
                properties:
                  apiVersion:
                    description: API version of the referent.
                    type: string
                  fieldPath:
                    description: |-
                      If referring to a piece of an object instead of an entire object, this string
                      should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
                      For example, if the object reference is to a container within a pod, this would take on a value like:
                      "spec.containers{name}" (where "name" refers to the name of the container that triggered
                      the event) or if no container name is specified "spec.containers[2]" (container with
                      index 2 in this pod). This syntax is chosen only to have some well-defined way of
                      referencing a part of an object.
                      TODO: this design is not final and this field is subject to change in the future.
                    type: string
                  kind:
                    description: |-
                      Kind of the referent.
                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
                    type: string
                  name:
                    description: |-
                      Name of the referent.
                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                    type: string
                  namespace:
                    description: |-
                      Namespace of the referent.
                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
                    type: string
                  resourceVersion:
                    description: |-
                      Specific resourceVersion to which this reference is made, if any.
                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
                    type: string
                  uid:
                    description: |-
                      UID of the referent.
                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
                    type: string
                type: object
                x-kubernetes-map-type: atomic
                x-kubernetes-validations:
                - message: both volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace
                    must be set
                  rule: has(self.name) && has(self.__namespace__)
                - message: volumeGroupSnapshotRef is immutable
                  rule: self == oldSelf
            required:
            - deletionPolicy
            - driver
            - source
            - volumeGroupSnapshotRef
            type: object
          status:
            description: status represents the current information of a group snapshot.
            properties:
              creationTime:
                description: |-
                  CreationTime is the timestamp when the point-in-time group snapshot is taken
                  by the underlying storage system.
                  If not specified, it indicates the creation time is unknown.
                  The format of this field is a Unix nanoseconds time encoded as an int64.
                  On Unix, the command date +%s%N returns the current time in nanoseconds
                  since 1970-01-01 00:00:00 UTC.
                format: int64
                type: integer
              error:
                description: |-
                  Error is the last observed error during group snapshot creation, if any.
                  Upon success after retry, this error field will be cleared.
                properties:
                  message:
                    description: |-
                      message is a string detailing the encountered error during snapshot
                      creation if specified.
                      NOTE: message may be logged, and it should not contain sensitive
                      information.
                    type: string
                  time:
                    description: time is the timestamp when the error was encountered.
                    format: date-time
                    type: string
                type: object
              pvVolumeSnapshotContentList:
                description: |-
                  PVVolumeSnapshotContentList is the list of pairs of PV and
                  VolumeSnapshotContent for this group snapshot
                  The maximum number of allowed snapshots in the group is 100.
                items:
                  description: |-
                    PVVolumeSnapshotContentPair represents a pair of PV names and
                    VolumeSnapshotContent names
                  properties:
                    persistentVolumeRef:
                      description: PersistentVolumeRef is a reference to the persistent
                        volume resource
                      properties:
                        name:
                          description: |-
                            Name of the referent.
                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                            TODO: Add other useful fields. apiVersion, kind, uid?
                          type: string
                      type: object
                      x-kubernetes-map-type: atomic
                    volumeSnapshotContentRef:
                      description: VolumeSnapshotContentRef is a reference to the
                        volume snapshot content resource
                      properties:
                        name:
                          description: |-
                            Name of the referent.
                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                            TODO: Add other useful fields. apiVersion, kind, uid?
                          type: string
                      type: object
                      x-kubernetes-map-type: atomic
                  type: object
                type: array
              readyToUse:
                description: |-
                  ReadyToUse indicates if all the individual snapshots in the group are ready to be
                  used to restore a group of volumes.
                  ReadyToUse becomes true when ReadyToUse of all individual snapshots become true.
                type: boolean
              volumeGroupSnapshotHandle:
                description: |-
                  VolumeGroupSnapshotHandle is a unique id returned by the CSI driver
                  to identify the VolumeGroupSnapshot on the storage system.
                  If a storage system does not provide such an id, the
                  CSI driver can choose to return the VolumeGroupSnapshot name.
                type: string
            type: object
        required:
        - spec
        type: object
    served: true
    storage: true
    subresources:
      status: {}
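To make the source stanza concrete, a hypothetical pre-provisioned VolumeGroupSnapshotContent that binds an existing on-disk group snapshot to a VolumeGroupSnapshot (all handles and names here are invented):

apiVersion: groupsnapshot.storage.k8s.io/v1alpha1
kind: VolumeGroupSnapshotContent
metadata:
  name: pre-provisioned-group-content
spec:
  deletionPolicy: Retain
  driver: hostpath.csi.k8s.io
  source:
    groupSnapshotHandles:
      volumeGroupSnapshotHandle: groupsnapshot-f1e2d3
      volumeSnapshotHandles:
      - snapshot-a1b2c3
      - snapshot-d4e5f6
  volumeGroupSnapshotRef:
    name: restored-group-snapshot
    namespace: default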
@@ -0,0 +1,273 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068"
    controller-gen.kubebuilder.io/version: v0.15.0
  name: volumegroupsnapshots.groupsnapshot.storage.k8s.io
spec:
  group: groupsnapshot.storage.k8s.io
  names:
    kind: VolumeGroupSnapshot
    listKind: VolumeGroupSnapshotList
    plural: volumegroupsnapshots
    shortNames:
    - vgs
    singular: volumegroupsnapshot
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - description: Indicates if all the individual snapshots in the group are ready
        to be used to restore a group of volumes.
      jsonPath: .status.readyToUse
      name: ReadyToUse
      type: boolean
    - description: The name of the VolumeGroupSnapshotClass requested by the VolumeGroupSnapshot.
      jsonPath: .spec.volumeGroupSnapshotClassName
      name: VolumeGroupSnapshotClass
      type: string
    - description: Name of the VolumeGroupSnapshotContent object to which the VolumeGroupSnapshot
        object intends to bind to. Please note that verification of binding actually
        requires checking both VolumeGroupSnapshot and VolumeGroupSnapshotContent
        to ensure both are pointing at each other. Binding MUST be verified prior
        to usage of this object.
      jsonPath: .status.boundVolumeGroupSnapshotContentName
      name: VolumeGroupSnapshotContent
      type: string
    - description: Timestamp when the point-in-time group snapshot was taken by the
        underlying storage system.
      jsonPath: .status.creationTime
      name: CreationTime
      type: date
    - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
    name: v1alpha1
    schema:
      openAPIV3Schema:
        description: |-
          VolumeGroupSnapshot is a user's request for creating either a point-in-time
          group snapshot or binding to a pre-existing group snapshot.
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: |-
              Spec defines the desired characteristics of a group snapshot requested by a user.
              Required.
            properties:
              source:
                description: |-
                  Source specifies where a group snapshot will be created from.
                  This field is immutable after creation.
                  Required.
                properties:
                  selector:
                    description: |-
                      Selector is a label query over persistent volume claims that are to be
                      grouped together for snapshotting.
                      This labelSelector will be used to match the label added to a PVC.
                      If the label is added to or removed from a volume after a group snapshot
                      is created, the existing group snapshots won't be modified.
                      Once a VolumeGroupSnapshotContent is created and the sidecar starts to process
                      it, the volume list will not change with retries.
                    properties:
                      matchExpressions:
                        description: matchExpressions is a list of label selector
                          requirements. The requirements are ANDed.
                        items:
                          description: |-
                            A label selector requirement is a selector that contains values, a key, and an operator that
                            relates the key and values.
                          properties:
                            key:
                              description: key is the label key that the selector
                                applies to.
                              type: string
                            operator:
                              description: |-
                                operator represents a key's relationship to a set of values.
                                Valid operators are In, NotIn, Exists and DoesNotExist.
                              type: string
                            values:
                              description: |-
                                values is an array of string values. If the operator is In or NotIn,
                                the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                the values array must be empty. This array is replaced during a strategic
                                merge patch.
                              items:
                                type: string
                              type: array
                              x-kubernetes-list-type: atomic
                          required:
                          - key
                          - operator
                          type: object
                        type: array
                        x-kubernetes-list-type: atomic
                      matchLabels:
                        additionalProperties:
                          type: string
                        description: |-
                          matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
                          map is equivalent to an element of matchExpressions, whose key field is "key", the
                          operator is "In", and the values array contains only "value". The requirements are ANDed.
                        type: object
                    type: object
                    x-kubernetes-map-type: atomic
                    x-kubernetes-validations:
                    - message: selector is immutable
                      rule: self == oldSelf
                  volumeGroupSnapshotContentName:
                    description: |-
                      VolumeGroupSnapshotContentName specifies the name of a pre-existing VolumeGroupSnapshotContent
                      object representing an existing volume group snapshot.
                      This field should be set if the volume group snapshot already exists and
                      only needs a representation in Kubernetes.
                      This field is immutable.
                    type: string
                    x-kubernetes-validations:
                    - message: volumeGroupSnapshotContentName is immutable
                      rule: self == oldSelf
                type: object
                x-kubernetes-validations:
                - message: selector is required once set
                  rule: '!has(oldSelf.selector) || has(self.selector)'
                - message: volumeGroupSnapshotContentName is required once set
                  rule: '!has(oldSelf.volumeGroupSnapshotContentName) || has(self.volumeGroupSnapshotContentName)'
                - message: exactly one of selector and volumeGroupSnapshotContentName
                    must be set
                  rule: (has(self.selector) && !has(self.volumeGroupSnapshotContentName))
                    || (!has(self.selector) && has(self.volumeGroupSnapshotContentName))
              volumeGroupSnapshotClassName:
                description: |-
                  VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass
                  requested by the VolumeGroupSnapshot.
                  VolumeGroupSnapshotClassName may be left nil to indicate that the default
                  class will be used.
                  Empty string is not allowed for this field.
                type: string
                x-kubernetes-validations:
                - message: volumeGroupSnapshotClassName must not be the empty string
                    when set
                  rule: size(self) > 0
            required:
            - source
            type: object
          status:
            description: |-
              Status represents the current information of a group snapshot.
              Consumers must verify binding between VolumeGroupSnapshot and
              VolumeGroupSnapshotContent objects is successful (by validating that both
              VolumeGroupSnapshot and VolumeGroupSnapshotContent point to each other) before
              using this object.
            properties:
              boundVolumeGroupSnapshotContentName:
                description: |-
                  BoundVolumeGroupSnapshotContentName is the name of the VolumeGroupSnapshotContent
                  object to which this VolumeGroupSnapshot object intends to bind to.
                  If not specified, it indicates that the VolumeGroupSnapshot object has not
                  been successfully bound to a VolumeGroupSnapshotContent object yet.
                  NOTE: To avoid possible security issues, consumers must verify binding between
                  VolumeGroupSnapshot and VolumeGroupSnapshotContent objects is successful
                  (by validating that both VolumeGroupSnapshot and VolumeGroupSnapshotContent
                  point at each other) before using this object.
                type: string
              creationTime:
                description: |-
                  CreationTime is the timestamp when the point-in-time group snapshot is taken
                  by the underlying storage system.
                  If not specified, it may indicate that the creation time of the group snapshot
                  is unknown.
                  This field is encoded as an RFC 3339 date-time string.
                format: date-time
                type: string
              error:
                description: |-
                  Error is the last observed error during group snapshot creation, if any.
                  This field could be helpful to upper level controllers (e.g., an application
                  controller) to decide whether they should continue on waiting for the group
                  snapshot to be created based on the type of error reported.
                  The snapshot controller will keep retrying when an error occurs during the
                  group snapshot creation. Upon success, this error field will be cleared.
                properties:
                  message:
                    description: |-
                      message is a string detailing the encountered error during snapshot
                      creation if specified.
                      NOTE: message may be logged, and it should not contain sensitive
                      information.
                    type: string
                  time:
                    description: time is the timestamp when the error was encountered.
                    format: date-time
                    type: string
                type: object
              pvcVolumeSnapshotRefList:
                description: |-
                  PVCVolumeSnapshotRefList is the list of PVC and VolumeSnapshot pairs that
                  are part of this group snapshot.
                  The maximum number of allowed snapshots in the group is 100.
                items:
                  description: PVCVolumeSnapshotPair defines a pair of a PVC reference
                    and a Volume Snapshot Reference
                  properties:
                    persistentVolumeClaimRef:
                      description: PersistentVolumeClaimRef is a reference to the
                        PVC this pair is referring to
                      properties:
                        name:
                          description: |-
                            Name of the referent.
                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                            TODO: Add other useful fields. apiVersion, kind, uid?
                          type: string
                      type: object
                      x-kubernetes-map-type: atomic
                    volumeSnapshotRef:
                      description: VolumeSnapshotRef is a reference to the VolumeSnapshot
                        this pair is referring to
                      properties:
                        name:
                          description: |-
                            Name of the referent.
                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                            TODO: Add other useful fields. apiVersion, kind, uid?
                          type: string
                      type: object
                      x-kubernetes-map-type: atomic
                  type: object
                type: array
              readyToUse:
                description: |-
                  ReadyToUse indicates if all the individual snapshots in the group are ready
                  to be used to restore a group of volumes.
                  ReadyToUse becomes true when ReadyToUse of all individual snapshots become true.
                  If not specified, it means the readiness of a group snapshot is unknown.
                type: boolean
            type: object
        required:
        - spec
        type: object
    served: true
    storage: true
    subresources:
      status: {}
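This is the resource the test helper getVolumeGroupSnapshot instantiates via the dynamic client; rendered as a manifest, a dynamically provisioned request looks roughly like this (the class name is hypothetical):

apiVersion: groupsnapshot.storage.k8s.io/v1alpha1
kind: VolumeGroupSnapshot
metadata:
  generateName: group-snapshot-
  namespace: default
spec:
  volumeGroupSnapshotClassName: e2e-group-snapshot-class
  source:
    selector:
      matchLabels:
        group: test-group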
@ -0,0 +1,397 @@
# All of the individual sidecar RBAC roles get bound
# to this account.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: csi-hostpathplugin-sa
  namespace: default
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: serviceaccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: attacher-cluster-role
  name: csi-hostpathplugin-attacher-cluster-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-attacher-runner
subjects:
- kind: ServiceAccount
  name: csi-hostpathplugin-sa
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: health-monitor-controller-cluster-role
  name: csi-hostpathplugin-health-monitor-controller-cluster-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-health-monitor-controller-runner
subjects:
- kind: ServiceAccount
  name: csi-hostpathplugin-sa
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: provisioner-cluster-role
  name: csi-hostpathplugin-provisioner-cluster-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-provisioner-runner
subjects:
- kind: ServiceAccount
  name: csi-hostpathplugin-sa
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: resizer-cluster-role
  name: csi-hostpathplugin-resizer-cluster-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-resizer-runner
subjects:
- kind: ServiceAccount
  name: csi-hostpathplugin-sa
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: snapshotter-cluster-role
  name: csi-hostpathplugin-snapshotter-cluster-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-snapshotter-runner
subjects:
- kind: ServiceAccount
  name: csi-hostpathplugin-sa
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: attacher-role
  name: csi-hostpathplugin-attacher-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: external-attacher-cfg
subjects:
- kind: ServiceAccount
  name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: health-monitor-controller-role
  name: csi-hostpathplugin-health-monitor-controller-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: external-health-monitor-controller-cfg
subjects:
- kind: ServiceAccount
  name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: provisioner-role
  name: csi-hostpathplugin-provisioner-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: external-provisioner-cfg
subjects:
- kind: ServiceAccount
  name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: resizer-role
  name: csi-hostpathplugin-resizer-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: external-resizer-cfg
subjects:
- kind: ServiceAccount
  name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: snapshotter-role
  name: csi-hostpathplugin-snapshotter-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: external-snapshotter-leaderelection
subjects:
- kind: ServiceAccount
  name: csi-hostpathplugin-sa
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpathplugin
  namespace: default
  labels:
    app.kubernetes.io/instance: hostpath.csi.k8s.io
    app.kubernetes.io/part-of: csi-driver-host-path
    app.kubernetes.io/name: csi-hostpathplugin
    app.kubernetes.io/component: plugin
spec:
  serviceName: "csi-hostpathplugin"
  # One replica only:
  # Host path driver only works when everything runs
  # on a single node.
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: hostpath.csi.k8s.io
      app.kubernetes.io/part-of: csi-driver-host-path
      app.kubernetes.io/name: csi-hostpathplugin
      app.kubernetes.io/component: plugin
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: hostpath.csi.k8s.io
        app.kubernetes.io/part-of: csi-driver-host-path
        app.kubernetes.io/name: csi-hostpathplugin
        app.kubernetes.io/component: plugin
    spec:
      serviceAccountName: csi-hostpathplugin-sa
      containers:
        - name: hostpath
          image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
          args:
            - "--drivername=hostpath.csi.k8s.io"
            - "--v=5"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--nodeid=$(KUBE_NODE_NAME)"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          securityContext:
            privileged: true
          ports:
            - containerPort: 9898
              name: healthz
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 10
            timeoutSeconds: 3
            periodSeconds: 2
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /var/lib/kubelet/pods
              mountPropagation: Bidirectional
              name: mountpoint-dir
            - mountPath: /var/lib/kubelet/plugins
              mountPropagation: Bidirectional
              name: plugins-dir
            - mountPath: /csi-data-dir
              name: csi-data-dir
            - mountPath: /dev
              name: dev-dir

        - name: csi-external-health-monitor-controller
          image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.12.1
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi

        - name: node-driver-registrar
          image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
            - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /registration
              name: registration-dir
            - mountPath: /csi-data-dir
              name: csi-data-dir

        - name: liveness-probe
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
          image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0
          args:
            - --csi-address=/csi/csi.sock
            - --health-port=9898

        - name: csi-attacher
          image: registry.k8s.io/sig-storage/csi-attacher:v4.6.1
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir

        - name: csi-provisioner
          image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
          args:
            - -v=5
            - --csi-address=/csi/csi.sock
            - --feature-gates=Topology=true
            # end csi-provisioner args
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir

        - name: csi-resizer
          image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
          args:
            - -v=5
            - -csi-address=/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir

        - name: csi-snapshotter
          image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
          args:
            - -v=5
            - --csi-address=/csi/csi.sock
            - --enable-volume-group-snapshots=true
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir

      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
        - hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
          name: mountpoint-dir
        - hostPath:
            path: /var/lib/kubelet/plugins_registry
            type: Directory
          name: registration-dir
        - hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
          name: plugins-dir
        - hostPath:
            # 'path' is where PV data is persisted on host.
            # using /tmp is also possible, but then the PVs will not be available after plugin container recreation or a host reboot
            path: /var/lib/csi-hostpath-data/
            type: DirectoryOrCreate
          name: csi-data-dir
        - hostPath:
            path: /dev
            type: Directory
          name: dev-dir
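# Illustrative only, not part of this manifest: a VolumeGroupSnapshotClass of
# the shape the group snapshot tests rely on for this driver. The object name
# is hypothetical; the apiVersion reflects the v1alpha1 group snapshot API
# shipped with external-snapshotter v8.
#
#   apiVersion: groupsnapshot.storage.k8s.io/v1alpha1
#   kind: VolumeGroupSnapshotClass
#   metadata:
#     name: csi-hostpath-groupsnapclass
#   driver: hostpath.csi.k8s.io
#   deletionPolicy: Delete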
@ -0,0 +1,329 @@
#!/bin/sh
# Copyright 2024 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# hack script for running a kind e2e
# must be run with a kubernetes checkout in $PWD (IE from the checkout)
# Usage: SKIP="ginkgo skip regex" FOCUS="ginkgo focus regex" kind-e2e.sh

set -o errexit -o nounset -o xtrace

# Settings:
# SKIP: ginkgo skip regex
# FOCUS: ginkgo focus regex
# LABEL_FILTER: ginkgo label query for selecting tests (see "Spec Labels" in https://onsi.github.io/ginkgo/#filtering-specs)
#
# The default is to focus on conformance tests. Serial tests get skipped when
# parallel testing is enabled. Using LABEL_FILTER instead of combining SKIP and
# FOCUS is recommended (more expressive, easier to read than regexp).
#
# GA_ONLY: true  - limit to GA APIs/features as much as possible
#          false - (default) APIs and features left at defaults
# FEATURE_GATES:
#          JSON or YAML encoding of a string/bool map: {"FeatureGateA": true, "FeatureGateB": false}
#          Enables or disables feature gates in the entire cluster.
#          Cannot be used when GA_ONLY=true.
# RUNTIME_CONFIG:
#          JSON or YAML encoding of a string/string (!) map: {"apia.example.com/v1alpha1": "true", "apib.example.com/v1beta1": "false"}
#          Enables API groups in the apiserver via --runtime-config.
#          Cannot be used when GA_ONLY=true.

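# Example invocation (illustrative; the label-filter value below is an
# assumption, not taken from this change):
#
#   LABEL_FILTER='Feature: containsAny volumegroupsnapshot' \
#   PARALLEL=true \
#   kind-e2e.sh
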
# cleanup logic for cleanup on exit
CLEANED_UP=false
cleanup() {
  if [ "$CLEANED_UP" = "true" ]; then
    return
  fi
  # KIND_CREATE_ATTEMPTED is true once we: kind create
  if [ "${KIND_CREATE_ATTEMPTED:-}" = true ]; then
    kind "export" logs "${ARTIFACTS}" || true
    kind delete cluster || true
  fi
  rm -f _output/bin/e2e.test || true
  # remove our tempdir, this needs to be last, or it will prevent kind delete
  if [ -n "${TMP_DIR:-}" ]; then
    rm -rf "${TMP_DIR:?}"
  fi
  CLEANED_UP=true
}

# setup signal handlers
# shellcheck disable=SC2317 # this is not unreachable code
signal_handler() {
  if [ -n "${GINKGO_PID:-}" ]; then
    kill -TERM "$GINKGO_PID" || true
  fi
  cleanup
}
trap signal_handler INT TERM

# build kubernetes / node image, e2e binaries
build() {
  # build the node image w/ kubernetes
  kind build node-image -v 1
  # Ginkgo v1 is used by Kubernetes 1.24 and earlier, fallback if v2 is not available.
  GINKGO_SRC_DIR="vendor/github.com/onsi/ginkgo/v2/ginkgo"
  if [ ! -d "$GINKGO_SRC_DIR" ]; then
    GINKGO_SRC_DIR="vendor/github.com/onsi/ginkgo/ginkgo"
  fi
  # make sure we have e2e requirements
  make all WHAT="cmd/kubectl test/e2e/e2e.test ${GINKGO_SRC_DIR}"

  # Ensure the built kubectl is used instead of system
  export PATH="${PWD}/_output/bin:$PATH"
}

check_structured_log_support() {
  case "${KUBE_VERSION}" in
    v1.1[0-8].*)
      echo "$1 is only supported on versions >= v1.19, got ${KUBE_VERSION}"
      exit 1
      ;;
  esac
}

# up a cluster with kind
create_cluster() {
  # Grab the version of the cluster we're about to start
  KUBE_VERSION="$(docker run --rm --entrypoint=cat "kindest/node:latest" /kind/version)"

  # Default Log level for all components in test clusters
  KIND_CLUSTER_LOG_LEVEL=${KIND_CLUSTER_LOG_LEVEL:-4}

  # potentially enable --logging-format
  CLUSTER_LOG_FORMAT=${CLUSTER_LOG_FORMAT:-}
  scheduler_extra_args="      \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
  controllerManager_extra_args="      \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
  apiServer_extra_args="      \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
  if [ -n "$CLUSTER_LOG_FORMAT" ]; then
    check_structured_log_support "CLUSTER_LOG_FORMAT"
    scheduler_extra_args="${scheduler_extra_args}
      \"logging-format\": \"${CLUSTER_LOG_FORMAT}\""
    controllerManager_extra_args="${controllerManager_extra_args}
      \"logging-format\": \"${CLUSTER_LOG_FORMAT}\""
    apiServer_extra_args="${apiServer_extra_args}
      \"logging-format\": \"${CLUSTER_LOG_FORMAT}\""
  fi
  kubelet_extra_args="      \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
  KUBELET_LOG_FORMAT=${KUBELET_LOG_FORMAT:-$CLUSTER_LOG_FORMAT}
  if [ -n "$KUBELET_LOG_FORMAT" ]; then
    check_structured_log_support "KUBELET_LOG_FORMAT"
    kubelet_extra_args="${kubelet_extra_args}
      \"logging-format\": \"${KUBELET_LOG_FORMAT}\""
  fi

  # JSON or YAML map injected into featureGates config
  feature_gates="${FEATURE_GATES:-{\}}"
  # --runtime-config argument value passed to the API server, again as a map
  runtime_config="${RUNTIME_CONFIG:-{\}}"

  case "${GA_ONLY:-false}" in
  false)
    :
    ;;
  true)
    if [ "${feature_gates}" != "{}" ]; then
      echo "GA_ONLY=true and FEATURE_GATES=${feature_gates} are mutually exclusive."
      exit 1
    fi
    if [ "${runtime_config}" != "{}" ]; then
      echo "GA_ONLY=true and RUNTIME_CONFIG=${runtime_config} are mutually exclusive."
      exit 1
    fi

    echo "Limiting to GA APIs and features for ${KUBE_VERSION}"
    feature_gates='{"AllAlpha":false,"AllBeta":false}'
    runtime_config='{"api/alpha":"false", "api/beta":"false"}'
    ;;
  *)
    echo "\$GA_ONLY set to '${GA_ONLY}'; supported values are true and false (default)"
    exit 1
    ;;
  esac

  # create the config file
  cat <<EOF > "${ARTIFACTS}/kind-config.yaml"
# config for 1 control plane node and 2 workers (necessary for conformance)
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
  ipFamily: ${IP_FAMILY:-ipv4}
  kubeProxyMode: ${KUBE_PROXY_MODE:-iptables}
  # don't pass through host search paths
  # TODO: possibly a reasonable default in the future for kind ...
  dnsSearch: []
nodes:
- role: control-plane
- role: worker
- role: worker
featureGates: ${feature_gates}
runtimeConfig: ${runtime_config}
kubeadmConfigPatches:
- |
  kind: ClusterConfiguration
  metadata:
    name: config
  apiServer:
    extraArgs:
${apiServer_extra_args}
  controllerManager:
    extraArgs:
${controllerManager_extra_args}
  scheduler:
    extraArgs:
${scheduler_extra_args}
  ---
  kind: InitConfiguration
  nodeRegistration:
    kubeletExtraArgs:
${kubelet_extra_args}
  ---
  kind: JoinConfiguration
  nodeRegistration:
    kubeletExtraArgs:
${kubelet_extra_args}
EOF
  # NOTE: must match the number of workers above
  NUM_NODES=2
  # actually create the cluster
  # TODO(BenTheElder): settle on verbosity for this script
  KIND_CREATE_ATTEMPTED=true
  kind create cluster \
    --image=kindest/node:latest \
    --retain \
    --wait=1m \
    -v=3 \
    "--config=${ARTIFACTS}/kind-config.yaml"

  # debug cluster version
  kubectl version

  # Patch kube-proxy to set the verbosity level
  kubectl patch -n kube-system daemonset/kube-proxy \
    --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--v='"${KIND_CLUSTER_LOG_LEVEL}"'" }]'
}

# run e2es with ginkgo-e2e.sh
run_tests() {
  # IPv6 clusters need some CoreDNS changes in order to work in k8s CI:
  # 1. k8s CI doesn't offer IPv6 connectivity, so CoreDNS should be configured
  #    to work in an offline environment:
  #    https://github.com/coredns/coredns/issues/2494#issuecomment-457215452
  # 2. k8s CI adds the following domains to the resolv.conf search field:
  #    c.k8s-prow-builds.internal google.internal.
  #    CoreDNS should handle those domains and answer with NXDOMAIN instead of
  #    SERVFAIL, otherwise pods stop trying to resolve the domain.
  if [ "${IP_FAMILY:-ipv4}" = "ipv6" ]; then
    # Get the current config
    original_coredns=$(kubectl get -oyaml -n=kube-system configmap/coredns)
    echo "Original CoreDNS config:"
    echo "${original_coredns}"
    # Patch it
    fixed_coredns=$(
      printf '%s' "${original_coredns}" | sed \
        -e 's/^.*kubernetes cluster\.local/& internal/' \
        -e '/^.*upstream$/d' \
        -e '/^.*fallthrough.*$/d' \
        -e '/^.*forward . \/etc\/resolv.conf$/d' \
        -e '/^.*loop$/d' \
    )
    echo "Patched CoreDNS config:"
    echo "${fixed_coredns}"
    printf '%s' "${fixed_coredns}" | kubectl apply -f -
  fi

  # ginkgo regexes and label filter
  SKIP="${SKIP:-}"
  FOCUS="${FOCUS:-}"
  LABEL_FILTER="${LABEL_FILTER:-}"
  if [ -z "${FOCUS}" ] && [ -z "${LABEL_FILTER}" ]; then
    FOCUS="\\[Conformance\\]"
  fi
  # if PARALLEL=true, skip serial tests and set --ginkgo-parallel
  if [ "${PARALLEL:-false}" = "true" ]; then
    export GINKGO_PARALLEL=y
    if [ -z "${SKIP}" ]; then
      SKIP="\\[Serial\\]"
    else
      SKIP="\\[Serial\\]|${SKIP}"
    fi
  fi

  # setting this env prevents ginkgo e2e from trying to run provider setup
  export KUBERNETES_CONFORMANCE_TEST='y'
  # setting these is required to make RuntimeClass tests work ... :/
  export KUBE_CONTAINER_RUNTIME=remote
  export KUBE_CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock
  export KUBE_CONTAINER_RUNTIME_NAME=containerd
  # ginkgo can take forever to exit, so we run it in the background and save the
  # PID, bash will not run traps while waiting on a process, but it will while
  # running a builtin like `wait`, saving the PID also allows us to forward the
  # interrupt

  kubectl apply -f ./cluster/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml || exit 1
  kubectl apply -f ./cluster/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml || exit 1
  kubectl apply -f ./cluster/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshots.yaml || exit 1
  kubectl apply -f test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml || exit 1
  kubectl apply -f test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml || exit 1
  kubectl apply -f test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml || exit 1
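  # Illustrative sanity check (not in the original script): confirm the group
  # snapshot CRDs are registered before the tests run, e.g.:
  #   kubectl get crd volumegroupsnapshots.groupsnapshot.storage.k8s.io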


  kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v8.0.0/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml || exit 1
  curl -s https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/release-8.1/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | \
    awk '/--leader-election=true/ {print; print "            - \"--enable-volume-group-snapshots=true\""; next}1' | \
    kubectl apply -f - || exit 1
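  # After the awk edit above, the snapshot-controller container args in the
  # applied manifest look roughly like this (illustrative; the surrounding
  # flags are upstream defaults, not taken from this change):
  #   args:
  #     - "--v=5"
  #     - "--leader-election=true"
  #     - "--enable-volume-group-snapshots=true"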

  ./hack/ginkgo-e2e.sh \
    '--provider=skeleton' "--num-nodes=${NUM_NODES}" \
    "--ginkgo.focus=${FOCUS}" "--ginkgo.skip=${SKIP}" "--ginkgo.label-filter=${LABEL_FILTER}" \
    "--report-dir=${ARTIFACTS}" '--disable-log-dump=true' &
  GINKGO_PID=$!
  wait "$GINKGO_PID"
}

main() {
  # create temp dir and setup cleanup
  TMP_DIR=$(mktemp -d)

  # ensure artifacts (results) directory exists when not in CI
  export ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}"
  mkdir -p "${ARTIFACTS}"

  # export the KUBECONFIG to a unique path for testing
  KUBECONFIG="${HOME}/.kube/kind-test-config"
  export KUBECONFIG
  echo "exported KUBECONFIG=${KUBECONFIG}"

  # debug kind version
  kind version
  cp test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml || exit 1
  # build kubernetes
  build
  # in CI attempt to release some memory after building
  if [ -n "${KUBETEST_IN_DOCKER:-}" ]; then
    sync || true
    echo 1 > /proc/sys/vm/drop_caches || true
  fi

  # create the cluster and run tests
  res=0
  create_cluster || res=$?
  run_tests || res=$?
  cleanup || res=$?
  exit $res
}

main