csi-kata-directvolume: Support loop devices
Currently, the driver only supports passing raw image files to Kata, and hence only supports runtime-rs. To support the Go runtime and be able to test the feature there, we enable loop device support in the driver via a feature flag.

Fixes: #10418
Signed-off-by: Aurélien Bombo <abombo@microsoft.com>
src/tools/csi-kata-directvolume/.gitignore (vendored, 1 line changed)
@@ -1 +1,2 @@
 bin/
+deploy/kata-directvolume/kata-directvol-rbac.yaml
@@ -30,50 +30,9 @@ cd tools/csi-kata-directvolume/ && make
 ## Building the Container Image
 
-If you want to build the container image yourself, you can do so with the following command from a specified path.
-Here, we just use `buildah/podman` as an example:
+If you want to build the container image yourself, you can do so with the following command:
 
 ```shell
-$ tree -L 2 buildah-directv/
-buildah-directv/
-├── bin
-│   └── directvolplugin
-└── Dockerfile
-
-$ buildah bud -t kata-directvolume:v1.0.19
-STEP 1/7: FROM alpine
-STEP 2/7: LABEL maintainers="Kata Containers Authors"
-STEP 3/7: LABEL description="Kata DirectVolume Driver"
-STEP 4/7: ARG binary=./bin/directvolplugin
-STEP 5/7: RUN apk add util-linux coreutils e2fsprogs xfsprogs xfsprogs-extra btrfs-progs && apk update && apk upgrade
-fetch https://dl-cdn.alpinelinux.org/alpine/v3.19/main/x86_64/APKINDEX.tar.gz
-fetch https://dl-cdn.alpinelinux.org/alpine/v3.19/community/x86_64/APKINDEX.tar.gz
-(1/66) Installing libblkid (2.39.3-r0)
-...
-(66/66) Installing xfsprogs-extra (6.5.0-r0)
-Executing busybox-1.36.1-r15.trigger
-OK: 64 MiB in 81 packages
-fetch https://dl-cdn.alpinelinux.org/alpine/v3.19/main/x86_64/APKINDEX.tar.gz
-fetch https://dl-cdn.alpinelinux.org/alpine/v3.19/community/x86_64/APKINDEX.tar.gz
-v3.19.0-19-ga0ddaee500e [https://dl-cdn.alpinelinux.org/alpine/v3.19/main]
-v3.19.0-18-gec62a609516 [https://dl-cdn.alpinelinux.org/alpine/v3.19/community]
-OK: 22983 distinct packages available
-OK: 64 MiB in 81 packages
-STEP 6/7: COPY ${binary} /kata-directvol-plugin
-STEP 7/7: ENTRYPOINT ["/kata-directvol-plugin"]
-COMMIT kata-directvolume:v1.0.19
-Getting image source signatures
-Copying blob 5af4f8f59b76 skipped: already exists
-Copying blob a55645705de3 done
-Copying config 244001cc51 done
-Writing manifest to image destination
-Storing signatures
---> 244001cc51d
-Successfully tagged localhost/kata-directvolume:v1.0.19
-244001cc51d77302c4ed5e1a0ec347d12d85dec4576ea1313f700f66e2a7d36d
-
-$ podman save localhost/kata-directvolume:v1.0.19 -o kata-directvolume-v1.0.19.tar
-$ ctr -n k8s.io image import kata-directvolume-v1.0.19.tar
-unpacking localhost/kata-directvolume:v1.0.19 (sha256:1bdc33ff7f9cee92e74cbf77a9d79d00dce6dbb9ba19b9811f683e1a087f8fbf)...done
-$ crictl images |grep 1.0.19
-localhost/kata-directvolume   v1.0.19   244001cc51d77   83.8MB
+$ cd src/tools/csi-kata-directvolume
+$ docker build -t localhost/kata-directvolume:v1.0.18 .
 ```
src/tools/csi-kata-directvolume/deploy/deploy.sh (normal file → executable file, no content changes)
@@ -17,87 +17,58 @@ The easiest way to deploy the `Direct Volume CSI driver` is to run the `deploy.s
 the cluster as shown below for Kubernetes 1.28.2.
 
 ```shell
-sudo deploy/deploy.sh
-```
-
-You'll get an output similar to the following, indicating the application of `RBAC rules` and the successful deployment of `csi-provisioner`, `node-driver-registrar`, `kata directvolume csi driver`(`csi-kata-directvol-plugin`), liveness-probe. Please note that the following output is specific to Kubernetes 1.28.2.
-
-```shell
+$ ./deploy/deploy.sh
 Creating Namespace kata-directvolume ...
-kubectl apply -f /tmp/tmp.kN43BWUGQ5/kata-directvol-ns.yaml
+kubectl apply -f /tmp/tmp.lAAPNQ1aI2/kata-directvol-ns.yaml
 namespace/kata-directvolume created
 Namespace kata-directvolume created Done !
 Applying RBAC rules ...
-curl https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/v3.6.0/deploy/kubernetes/rbac.yaml --output /tmp/tmp.kN43BWUGQ5/rbac.yaml --silent --location
-kubectl apply -f ./kata-directvolume/kata-directvol-rbac.yaml
+curl https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/v3.6.0/deploy/kubernetes/rbac.yaml --output /tmp/tmp.lAAPNQ1aI2/rbac.yaml --silent --location
+kubectl apply -f ./deploy/kata-directvolume/kata-directvol-rbac.yaml
 serviceaccount/csi-provisioner created
 clusterrole.rbac.authorization.k8s.io/external-provisioner-runner created
 clusterrolebinding.rbac.authorization.k8s.io/csi-provisioner-role created
 role.rbac.authorization.k8s.io/external-provisioner-cfg created
 rolebinding.rbac.authorization.k8s.io/csi-provisioner-role-cfg created
-
-$ ./directvol-deploy.sh
+Applying RBAC rules Done!
 deploying kata directvolume components
-./kata-directvolume/csi-directvol-driverinfo.yaml
+./deploy/kata-directvolume/csi-directvol-driverinfo.yaml
 csidriver.storage.k8s.io/directvolume.csi.katacontainers.io created
-./kata-directvolume/csi-directvol-plugin.yaml
+./deploy/kata-directvolume/csi-directvol-plugin.yaml
 kata-directvolume plugin using image: registry.k8s.io/sig-storage/csi-provisioner:v3.6.0
 kata-directvolume plugin using image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.0
-kata-directvolume plugin using image: localhost/kata-directvolume:v1.0.52
+kata-directvolume plugin using image: localhost/kata-directvolume:v1.0.19
 kata-directvolume plugin using image: registry.k8s.io/sig-storage/livenessprobe:v2.8.0
 daemonset.apps/csi-kata-directvol-plugin created
-./kata-directvolume/kata-directvol-ns.yaml
-namespace/kata-directvolume unchanged
-./kata-directvolume/kata-directvol-rbac.yaml
-serviceaccount/csi-provisioner unchanged
-clusterrole.rbac.authorization.k8s.io/external-provisioner-runner configured
-clusterrolebinding.rbac.authorization.k8s.io/csi-provisioner-role unchanged
-role.rbac.authorization.k8s.io/external-provisioner-cfg unchanged
-rolebinding.rbac.authorization.k8s.io/csi-provisioner-role-cfg unchanged
-NAMESPACE           NAME                              READY   STATUS    RESTARTS       AGE
-default             pod/kata-driectvol-01                 1/1     Running   0              3h57m
-kata-directvolume   pod/csi-kata-directvol-plugin-92smp   4/4     Running   0              4s
-kube-flannel        pod/kube-flannel-ds-vq796             1/1     Running   1 (67d ago)    67d
-kube-system         pod/coredns-66f779496c-9bmp2          1/1     Running   3 (67d ago)    67d
-kube-system         pod/coredns-66f779496c-qlq6d          1/1     Running   1 (67d ago)    67d
-kube-system         pod/etcd-tnt001                       1/1     Running   19 (67d ago)   67d
-kube-system         pod/kube-apiserver-tnt001             1/1     Running   5 (67d ago)    67d
-kube-system         pod/kube-controller-manager-tnt001    1/1     Running   8 (67d ago)    67d
-kube-system         pod/kube-proxy-p9t6t                  1/1     Running   6 (67d ago)    67d
-kube-system         pod/kube-scheduler-tnt001             1/1     Running   8 (67d ago)    67d
+NAMESPACE           NAME                              READY   STATUS    RESTARTS   AGE
+kata-directvolume   pod/csi-kata-directvol-plugin-9vvhc   4/4     Running   0          3s
+[...TRUNCATED...]
 
 NAMESPACE           NAME                                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
-kata-directvolume   daemonset.apps/csi-kata-directvol-plugin    1         1         1       1            1           <none>                   4s
-kube-flannel        daemonset.apps/kube-flannel-ds              1         1         1       1            1           <none>                   67d
-kube-system         daemonset.apps/kube-proxy                   1         1         1       1            1           kubernetes.io/os=linux   67d
+kata-directvolume   daemonset.apps/csi-kata-directvol-plugin    1         1         1       1            1           <none>                   3s
+[...TRUNCATED...]
 ```
 
 ## How to Run a Kata Pod and Validate it
 
-First, ensure all expected pods are running properly, including `csi-provisioner`, `node-driver-registrar`, `kata-directvolume` `csi driver(csi-kata-directvol-plugin)`, liveness-probe:
+First, ensure all expected containers are running properly:
 
 ```shell
-$ kubectl get po -A
-NAMESPACE      NAME                              READY   STATUS    RESTARTS       AGE
-default        csi-kata-directvol-plugin-dlphw   4/4     Running   0              68m
-kube-flannel   kube-flannel-ds-vq796             1/1     Running   1 (52d ago)    52d
-kube-system    coredns-66f779496c-9bmp2          1/1     Running   3 (52d ago)    52d
-kube-system    coredns-66f779496c-qlq6d          1/1     Running   1 (52d ago)    52d
-kube-system    etcd-node001                      1/1     Running   19 (52d ago)   52d
-kube-system    kube-apiserver-node001            1/1     Running   5 (52d ago)    52d
-kube-system    kube-controller-manager-node001   1/1     Running   8 (52d ago)    52d
-kube-system    kube-proxy-p9t6t                  1/1     Running   6 (52d ago)    52d
-kube-system    kube-scheduler-node001            1/1     Running   8 (52d ago)    52d
+$ kubectl get po -n kata-directvolume
+NAME                              READY   STATUS    RESTARTS   AGE
+csi-kata-directvol-plugin-9vvhc   4/4     Running   0          6m14s
 ```
 
-From the root directory, deploy the application pods including a storage class, a `PVC`, and a pod which uses direct block device based volume. The details can be seen in `/examples/pod-with-directvol/*.yaml`:
+Deploy the application pods including a storage class, a `PVC`, and a
+pod which uses direct block device based volume:
 
 ```shell
-kubectl apply -f ${BASE_DIR}/csi-storageclass.yaml
-kubectl apply -f ${BASE_DIR}/csi-pvc.yaml
-kubectl apply -f ${BASE_DIR}/csi-app.yaml
+$ cd src/tools/csi-kata-directvolume/examples/pod-with-directvol
+$ kubectl apply -f csi-storageclass.yaml
+$ kubectl apply -f csi-pvc.yaml
+$ kubectl apply -f csi-app.yaml
 ```
 
 Let's validate the components are deployed:
@@ -49,6 +49,7 @@ func (dv *directVolume) CreateVolume(ctx context.Context, req *csi.CreateVolumeR
 
     volumeCtx := make(map[string]string)
     volumeCtx[utils.IsDirectVolume] = "False"
+    volumeCtx[utils.KataContainersDirectLoop] = "False"
 
     for key, value := range req.GetParameters() {
        switch strings.ToLower(key) {
@@ -56,12 +57,18 @@ func (dv *directVolume) CreateVolume(ctx context.Context, req *csi.CreateVolumeR
            if value == utils.DirectVolumeTypeName {
                volumeCtx[utils.IsDirectVolume] = "True"
            }
+           volumeCtx[utils.KataContainersDirectVolumeType] = value
        case utils.KataContainersDirectFsType:
            volumeCtx[utils.KataContainersDirectFsType] = value
+       case utils.KataContainersDirectLoop:
+           volumeCtx[utils.KataContainersDirectLoop] = value
        default:
-           continue
+           klog.Warningf("unknown parameter: %s", key)
        }
     }
+    if isLoopDevice(volumeCtx) {
+       volumeCtx[utils.IsDirectVolume] = "True"
+    }
 
     contentSrc := req.GetVolumeContentSource()
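Taken together, this hunk means the loop feature is switched on per StorageClass: a `katacontainers.direct.volume/loop` parameter is copied into the volume context and, when truthy, implies a direct volume. Below is a minimal, self-contained sketch of that parameter handling; the constant and function names are illustrative stand-ins, not the driver's actual identifiers.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

const (
	isDirectVolumeKey = "is_directvolume"
	volumeTypeKey     = "katacontainers.direct.volume/volumetype"
	fsTypeKey         = "katacontainers.direct.volume/fstype"
	loopKey           = "katacontainers.direct.volume/loop"
	directVolTypeName = "directvol"
)

// buildVolumeCtx mirrors the patched CreateVolume loop: both flags default
// to "False", and a truthy loop parameter forces the direct-volume flag on.
func buildVolumeCtx(params map[string]string) map[string]string {
	volumeCtx := map[string]string{
		isDirectVolumeKey: "False",
		loopKey:           "False",
	}
	for key, value := range params {
		switch strings.ToLower(key) {
		case volumeTypeKey:
			if value == directVolTypeName {
				volumeCtx[isDirectVolumeKey] = "True"
			}
			volumeCtx[volumeTypeKey] = value
		case fsTypeKey:
			volumeCtx[fsTypeKey] = value
		case loopKey:
			volumeCtx[loopKey] = value
		default:
			// The patch logs unknown parameters instead of silently skipping them.
			fmt.Printf("unknown parameter: %s\n", key)
		}
	}
	if loop, _ := strconv.ParseBool(volumeCtx[loopKey]); loop {
		volumeCtx[isDirectVolumeKey] = "True"
	}
	return volumeCtx
}

func main() {
	fmt.Println(buildVolumeCtx(map[string]string{
		loopKey:   "true",
		fsTypeKey: "ext4",
	}))
}
```

Running this prints a context where the loop flag is stored verbatim and `is_directvolume` is forced to `"True"`, which is what the node-side operations below key off.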
@@ -10,8 +10,10 @@ package directvolume
 import (
     "fmt"
     "os"
+    "os/exec"
     "path/filepath"
     "strconv"
+    "strings"
 
     "kata-containers/csi-kata-directvolume/pkg/utils"
@@ -68,8 +70,12 @@ func (dv *directVolume) NodePublishVolume(ctx context.Context, req *csi.NodePubl
     attrib := req.GetVolumeContext()
 
     devicePath := dv.config.VolumeDevices[volumeID]
-    klog.Infof("target %v\nfstype %v\ndevice %v\nreadonly %v\nvolumeID %v\n",
-       targetPath, fsType, devicePath, readOnly, volumeID)
+    klog.Infoln("target", targetPath)
+    klog.Infoln("volType", volType)
+    klog.Infoln("fstype", fsType)
+    klog.Infoln("device", devicePath)
+    klog.Infoln("readonly", readOnly)
+    klog.Infoln("volumeID", volumeID)
 
     options := []string{"bind"}
     if readOnly {
@@ -93,13 +99,20 @@ func (dv *directVolume) NodePublishVolume(ctx context.Context, req *csi.NodePubl
        return nil, status.Error(codes.Aborted, errMsg)
     }
 
+    var guestOptions []string
+    if isLoopDevice(attrib) {
+       guestOptions = []string{}
+    } else {
+       guestOptions = options
+    }
+
     // kata-containers DirectVolume add
     mountInfo := utils.MountInfo{
        VolumeType: volType,
        Device:     devicePath,
        FsType:     fsType,
        Metadata:   attrib,
-       Options:    options,
+       Options:    guestOptions,
     }
     if err := utils.AddDirectVolume(targetPath, mountInfo); err != nil {
        klog.Errorf("add direct volume with source %s and mountInfo %v failed", targetPath, mountInfo)
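The `guestOptions` split exists because `bind` is a host-side option for the staging bind mount; a loop-backed volume reaches the guest as a real block device, so no bind option should be forwarded in the mount info. Below is a hedged sketch of what the resulting mount-info JSON might look like for a loop-backed volume; the struct and its JSON tags are my assumption of Kata's direct-volume metadata layout, not something shown in this diff.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// mountInfo approximates the payload handed to Kata via AddDirectVolume.
// Field names and tags are assumptions for illustration only.
type mountInfo struct {
	VolumeType string            `json:"volume-type"`
	Device     string            `json:"device"`
	FsType     string            `json:"fstype"`
	Options    []string          `json:"options"`
	Metadata   map[string]string `json:"metadata"`
}

func main() {
	// For loop devices, Options is deliberately empty: the guest mounts
	// /dev/loopN directly instead of bind-mounting a host path.
	mi := mountInfo{
		VolumeType: "directvol",
		Device:     "/dev/loop0", // hypothetical device
		FsType:     "ext4",
		Options:    []string{},
		Metadata:   map[string]string{},
	}
	out, _ := json.Marshal(mi)
	fmt.Println(string(out))
}
```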
@@ -196,8 +209,27 @@ func (dv *directVolume) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUn
     return &csi.NodeUnpublishVolumeResponse{}, nil
 }
 
+func parseBool(s string) bool {
+    if b, err := strconv.ParseBool(s); err != nil {
+       return false
+    } else {
+       return b
+    }
+}
+
 func isDirectVolume(VolumeCtx map[string]string) bool {
-    return VolumeCtx[utils.IsDirectVolume] == "True"
+    return parseBool(VolumeCtx[utils.IsDirectVolume])
+}
+
+func isLoopDevice(VolumeCtx map[string]string) bool {
+    return parseBool(VolumeCtx[utils.KataContainersDirectLoop])
+}
+
+// getDeviceSymlinkPath returns the path of the symlink that is used to
+// point to the loop device from inside the specified stagingTargetPath
+// directory.
+func getDeviceSymlinkPath(stagingTargetPath string) string {
+    return filepath.Join(stagingTargetPath, "device")
 }
 
 func (dv *directVolume) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
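One behavioral note: replacing the exact `== "True"` comparison with `parseBool` widens the accepted spellings to everything `strconv.ParseBool` recognizes, with parse errors mapped to `false`. A quick runnable check of that behavior:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// parseBool in the patch returns false on error, so "yes" and "" are falsy.
	for _, s := range []string{"True", "true", "1", "T", "False", "0", "yes", ""} {
		b, err := strconv.ParseBool(s)
		fmt.Printf("%-8q -> %-5v (err: %v)\n", s, b, err)
	}
}
```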
@@ -224,14 +256,14 @@ func (dv *directVolume) NodeStageVolume(ctx context.Context, req *csi.NodeStageV
     defer dv.mutex.Unlock()
 
     capacityInBytes := req.VolumeContext[utils.CapabilityInBytes]
-    devicePath, err := utils.CreateDirectBlockDevice(volumeID, capacityInBytes, dv.config.StoragePath)
+    imagePath, err := utils.CreateDirectBlockDevice(volumeID, capacityInBytes, dv.config.StoragePath)
     if err != nil {
        errMsg := status.Errorf(codes.Internal, "setup storage for volume '%s' failed", volumeID)
        return &csi.NodeStageVolumeResponse{}, errMsg
     }
 
     // /full_path_on_host/VolumeId/
-    deviceUpperPath := filepath.Dir(*devicePath)
+    imageUpperPath := filepath.Dir(*imagePath)
     if canMnt, err := utils.CanDoBindmount(dv.config.safeMounter, stagingTargetPath); err != nil {
        return nil, err
     } else if !canMnt {
@@ -240,8 +272,8 @@ func (dv *directVolume) NodeStageVolume(ctx context.Context, req *csi.NodeStageV
     }
 
     options := []string{"bind"}
-    if err := dv.config.safeMounter.DoBindmount(deviceUpperPath, stagingTargetPath, "", options); err != nil {
-       klog.Errorf("safe mounter: %v do bind mount %v failed, with error: %v", deviceUpperPath, stagingTargetPath, err.Error())
+    if err := dv.config.safeMounter.DoBindmount(imageUpperPath, stagingTargetPath, "", options); err != nil {
+       klog.Errorf("safe mounter: %v do bind mount %v failed, with error: %v", imageUpperPath, stagingTargetPath, err.Error())
        return nil, err
     }
@@ -251,11 +283,33 @@ func (dv *directVolume) NodeStageVolume(ctx context.Context, req *csi.NodeStageV
        fsType = utils.DefaultFsType
     }
 
-    if err := dv.config.safeMounter.SafeFormatWithFstype(*devicePath, fsType, options); err != nil {
+    if err := dv.config.safeMounter.SafeFormatWithFstype(*imagePath, fsType, options); err != nil {
        return nil, err
     }
 
-    dv.config.VolumeDevices[volumeID] = *devicePath
+    if isLoopDevice(req.VolumeContext) {
+       deviceLink := getDeviceSymlinkPath(stagingTargetPath)
+
+       losetupOut, err := exec.Command("losetup", "-f", "--show", *imagePath).Output()
+       if err != nil {
+          var stderr []byte
+          if exitErr, isExitError := err.(*exec.ExitError); isExitError {
+             stderr = exitErr.Stderr
+          }
+          errMsg := status.Errorf(codes.Internal, "failed to set up loop device from %s: %v: %s", *imagePath, err, stderr)
+          return &csi.NodeStageVolumeResponse{}, errMsg
+       }
+
+       devicePath := strings.TrimSuffix(string(losetupOut), "\n")
+
+       if err := os.Symlink(devicePath, deviceLink); err != nil {
+          return nil, status.Errorf(codes.Internal, "failed to create symlink at %s: %v", deviceLink, err)
+       }
+
+       dv.config.VolumeDevices[volumeID] = devicePath
+    } else {
+       dv.config.VolumeDevices[volumeID] = *imagePath
+    }
 
     klog.Infof("directvolume: volume %s has been staged.", stagingTargetPath)
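Staging a loop-backed volume thus boils down to two steps: attach the raw image with `losetup -f --show` (which prints the allocated device) and record that device via a `device` symlink in the staging directory so unstaging can find it later. A standalone sketch under the same assumptions as the patch (root privileges, util-linux `losetup` on `PATH`); the paths in `main` are hypothetical.

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// stageLoopDevice attaches imagePath to a free loop device and records the
// device path via a "device" symlink inside stagingTargetPath.
func stageLoopDevice(imagePath, stagingTargetPath string) (string, error) {
	out, err := exec.Command("losetup", "-f", "--show", imagePath).Output()
	if err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			return "", fmt.Errorf("losetup failed: %v: %s", err, exitErr.Stderr)
		}
		return "", err
	}
	devicePath := strings.TrimSuffix(string(out), "\n")

	// The symlink lets the unstage path rediscover the device later.
	deviceLink := filepath.Join(stagingTargetPath, "device")
	if err := os.Symlink(devicePath, deviceLink); err != nil {
		return "", err
	}
	return devicePath, nil
}

func main() {
	dev, err := stageLoopDevice("/tmp/volume.img", "/tmp/staging") // hypothetical paths
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("loop device:", dev)
}
```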
@@ -305,6 +359,24 @@ func (dv *directVolume) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnst
     dv.mutex.Lock()
     defer dv.mutex.Unlock()
 
+    deviceLink := getDeviceSymlinkPath(stagingTargetPath)
+
+    if _, err := os.Stat(deviceLink); err != nil {
+       if !os.IsNotExist(err) {
+          return nil, status.Errorf(codes.Internal, "failed to stat file %s: %v", deviceLink, err)
+       }
+       // Else this volume didn't use a loop device, so do nothing.
+    } else {
+       // We have to resolve the symlink first because losetup won't follow it.
+       canonicalDevice, err := filepath.EvalSymlinks(deviceLink)
+       if err != nil {
+          return nil, status.Errorf(codes.Internal, "failed to resolve device symlink %s: %v", deviceLink, err)
+       }
+       if err := exec.Command("losetup", "-d", canonicalDevice).Run(); err != nil {
+          return nil, status.Errorf(codes.Internal, "failed to detach loop device %s: %v", deviceLink, err)
+       }
+    }
+
     // Unmount only if the target path is really a mount point.
     if isMnt, err := dv.config.safeMounter.IsMountPoint(stagingTargetPath); err != nil {
        return nil, status.Error(codes.Internal, fmt.Sprintf("check staging target path: %v", err))
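Unstaging mirrors that flow: if the `device` symlink exists, resolve it (since `losetup -d` does not follow symlinks) and detach the loop device; if it never existed, the volume was not loop-backed and there is nothing to do. A matching standalone sketch, same assumptions, with a hypothetical staging path:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// unstageLoopDevice detaches the loop device recorded by the staging step,
// tolerating volumes that never used one.
func unstageLoopDevice(stagingTargetPath string) error {
	deviceLink := filepath.Join(stagingTargetPath, "device")

	if _, err := os.Stat(deviceLink); err != nil {
		if os.IsNotExist(err) {
			return nil // not a loop-backed volume, nothing to do
		}
		return err
	}

	// losetup won't follow the symlink, so resolve it first.
	dev, err := filepath.EvalSymlinks(deviceLink)
	if err != nil {
		return err
	}
	return exec.Command("losetup", "-d", dev).Run()
}

func main() {
	if err := unstageLoopDevice("/tmp/staging"); err != nil { // hypothetical path
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```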
@@ -26,6 +26,7 @@ import (
 const (
     KataContainersDirectVolumeType = "katacontainers.direct.volume/volumetype"
     KataContainersDirectFsType    = "katacontainers.direct.volume/fstype"
+    KataContainersDirectLoop      = "katacontainers.direct.volume/loop"
     DirectVolumeTypeName          = "directvol"
     IsDirectVolume                = "is_directvolume"
 )