Mirror of https://github.com/oracle/zfssa-csi-driver.git (synced 2025-06-27 14:06:56 +00:00)
Add missing parameters to CreateFilesystem req so that the created FS can be mounted later
parent 817778f1c6
commit b7e0b32f24
@@ -6,10 +6,10 @@
 package service
 
 import (
-    "github.com/oracle/zfssa-csi-driver/pkg/utils"
-    "github.com/oracle/zfssa-csi-driver/pkg/zfssarest"
     "context"
     "github.com/container-storage-interface/spec/lib/go/csi"
+    "github.com/oracle/zfssa-csi-driver/pkg/utils"
+    "github.com/oracle/zfssa-csi-driver/pkg/zfssarest"
     context2 "golang.org/x/net/context"
     "google.golang.org/grpc/codes"
     "google.golang.org/grpc/status"
@@ -19,25 +19,25 @@ import (
 
 var (
     filesystemAccessModes = []csi.VolumeCapability_AccessMode{
-        { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER },
-        { Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER },
-        { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY },
-        { Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY },
-        { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY },
+        {Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER},
+        {Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER},
+        {Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY},
+        {Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY},
+        {Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY},
     }
 )
 
 // ZFSSA mount volume
 type zFilesystem struct {
     bolt        *utils.Bolt
     refcount    int32
     state       volumeState
     href        string
     id          *utils.VolumeId
     capacity    int64
     accessModes []csi.VolumeCapability_AccessMode
     source      *csi.VolumeContentSource
     mountpoint  string
 }
 
 // Creates a new filesysyem structure. If no information is provided (fsinfo is nil), this
@@ -58,8 +58,18 @@ func (fs *zFilesystem) create(ctx context.Context, token *zfssarest.Token,
     capacityRange := req.GetCapacityRange()
     capabilities := req.GetVolumeCapabilities()
 
-    fsinfo, httpStatus, err := zfssarest.CreateFilesystem(ctx, token,
-        req.GetName(), getVolumeSize(capacityRange), &req.Parameters)
+    if _, ok := req.Parameters["restrictChown"]; !ok {
+        utils.GetLogCTRL(ctx, 5).Println("Adding restrictChown to CreateFilesystem req parameters")
+        req.Parameters["restrictChown"] = "false"
+    }
+
+    if _, ok := req.Parameters["shareNFS"]; !ok {
+        utils.GetLogCTRL(ctx, 5).Println("Adding shareNFS to CreateFilesystem req parameters")
+        req.Parameters["shareNFS"] = "on"
+    }
+
+    fsinfo, httpStatus, err := zfssarest.CreateFilesystem(ctx, token,
+        req.GetName(), getVolumeSize(capacityRange), &req.Parameters)
     if err != nil {
         if httpStatus != http.StatusConflict {
             fs.state = stateDeleted
@@ -71,43 +81,43 @@ func (fs *zFilesystem) create(ctx context.Context, token *zfssarest.Token,
         // with the same name. We get the information from the appliance, update
         // the file system context and check its compatibility with the request.
         if fs.state == stateCreated {
             fsinfo, _, err = zfssarest.GetFilesystem(ctx, token,
                 req.Parameters["pool"], req.Parameters["project"], req.GetName())
             if err != nil {
                 return nil, err
             }
             fs.setInfo(fsinfo)
             // pass mountpoint as a volume context value to use for nfs mount to the pod
             req.Parameters["mountpoint"] = fs.mountpoint
         }
 
         // The volume has already been created. The compatibility of the
         // capacity range and accessModes is checked.
         if !compareCapacityRange(capacityRange, fs.capacity) {
             return nil,
                 status.Errorf(codes.AlreadyExists,
                     "Volume (%s) is already on target (%s),"+
                         " capacity range incompatible (%v), requested (%v/%v)",
                     fs.id.Name, fs.id.Zfssa, fs.capacity,
                     capacityRange.RequiredBytes, capacityRange.LimitBytes)
         }
         if !compareCapabilities(capabilities, fs.accessModes, false) {
             return nil,
                 status.Errorf(codes.AlreadyExists,
                     "Volume (%s) is already on target (%s), accessModes are incompatible",
                     fs.id.Name, fs.id.Zfssa)
         }
     } else {
         fs.setInfo(fsinfo)
         // pass mountpoint as a volume context value to use for nfs mount to the pod
         req.Parameters["mountpoint"] = fs.mountpoint
     }
 
     return &csi.CreateVolumeResponse{
         Volume: &csi.Volume{
             VolumeId:      fs.id.String(),
             CapacityBytes: fs.capacity,
             VolumeContext: req.Parameters}}, nil
 }
 
 func (fs *zFilesystem) cloneSnapshot(ctx context.Context, token *zfssarest.Token,
@@ -125,8 +135,8 @@ func (fs *zFilesystem) cloneSnapshot(ctx context.Context, token *zfssarest.Token
     }
 
     fs.setInfo(fsinfo)
     // pass mountpoint as a volume context value to use for nfs mount to the pod
     req.Parameters["mountpoint"] = fs.mountpoint
 
     return &csi.CreateVolumeResponse{
         Volume: &csi.Volume{
@@ -168,19 +178,16 @@ func (lun *zFilesystem) clone(ctx context.Context, token *zfssarest.Token,
 }
 
 // Publishes a file system. In this case there's nothing to do.
-//
 func (fs *zFilesystem) controllerPublishVolume(ctx context.Context, token *zfssarest.Token,
     req *csi.ControllerPublishVolumeRequest, nodeName string) (*csi.ControllerPublishVolumeResponse, error) {
 
     // Note: the volume context of the volume provisioned from an existing share does not have the mountpoint.
     // Use the share (corresponding to volumeAttributes.share of PV configuration) to define the mountpoint.
 
     return &csi.ControllerPublishVolumeResponse{}, nil
 }
 
-
 // Unpublishes a file system. In this case there's nothing to do.
-//
 func (fs *zFilesystem) controllerUnpublishVolume(ctx context.Context, token *zfssarest.Token,
     req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
     utils.GetLogCTRL(ctx, 5).Println("fs.controllerUnpublishVolume")
@@ -213,7 +220,7 @@ func (fs *zFilesystem) controllerExpandVolume(ctx context.Context, token *zfssar
     reqCapacity := req.GetCapacityRange().RequiredBytes
     if fs.capacity >= reqCapacity {
         return &csi.ControllerExpandVolumeResponse{
             CapacityBytes:         fs.capacity,
             NodeExpansionRequired: false,
         }, nil
     }
@@ -228,8 +235,8 @@ func (fs *zFilesystem) controllerExpandVolume(ctx context.Context, token *zfssar
     fs.capacity = fsinfo.Quota
 
     return &csi.ControllerExpandVolumeResponse{
         CapacityBytes:         fsinfo.Quota,
         NodeExpansionRequired: false,
     }, nil
 }
 
@@ -277,12 +284,12 @@ func (fs *zFilesystem) getSnapshotsList(ctx context.Context, token *zfssarest.To
     return zfssaSnapshotList2csiSnapshotList(ctx, token.Name, snapList), nil
 }
 
 func (fs *zFilesystem) getState() volumeState { return fs.state }
 func (fs *zFilesystem) getName() string { return fs.id.Name }
 func (fs *zFilesystem) getHref() string { return fs.href }
 func (fs *zFilesystem) getVolumeID() *utils.VolumeId { return fs.id }
 func (fs *zFilesystem) getCapacity() int64 { return fs.capacity }
 func (fs *zFilesystem) isBlock() bool { return false }
 
 func (fs *zFilesystem) setInfo(volInfo interface{}) {
 
@@ -316,7 +323,7 @@ func (fs *zFilesystem) lock(ctx context.Context) volumeState {
     return fs.state
 }
 
-func (fs *zFilesystem) unlock(ctx context.Context) (int32, volumeState){
+func (fs *zFilesystem) unlock(ctx context.Context) (int32, volumeState) {
     fs.bolt.Unlock(ctx)
     utils.GetLogCTRL(ctx, 5).Printf("%s is unlocked", fs.id.String())
     return fs.refcount, fs.state
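
The functional change in this commit is the pair of parameter defaults added to create(): if the incoming CSI request parameters do not carry restrictChown or shareNFS, the driver injects restrictChown=false and shareNFS=on before calling zfssarest.CreateFilesystem, so the new share is exported over NFS and can be mounted later by the node plugin. The standalone sketch below only illustrates that defaulting pattern; the ensureDefault helper, the log wording, and the pool/project values are assumptions for the example and are not part of the driver itself.

package main

import (
    "fmt"
)

// ensureDefault is a hypothetical helper showing the pattern used by the patched
// create() path: inject a default only when the caller did not set the key.
func ensureDefault(params map[string]string, key, value string) {
    if _, present := params[key]; !present {
        fmt.Printf("Adding %s=%s to CreateFilesystem req parameters\n", key, value)
        params[key] = value
    }
}

func main() {
    // Parameters as they might arrive from a StorageClass that sets only the
    // pool and project (assumed example values).
    params := map[string]string{
        "pool":    "p0",
        "project": "csi",
    }

    // Defaults applied by this commit before the ZFSSA REST call:
    // the share is exported over NFS and chown is not restricted.
    ensureDefault(params, "restrictChown", "false")
    ensureDefault(params, "shareNFS", "on")

    fmt.Println(params) // map[pool:p0 project:csi restrictChown:false shareNFS:on]
}

Because the defaults are applied only when the keys are absent, a StorageClass that already sets shareNFS or restrictChown explicitly keeps its own values.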