Remove unwanted newlines in glusterfs driver

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>

commit b867f601da, parent c037a48e31
@@ -82,7 +82,6 @@ const (
    // out from below formula.
    // max length of name of an ep - length of pvc uuid
    // where max length of name of an ep is 63 and length of uuid is 37

    maxCustomEpNamePrefixLen = 26

    // absoluteGidMin/Max are currently the same as the
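Note: the 26 above falls out of simple arithmetic: an endpoint name is capped at 63 characters and the generated suffix consumes 37 of them (presumably a separator plus the 36-character PVC UUID). A standalone sketch of the derivation, not driver code:

package main

import "fmt"

func main() {
    const maxEpNameLen = 63 // max length of an endpoint name
    const pvcUUIDLen = 37   // length of the pvc uuid suffix, per the comment above
    fmt.Println(maxEpNameLen - pvcUUIDLen) // 26 == maxCustomEpNamePrefixLen
}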
@@ -149,13 +148,11 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu
    if err != nil {
        return nil, err
    }

    kubeClient := plugin.host.GetKubeClient()
    if kubeClient == nil {
        return nil, fmt.Errorf("failed to get kube client to initialize mounter")
    }
    ep, err := kubeClient.CoreV1().Endpoints(epNamespace).Get(epName, metav1.GetOptions{})

    if err != nil {
        klog.Errorf("failed to get endpoint %s: %v", epName, err)
        return nil, err
@@ -171,12 +168,10 @@ func (plugin *glusterfsPlugin) getEndpointNameAndNamespace(spec *volume.Spec, de
            return "", "", fmt.Errorf("no glusterFS endpoint specified")
        }
        return endpoints, defaultNamespace, nil

    } else if spec.PersistentVolume != nil &&
        spec.PersistentVolume.Spec.Glusterfs != nil {
        endpoints := spec.PersistentVolume.Spec.Glusterfs.EndpointsName
        endpointsNs := defaultNamespace

        overriddenNs := spec.PersistentVolume.Spec.Glusterfs.EndpointsNamespace
        if overriddenNs != nil {
            if len(*overriddenNs) > 0 {
@@ -229,7 +224,6 @@ func (plugin *glusterfsPlugin) ConstructVolumeSpec(volumeName, mountPath string)

    // To reconstruct volume spec we need endpoint where fetching endpoint from mount
    // string looks to be impossible, so returning error.

    return nil, fmt.Errorf("impossible to reconstruct glusterfs volume spec from volume mountpath")
}

@@ -326,14 +320,12 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
    hasLogFile := false
    hasLogLevel := false
    log := ""

    if b.readOnly {
        options = append(options, "ro")
    }

    // Check for log-file,log-level options existence in user supplied mount options, if provided, use those.
    for _, userOpt := range b.mountOptions {

        switch {
        case dstrings.HasPrefix(userOpt, "log-file"):
            klog.V(4).Infof("log-file mount option has provided")
@@ -343,7 +335,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
            klog.V(4).Infof("log-level mount option has provided")
            hasLogLevel = true
        }

    }

    // If logfile has not been provided, create driver specific log file.
@@ -361,11 +352,9 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
        // Use derived log file in gluster fuse mount
        options = append(options, "log-file="+log)
    }

    if !hasLogLevel {
        options = append(options, "log-level=ERROR")
    }

    var addrlist []string
    if b.hosts == nil {
        return fmt.Errorf("glusterfs endpoint is nil in mounter")
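Note: the option handling above is a scan-then-default pattern: user-supplied mount options are scanned first, and the driver appends log-file/log-level defaults only when the user did not set them. A minimal standalone sketch of that pattern (the log path is illustrative, not the driver's):

package main

import (
    "fmt"
    "strings"
)

func main() {
    userOpts := []string{"ro", "log-level=WARNING"} // e.g. from the PV's mount options
    var options []string

    hasLogFile, hasLogLevel := false, false
    for _, opt := range userOpts {
        switch {
        case strings.HasPrefix(opt, "log-file"):
            hasLogFile = true
        case strings.HasPrefix(opt, "log-level"):
            hasLogLevel = true
        }
    }
    if !hasLogFile {
        options = append(options, "log-file=/var/log/glusterfs/example.log") // illustrative path
    }
    if !hasLogLevel {
        options = append(options, "log-level=ERROR")
    }
    fmt.Println(append(userOpts, options...)) // user options win; defaults fill the gaps
}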
@@ -380,7 +369,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
                }
            }
        }

    }

    //Add backup-volfile-servers and auto_unmount options.
@@ -388,7 +376,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
    options = append(options, "auto_unmount")

    mountOptions := volutil.JoinMountOptions(b.mountOptions, options)

    // with `backup-volfile-servers` mount option in place, it is not required to
    // iterate over all the servers in the addrlist. A mount attempt with this option
    // will fetch all the servers mentioned in the backup-volfile-servers list.
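Note: the comment above is why a single mount attempt is enough: the gluster fuse client itself fails over to the servers named in backup-volfile-servers. A hedged sketch of how such an option could be assembled from the endpoint addresses (the real driver may join the list differently):

package main

import (
    "fmt"
    "strings"
)

func main() {
    addrlist := []string{"192.168.1.10", "192.168.1.11", "192.168.1.12"} // endpoint IPs (illustrative)
    options := []string{"auto_unmount", "log-level=ERROR"}

    if len(addrlist) > 1 {
        // gluster accepts a colon-separated server list; the mount target uses the first address
        options = append(options, "backup-volfile-servers="+strings.Join(addrlist[1:], ":"))
    }
    fmt.Printf("mount -t glusterfs -o %s %s:/myvol /mnt\n", strings.Join(options, ","), addrlist[0])
}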
@@ -401,7 +388,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
        klog.Infof("successfully mounted directory %s", dir)
        return nil
    }

    if dstrings.Contains(errs.Error(), "Invalid option auto_unmount") ||
        dstrings.Contains(errs.Error(), "Invalid argument") {
        // Give a try without `auto_unmount` mount option, because
@@ -438,7 +424,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
func getVolumeInfo(spec *volume.Spec) (string, bool, error) {
    if spec.Volume != nil && spec.Volume.Glusterfs != nil {
        return spec.Volume.Glusterfs.Path, spec.Volume.Glusterfs.ReadOnly, nil

    } else if spec.PersistentVolume != nil &&
        spec.PersistentVolume.Spec.Glusterfs != nil {
        return spec.PersistentVolume.Spec.Glusterfs.Path, spec.ReadOnly, nil
@@ -489,7 +474,6 @@ func convertGid(gidString string) (int, error) {
    if err != nil {
        return 0, fmt.Errorf("failed to parse gid %v: %v", gidString, err)
    }

    if gid64 < 0 {
        return 0, fmt.Errorf("negative GIDs %v are not allowed", gidString)
    }
@@ -501,7 +485,6 @@ func convertGid(gidString string) (int, error) {
}

func convertVolumeParam(volumeString string) (int, error) {

    count, err := strconv.Atoi(volumeString)
    if err != nil {
        return 0, fmt.Errorf("failed to parse volumestring %q: %v", volumeString, err)
@@ -555,27 +538,21 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
    if err != nil {
        return fmt.Errorf("failed to get existing persistent volumes")
    }

    for _, pv := range pvList.Items {
        if v1helper.GetPersistentVolumeClass(&pv) != className {
            continue
        }

        pvName := pv.ObjectMeta.Name

        gidStr, ok := pv.Annotations[volutil.VolumeGidAnnotationKey]

        if !ok {
            klog.Warningf("no GID found in pv %v", pvName)
            continue
        }

        gid, err := convertGid(gidStr)
        if err != nil {
            klog.Errorf("failed to parse gid %s: %v", gidStr, err)
            continue
        }

        _, err = gidTable.Allocate(gid)
        if err == ErrConflict {
            klog.Warningf("GID %v found in pv %v was already allocated", gid, pvName)
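Note: collectGids pre-seeds the per-StorageClass allocator with every GID already recorded on existing PVs, so newly provisioned volumes cannot collide. A simplified stand-in for that reserve-from-annotation loop, with a plain map in place of the MinMaxAllocator (which additionally enforces a range):

package main

import (
    "errors"
    "fmt"
    "strconv"
)

var errConflict = errors.New("gid already allocated")

type allocator map[int]bool

func (a allocator) allocate(gid int) error {
    if a[gid] {
        return errConflict
    }
    a[gid] = true
    return nil
}

func main() {
    // stand-ins for pv.Annotations[volutil.VolumeGidAnnotationKey] values
    gidAnnotations := []string{"2000", "2001", "2000"}

    table := allocator{}
    for _, gidStr := range gidAnnotations {
        gid, err := strconv.Atoi(gidStr)
        if err != nil {
            continue // the driver logs and skips unparsable annotations
        }
        if err := table.allocate(gid); errors.Is(err, errConflict) {
            fmt.Printf("GID %d was already allocated\n", gid)
        }
    }
}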
@@ -583,7 +560,6 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
            return fmt.Errorf("failed to store gid %v found in pv %v: %v", gid, pvName, err)
        }
    }

    return nil
}

@@ -601,7 +577,6 @@ func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (
    if err != nil {
        return nil, err
    }

    return gidTable, nil
}

@@ -626,56 +601,45 @@ func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (
    // if in the meantime a table appeared, use it
    plugin.gidTableLock.Lock()
    defer plugin.gidTableLock.Unlock()

    gidTable, ok = plugin.gidTable[className]
    if ok {
        err = gidTable.SetRange(min, max)
        if err != nil {
            return nil, err
        }

        return gidTable, nil
    }

    plugin.gidTable[className] = newGidTable

    return newGidTable, nil
}

func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) {
    gidStr, ok := d.spec.Annotations[volutil.VolumeGidAnnotationKey]

    if !ok {
        return 0, false, nil
    }

    gid, err := convertGid(gidStr)

    return gid, true, err
}

func (d *glusterfsVolumeDeleter) Delete() error {
    klog.V(2).Infof("delete volume %s", d.glusterfsMounter.path)

    volumeName := d.glusterfsMounter.path
    volumeID, err := getVolumeID(d.spec, volumeName)
    if err != nil {
        return fmt.Errorf("failed to get volumeID: %v", err)
    }

    class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec)
    if err != nil {
        return err
    }

    cfg, err := parseClassParameters(class.Parameters, d.plugin.host.GetKubeClient())
    if err != nil {
        return err
    }
    d.provisionerConfig = *cfg

    klog.V(4).Infof("deleting volume %q", volumeID)

    gid, exists, err := d.getGid()
    if err != nil {
        klog.Error(err)
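Note: getGidTable above is a check-lock-recheck construction: a candidate table is built before taking the lock, and any table that appeared concurrently wins. A standalone sketch of that pattern, with a trivial struct in place of the real allocator:

package main

import (
    "fmt"
    "sync"
)

type gidTable struct{ min, max int }

var (
    tablesLock sync.Mutex
    tables     = map[string]*gidTable{}
)

func getTable(className string, min, max int) *gidTable {
    candidate := &gidTable{min, max} // built outside the lock; may be discarded

    tablesLock.Lock()
    defer tablesLock.Unlock()

    // if in the meantime a table appeared, use it (analogous to SetRange + return)
    if t, ok := tables[className]; ok {
        t.min, t.max = min, max
        return t
    }
    tables[className] = candidate
    return candidate
}

func main() {
    fmt.Println(getTable("fast", 2000, 4000))
}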
@@ -684,13 +648,11 @@ func (d *glusterfsVolumeDeleter) Delete() error {
        if err != nil {
            return fmt.Errorf("failed to get gidTable: %v", err)
        }

        err = gidTable.Release(gid)
        if err != nil {
            return fmt.Errorf("failed to release gid %v: %v", gid, err)
        }
    }

    cli := gcli.NewClient(d.url, d.user, d.secretValue)
    if cli == nil {
        klog.Errorf("failed to create glusterfs REST client")
@@ -732,16 +694,13 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop
    if !volutil.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
        return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
    }

    if p.options.PVC.Spec.Selector != nil {
        klog.V(4).Infof("not able to parse your claim Selector")
        return nil, fmt.Errorf("not able to parse your claim Selector")
    }

    if volutil.CheckPersistentVolumeClaimModeBlock(p.options.PVC) {
        return nil, fmt.Errorf("%s does not support block volume provisioning", p.plugin.GetPluginName())
    }

    klog.V(4).Infof("provision volume with options %v", p.options)
    scName := v1helper.GetPersistentVolumeClaimClass(p.options.PVC)
    cfg, err := parseClassParameters(p.options.Parameters, p.plugin.host.GetKubeClient())
@@ -754,21 +713,17 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop
    if err != nil {
        return nil, fmt.Errorf("failed to get gidTable: %v", err)
    }

    gid, _, err := gidTable.AllocateNext()
    if err != nil {
        klog.Errorf("failed to reserve GID from table: %v", err)
        return nil, fmt.Errorf("failed to reserve GID from table: %v", err)
    }

    klog.V(2).Infof("allocated GID %d for PVC %s", gid, p.options.PVC.Name)

    glusterfs, sizeGiB, volID, err := p.CreateVolume(gid)
    if err != nil {
        if releaseErr := gidTable.Release(gid); releaseErr != nil {
            klog.Errorf("error when releasing GID in storageclass %s: %v", scName, releaseErr)
        }

        return nil, fmt.Errorf("failed to create volume: %v", err)
    }
    mode := v1.PersistentVolumeFilesystem
@@ -780,11 +735,8 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop
    if len(pv.Spec.AccessModes) == 0 {
        pv.Spec.AccessModes = p.plugin.GetAccessModes()
    }

    pv.Spec.MountOptions = p.options.MountOptions

    gidStr := strconv.FormatInt(int64(gid), 10)

    pv.Annotations = map[string]string{
        volutil.VolumeGidAnnotationKey:        gidStr,
        volutil.VolumeDynamicallyCreatedByKey: heketiAnn,
@@ -792,7 +744,6 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop
        "Description":  glusterDescAnn,
        heketiVolIDAnn: volID,
    }

    pv.Spec.Capacity = v1.ResourceList{
        v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGiB)),
    }
@@ -803,12 +754,10 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
    var clusterIDs []string
    customVolumeName := ""
    epServiceName := ""

    kubeClient := p.plugin.host.GetKubeClient()
    if kubeClient == nil {
        return nil, 0, "", fmt.Errorf("failed to get kube client to update endpoint")
    }

    if len(p.provisionerConfig.customEpNamePrefix) == 0 {
        epServiceName = string(p.options.PVC.UID)
    } else {
@@ -821,7 +770,6 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
        return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
    }
    klog.V(3).Infof("dynamic endpoint %v and service %v ", endpoint, service)

    capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]

    // GlusterFS/heketi creates volumes in units of GiB.
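Note: per the comment above, heketi sizes volumes in whole GiB, so the requested capacity has to be rounded up before the create call. The driver delegates this to a shared volume-util helper; the math itself is just ceiling division:

package main

import "fmt"

func main() {
    requestBytes := int64(1500 * 1024 * 1024) // e.g. a 1500Mi PVC request
    const giB = int64(1 << 30)

    // round up to whole GiB, since heketi only accepts integer GiB sizes
    sz := (requestBytes + giB - 1) / giB
    fmt.Printf("create volume of size %dGiB\n", sz) // 2
}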
@@ -830,7 +778,6 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
        return nil, 0, "", err
    }
    klog.V(2).Infof("create volume of size %dGiB", sz)

    if p.url == "" {
        return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")
    }
@@ -846,7 +793,6 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
    if p.provisionerConfig.volumeNamePrefix != "" {
        customVolumeName = fmt.Sprintf("%s_%s_%s_%s", p.provisionerConfig.volumeNamePrefix, p.options.PVC.Namespace, p.options.PVC.Name, uuid.NewUUID())
    }

    gid64 := int64(gid)
    snaps := struct {
        Enable bool `json:"enable"`
@@ -855,7 +801,6 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
        true,
        p.provisionerConfig.thinPoolSnapFactor,
    }

    volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions, Snapshot: snaps}
    volume, err := cli.VolumeCreate(volumeReq)
    if err != nil {
@@ -867,29 +812,23 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
    if err != nil {
        return nil, 0, "", fmt.Errorf("failed to get cluster nodes for volume %s: %v", volume, err)
    }

    addrlist := make([]v1.EndpointAddress, len(dynamicHostIps))
    for i, v := range dynamicHostIps {
        addrlist[i].IP = v
    }
    subset := make([]v1.EndpointSubset, 1)
    ports := []v1.EndpointPort{{Port: 1, Protocol: "TCP"}}

    endpoint.Subsets = subset
    endpoint.Subsets[0].Addresses = addrlist
    endpoint.Subsets[0].Ports = ports

    _, err = kubeClient.CoreV1().Endpoints(epNamespace).Update(endpoint)
    if err != nil {
        deleteErr := cli.VolumeDelete(volume.Id)
        if deleteErr != nil {
            klog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr)
        }

        klog.V(3).Infof("failed to update endpoint, deleting %s", endpoint)

        err = kubeClient.CoreV1().Services(epNamespace).Delete(epServiceName, nil)

        if err != nil && errors.IsNotFound(err) {
            klog.V(1).Infof("service %s does not exist in namespace %s", epServiceName, epNamespace)
            err = nil
|
|||||||
}
|
}
|
||||||
klog.V(1).Infof("service/endpoint: %s/%s deleted successfully", epNamespace, epServiceName)
|
klog.V(1).Infof("service/endpoint: %s/%s deleted successfully", epNamespace, epServiceName)
|
||||||
return nil, 0, "", fmt.Errorf("failed to update endpoint %s: %v", endpoint, err)
|
return nil, 0, "", fmt.Errorf("failed to update endpoint %s: %v", endpoint, err)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
klog.V(3).Infof("endpoint %s updated successfully", endpoint)
|
klog.V(3).Infof("endpoint %s updated successfully", endpoint)
|
||||||
|
|
||||||
return &v1.GlusterfsPersistentVolumeSource{
|
return &v1.GlusterfsPersistentVolumeSource{
|
||||||
EndpointsName: endpoint.Name,
|
EndpointsName: endpoint.Name,
|
||||||
EndpointsNamespace: &epNamespace,
|
EndpointsNamespace: &epNamespace,
|
||||||
@ -923,7 +859,6 @@ func (p *glusterfsVolumeProvisioner) createOrGetEndpointService(namespace string
|
|||||||
} else {
|
} else {
|
||||||
pvcNameOrID = pvc.Name
|
pvcNameOrID = pvc.Name
|
||||||
}
|
}
|
||||||
|
|
||||||
endpoint = &v1.Endpoints{
|
endpoint = &v1.Endpoints{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Namespace: namespace,
|
Namespace: namespace,
|
||||||
@@ -1117,16 +1052,13 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
            return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, glusterfsPluginName)
        }
    }

    if len(cfg.url) == 0 {
        return nil, fmt.Errorf("StorageClass for provisioner %s must contain 'resturl' parameter", glusterfsPluginName)
    }

    if len(parseVolumeType) == 0 {
        cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: replicaCount}}
    } else {
        parseVolumeTypeInfo := dstrings.Split(parseVolumeType, ":")

        switch parseVolumeTypeInfo[0] {
        case "replicate":
            if len(parseVolumeTypeInfo) >= 2 {
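Note: the volumetype StorageClass parameter is colon-delimited (e.g. "replicate:3"), with a replicated default when unset. A hedged sketch of that parsing, using a plain struct in place of the heketi gapi types and assuming 3 as the default replica count:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

type durability struct {
    Type    string
    Replica int
}

func parseVolumeType(param string) (durability, error) {
    if len(param) == 0 {
        return durability{Type: "replicate", Replica: 3}, nil // assumed default
    }
    info := strings.Split(param, ":")
    switch info[0] {
    case "replicate":
        replica := 3
        if len(info) >= 2 {
            n, err := strconv.Atoi(info[1])
            if err != nil {
                return durability{}, fmt.Errorf("invalid replica count %q: %v", info[1], err)
            }
            replica = n
        }
        return durability{Type: "replicate", Replica: replica}, nil
    default:
        return durability{}, fmt.Errorf("unknown volume type %q", info[0])
    }
}

func main() {
    fmt.Println(parseVolumeType("replicate:3"))
}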
@@ -1167,7 +1099,6 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
    }

    if len(cfg.secretName) != 0 || len(cfg.secretNamespace) != 0 {

        // secretName + Namespace has precedence over userKey
        if len(cfg.secretName) != 0 && len(cfg.secretNamespace) != 0 {
            cfg.secretValue, err = parseSecret(cfg.secretNamespace, cfg.secretName, kubeClient)
@@ -1180,27 +1111,22 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
    } else {
        cfg.secretValue = cfg.userKey
    }

    if cfg.gidMin > cfg.gidMax {
        return nil, fmt.Errorf("storageClass for provisioner %q must have gidMax value >= gidMin", glusterfsPluginName)
    }

    if len(parseVolumeOptions) != 0 {
        volOptions := dstrings.Split(parseVolumeOptions, ",")
        if len(volOptions) == 0 {
            return nil, fmt.Errorf("storageClass for provisioner %q must have valid (for e.g., 'client.ssl on') volume option", glusterfsPluginName)
        }
        cfg.volumeOptions = volOptions

    }

    if len(parseVolumeNamePrefix) != 0 {
        if dstrings.Contains(parseVolumeNamePrefix, "_") {
            return nil, fmt.Errorf("storageclass parameter 'volumenameprefix' should not contain '_' in its value")
        }
        cfg.volumeNamePrefix = parseVolumeNamePrefix
    }

    if len(parseThinPoolSnapFactor) != 0 {
        thinPoolSnapFactor, err := strconv.ParseFloat(parseThinPoolSnapFactor, 32)
        if err != nil {
@@ -1239,11 +1165,9 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
    volumeName := pvSpec.Glusterfs.Path
    klog.V(2).Infof("received request to expand volume %s", volumeName)
    volumeID, err := getVolumeID(spec.PersistentVolume, volumeName)

    if err != nil {
        return oldSize, fmt.Errorf("failed to get volumeID for volume %s: %v", volumeName, err)
    }

    //Get details of StorageClass.
    class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume)
    if err != nil {
||||||
@ -1253,7 +1177,6 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return oldSize, err
|
return oldSize, err
|
||||||
}
|
}
|
||||||
|
|
||||||
klog.V(4).Infof("expanding volume: %q", volumeID)
|
klog.V(4).Infof("expanding volume: %q", volumeID)
|
||||||
|
|
||||||
//Create REST server connection
|
//Create REST server connection
|
||||||
@@ -1276,7 +1199,6 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
        klog.Errorf("error when fetching details of volume %s: %v", volumeName, err)
        return oldSize, err
    }

    if int64(currentVolumeInfo.Size) >= requestGiB {
        return newSize, nil
    }
@@ -1290,7 +1212,6 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
        klog.Errorf("failed to expand volume %s: %v", volumeName, err)
        return oldSize, err
    }

    klog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size)
    newVolumeSize := resource.MustParse(fmt.Sprintf("%dGi", volumeInfoRes.Size))
    return newVolumeSize, nil
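Note: the expansion path above is idempotent: when the current heketi volume size already satisfies the request, the driver returns early without calling expand. A tiny sketch of that guard, with plain integers standing in for the resource quantities:

package main

import "fmt"

// expandIfNeeded stands in for the heketi expand round-trip.
func expandIfNeeded(currentGiB, requestGiB int64) int64 {
    if currentGiB >= requestGiB {
        return currentGiB // nothing to do; request already satisfied
    }
    return requestGiB // pretend the expand call succeeded
}

func main() {
    fmt.Printf("volume size now %dGi\n", expandIfNeeded(5, 3)) // 5
}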