Remove unwanted newlines in glusterfs driver

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
Humble Chirammal 2019-06-06 22:03:15 +05:30
parent c037a48e31
commit b867f601da

@@ -82,7 +82,6 @@ const (
// out from below formula.
// max length of name of an ep - length of pvc uuid
// where max length of name of an ep is 63 and length of uuid is 37
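// i.e. 63 - 37 = 26, the value assigned below.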
maxCustomEpNamePrefixLen = 26
// absoluteGidMin/Max are currently the same as the
@@ -149,13 +148,11 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu
if err != nil {
return nil, err
}
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("failed to get kube client to initialize mounter")
}
ep, err := kubeClient.CoreV1().Endpoints(epNamespace).Get(epName, metav1.GetOptions{})
if err != nil {
klog.Errorf("failed to get endpoint %s: %v", epName, err)
return nil, err
@@ -171,12 +168,10 @@ func (plugin *glusterfsPlugin) getEndpointNameAndNamespace(spec *volume.Spec, de
return "", "", fmt.Errorf("no glusterFS endpoint specified")
}
return endpoints, defaultNamespace, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.Glusterfs != nil {
endpoints := spec.PersistentVolume.Spec.Glusterfs.EndpointsName
endpointsNs := defaultNamespace
overriddenNs := spec.PersistentVolume.Spec.Glusterfs.EndpointsNamespace
if overriddenNs != nil {
if len(*overriddenNs) > 0 {
@@ -229,7 +224,6 @@ func (plugin *glusterfsPlugin) ConstructVolumeSpec(volumeName, mountPath string)
// To reconstruct the volume spec we need the endpoint, but fetching the endpoint
// from the mount string is impossible, so return an error.
return nil, fmt.Errorf("impossible to reconstruct glusterfs volume spec from volume mountpath")
}
@@ -326,14 +320,12 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
hasLogFile := false
hasLogLevel := false
log := ""
if b.readOnly {
options = append(options, "ro")
}
// Check for log-file and log-level options in the user-supplied mount options; if provided, use those.
for _, userOpt := range b.mountOptions {
switch {
case dstrings.HasPrefix(userOpt, "log-file"):
klog.V(4).Infof("log-file mount option has provided")
@@ -343,7 +335,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
klog.V(4).Infof("log-level mount option has provided")
hasLogLevel = true
}
}
// If a log file has not been provided, create a driver-specific log file.
@@ -361,11 +352,9 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
// Use derived log file in gluster fuse mount
options = append(options, "log-file="+log)
}
if !hasLogLevel {
options = append(options, "log-level=ERROR")
}
var addrlist []string
if b.hosts == nil {
return fmt.Errorf("glusterfs endpoint is nil in mounter")
@@ -380,7 +369,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
}
}
}
}
// Add backup-volfile-servers and auto_unmount options.
@@ -388,7 +376,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
options = append(options, "auto_unmount")
mountOptions := volutil.JoinMountOptions(b.mountOptions, options)
// with `backup-volfile-servers` mount option in place, it is not required to
// iterate over all the servers in the addrlist. A mount attempt with this option
// will fetch all the servers mentioned in the backup-volfile-servers list.
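// Illustrative example (hypothetical hosts): with addrlist ["10.240.0.4", "10.240.0.5"],
// the resulting mount option is "backup-volfile-servers=10.240.0.4:10.240.0.5".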
@@ -401,7 +388,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
klog.Infof("successfully mounted directory %s", dir)
return nil
}
if dstrings.Contains(errs.Error(), "Invalid option auto_unmount") ||
dstrings.Contains(errs.Error(), "Invalid argument") {
// Give a try without `auto_unmount` mount option, because
@@ -438,7 +424,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
func getVolumeInfo(spec *volume.Spec) (string, bool, error) {
if spec.Volume != nil && spec.Volume.Glusterfs != nil {
return spec.Volume.Glusterfs.Path, spec.Volume.Glusterfs.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.Glusterfs != nil {
return spec.PersistentVolume.Spec.Glusterfs.Path, spec.ReadOnly, nil
@@ -489,7 +474,6 @@ func convertGid(gidString string) (int, error) {
if err != nil {
return 0, fmt.Errorf("failed to parse gid %v: %v", gidString, err)
}
if gid64 < 0 {
return 0, fmt.Errorf("negative GIDs %v are not allowed", gidString)
}
@@ -501,7 +485,6 @@ func convertGid(gidString string) (int, error) {
}
func convertVolumeParam(volumeString string) (int, error) {
count, err := strconv.Atoi(volumeString)
if err != nil {
return 0, fmt.Errorf("failed to parse volumestring %q: %v", volumeString, err)
@@ -555,27 +538,21 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
if err != nil {
return fmt.Errorf("failed to get existing persistent volumes")
}
for _, pv := range pvList.Items {
if v1helper.GetPersistentVolumeClass(&pv) != className {
continue
}
pvName := pv.ObjectMeta.Name
gidStr, ok := pv.Annotations[volutil.VolumeGidAnnotationKey]
if !ok {
klog.Warningf("no GID found in pv %v", pvName)
continue
}
gid, err := convertGid(gidStr)
if err != nil {
klog.Errorf("failed to parse gid %s: %v", gidStr, err)
continue
}
_, err = gidTable.Allocate(gid)
if err == ErrConflict {
klog.Warningf("GID %v found in pv %v was already allocated", gid, pvName)
@@ -583,7 +560,6 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
return fmt.Errorf("failed to store gid %v found in pv %v: %v", gid, pvName, err)
}
}
return nil
}
@@ -601,7 +577,6 @@ func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (
if err != nil {
return nil, err
}
return gidTable, nil
}
@@ -626,56 +601,45 @@ func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (
// if in the meantime a table appeared, use it
plugin.gidTableLock.Lock()
defer plugin.gidTableLock.Unlock()
gidTable, ok = plugin.gidTable[className]
if ok {
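// Another goroutine created the table while we waited for the lock; re-apply the range in case min/max differ.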
err = gidTable.SetRange(min, max)
if err != nil {
return nil, err
}
return gidTable, nil
}
plugin.gidTable[className] = newGidTable
return newGidTable, nil
}
func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) {
gidStr, ok := d.spec.Annotations[volutil.VolumeGidAnnotationKey]
if !ok {
return 0, false, nil
}
gid, err := convertGid(gidStr)
return gid, true, err
}
func (d *glusterfsVolumeDeleter) Delete() error {
klog.V(2).Infof("delete volume %s", d.glusterfsMounter.path)
volumeName := d.glusterfsMounter.path
volumeID, err := getVolumeID(d.spec, volumeName)
if err != nil {
return fmt.Errorf("failed to get volumeID: %v", err)
}
class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec)
if err != nil {
return err
}
cfg, err := parseClassParameters(class.Parameters, d.plugin.host.GetKubeClient())
if err != nil {
return err
}
d.provisionerConfig = *cfg
klog.V(4).Infof("deleting volume %q", volumeID)
gid, exists, err := d.getGid()
if err != nil {
klog.Error(err)
@@ -684,13 +648,11 @@ func (d *glusterfsVolumeDeleter) Delete() error {
if err != nil {
return fmt.Errorf("failed to get gidTable: %v", err)
}
err = gidTable.Release(gid)
if err != nil {
return fmt.Errorf("failed to release gid %v: %v", gid, err)
}
}
cli := gcli.NewClient(d.url, d.user, d.secretValue)
if cli == nil {
klog.Errorf("failed to create glusterfs REST client")
@@ -732,16 +694,13 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop
if !volutil.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
}
if p.options.PVC.Spec.Selector != nil {
klog.V(4).Infof("not able to parse your claim Selector")
return nil, fmt.Errorf("not able to parse your claim Selector")
}
if volutil.CheckPersistentVolumeClaimModeBlock(p.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", p.plugin.GetPluginName())
}
klog.V(4).Infof("provision volume with options %v", p.options)
scName := v1helper.GetPersistentVolumeClaimClass(p.options.PVC)
cfg, err := parseClassParameters(p.options.Parameters, p.plugin.host.GetKubeClient())
@@ -754,21 +713,17 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop
if err != nil {
return nil, fmt.Errorf("failed to get gidTable: %v", err)
}
gid, _, err := gidTable.AllocateNext()
if err != nil {
klog.Errorf("failed to reserve GID from table: %v", err)
return nil, fmt.Errorf("failed to reserve GID from table: %v", err)
}
klog.V(2).Infof("allocated GID %d for PVC %s", gid, p.options.PVC.Name)
glusterfs, sizeGiB, volID, err := p.CreateVolume(gid)
if err != nil {
if releaseErr := gidTable.Release(gid); releaseErr != nil {
klog.Errorf("error when releasing GID in storageclass %s: %v", scName, releaseErr)
}
return nil, fmt.Errorf("failed to create volume: %v", err)
}
mode := v1.PersistentVolumeFilesystem
@@ -780,11 +735,8 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop
if len(pv.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = p.plugin.GetAccessModes()
}
pv.Spec.MountOptions = p.options.MountOptions
gidStr := strconv.FormatInt(int64(gid), 10)
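// Persist the allocated GID on the PV so collectGids can rebuild the allocator from existing PVs.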
pv.Annotations = map[string]string{
volutil.VolumeGidAnnotationKey: gidStr,
volutil.VolumeDynamicallyCreatedByKey: heketiAnn,
@@ -792,7 +744,6 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop
"Description": glusterDescAnn,
heketiVolIDAnn: volID,
}
pv.Spec.Capacity = v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGiB)),
}
@@ -803,12 +754,10 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
var clusterIDs []string
customVolumeName := ""
epServiceName := ""
kubeClient := p.plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, 0, "", fmt.Errorf("failed to get kube client to update endpoint")
}
if len(p.provisionerConfig.customEpNamePrefix) == 0 {
epServiceName = string(p.options.PVC.UID)
} else {
@@ -821,7 +770,6 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
}
klog.V(3).Infof("dynamic endpoint %v and service %v ", endpoint, service)
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// GlusterFS/heketi creates volumes in units of GiB.
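// e.g. a PVC requesting 1500Mi is rounded up to a 2 GiB volume.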
@@ -830,7 +778,6 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
return nil, 0, "", err
}
klog.V(2).Infof("create volume of size %dGiB", sz)
if p.url == "" {
return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")
}
@@ -846,7 +793,6 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
if p.provisionerConfig.volumeNamePrefix != "" {
customVolumeName = fmt.Sprintf("%s_%s_%s_%s", p.provisionerConfig.volumeNamePrefix, p.options.PVC.Namespace, p.options.PVC.Name, uuid.NewUUID())
}
gid64 := int64(gid)
snaps := struct {
Enable bool `json:"enable"`
@@ -855,7 +801,6 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
true,
p.provisionerConfig.thinPoolSnapFactor,
}
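// Snapshots are enabled with the configured thin pool snap factor, which (per heketi) reserves extra thin-pool capacity for snapshots.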
volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions, Snapshot: snaps}
volume, err := cli.VolumeCreate(volumeReq)
if err != nil {
@@ -867,29 +812,23 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
if err != nil {
return nil, 0, "", fmt.Errorf("failed to get cluster nodes for volume %s: %v", volume, err)
}
addrlist := make([]v1.EndpointAddress, len(dynamicHostIps))
for i, v := range dynamicHostIps {
addrlist[i].IP = v
}
subset := make([]v1.EndpointSubset, 1)
ports := []v1.EndpointPort{{Port: 1, Protocol: "TCP"}}
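// glusterfs does not use the endpoint port; 1 is simply a valid placeholder required by the Endpoints API.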
endpoint.Subsets = subset
endpoint.Subsets[0].Addresses = addrlist
endpoint.Subsets[0].Ports = ports
_, err = kubeClient.CoreV1().Endpoints(epNamespace).Update(endpoint)
if err != nil {
deleteErr := cli.VolumeDelete(volume.Id)
if deleteErr != nil {
klog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr)
}
klog.V(3).Infof("failed to update endpoint, deleting %s", endpoint)
err = kubeClient.CoreV1().Services(epNamespace).Delete(epServiceName, nil)
if err != nil && errors.IsNotFound(err) {
klog.V(1).Infof("service %s does not exist in namespace %s", epServiceName, epNamespace)
err = nil
@@ -899,11 +838,8 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi
}
klog.V(1).Infof("service/endpoint: %s/%s deleted successfully", epNamespace, epServiceName)
return nil, 0, "", fmt.Errorf("failed to update endpoint %s: %v", endpoint, err)
}
klog.V(3).Infof("endpoint %s updated successfully", endpoint)
return &v1.GlusterfsPersistentVolumeSource{
EndpointsName: endpoint.Name,
EndpointsNamespace: &epNamespace,
@@ -923,7 +859,6 @@ func (p *glusterfsVolumeProvisioner) createOrGetEndpointService(namespace string
} else {
pvcNameOrID = pvc.Name
}
endpoint = &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@@ -1117,16 +1052,13 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, glusterfsPluginName)
}
}
if len(cfg.url) == 0 {
return nil, fmt.Errorf("StorageClass for provisioner %s must contain 'resturl' parameter", glusterfsPluginName)
}
if len(parseVolumeType) == 0 {
cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: replicaCount}}
} else {
parseVolumeTypeInfo := dstrings.Split(parseVolumeType, ":")
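// e.g. a volumetype of "replicate:3" splits into ["replicate", "3"].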
switch parseVolumeTypeInfo[0] {
case "replicate":
if len(parseVolumeTypeInfo) >= 2 {
@@ -1167,7 +1099,6 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
}
if len(cfg.secretName) != 0 || len(cfg.secretNamespace) != 0 {
// secretName + Namespace has precedence over userKey
if len(cfg.secretName) != 0 && len(cfg.secretNamespace) != 0 {
cfg.secretValue, err = parseSecret(cfg.secretNamespace, cfg.secretName, kubeClient)
@@ -1180,27 +1111,22 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
} else {
cfg.secretValue = cfg.userKey
}
if cfg.gidMin > cfg.gidMax {
return nil, fmt.Errorf("storageClass for provisioner %q must have gidMax value >= gidMin", glusterfsPluginName)
}
if len(parseVolumeOptions) != 0 {
volOptions := dstrings.Split(parseVolumeOptions, ",")
if len(volOptions) == 0 {
return nil, fmt.Errorf("storageClass for provisioner %q must have valid (for e.g., 'client.ssl on') volume option", glusterfsPluginName)
}
cfg.volumeOptions = volOptions
}
if len(parseVolumeNamePrefix) != 0 {
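// '_' is reserved: CreateVolume joins the prefix, namespace, PVC name, and UUID with underscores.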
if dstrings.Contains(parseVolumeNamePrefix, "_") {
return nil, fmt.Errorf("storageclass parameter 'volumenameprefix' should not contain '_' in its value")
}
cfg.volumeNamePrefix = parseVolumeNamePrefix
}
if len(parseThinPoolSnapFactor) != 0 {
thinPoolSnapFactor, err := strconv.ParseFloat(parseThinPoolSnapFactor, 32)
if err != nil {
@@ -1239,11 +1165,9 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
volumeName := pvSpec.Glusterfs.Path
klog.V(2).Infof("received request to expand volume %s", volumeName)
volumeID, err := getVolumeID(spec.PersistentVolume, volumeName)
if err != nil {
return oldSize, fmt.Errorf("failed to get volumeID for volume %s: %v", volumeName, err)
}
// Get details of StorageClass.
class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume)
if err != nil {
@@ -1253,7 +1177,6 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
if err != nil {
return oldSize, err
}
klog.V(4).Infof("expanding volume: %q", volumeID)
// Create REST server connection
@@ -1276,7 +1199,6 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
klog.Errorf("error when fetching details of volume %s: %v", volumeName, err)
return oldSize, err
}
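// If the volume is already at or above the requested size, treat the expansion as a no-op.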
if int64(currentVolumeInfo.Size) >= requestGiB {
return newSize, nil
}
@@ -1290,7 +1212,6 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
klog.Errorf("failed to expand volume %s: %v", volumeName, err)
return oldSize, err
}
klog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size)
newVolumeSize := resource.MustParse(fmt.Sprintf("%dGi", volumeInfoRes.Size))
return newVolumeSize, nil