Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 11:21:47 +00:00
Merge pull request #59928 from humblec/more-correction
Automatic merge from submit-queue (batch tested with PRs 59901, 59302, 59928). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Reformat and update error strings.

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>

**What this PR does / why we need it**:

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:
```release-note
```
Commit: d1cb55c8a7
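The diff below reformats log and error strings in pkg/volume/glusterfs/glusterfs.go (plus small comment, blank-line, and log-level tweaks). As a minimal sketch, not part of the PR itself, the snippet below illustrates the convention the change applies throughout: lowercase "failed to ..." messages, no square brackets around format verbs, and the underlying error appended with ": %v". The helper name and sample values are hypothetical.

```go
package main

import "fmt"

// getEndpointErr is a hypothetical helper mirroring the style change in this PR:
// the old message was "failed to get endpoints %s[%v]"; the reformatted one drops
// the brackets and separates the cause with ": %v".
func getEndpointErr(epName string, cause error) error {
	return fmt.Errorf("failed to get endpoint %s: %v", epName, cause)
}

func main() {
	err := getEndpointErr("glusterfs-dynamic-claim1", fmt.Errorf("endpoints not found"))
	fmt.Println(err) // failed to get endpoint glusterfs-dynamic-claim1: endpoints not found
}
```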
@@ -75,6 +75,7 @@ const (
 	gciLinuxGlusterMountBinaryPath = "/sbin/mount.glusterfs"
 	defaultGidMin = 2000
 	defaultGidMax = math.MaxInt32
+
 	// absoluteGidMin/Max are currently the same as the
 	// default values, but they play a different role and
 	// could take a different value. Only thing we need is:
@@ -149,7 +150,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu
 		return nil, err
 	}
 	epName := source.EndpointsName
-	// PVC/POD is in same ns.
+	// PVC/POD is in same namespace.
 	podNs := pod.Namespace
 	kubeClient := plugin.host.GetKubeClient()
 	if kubeClient == nil {
@@ -157,10 +158,10 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu
 	}
 	ep, err := kubeClient.CoreV1().Endpoints(podNs).Get(epName, metav1.GetOptions{})
 	if err != nil {
-		glog.Errorf("failed to get endpoints %s[%v]", epName, err)
+		glog.Errorf("failed to get endpoint %s: %v", epName, err)
 		return nil, err
 	}
-	glog.V(1).Infof("glusterfs pv endpoint: [%v]", ep)
+	glog.V(4).Infof("glusterfs pv endpoint %v", ep)
 	return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(plugin.GetPluginName()))
 }
 
@@ -251,7 +252,7 @@ func (b *glusterfsMounter) SetUp(fsGroup *int64) error {
 
 func (b *glusterfsMounter) SetUpAt(dir string, fsGroup *int64) error {
 	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
-	glog.V(4).Infof("mount setup: [%s %v %v]", dir, !notMnt, err)
+	glog.V(4).Infof("mount setup: %s %v %v", dir, !notMnt, err)
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
@@ -300,7 +301,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
 
 	p := path.Join(b.glusterfs.plugin.host.GetPluginDir(glusterfsPluginName), b.glusterfs.volName)
 	if err := os.MkdirAll(p, 0750); err != nil {
-		return fmt.Errorf("Error creating directory [%v]: error: %v", p, err)
+		return fmt.Errorf("failed to create directory %v: %v", p, err)
 	}
 
 	// adding log-level ERROR to remove noise
@@ -312,7 +313,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
 
 	var addrlist []string
 	if b.hosts == nil {
-		return fmt.Errorf("glusterfs: endpoint is nil")
+		return fmt.Errorf("glusterfs endpoint is nil in mounter")
 	}
 	addr := sets.String{}
 	if b.hosts.Subsets != nil {
@@ -338,7 +339,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
 	ip := addrlist[0]
 	errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", mountOptions)
 	if errs == nil {
-		glog.Infof("glusterfs: successfully mounted %s", dir)
+		glog.Infof("successfully mounted directory %s", dir)
 		return nil
 	}
 
@@ -424,11 +425,11 @@ type glusterfsVolumeProvisioner struct {
 func convertGid(gidString string) (int, error) {
 	gid64, err := strconv.ParseInt(gidString, 10, 32)
 	if err != nil {
-		return 0, fmt.Errorf("failed to parse gid [%v]", gidString)
+		return 0, fmt.Errorf("failed to parse gid %v: %v", gidString, err)
 	}
 
 	if gid64 < 0 {
-		return 0, fmt.Errorf("negative GIDs are not allowed: [%v]", gidString)
+		return 0, fmt.Errorf("negative GIDs %v are not allowed", gidString)
 	}
 
 	// ParseInt returns a int64, but since we parsed only
@@ -441,7 +442,7 @@ func convertVolumeParam(volumeString string) (int, error) {
 
 	count, err := strconv.Atoi(volumeString)
 	if err != nil {
-		return 0, fmt.Errorf("failed to parse %q", volumeString)
+		return 0, fmt.Errorf("failed to parse volumestring %q: %v", volumeString, err)
 	}
 
 	if count < 0 {
@@ -504,21 +505,21 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
 		gidStr, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]
 
 		if !ok {
-			glog.Warningf("no GID found in pv [%v]", pvName)
+			glog.Warningf("no GID found in pv %v", pvName)
 			continue
 		}
 
 		gid, err := convertGid(gidStr)
 		if err != nil {
-			glog.Errorf("failed to parse gid[%s]: %v", gidStr, err)
+			glog.Errorf("failed to parse gid %s: %v", gidStr, err)
 			continue
 		}
 
 		_, err = gidTable.Allocate(gid)
 		if err == ErrConflict {
-			glog.Warningf("GID [%v] found in pv [%v] was already allocated", gid)
+			glog.Warningf("GID %v found in pv %v was already allocated", gid, pvName)
 		} else if err != nil {
-			glog.Errorf("failed to store gid [%v] found in pv [%v]: error:%v", gid, pvName, err)
+			glog.Errorf("failed to store gid %v found in pv %v: %v", gid, pvName, err)
 			return err
 		}
 	}
@@ -563,7 +564,6 @@ func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (
 	}
 
 	// if in the meantime a table appeared, use it
-
 	plugin.gidTableLock.Lock()
 	defer plugin.gidTableLock.Unlock()
 
@@ -595,12 +595,12 @@ func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) {
 }
 
 func (d *glusterfsVolumeDeleter) Delete() error {
-	glog.V(2).Infof("delete volume: %s ", d.glusterfsMounter.path)
+	glog.V(2).Infof("delete volume %s", d.glusterfsMounter.path)
 
 	volumeName := d.glusterfsMounter.path
 	volumeID, err := getVolumeID(d.spec, volumeName)
 	if err != nil {
-		return fmt.Errorf("failed to get volumeID, err: %v", err)
+		return fmt.Errorf("failed to get volumeID: %v", err)
 	}
 
 	class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec)
@@ -614,7 +614,7 @@ func (d *glusterfsVolumeDeleter) Delete() error {
 	}
 	d.provisionerConfig = *cfg
 
-	glog.V(4).Infof("deleting volume [%q]", volumeID)
+	glog.V(4).Infof("deleting volume %q", volumeID)
 
 	gid, exists, err := d.getGid()
 	if err != nil {
@@ -622,26 +622,26 @@ func (d *glusterfsVolumeDeleter) Delete() error {
 	} else if exists {
 		gidTable, err := d.plugin.getGidTable(class.Name, cfg.gidMin, cfg.gidMax)
 		if err != nil {
-			return fmt.Errorf("failed to get gidTable: error: %v", err)
+			return fmt.Errorf("failed to get gidTable: %v", err)
 		}
 
 		err = gidTable.Release(gid)
 		if err != nil {
-			return fmt.Errorf("failed to release gid [%v], error: %v", gid, err)
+			return fmt.Errorf("failed to release gid %v: %v", gid, err)
 		}
 	}
 
 	cli := gcli.NewClient(d.url, d.user, d.secretValue)
 	if cli == nil {
-		glog.Errorf("failed to create glusterfs rest client")
-		return fmt.Errorf("failed to create glusterfs rest client, REST server authentication failed")
+		glog.Errorf("failed to create glusterfs REST client")
+		return fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
 	}
 	err = cli.VolumeDelete(volumeID)
 	if err != nil {
-		glog.Errorf("error when deleting the volume[%s]: %v", volumeName, err)
+		glog.Errorf("failed to delete volume %s: %v", volumeName, err)
 		return err
 	}
-	glog.V(2).Infof("volume [%s] deleted successfully", volumeName)
+	glog.V(2).Infof("volume %s deleted successfully", volumeName)
 
 	//Deleter takes endpoint and namespace from pv spec.
 	pvSpec := d.spec.Spec
@@ -658,26 +658,26 @@ func (d *glusterfsVolumeDeleter) Delete() error {
 	if pvSpec.Glusterfs.EndpointsName != "" {
 		dynamicEndpoint = pvSpec.Glusterfs.EndpointsName
 	}
-	glog.V(3).Infof("dynamic namespace and endpoint : [%v/%v]", dynamicNamespace, dynamicEndpoint)
+	glog.V(3).Infof("dynamic namespace and endpoint %v/%v", dynamicNamespace, dynamicEndpoint)
 	err = d.deleteEndpointService(dynamicNamespace, dynamicEndpoint)
 	if err != nil {
-		glog.Errorf("error when deleting endpoint/service :%v", err)
+		glog.Errorf("failed to delete endpoint/service %v/%v: %v", dynamicNamespace, dynamicEndpoint, err)
 	} else {
-		glog.V(1).Infof("endpoint: [%v/%v] is deleted successfully ", dynamicNamespace, dynamicEndpoint)
+		glog.V(1).Infof("endpoint %v/%v is deleted successfully ", dynamicNamespace, dynamicEndpoint)
 	}
 	return nil
 }
 
 func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 	if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
-		return nil, fmt.Errorf("invalid AccessModes [%v]: only AccessModes [%v] are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
+		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
 	}
 
 	if p.options.PVC.Spec.Selector != nil {
 		glog.V(4).Infof("not able to parse your claim Selector")
 		return nil, fmt.Errorf("not able to parse your claim Selector")
 	}
-	glog.V(4).Infof("Provision VolumeOptions [%v]", p.options)
+	glog.V(4).Infof("Provision VolumeOptions %v", p.options)
 	scName := v1helper.GetPersistentVolumeClaimClass(p.options.PVC)
 	cfg, err := parseClassParameters(p.options.Parameters, p.plugin.host.GetKubeClient())
 	if err != nil {
@@ -689,25 +689,25 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 
 	gidTable, err := p.plugin.getGidTable(scName, cfg.gidMin, cfg.gidMax)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get gidTable, error: %v", err)
+		return nil, fmt.Errorf("failed to get gidTable: %v", err)
 	}
 
 	gid, _, err := gidTable.AllocateNext()
 	if err != nil {
-		glog.Errorf("failed to reserve GID from table, error: %v", err)
-		return nil, fmt.Errorf("failed to reserve GID from table, error: %v", err)
+		glog.Errorf("failed to reserve GID from table: %v", err)
+		return nil, fmt.Errorf("failed to reserve GID from table: %v", err)
 	}
 
-	glog.V(2).Infof("Allocated GID [%d] for PVC [%s]", gid, p.options.PVC.Name)
+	glog.V(2).Infof("Allocated GID %d for PVC %s", gid, p.options.PVC.Name)
 
 	glusterfs, sizeGiB, volID, err := p.CreateVolume(gid)
 	if err != nil {
 		if releaseErr := gidTable.Release(gid); releaseErr != nil {
-			glog.Errorf("error when releasing GID in storageclass: [%s]", scName)
+			glog.Errorf("error when releasing GID in storageclass %s: %v", scName, releaseErr)
 		}
 
-		glog.Errorf("create volume error: %v.", err)
-		return nil, fmt.Errorf("create volume error: %v", err)
+		glog.Errorf("failed to create volume: %v", err)
+		return nil, fmt.Errorf("failed to create volume: %v", err)
 	}
 	mode := v1.PersistentVolumeFilesystem
 	pv := new(v1.PersistentVolume)
@@ -741,20 +741,22 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum
 	var clusterIDs []string
 	customVolumeName := ""
 	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
+
+	// GlusterFS/heketi creates volumes in units of GiB.
 	sz := int(volume.RoundUpToGiB(capacity))
-	glog.V(2).Infof("create volume of size: %d GiB", sz)
+	glog.V(2).Infof("create volume of size %dGiB", sz)
 	if p.url == "" {
 		glog.Errorf("REST server endpoint is empty")
 		return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")
 	}
 	cli := gcli.NewClient(p.url, p.user, p.secretValue)
 	if cli == nil {
-		glog.Errorf("failed to create glusterfs rest client")
+		glog.Errorf("failed to create glusterfs REST client")
 		return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
 	}
 	if p.provisionerConfig.clusterID != "" {
 		clusterIDs = dstrings.Split(p.clusterID, ",")
-		glog.V(4).Infof("provided clusterIDs: [%v]", clusterIDs)
+		glog.V(4).Infof("provided clusterIDs %v", clusterIDs)
 	}
 
 	if p.provisionerConfig.volumeNamePrefix != "" {
@@ -765,33 +767,33 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum
 	volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions}
 	volume, err := cli.VolumeCreate(volumeReq)
 	if err != nil {
-		glog.Errorf("error creating volume, error: %v ", err)
-		return nil, 0, "", fmt.Errorf("error creating volume %v", err)
+		glog.Errorf("failed to create volume: %v", err)
+		return nil, 0, "", fmt.Errorf("failed to create volume: %v", err)
 	}
-	glog.V(1).Infof("volume with size: %d and name: %s created", volume.Size, volume.Name)
+	glog.V(1).Infof("volume with size %d and name %s created", volume.Size, volume.Name)
 	volID = volume.Id
 	dynamicHostIps, err := getClusterNodes(cli, volume.Cluster)
 	if err != nil {
-		glog.Errorf("error [%v] when getting cluster nodes for volume [%s]", err, volume)
-		return nil, 0, "", fmt.Errorf("error [%v] when getting cluster nodes for volume [%s]", err, volume)
+		glog.Errorf("failed to get cluster nodes for volume %s: %v", volume, err)
+		return nil, 0, "", fmt.Errorf("failed to get cluster nodes for volume %s: %v", volume, err)
 	}
 
 	// The 'endpointname' is created in form of 'glusterfs-dynamic-<claimname>'.
 	// createEndpointService() checks for this 'endpoint' existence in PVC's namespace and
-	// If not found, it create an endpoint and svc using the IPs we dynamically picked at time
+	// If not found, it create an endpoint and service using the IPs we dynamically picked at time
 	// of volume creation.
 	epServiceName := dynamicEpSvcPrefix + p.options.PVC.Name
 	epNamespace := p.options.PVC.Namespace
 	endpoint, service, err := p.createEndpointService(epNamespace, epServiceName, dynamicHostIps, p.options.PVC.Name)
 	if err != nil {
-		glog.Errorf("failed to create endpoint/service: %v", err)
+		glog.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
 		deleteErr := cli.VolumeDelete(volume.Id)
 		if deleteErr != nil {
-			glog.Errorf("error when deleting the volume :%v , manual deletion required", deleteErr)
+			glog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr)
 		}
-		return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v", err)
+		return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
 	}
-	glog.V(3).Infof("dynamic ep %v and svc : %v ", endpoint, service)
+	glog.V(3).Infof("dynamic endpoint %v and service %v ", endpoint, service)
 	return &v1.GlusterfsVolumeSource{
 		EndpointsName: endpoint.Name,
 		Path: volume.Name,
@@ -824,12 +826,12 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS
 	}
 	_, err = kubeClient.CoreV1().Endpoints(namespace).Create(endpoint)
 	if err != nil && errors.IsAlreadyExists(err) {
-		glog.V(1).Infof("endpoint [%s] already exist in namespace [%s]", endpoint, namespace)
+		glog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace)
 		err = nil
 	}
 	if err != nil {
 		glog.Errorf("failed to create endpoint: %v", err)
-		return nil, nil, fmt.Errorf("error creating endpoint: %v", err)
+		return nil, nil, fmt.Errorf("failed to create endpoint: %v", err)
 	}
 	service = &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
@@ -844,7 +846,7 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS
 			{Protocol: "TCP", Port: 1}}}}
 	_, err = kubeClient.CoreV1().Services(namespace).Create(service)
 	if err != nil && errors.IsAlreadyExists(err) {
-		glog.V(1).Infof("service [%s] already exist in namespace [%s]", service, namespace)
+		glog.V(1).Infof("service %s already exist in namespace %s", service, namespace)
 		err = nil
 	}
 	if err != nil {
@@ -861,10 +863,10 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi
 	}
 	err = kubeClient.CoreV1().Services(namespace).Delete(epServiceName, nil)
 	if err != nil {
-		glog.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err)
-		return fmt.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err)
+		glog.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err)
+		return fmt.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err)
 	}
-	glog.V(1).Infof("service/endpoint %s/%s deleted successfully", namespace, epServiceName)
+	glog.V(1).Infof("service/endpoint: %s/%s deleted successfully", namespace, epServiceName)
 	return nil
 }
 
@@ -872,7 +874,7 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi
 func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) {
 	secretMap, err := volutil.GetSecretForPV(namespace, secretName, glusterfsPluginName, kubeClient)
 	if err != nil {
-		glog.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err)
+		glog.Errorf("failed to get secret: %s/%s: %v", namespace, secretName, err)
 		return "", fmt.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err)
 	}
 	if len(secretMap) == 0 {
@@ -885,6 +887,7 @@ func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (
 		}
 		secret = v
 	}
+
 	// If not found, the last secret in the map wins as done before
 	return secret, nil
 }
@@ -917,7 +920,7 @@ func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string,
 	return dynamicHostIps, nil
 }
 
-// parseClassParameters parses StorageClass.Parameters
+// parseClassParameters parses StorageClass parameters.
 func parseClassParameters(params map[string]string, kubeClient clientset.Interface) (*provisionerConfig, error) {
 	var cfg provisionerConfig
 	var err error
@@ -1001,7 +1004,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 		if len(parseVolumeTypeInfo) >= 2 {
 			newReplicaCount, err := convertVolumeParam(parseVolumeTypeInfo[1])
 			if err != nil {
-				return nil, fmt.Errorf("error [%v] when parsing value %q of option '%s' for volume plugin %s", err, parseVolumeTypeInfo[1], "volumetype", glusterfsPluginName)
+				return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[1], err)
 			}
 			cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: newReplicaCount}}
 		} else {
@@ -1011,11 +1014,11 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 		if len(parseVolumeTypeInfo) >= 3 {
 			newDisperseData, err := convertVolumeParam(parseVolumeTypeInfo[1])
 			if err != nil {
-				return nil, fmt.Errorf("error [%v] when parsing value %q of option '%s' for volume plugin %s", parseVolumeTypeInfo[1], err, "volumetype", glusterfsPluginName)
+				return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[1], err)
 			}
 			newDisperseRedundancy, err := convertVolumeParam(parseVolumeTypeInfo[2])
 			if err != nil {
-				return nil, fmt.Errorf("error [%v] when parsing value %q of option '%s' for volume plugin %s", err, parseVolumeTypeInfo[2], "volumetype", glusterfsPluginName)
+				return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[2], err)
 			}
 			cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityEC, Disperse: gapi.DisperseDurability{Data: newDisperseData, Redundancy: newDisperseRedundancy}}
 		} else {
@@ -1036,6 +1039,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 	}
 
 	if len(cfg.secretName) != 0 || len(cfg.secretNamespace) != 0 {
+
 		// secretName + Namespace has precedence over userKey
 		if len(cfg.secretName) != 0 && len(cfg.secretNamespace) != 0 {
 			cfg.secretValue, err = parseSecret(cfg.secretNamespace, cfg.secretName, kubeClient)
@@ -1056,7 +1060,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
 	if len(parseVolumeOptions) != 0 {
 		volOptions := dstrings.Split(parseVolumeOptions, ",")
 		if len(volOptions) == 0 {
-			return nil, fmt.Errorf("StorageClass for provisioner %q must have valid ( for eg, 'client.ssl on') volume option", glusterfsPluginName)
+			return nil, fmt.Errorf("StorageClass for provisioner %q must have valid (for e.g., 'client.ssl on') volume option", glusterfsPluginName)
 		}
 		cfg.volumeOptions = volOptions
 
@@ -1094,14 +1098,14 @@ func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) {
 func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
 	pvSpec := spec.PersistentVolume.Spec
 	volumeName := pvSpec.Glusterfs.Path
-	glog.V(2).Infof("Request to expand volume: [%s]", volumeName)
+	glog.V(2).Infof("Received request to expand volume %s", volumeName)
 	volumeID, err := getVolumeID(spec.PersistentVolume, volumeName)
 
 	if err != nil {
-		return oldSize, fmt.Errorf("failed to get volumeID for volume [%s], err: %v", volumeName, err)
+		return oldSize, fmt.Errorf("failed to get volumeID for volume %s: %v", volumeName, err)
 	}
 
-	//Get details of SC.
+	//Get details of StorageClass.
 	class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume)
 	if err != nil {
 		return oldSize, err
@@ -1111,7 +1115,7 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
 		return oldSize, err
 	}
 
-	glog.V(4).Infof("Expanding volume %q with configuration %+v", volumeID, cfg)
+	glog.V(4).Infof("expanding volume: %q with configuration: %+v", volumeID, cfg)
 
 	//Create REST server connection
 	cli := gcli.NewClient(cfg.url, cfg.user, cfg.secretValue)
@@ -1130,7 +1134,7 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
 	//Check the existing volume size
 	currentVolumeInfo, err := cli.VolumeInfo(volumeID)
 	if err != nil {
-		glog.Errorf("error when fetching details of volume[%s]: %v", volumeName, err)
+		glog.Errorf("error when fetching details of volume %s: %v", volumeName, err)
 		return oldSize, err
 	}
 
@@ -1144,7 +1148,7 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
 	// Expand the volume
 	volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq)
 	if err != nil {
-		glog.Errorf("error when expanding the volume[%s]: %v", volumeName, err)
+		glog.Errorf("failed to expand volume %s: %v", volumeName, err)
 		return oldSize, err
 	}
 