Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-11 13:02:14 +00:00)

Merge pull request #45895 from humblec/glusterfs-defacto

Automatic merge from submit-queue.

Make interface references consistent across the gluster plugin code.

This commit is contained in commit 2cf0169092.
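This PR is a golint-driven cleanup of the glusterfs volume plugin: each type's methods now use one consistent receiver name (plugin for glusterfsPlugin, p for glusterfsVolumeProvisioner), exported identifiers gain doc comments that begin with their names, num_used becomes numUsed, an unused sentinel error is dropped, and else branches after return statements are flattened. As a quick illustration of the receiver-name convention, here is a minimal sketch using a hypothetical counter type rather than the plugin's real ones:

package main

import "fmt"

type counter struct{ n int }

// Before this kind of cleanup, methods might mix receiver names
// (c, cnt, ctr). Afterwards every method uses the same short name,
// so a reader can follow state through the type's methods.

// Add increments the counter by delta.
func (c *counter) Add(delta int) { c.n += delta }

// Value reports the current count.
func (c *counter) Value() int { return c.n }

func main() {
	c := &counter{}
	c.Add(3)
	fmt.Println(c.Value()) // 3
}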
@@ -265,6 +265,7 @@ pkg/util/yaml
 pkg/version/prometheus
 pkg/volume
 pkg/volume/downwardapi
+pkg/volume/glusterfs
 pkg/volume/projected
 pkg/volume/quobyte
 pkg/volume/util/nestedpendingoperations
@@ -45,7 +45,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )
 
-// This is the primary entrypoint for volume plugins.
+// ProbeVolumePlugins is the primary entrypoint for volume plugins.
 func ProbeVolumePlugins() []volume.VolumePlugin {
 	return []volume.VolumePlugin{&glusterfsPlugin{host: nil, exe: exec.New(), gidTable: make(map[string]*MinMaxAllocator)}}
 }
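The comment rewrite above follows the golint convention that a doc comment on an exported identifier must begin with the identifier's name, so godoc renders it as a proper sentence. Schematically (the Frobnicate name is made up for illustration):

package frob

// Frobnicate transforms its input; the comment begins with the
// function's name, as golint expects for exported identifiers.
// A comment like "This function transforms the input." would be
// flagged with a message along the lines of: comment on exported
// function Frobnicate should be of the form "Frobnicate ...".
func Frobnicate(s string) string { return s }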
@@ -203,9 +203,8 @@ func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*v1.Gl
 	// Glusterfs volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
 	if spec.Volume != nil && spec.Volume.Glusterfs != nil {
 		return spec.Volume.Glusterfs, spec.Volume.Glusterfs.ReadOnly
-	} else {
-		return spec.PersistentVolume.Spec.Glusterfs, spec.ReadOnly
 	}
+	return spec.PersistentVolume.Spec.Glusterfs, spec.ReadOnly
 }
 
 func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endpoints, pod *v1.Pod, mounter mount.Interface, exe exec.Interface) (volume.Mounter, error) {
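Dropping the else after a branch that returns is golint's indent-error-flow suggestion: when the if body ends in return, the else only adds indentation, so its body is hoisted into the enclosing scope. A small standalone sketch of the same transformation, using a hypothetical lookup helper:

package main

import (
	"errors"
	"fmt"
)

var errMissing = errors.New("missing")

// lookup returns the value for key, preferring the primary map.
func lookup(primary, fallback map[string]string, key string) (string, error) {
	// Instead of: if v, ok := ...; ok { return v, nil } else { ... }
	if v, ok := primary[key]; ok {
		return v, nil
	}
	// The former else body now sits at the outer level, cutting a
	// level of indentation without changing behavior.
	if v, ok := fallback[key]; ok {
		return v, nil
	}
	return "", errMissing
}

func main() {
	p := map[string]string{"a": "1"}
	f := map[string]string{"b": "2"}
	v, _ := lookup(p, f, "b")
	fmt.Println(v) // 2
}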
@@ -366,7 +365,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
 	var addrlist []string
 	if b.hosts == nil {
 		return fmt.Errorf("glusterfs: endpoint is nil")
-	} else {
+	}
 	addr := make(map[string]struct{})
 	if b.hosts.Subsets != nil {
 		for _, s := range b.hosts.Subsets {
@@ -390,7 +389,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
 			return nil
 		}
 	}
-	}
 
 	// Failed mount scenario.
 	// Since glusterfs does not return error text
@@ -513,8 +511,8 @@ func (d *glusterfsVolumeDeleter) GetPath() string {
 // Traverse the PVs, fetching all the GIDs from those
 // in a given storage class, and mark them in the table.
 //
-func (p *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAllocator) error {
-	kubeClient := p.host.GetKubeClient()
+func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAllocator) error {
+	kubeClient := plugin.host.GetKubeClient()
 	if kubeClient == nil {
 		return fmt.Errorf("glusterfs: failed to get kube client when collecting gids")
 	}
@@ -562,11 +560,11 @@ func (p *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAllocato
 // used in PVs of this storage class by traversing the PVs.
 // - Adapt the range of the table to the current range of the SC.
 //
-func (p *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinMaxAllocator, error) {
+func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinMaxAllocator, error) {
 	var err error
-	p.gidTableLock.Lock()
-	gidTable, ok := p.gidTable[className]
-	p.gidTableLock.Unlock()
+	plugin.gidTableLock.Lock()
+	gidTable, ok := plugin.gidTable[className]
+	plugin.gidTableLock.Unlock()
 
 	if ok {
 		err = gidTable.SetRange(min, max)
@@ -584,7 +582,7 @@ func (p *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinM
 	}
 
 	// collect gids with the full range
-	err = p.collectGids(className, newGidTable)
+	err = plugin.collectGids(className, newGidTable)
 	if err != nil {
 		return nil, err
 	}
@@ -597,10 +595,10 @@ func (p *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinM
 
 	// if in the meantime a table appeared, use it
 
-	p.gidTableLock.Lock()
-	defer p.gidTableLock.Unlock()
+	plugin.gidTableLock.Lock()
+	defer plugin.gidTableLock.Unlock()
 
-	gidTable, ok = p.gidTable[className]
+	gidTable, ok = plugin.gidTable[className]
 	if ok {
 		err = gidTable.SetRange(min, max)
 		if err != nil {
@@ -610,7 +608,7 @@ func (p *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinM
 		return gidTable, nil
 	}
 
-	p.gidTable[className] = newGidTable
+	plugin.gidTable[className] = newGidTable
 
 	return newGidTable, nil
 }
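The getGidTable logic touched by these hunks is a double-checked pattern: look the table up under the lock, build a new allocator outside the lock (collecting gids can be slow), then re-take the lock and prefer any table another goroutine installed in the meantime. A minimal standalone sketch of the same pattern, with a hypothetical table type and registry standing in for MinMaxAllocator and the plugin:

package main

import (
	"fmt"
	"sync"
)

type table struct{ name string }

type registry struct {
	mu     sync.Mutex
	tables map[string]*table
}

// get returns the table for name, building one outside the lock if
// needed; a table created concurrently by another goroutine wins.
func (r *registry) get(name string) *table {
	r.mu.Lock()
	t, ok := r.tables[name]
	r.mu.Unlock()
	if ok {
		return t
	}

	// Expensive construction happens without holding the lock.
	fresh := &table{name: name}

	r.mu.Lock()
	defer r.mu.Unlock()
	// Re-check: if a table appeared in the meantime, use it.
	if t, ok := r.tables[name]; ok {
		return t
	}
	r.tables[name] = fresh
	return fresh
}

func main() {
	r := &registry{tables: map[string]*table{}}
	fmt.Println(r.get("fast").name) // fast
}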
@@ -768,23 +766,23 @@ func (d *glusterfsVolumeDeleter) Delete() error {
 	return nil
 }
 
-func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 	var err error
-	if r.options.PVC.Spec.Selector != nil {
+	if p.options.PVC.Spec.Selector != nil {
 		glog.V(4).Infof("glusterfs: not able to parse your claim Selector")
 		return nil, fmt.Errorf("glusterfs: not able to parse your claim Selector")
 	}
-	glog.V(4).Infof("glusterfs: Provison VolumeOptions %v", r.options)
-	scName := v1helper.GetPersistentVolumeClaimClass(r.options.PVC)
-	cfg, err := parseClassParameters(r.options.Parameters, r.plugin.host.GetKubeClient())
+	glog.V(4).Infof("glusterfs: Provison VolumeOptions %v", p.options)
+	scName := v1helper.GetPersistentVolumeClaimClass(p.options.PVC)
+	cfg, err := parseClassParameters(p.options.Parameters, p.plugin.host.GetKubeClient())
 	if err != nil {
 		return nil, err
 	}
-	r.provisionerConfig = *cfg
+	p.provisionerConfig = *cfg
 
-	glog.V(4).Infof("glusterfs: creating volume with configuration %+v", r.provisionerConfig)
+	glog.V(4).Infof("glusterfs: creating volume with configuration %+v", p.provisionerConfig)
 
-	gidTable, err := r.plugin.getGidTable(scName, cfg.gidMin, cfg.gidMax)
+	gidTable, err := p.plugin.getGidTable(scName, cfg.gidMin, cfg.gidMax)
 	if err != nil {
 		return nil, fmt.Errorf("glusterfs: failed to get gidTable: %v", err)
 	}
@@ -795,9 +793,9 @@ func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 		return nil, fmt.Errorf("glusterfs: failed to reserve gid from table: %v", err)
 	}
 
-	glog.V(2).Infof("glusterfs: got gid [%d] for PVC %s", gid, r.options.PVC.Name)
+	glog.V(2).Infof("glusterfs: got gid [%d] for PVC %s", gid, p.options.PVC.Name)
 
-	glusterfs, sizeGB, err := r.CreateVolume(gid)
+	glusterfs, sizeGB, err := p.CreateVolume(gid)
 	if err != nil {
 		if releaseErr := gidTable.Release(gid); releaseErr != nil {
 			glog.Errorf("glusterfs: error when releasing gid in storageclass: %s", scName)
@@ -808,10 +806,10 @@ func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 	}
 	pv := new(v1.PersistentVolume)
 	pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs
-	pv.Spec.PersistentVolumeReclaimPolicy = r.options.PersistentVolumeReclaimPolicy
-	pv.Spec.AccessModes = r.options.PVC.Spec.AccessModes
+	pv.Spec.PersistentVolumeReclaimPolicy = p.options.PersistentVolumeReclaimPolicy
+	pv.Spec.AccessModes = p.options.PVC.Spec.AccessModes
 	if len(pv.Spec.AccessModes) == 0 {
-		pv.Spec.AccessModes = r.plugin.GetAccessModes()
+		pv.Spec.AccessModes = p.plugin.GetAccessModes()
 	}
 
 	gidStr := strconv.FormatInt(int64(gid), 10)
@@ -28,14 +28,23 @@ import (
 )
 
 var (
-	ErrNotFound     = errors.New("number not allocated")
+	//ErrConflict returned when value is already in use.
 	ErrConflict = errors.New("number already allocated")
+
+	//ErrInvalidRange returned invalid range, for eg# min > max
 	ErrInvalidRange = errors.New("invalid range")
+
+	//ErrOutOfRange returned when value is not in pool range.
 	ErrOutOfRange = errors.New("out of range")
+
+	//ErrRangeFull returned when no more free values in the pool.
 	ErrRangeFull = errors.New("range full")
+
+	//ErrInternal returned when no free item found, but a.free != 0.
 	ErrInternal = errors.New("internal error")
 )
 
+//MinMaxAllocator defines allocator struct.
 type MinMaxAllocator struct {
 	lock sync.Mutex
 	min  int
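These package-level errors.New values are sentinel errors: callers compare against the exported variable to distinguish failure modes, as the provisioner does with the allocator's results. A minimal sketch of the pattern, with a made-up reserver type rather than the plugin's code:

package main

import (
	"errors"
	"fmt"
)

var errRangeFull = errors.New("range full")

// reserver hands out ids until the range [0, limit) is exhausted.
type reserver struct {
	next, limit int
}

func (r *reserver) reserve() (int, error) {
	if r.next >= r.limit {
		return 0, errRangeFull
	}
	id := r.next
	r.next++
	return id, nil
}

func main() {
	r := &reserver{limit: 2}
	for {
		id, err := r.reserve()
		if errors.Is(err, errRangeFull) { // sentinel comparison
			fmt.Println("exhausted")
			return
		}
		fmt.Println("got", id)
	}
}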
@@ -57,6 +66,7 @@ type Rangeable interface {
 	SetRange(min, max int) error
 }
 
+// NewMinMaxAllocator return a new allocator or error based on provided min/max value.
 func NewMinMaxAllocator(min, max int) (*MinMaxAllocator, error) {
 	if min > max {
 		return nil, ErrInvalidRange
@@ -69,6 +79,7 @@ func NewMinMaxAllocator(min, max int) (*MinMaxAllocator, error) {
 	}, nil
 }
 
+//SetRange defines the range/pool with provided min and max values.
 func (a *MinMaxAllocator) SetRange(min, max int) error {
 	if min > max {
 		return ErrInvalidRange
@@ -86,17 +97,18 @@ func (a *MinMaxAllocator) SetRange(min, max int) error {
 	a.max = max
 
 	// Recompute how many free we have in the range
-	num_used := 0
+	numUsed := 0
 	for i := range a.used {
 		if a.inRange(i) {
-			num_used++
+			numUsed++
 		}
 	}
-	a.free = 1 + max - min - num_used
+	a.free = 1 + max - min - numUsed
 
 	return nil
 }
 
+//Allocate allocates provided value in the allocator and mark it as used.
 func (a *MinMaxAllocator) Allocate(i int) (bool, error) {
 	a.lock.Lock()
 	defer a.lock.Unlock()
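The rename from num_used to numUsed follows Go's mixedCaps convention (golint: don't use underscores in Go names). The recomputation itself is worth a second look: an inclusive range [min, max] holds 1 + max - min values, so the free count is that total minus the used values that still fall inside the range. A standalone sketch of the same arithmetic, with freeInRange as an illustrative helper name:

package main

import "fmt"

// freeInRange counts how many values of the inclusive range
// [min, max] are not present in used, mirroring SetRange's
// a.free = 1 + max - min - numUsed computation above.
func freeInRange(used map[int]struct{}, min, max int) int {
	numUsed := 0
	for i := range used {
		if i >= min && i <= max {
			numUsed++
		}
	}
	return 1 + max - min - numUsed
}

func main() {
	used := map[int]struct{}{2000: {}, 2002: {}, 9999: {}}
	// Range 2000..2004 has 5 values; 2 are used, 9999 is outside.
	fmt.Println(freeInRange(used, 2000, 2004)) // 3
}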
@@ -115,6 +127,7 @@ func (a *MinMaxAllocator) Allocate(i int) (bool, error) {
 	return true, nil
 }
 
+//AllocateNext allocates next value from the allocator.
 func (a *MinMaxAllocator) AllocateNext() (int, bool, error) {
 	a.lock.Lock()
 	defer a.lock.Unlock()
@@ -137,6 +150,7 @@ func (a *MinMaxAllocator) AllocateNext() (int, bool, error) {
 	return 0, false, ErrInternal
 }
 
+//Release free/delete provided value from the allocator.
 func (a *MinMaxAllocator) Release(i int) error {
 	a.lock.Lock()
 	defer a.lock.Unlock()
@@ -159,6 +173,7 @@ func (a *MinMaxAllocator) has(i int) bool {
 	return ok
 }
 
+//Has check whether the provided value is used in the allocator
 func (a *MinMaxAllocator) Has(i int) bool {
 	a.lock.Lock()
 	defer a.lock.Unlock()
@@ -166,6 +181,7 @@ func (a *MinMaxAllocator) Has(i int) bool {
 	return a.has(i)
 }
 
+//Free returns the number of free values in the allocator.
 func (a *MinMaxAllocator) Free() int {
 	a.lock.Lock()
 	defer a.lock.Unlock()
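Taken together, the allocator API documented above (NewMinMaxAllocator, SetRange, Allocate, AllocateNext, Release, Has, Free) gives the gid provisioner a thread-safe pool of integers. The following is a simplified, self-contained re-creation of that contract for illustration only; the real implementation lives in the glusterfs plugin package and differs in detail:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var (
	errInvalidRange = errors.New("invalid range")
	errOutOfRange   = errors.New("out of range")
	errConflict     = errors.New("number already allocated")
	errRangeFull    = errors.New("range full")
)

// pool is a toy MinMaxAllocator: it hands out integers from the
// inclusive range [min, max] and tracks which ones are in use.
type pool struct {
	mu       sync.Mutex
	min, max int
	used     map[int]struct{}
}

func newPool(min, max int) (*pool, error) {
	if min > max {
		return nil, errInvalidRange
	}
	return &pool{min: min, max: max, used: map[int]struct{}{}}, nil
}

// Allocate marks i as used, analogous to MinMaxAllocator.Allocate.
func (p *pool) Allocate(i int) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if i < p.min || i > p.max {
		return errOutOfRange
	}
	if _, ok := p.used[i]; ok {
		return errConflict
	}
	p.used[i] = struct{}{}
	return nil
}

// AllocateNext returns the first free value in the range.
func (p *pool) AllocateNext() (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for i := p.min; i <= p.max; i++ {
		if _, ok := p.used[i]; !ok {
			p.used[i] = struct{}{}
			return i, nil
		}
	}
	return 0, errRangeFull
}

// Release frees a previously allocated value.
func (p *pool) Release(i int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	delete(p.used, i)
}

func main() {
	p, _ := newPool(2000, 2002)
	_ = p.Allocate(2000)
	id, _ := p.AllocateNext() // 2001: 2000 is already taken
	fmt.Println(id)
	p.Release(2000)
	id, _ = p.AllocateNext() // 2000 is available again
	fmt.Println(id)
}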