Merge pull request #37886 from obnoxxx/gluster-dp-gid

Automatic merge from submit-queue

Implement GID security for the GlusterFS dynamic provisioner.

**What this PR does / why we need it**:

This PR implements GID security for the glusterfs dynamic provisioner.
It is a reworked version of PR #37549.

**Release note**:

```release-note
The glusterfs dynamic volume provisioner will now choose a unique GID for new persistent volumes from a range that can be configured in the storage class with the "gidMin" and "gidMax" parameters. The default range is 2000 - 4294967295 (max uint32).
```
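For illustration, here is a condensed, self-contained sketch of how the new parameters behave. The `parseGidRange` helper is hypothetical and simplified (the real logic lives in `parseClassParameters` in the diff below, which also matches keys case-insensitively); it parses each bound as a uint32, falls back to the defaults, and rejects inverted ranges:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// parseGidRange is a hypothetical stand-in for the gidmin/gidmax handling
// added to parseClassParameters: parse each bound as a uint32, fall back to
// the defaults (2000 and math.MaxUint32), and reject inverted ranges.
func parseGidRange(params map[string]string) (uint32, uint32, error) {
	gidMin, gidMax := uint32(2000), uint32(math.MaxUint32)
	if v, ok := params["gidMin"]; ok {
		u, err := strconv.ParseUint(v, 10, 32) // same parsing as convertGid
		if err != nil {
			return 0, 0, fmt.Errorf("invalid gidMin %q: %v", v, err)
		}
		gidMin = uint32(u)
	}
	if v, ok := params["gidMax"]; ok {
		u, err := strconv.ParseUint(v, 10, 32)
		if err != nil {
			return 0, 0, fmt.Errorf("invalid gidMax %q: %v", v, err)
		}
		gidMax = uint32(u)
	}
	if gidMin > gidMax {
		return 0, 0, fmt.Errorf("gidMax must be >= gidMin")
	}
	return gidMin, gidMax, nil
}

func main() {
	min, max, err := parseGidRange(map[string]string{"gidMin": "40000", "gidMax": "50000"})
	fmt.Println(min, max, err) // 40000 50000 <nil>
}
```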
Kubernetes Submit Queue 2016-12-04 14:34:01 -08:00 committed by GitHub
commit bc342006bf
7 changed files with 629 additions and 9 deletions

View File

@@ -78,6 +78,8 @@ parameters:
restuser: "admin"
secretNamespace: "default"
secretName: "heketi-secret"
gidMin: "40000"
gidMax: "50000"
```
* `resturl` : Gluster REST service/Heketi service URL which provisions gluster volumes on demand. The general format should be `IPaddress:Port`, and this is a mandatory parameter for the GlusterFS dynamic provisioner. If the Heketi service is exposed as a routable service in an openshift/kubernetes setup, this can have a format similar to
@@ -92,6 +94,8 @@ When both `restuserkey` and `secretNamespace` + `secretName` is specified, the s
An example of a secret can be found in [glusterfs-provisioning-secret.yaml](glusterfs-provisioning-secret.yaml).
* `gidMin` + `gidMax` : The minimum and maximum values of the GID range for the storage class. A unique value (GID) in this range (gidMin-gidMax) will be used for dynamically provisioned volumes. These are optional values. If not specified, the volume will be provisioned with a GID between 2000 and 4294967295, the defaults for gidMin and gidMax respectively.
Reference: [How to configure Heketi](https://github.com/heketi/heketi/wiki/Setting-up-the-topology)
When persistent volumes are dynamically provisioned, the Gluster plugin automatically creates an endpoint and a headless service named `gluster-dynamic-<claimname>`. This dynamic endpoint and service are deleted automatically when the persistent volume claim is deleted.

View File

@@ -9,3 +9,5 @@ parameters:
restuser: "admin"
secretNamespace: "default"
secretName: "heketi-secret"
gidMin: "40000"
gidMax: "50000"

View File

@@ -15,6 +15,7 @@ go_library(
srcs = [
"doc.go",
"glusterfs.go",
"glusterfs_minmax.go",
"glusterfs_util.go",
],
tags = ["automanaged"],
@@ -22,13 +23,17 @@ go_library(
"//pkg/api/errors:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/storage/v1beta1/util:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/registry/core/service/allocator:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/heketi/heketi/client/api/go-client",
"//vendor:github.com/heketi/heketi/pkg/glusterfs/api",
@@ -37,7 +42,10 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["glusterfs_test.go"],
srcs = [
"glusterfs_minmax_test.go",
"glusterfs_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [

View File

@@ -18,10 +18,13 @@ package glusterfs
import (
"fmt"
"math"
"os"
"path"
"runtime"
"strconv"
dstrings "strings"
"sync"
"github.com/golang/glog"
gcli "github.com/heketi/heketi/client/api/go-client"
@@ -29,23 +32,28 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
- return []volume.VolumePlugin{&glusterfsPlugin{nil, exec.New()}}
+ return []volume.VolumePlugin{&glusterfsPlugin{host: nil, exe: exec.New(), gidTable: make(map[string]*MinMaxAllocator)}}
}
type glusterfsPlugin struct {
host volume.VolumeHost
exe exec.Interface
gidTable map[string]*MinMaxAllocator
gidTableLock sync.Mutex
}
var _ volume.VolumePlugin = &glusterfsPlugin{}
@@ -63,6 +71,9 @@ const (
durabilityType = "replicate"
secretKeyName = "key" // key name used in secret
gciGlusterMountBinariesPath = "/sbin/mount.glusterfs"
defaultGidMin = 2000
defaultGidMax = math.MaxUint32
absoluteGidMax = math.MaxUint32
)
func (plugin *glusterfsPlugin) Init(host volume.VolumeHost) error {
@@ -381,6 +392,8 @@ type provisioningConfig struct {
secretName string
secretValue string
clusterId string
gidMin uint32
gidMax uint32
}
type glusterfsVolumeProvisioner struct {
@@ -389,6 +402,15 @@ type glusterfsVolumeProvisioner struct {
options volume.VolumeOptions
}
func convertGid(inputGid string) (uint32, error) {
inputGid32, err := strconv.ParseUint(inputGid, 10, 32)
if err != nil {
return 0, fmt.Errorf("glusterfs: failed to parse gid %v", inputGid)
}
outputGid := uint32(inputGid32)
return outputGid, nil
}
func (plugin *glusterfsPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec)
}
@@ -420,6 +442,120 @@ func (d *glusterfsVolumeDeleter) GetPath() string {
return d.plugin.host.GetPodVolumeDir(d.glusterfsMounter.glusterfs.pod.UID, strings.EscapeQualifiedNameForDisk(name), d.glusterfsMounter.glusterfs.volName)
}
//
// Traverse the PVs, fetching all the GIDs from those
// in a given storage class, and mark them in the table.
//
func (p *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAllocator) error {
pvList, err := p.host.GetKubeClient().Core().PersistentVolumes().List(v1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
glog.Errorf("glusterfs: failed to get existing persistent volumes")
return err
}
for _, pv := range pvList.Items {
if storageutil.GetVolumeStorageClass(&pv) != className {
continue
}
pvName := pv.ObjectMeta.Name
gidStr, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]
if !ok {
glog.Warningf("glusterfs: no gid found in pv '%v'", pvName)
continue
}
gid, err := convertGid(gidStr)
if err != nil {
glog.Error(err)
continue
}
_, err = gidTable.Allocate(int(gid))
if err == ErrConflict {
glog.Warningf("glusterfs: gid %v found in pv %v was already allocated", gid)
} else if err != nil {
glog.Errorf("glusterfs: failed to store gid %v found in pv '%v': %v", gid, pvName, err)
return err
}
}
return nil
}
//
// Return the gid table for a storage class.
// - If this is the first time, fill it with all the gids
// used in PVs of this storage class by traversing the PVs.
// - Adapt the range of the table to the current range of the SC.
//
func (p *glusterfsPlugin) getGidTable(className string, min uint32, max uint32) (*MinMaxAllocator, error) {
var err error
p.gidTableLock.Lock()
gidTable, ok := p.gidTable[className]
p.gidTableLock.Unlock()
if ok {
err = gidTable.SetRange(int(min), int(max))
if err != nil {
return nil, err
}
return gidTable, nil
}
// create a new table and fill it
newGidTable, err := NewMinMaxAllocator(0, absoluteGidMax)
if err != nil {
return nil, err
}
// collect gids with the full range
err = p.collectGids(className, newGidTable)
if err != nil {
return nil, err
}
// and only reduce the range afterwards
err = newGidTable.SetRange(int(min), int(max))
if err != nil {
return nil, err
}
// if in the meantime a table appeared, use it
p.gidTableLock.Lock()
defer p.gidTableLock.Unlock()
gidTable, ok = p.gidTable[className]
if ok {
err = gidTable.SetRange(int(min), int(max))
if err != nil {
return nil, err
}
return gidTable, nil
}
p.gidTable[className] = newGidTable
return newGidTable, nil
}
func (d *glusterfsVolumeDeleter) getGid() (uint32, bool, error) {
gidStr, ok := d.spec.Annotations[volumehelper.VolumeGidAnnotationKey]
if !ok {
return 0, false, nil
}
gid, err := convertGid(gidStr)
return gid, true, err
}
func (d *glusterfsVolumeDeleter) Delete() error {
var err error
glog.V(2).Infof("glusterfs: delete volume: %s", d.glusterfsMounter.path)
@@ -438,6 +574,21 @@ func (d *glusterfsVolumeDeleter) Delete() error {
glog.V(4).Infof("glusterfs: deleting volume %q with configuration %+v", volumeId, d.provisioningConfig)
gid, exists, err := d.getGid()
if err != nil {
glog.Error(err)
} else if exists {
gidTable, err := d.plugin.getGidTable(class.Name, cfg.gidMin, cfg.gidMax)
if err != nil {
return fmt.Errorf("glusterfs: failed to get gidTable: %v", err)
}
err = gidTable.Release(int(gid))
if err != nil {
return fmt.Errorf("glusterfs: failed to release gid %v: %v", gid, err)
}
}
cli := gcli.NewClient(d.url, d.user, d.secretValue)
if cli == nil {
glog.Errorf("glusterfs: failed to create glusterfs rest client")
@@ -482,7 +633,7 @@ func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
return nil, fmt.Errorf("glusterfs: not able to parse your claim Selector")
}
glog.V(4).Infof("glusterfs: Provison VolumeOptions %v", r.options)
scName := storageutil.GetClaimStorageClass(r.options.PVC)
cfg, err := parseClassParameters(r.options.Parameters, r.plugin.host.GetKubeClient())
if err != nil {
return nil, err
@@ -490,8 +641,26 @@ func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
r.provisioningConfig = *cfg
glog.V(4).Infof("glusterfs: creating volume with configuration %+v", r.provisioningConfig)
- glusterfs, sizeGB, err := r.CreateVolume()
gidTable, err := r.plugin.getGidTable(scName, cfg.gidMin, cfg.gidMax)
if err != nil {
return nil, fmt.Errorf("glusterfs: failed to get gidTable: %v", err)
}
gid, _, err := gidTable.AllocateNext()
if err != nil {
glog.Errorf("glusterfs: failed to reserve gid from table: %v", err)
return nil, fmt.Errorf("glusterfs: failed to reserve gid from table: %v", err)
}
glog.V(2).Infof("glusterfs: got gid [%d] for PVC %s", gid, r.options.PVC.Name)
+ glusterfs, sizeGB, err := r.CreateVolume(gid)
if err != nil {
if release_err := gidTable.Release(gid); release_err != nil {
glog.Errorf("glusterfs: error when releasing gid in storageclass: %s", scName)
}
glog.Errorf("glusterfs: create volume err: %v.", err)
return nil, fmt.Errorf("glusterfs: create volume err: %v.", err)
}
@@ -502,13 +671,17 @@ func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
if len(pv.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = r.plugin.GetAccessModes()
}
gidStr := strconv.FormatInt(int64(gid), 10)
pv.Annotations = map[string]string{volumehelper.VolumeGidAnnotationKey: gidStr}
pv.Spec.Capacity = v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
}
return pv, nil
}
- func (p *glusterfsVolumeProvisioner) CreateVolume() (r *v1.GlusterfsVolumeSource, size int, err error) {
+ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, err error) {
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
@@ -523,7 +696,8 @@ func (p *glusterfsVolumeProvisioner) CreateVolume() (r *v1.GlusterfsVolumeSource
return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
}
clusterIds := dstrings.Split(p.clusterId, ",")
- volumeReq := &gapi.VolumeCreateRequest{Size: sz, Clusters: clusterIds, Durability: gapi.VolumeDurabilityInfo{Type: durabilityType, Replicate: gapi.ReplicaDurability{Replica: replicaCount}}}
+ gid64 := int64(gid)
+ volumeReq := &gapi.VolumeCreateRequest{Size: sz, Clusters: clusterIds, Gid: gid64, Durability: gapi.VolumeDurabilityInfo{Type: durabilityType, Replicate: gapi.ReplicaDurability{Replica: replicaCount}}}
volume, err := cli.VolumeCreate(volumeReq)
if err != nil {
glog.Errorf("glusterfs: error creating volume %v ", err)
@@ -681,6 +855,18 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
cfg.clusterId = v
case "restauthenabled":
authEnabled = dstrings.ToLower(v) == "true"
case "gidmin":
parseGidMin, err := convertGid(v)
if err != nil {
return nil, fmt.Errorf("glusterfs: invalid value %q for volume plugin %s", k, glusterfsPluginName)
}
cfg.gidMin = parseGidMin
case "gidmax":
parseGidMax, err := convertGid(v)
if err != nil {
return nil, fmt.Errorf("glusterfs: invalid value %q for volume plugin %s", k, glusterfsPluginName)
}
cfg.gidMax = parseGidMax
default:
return nil, fmt.Errorf("glusterfs: invalid option %q for volume plugin %s", k, glusterfsPluginName)
}
@@ -711,5 +897,18 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
} else {
cfg.secretValue = cfg.userKey
}
if cfg.gidMin == 0 {
cfg.gidMin = defaultGidMin
}
if cfg.gidMax == 0 {
cfg.gidMax = defaultGidMax
}
if cfg.gidMin > cfg.gidMax {
return nil, fmt.Errorf("StorageClass for provisioner %q must have gidMax value >= gidMin", glusterfsPluginName)
}
return &cfg, nil
}
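Taken together, the changes above add an allocate-on-provision, release-on-delete lifecycle around a PV annotation. Below is a minimal sketch of that flow, with a hypothetical `stubAllocator` standing in for `MinMaxAllocator`, and assuming the annotation key string is the value of `volumehelper.VolumeGidAnnotationKey`:

```go
package main

import (
	"fmt"
	"strconv"
)

// stubAllocator is a hypothetical stand-in for MinMaxAllocator, just enough
// to illustrate the provision/delete flow implemented above.
type stubAllocator struct {
	min, max int
	used     map[int]bool
}

// allocateNext scans upward from min for the first free id, like AllocateNext.
func (a *stubAllocator) allocateNext() (int, bool) {
	for i := a.min; i <= a.max; i++ {
		if !a.used[i] {
			a.used[i] = true
			return i, true
		}
	}
	return 0, false
}

func (a *stubAllocator) release(i int) { delete(a.used, i) }

const gidAnnotation = "pv.beta.kubernetes.io/gid" // volumehelper.VolumeGidAnnotationKey

func main() {
	table := &stubAllocator{min: 2000, max: 2002, used: map[int]bool{}}

	// Provision: reserve a gid, pass it to CreateVolume, record it on the PV.
	gid, ok := table.allocateNext()
	if !ok {
		panic("gid range exhausted") // Provision surfaces this as an error
	}
	pvAnnotations := map[string]string{gidAnnotation: strconv.Itoa(gid)}
	fmt.Println("provisioned:", pvAnnotations)

	// Delete: read the gid back from the annotation and return it to the table.
	if s, ok := pvAnnotations[gidAnnotation]; ok {
		if g, err := strconv.Atoi(s); err == nil {
			table.release(g)
		}
	}
	fmt.Println("released gid", gid, "back to the table")
}
```

The double-checked locking in `getGidTable` ensures that concurrent provisions for the same storage class end up sharing a single table.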

View File

@@ -0,0 +1,175 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// This implementation is space-efficient for sparse
// allocation over a large range. It could be optimized
// for a high absolute number of allocations with a bitmap.
//
package glusterfs
import (
"errors"
"sync"
"k8s.io/kubernetes/pkg/registry/core/service/allocator"
)
var (
ErrNotFound = errors.New("number not allocated")
ErrConflict = errors.New("number already allocated")
ErrInvalidRange = errors.New("invalid range")
ErrOutOfRange = errors.New("out of range")
ErrRangeFull = errors.New("range full")
ErrInternal = errors.New("internal error")
)
type MinMaxAllocator struct {
lock sync.Mutex
min int
max int
free int
used map[int]bool
}
var _ Rangeable = &MinMaxAllocator{}
// Rangeable is an Interface that can adjust its min/max range.
// Rangeable should be threadsafe
type Rangeable interface {
allocator.Interface
SetRange(min, max int) error
}
func NewMinMaxAllocator(min, max int) (*MinMaxAllocator, error) {
if min > max {
return nil, ErrInvalidRange
}
return &MinMaxAllocator{
min: min,
max: max,
free: 1 + max - min,
used: map[int]bool{},
}, nil
}
func (a *MinMaxAllocator) SetRange(min, max int) error {
if min > max {
return ErrInvalidRange
}
a.lock.Lock()
defer a.lock.Unlock()
// Check if we need to change
if a.min == min && a.max == max {
return nil
}
a.min = min
a.max = max
// Recompute how many free we have in the range
num_used := 0
for i := range a.used {
if a.inRange(i) {
num_used++
}
}
a.free = 1 + max - min - num_used
return nil
}
func (a *MinMaxAllocator) Allocate(i int) (bool, error) {
a.lock.Lock()
defer a.lock.Unlock()
if !a.inRange(i) {
return false, ErrOutOfRange
}
if a.has(i) {
return false, ErrConflict
}
a.used[i] = true
a.free--
return true, nil
}
func (a *MinMaxAllocator) AllocateNext() (int, bool, error) {
a.lock.Lock()
defer a.lock.Unlock()
// Fast check if we're out of items
if a.free <= 0 {
return 0, false, ErrRangeFull
}
// Scan from the minimum until we find a free item
for i := a.min; i <= a.max; i++ {
if !a.has(i) {
a.used[i] = true
a.free--
return i, true, nil
}
}
// no free item found, but a.free != 0
return 0, false, ErrInternal
}
func (a *MinMaxAllocator) Release(i int) error {
a.lock.Lock()
defer a.lock.Unlock()
if !a.has(i) {
return nil
}
delete(a.used, i)
if a.inRange(i) {
a.free++
}
return nil
}
func (a *MinMaxAllocator) has(i int) bool {
_, ok := a.used[i]
return ok
}
func (a *MinMaxAllocator) Has(i int) bool {
a.lock.Lock()
defer a.lock.Unlock()
return a.has(i)
}
func (a *MinMaxAllocator) Free() int {
a.lock.Lock()
defer a.lock.Unlock()
return a.free
}
func (a *MinMaxAllocator) inRange(i int) bool {
return a.min <= i && i <= a.max
}
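One subtlety worth noting in the allocator above: `SetRange` does not forget allocations that fall outside the new bounds; it only recomputes the free count over the overlap, so a gid stays reserved even while the storage class range temporarily excludes it. A short sketch, written as a Go example test that assumes the `glusterfs` package above and an `fmt` import:

```go
func ExampleMinMaxAllocator_SetRange() {
	m, _ := NewMinMaxAllocator(1, 10)
	m.Allocate(5)         // free drops to 9
	_ = m.SetRange(6, 10) // 5 is out of range but stays tracked; free is 5
	_ = m.SetRange(1, 10) // widen again: 5 still counts as used; free is 9
	fmt.Println(m.Free(), m.Has(5))
	// Output: 9 true
}
```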

View File

@@ -0,0 +1,226 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"testing"
)
func TestNewFree(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if f := m.Free(); f != (max - min + 1) {
t.Errorf("expect to get %d free, but got %d", (max - min + 1), f)
}
}
func TestNewInvalidRange(t *testing.T) {
if _, err := NewMinMaxAllocator(10, 1); err != ErrInvalidRange {
t.Errorf("expect to get Error '%v', got '%v'", ErrInvalidRange, err)
}
}
func TestSetRange(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if err = m.SetRange(10, 1); err != ErrInvalidRange {
t.Errorf("expected to get error '%v', got '%v'", ErrInvalidRange, err)
}
if err = m.SetRange(1, 2); err != nil {
t.Errorf("error setting range: '%v'", err)
}
if f := m.Free(); f != 2 {
t.Errorf("expect to get %d free, but got %d", 2, f)
}
if ok, _ := m.Allocate(1); !ok {
t.Errorf("error allocate offset %v", 1)
}
if f := m.Free(); f != 1 {
t.Errorf("expect to get 1 free, but got %d", f)
}
if err = m.SetRange(1, 1); err != nil {
t.Errorf("error setting range: '%v'", err)
}
if f := m.Free(); f != 0 {
t.Errorf("expect to get 0 free, but got %d", f)
}
if err = m.SetRange(2, 2); err != nil {
t.Errorf("error setting range: '%v'", err)
}
if f := m.Free(); f != 1 {
t.Errorf("expect to get 1 free, but got %d", f)
}
}
func TestAllocateNext(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
el, ok, _ := m.AllocateNext()
if !ok {
t.Fatalf("unexpected error")
}
if !m.Has(el) {
t.Errorf("expect element %v allocated", el)
}
if f := m.Free(); f != (max-min+1)-1 {
t.Errorf("expect to get %d free, but got %d", (max-min+1)-1, f)
}
}
func TestAllocateMax(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
for i := 1; i <= max; i++ {
if _, ok, _ := m.AllocateNext(); !ok {
t.Fatalf("unexpected error")
}
}
if _, ok, _ := m.AllocateNext(); ok {
t.Errorf("unexpected success")
}
if f := m.Free(); f != 0 {
t.Errorf("expect to get %d free, but got %d", 0, f)
}
}
func TestAllocate(t *testing.T) {
min := 1
max := 10
offset := 3
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if ok, err := m.Allocate(offset); !ok {
t.Errorf("error allocate offset %v: %v", offset, err)
}
if !m.Has(offset) {
t.Errorf("expect element %v allocated", offset)
}
if f := m.Free(); f != (max-min+1)-1 {
t.Errorf("expect to get %d free, but got %d", (max-min+1)-1, f)
}
}
func TestAllocateConflict(t *testing.T) {
min := 1
max := 10
offset := 3
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if ok, err := m.Allocate(offset); !ok {
t.Errorf("error allocate offset %v: %v", offset, err)
}
ok, err := m.Allocate(offset)
if ok {
t.Errorf("unexpected success")
}
if err != ErrConflict {
t.Errorf("expected error '%v', got '%v'", ErrConflict, err)
}
}
func TestAllocateOutOfRange(t *testing.T) {
min := 1
max := 10
offset := 11
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
ok, err := m.Allocate(offset)
if ok {
t.Errorf("unexpected success")
}
if err != ErrOutOfRange {
t.Errorf("expected error '%v', got '%v'", ErrOutOfRange, err)
}
}
func TestRelease(t *testing.T) {
min := 1
max := 10
offset := 3
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if ok, err := m.Allocate(offset); !ok {
t.Errorf("error allocate offset %v: %v", offset, err)
}
if !m.Has(offset) {
t.Errorf("expect offset %v allocated", offset)
}
if err = m.Release(offset); err != nil {
t.Errorf("unexpected error: %v", err)
}
if m.Has(offset) {
t.Errorf("expect offset %v not allocated", offset)
}
}

View File

@@ -269,6 +269,8 @@ func TestParseClassParameters(t *testing.T) {
user: "admin",
userKey: "password",
secretValue: "password",
gidMin: 2000,
gidMax: 4294967295,
},
},
{
@@ -287,6 +289,8 @@ func TestParseClassParameters(t *testing.T) {
secretName: "mysecret",
secretNamespace: "default",
secretValue: "mypassword",
gidMin: 2000,
gidMax: 4294967295,
},
},
{
@@ -298,7 +302,9 @@ func TestParseClassParameters(t *testing.T) {
&secret,
false, // expect error
&provisioningConfig{
url: "https://localhost:8080",
url: "https://localhost:8080",
gidMin: 2000,
gidMax: 4294967295,
},
},
{