mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-29 06:27:05 +00:00

merge util into one file

This commit is contained in:
parent 2ca2f442ac
commit f268bb1605
@@ -1,523 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volume

import (
	"fmt"
	"reflect"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"

	"hash/fnv"
	"math/rand"
	"strconv"
	"strings"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
)

const (
	// GB - GigaByte size
	GB = 1000 * 1000 * 1000
	// GIB - GibiByte size
	GIB = 1024 * 1024 * 1024
)

// RecycleEventRecorder is a func that forwards recycler pod events to the PV being recycled.
type RecycleEventRecorder func(eventtype, message string)

// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
// Recyclers. This function will save the given Pod to the API and watch it
// until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded,
// whichever comes first. An attempt to delete the recycler pod is always
// made before returning.
//
// In case there is a pod with the same namespace+name already running, this
// function deletes it, as it is not able to judge whether it is an old recycler
// or a fake recycler forged by a user to block Kubernetes from recycling.
//
// pod - the pod designed by a volume plugin to recycle the volume. pod.Name
// will be overwritten with a unique name based on PV.Name.
// client - kube client for API operations.
func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
	return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder))
}
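For context, a recycler volume plugin hands this helper a pod template and a recorder callback; a minimal sketch follows, assuming a clientset.Interface is available (the pod spec, names, and scrub command are illustrative, not part of this commit):

// Illustrative sketch (not part of this commit): how a recycler plugin might
// call RecycleVolumeByWatchingPodUntilCompletion. The pod template and names are
// assumptions for demonstration; pod.Name is overwritten internally with
// "recycler-for-<pvName>".
func exampleRecycle(kubeClient clientset.Interface) error {
	recyclerPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:    "scrub",
				Image:   "busybox",
				Command: []string{"/bin/sh", "-c", "rm -rf /scrub/* /scrub/.[!.]*"},
			}},
		},
	}
	recorder := func(eventtype, message string) {
		glog.Infof("recycler event: %s: %s", eventtype, message)
	}
	return RecycleVolumeByWatchingPodUntilCompletion("pv-nfs-1", recyclerPod, kubeClient, recorder)
}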

// same as above func comments, except 'recyclerClient' is a narrower pod API
// interface to ease testing
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
	glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)

	// Generate unique name for the recycler pod - we need to get "already
	// exists" error when a previous controller has already started recycling
	// the volume. Here we assume that pv.Name is already unique.
	pod.Name = "recycler-for-" + pvName
	pod.GenerateName = ""

	stopChannel := make(chan struct{})
	defer close(stopChannel)
	podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
	if err != nil {
		glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
		return err
	}

	// Start the pod
	_, err = recyclerClient.CreatePod(pod)
	if err != nil {
		if errors.IsAlreadyExists(err) {
			deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
			if deleteErr != nil {
				return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
			}
			// Recycler will try again and the old pod will be hopefully deleted
			// at that time.
			return fmt.Errorf("old recycler pod found, will retry later")
		}
		return fmt.Errorf("unexpected error creating recycler pod: %+v", err)
	}
	err = waitForPod(pod, recyclerClient, podCh)

	// In all cases delete the recycler pod and log its result.
	glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
	deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
	if deleteErr != nil {
		glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
	}

	// Returning the recycler error is preferred; the pod will be deleted again on
	// the next retry.
	if err != nil {
		return fmt.Errorf("failed to recycle volume: %s", err)
	}

	// Recycle succeeded but we failed to delete the recycler pod. Report it;
	// the controller will re-try recycling the PV again shortly.
	if deleteErr != nil {
		return fmt.Errorf("failed to delete recycler pod: %s", deleteErr)
	}

	return nil
}

// waitForPod watches the pod until it finishes and sends all events on the
// pod to the PV.
func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.Event) error {
	for {
		event, ok := <-podCh
		if !ok {
			return fmt.Errorf("recycler pod %q watch channel had been closed", pod.Name)
		}
		switch event.Object.(type) {
		case *v1.Pod:
			// Pod changed
			pod := event.Object.(*v1.Pod)
			glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
			switch event.Type {
			case watch.Added, watch.Modified:
				if pod.Status.Phase == v1.PodSucceeded {
					// Recycle succeeded.
					return nil
				}
				if pod.Status.Phase == v1.PodFailed {
					if pod.Status.Message != "" {
						return fmt.Errorf(pod.Status.Message)
					} else {
						return fmt.Errorf("pod failed, pod.Status.Message unknown.")
					}
				}

			case watch.Deleted:
				return fmt.Errorf("recycler pod was deleted")

			case watch.Error:
				return fmt.Errorf("recycler pod watcher failed")
			}

		case *v1.Event:
			// Event received
			podEvent := event.Object.(*v1.Event)
			glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
			if event.Type == watch.Added {
				recyclerClient.Event(podEvent.Type, podEvent.Message)
			}
		}
	}
}

// recyclerClient abstracts access to a Pod by providing a narrower interface.
// This makes it easier to mock a client for testing.
type recyclerClient interface {
	CreatePod(pod *v1.Pod) (*v1.Pod, error)
	GetPod(name, namespace string) (*v1.Pod, error)
	DeletePod(name, namespace string) error
	// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
	// to close the reflector backing the watch. The caller is responsible for
	// deferring a close on the channel to stop the reflector.
	WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error)
	// Event sends an event to the volume that is being recycled.
	Event(eventtype, message string)
}

func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient {
	return &realRecyclerClient{
		client,
		recorder,
	}
}

type realRecyclerClient struct {
	client   clientset.Interface
	recorder RecycleEventRecorder
}

func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
	return c.client.CoreV1().Pods(pod.Namespace).Create(pod)
}

func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
	return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
}

func (c *realRecyclerClient) DeletePod(name, namespace string) error {
	return c.client.CoreV1().Pods(namespace).Delete(name, nil)
}

func (c *realRecyclerClient) Event(eventtype, message string) {
	c.recorder(eventtype, message)
}

func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
	podSelector, err := fields.ParseSelector("metadata.name=" + name)
	if err != nil {
		return nil, err
	}
	options := metav1.ListOptions{
		FieldSelector: podSelector.String(),
		Watch:         true,
	}

	podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options)
	if err != nil {
		return nil, err
	}

	eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
	eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{
		FieldSelector: eventSelector.String(),
		Watch:         true,
	})
	if err != nil {
		podWatch.Stop()
		return nil, err
	}

	eventCh := make(chan watch.Event, 30)

	go func() {
		defer eventWatch.Stop()
		defer podWatch.Stop()
		defer close(eventCh)
		var podWatchChannelClosed bool
		var eventWatchChannelClosed bool
		for {
			select {
			case <-stopChannel:
				return

			case podEvent, ok := <-podWatch.ResultChan():
				if !ok {
					podWatchChannelClosed = true
				} else {
					eventCh <- podEvent
				}
			case eventEvent, ok := <-eventWatch.ResultChan():
				if !ok {
					eventWatchChannelClosed = true
				} else {
					eventCh <- eventEvent
				}
			}
			if podWatchChannelClosed && eventWatchChannelClosed {
				break
			}
		}
	}()

	return eventCh, nil
}

// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
// recycle operation. The calculation and return value is either the
// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
// greater.
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
	giQty := resource.MustParse("1Gi")
	pvQty := pv.Spec.Capacity[v1.ResourceStorage]
	giSize := giQty.Value()
	pvSize := pvQty.Value()
	timeout := (pvSize / giSize) * int64(timeoutIncrement)
	if timeout < int64(minimumTimeout) {
		return int64(minimumTimeout)
	}
	return timeout
}
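To make the arithmetic above concrete, a short sketch that mirrors the values used by TestCalculateTimeoutForVolume later in this change:

// Illustrative check of the timeout arithmetic.
pv := &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{
	Capacity: v1.ResourceList{v1.ResourceStorage: resource.MustParse("150Gi")},
}}
timeout := CalculateTimeoutForVolume(50, 30, pv) // 150 whole Gi * 30 = 4500
// A 500M volume has zero whole Gi and falls back to the 50 second minimum.
_ = timeout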

// RoundUpSize calculates how many allocation units are needed to accommodate
// a volume of given size. E.g. when user wants 1500MiB volume, while AWS EBS
// allocates volumes in gibibyte-sized chunks,
// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2'
// (2 GiB is the smallest allocatable volume that can hold 1500MiB)
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

// RoundUpToGB rounds up given quantity to chunks of GB
func RoundUpToGB(size resource.Quantity) int64 {
	requestBytes := size.Value()
	return RoundUpSize(requestBytes, GB)
}

// RoundUpToGiB rounds up given quantity to chunks of GiB
func RoundUpToGiB(size resource.Quantity) int64 {
	requestBytes := size.Value()
	return RoundUpSize(requestBytes, GIB)
}
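A small sketch of the difference between the decimal and binary helpers above (the requested quantity is an arbitrary example):

// Illustrative only: decimal (GB) vs. binary (GiB) rounding of the same request.
q := resource.MustParse("1Gi") // 1073741824 bytes
fmt.Println(RoundUpToGB(q))    // 2: needs two 10^9-byte chunks
fmt.Println(RoundUpToGiB(q))   // 1: exactly one 2^30-byte chunk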

// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
// string fits given length and cuts "dynamic" if not.
func GenerateVolumeName(clusterName, pvName string, maxLength int) string {
	prefix := clusterName + "-dynamic"
	pvLen := len(pvName)

	// cut the "<clusterName>-dynamic" to fit full pvName into maxLength
	// +1 for the '-' dash
	if pvLen+1+len(prefix) > maxLength {
		prefix = prefix[:maxLength-pvLen-1]
	}
	return prefix + "-" + pvName
}

// GetPath checks if the path from the mounter is empty.
func GetPath(mounter Mounter) (string, error) {
	path := mounter.GetPath()
	if path == "" {
		return "", fmt.Errorf("Path is empty %s", reflect.TypeOf(mounter).String())
	}
	return path, nil
}

// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name
// Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name.
// However, if the PVCName ends with `-<integer>`, we will hash the prefix, and then add the integer to the hash.
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
// assuming the id values are consecutive.
func ChooseZoneForVolume(zones sets.String, pvcName string) string {
	// We create the volume in a zone determined by the name
	// Eventually the scheduler will coordinate placement into an available zone
	hash, index := getPVCNameHashAndIndexOffset(pvcName)

	// Zones.List returns zones in a consistent order (sorted)
	// We do have a potential failure case where volumes will not be properly spread,
	// if the set of zones changes during StatefulSet volume creation. However, this is
	// probably relatively unlikely because we expect the set of zones to be essentially
	// static for clusters.
	// Hopefully we can address this problem if/when we do full scheduler integration of
	// PVC placement (which could also e.g. avoid putting volumes in overloaded or
	// unhealthy zones)
	zoneSlice := zones.List()
	zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]

	glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
	return zone
}

// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects multiple zones, for multi-zone disks.
func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String {
	// We create the volume in a zone determined by the name
	// Eventually the scheduler will coordinate placement into an available zone
	hash, index := getPVCNameHashAndIndexOffset(pvcName)

	// Zones.List returns zones in a consistent order (sorted)
	// We do have a potential failure case where volumes will not be properly spread,
	// if the set of zones changes during StatefulSet volume creation. However, this is
	// probably relatively unlikely because we expect the set of zones to be essentially
	// static for clusters.
	// Hopefully we can address this problem if/when we do full scheduler integration of
	// PVC placement (which could also e.g. avoid putting volumes in overloaded or
	// unhealthy zones)
	zoneSlice := zones.List()
	replicaZones := sets.NewString()

	startingIndex := index * numZones
	for index = startingIndex; index < startingIndex+numZones; index++ {
		zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
		replicaZones.Insert(zone)
	}

	glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q",
		pvcName, replicaZones.UnsortedList(), zoneSlice)
	return replicaZones
}

func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) {
	if pvcName == "" {
		// We should always be called with a name; this shouldn't happen
		glog.Warningf("No name defined during volume create; choosing random zone")

		hash = rand.Uint32()
	} else {
		hashString := pvcName

		// Heuristic to make sure that volumes in a StatefulSet are spread across zones
		// StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id,
		// where Id is an integer index.
		// Note though that if a StatefulSet pod has multiple claims, we need them to be
		// in the same zone, because otherwise the pod will be unable to mount both volumes,
		// and will be unschedulable. So we hash _only_ the "StatefulSetName" portion when
		// it looks like `ClaimName-StatefulSetName-Id`.
		// We continue to round-robin volume names that look like `Name-Id` also; this is a useful
		// feature for users that are creating statefulset-like functionality without using statefulsets.
		lastDash := strings.LastIndexByte(pvcName, '-')
		if lastDash != -1 {
			statefulsetIDString := pvcName[lastDash+1:]
			statefulsetID, err := strconv.ParseUint(statefulsetIDString, 10, 32)
			if err == nil {
				// Offset by the statefulsetID, so we round-robin across zones
				index = uint32(statefulsetID)
				// We still hash the volume name, but only the prefix
				hashString = pvcName[:lastDash]

				// In the special case where it looks like `ClaimName-StatefulSetName-Id`,
				// hash only the StatefulSetName, so that different claims on the same StatefulSet
				// member end up in the same zone.
				// Note that StatefulSetName (and ClaimName) might themselves both have dashes.
				// We actually just take the portion after the final - of ClaimName-StatefulSetName.
				// For our purposes it doesn't much matter (just suboptimal spreading).
				lastDash := strings.LastIndexByte(hashString, '-')
				if lastDash != -1 {
					hashString = hashString[lastDash+1:]
				}

				glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index)
			}
		}

		// We hash the (base) volume name, so we don't bias towards the first N zones
		h := fnv.New32()
		h.Write([]byte(hashString))
		hash = h.Sum32()
	}

	return hash, index
}
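A short sketch of how the heuristic behaves for StatefulSet-style claim names (the zone and claim names are made up for illustration):

// Illustrative only: members of one StatefulSet share a hash but differ in index,
// so consecutive members land in consecutive zones, while different claims of the
// same member land in the same zone.
zones := sets.NewString("us-central1-a", "us-central1-b", "us-central1-c")
zoneA := ChooseZoneForVolume(zones, "www-web-0")  // some zone Z
zoneB := ChooseZoneForVolume(zones, "www-web-1")  // the zone after Z (round-robin)
zoneC := ChooseZoneForVolume(zones, "logs-web-0") // same zone as "www-web-0"
_, _, _ = zoneA, zoneB, zoneC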

// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
// to empty_dir
func UnmountViaEmptyDir(dir string, host VolumeHost, volName string, volSpec Spec, podUID types.UID) error {
	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)

	// Wrap EmptyDir, let it do the teardown.
	wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
	if err != nil {
		return err
	}
	return wrapped.TearDownAt(dir)
}

// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options
func MountOptionFromSpec(spec *Spec, options ...string) []string {
	pv := spec.PersistentVolume

	if pv != nil {
		// Use beta annotation first
		if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok {
			moList := strings.Split(mo, ",")
			return JoinMountOptions(moList, options)
		}

		if len(pv.Spec.MountOptions) > 0 {
			return JoinMountOptions(pv.Spec.MountOptions, options)
		}
	}

	return options
}

// JoinMountOptions joins mount options eliminating duplicates
func JoinMountOptions(userOptions []string, systemOptions []string) []string {
	allMountOptions := sets.NewString()

	for _, mountOption := range userOptions {
		if len(mountOption) > 0 {
			allMountOptions.Insert(mountOption)
		}
	}

	for _, mountOption := range systemOptions {
		allMountOptions.Insert(mountOption)
	}
	return allMountOptions.UnsortedList()
}
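For illustration, the two helpers above merge PV-declared options with caller-supplied ones and drop duplicates (the option values are arbitrary):

// Illustrative only: user options from a PV merged with plugin defaults.
merged := JoinMountOptions([]string{"ro", "nfsvers=3"}, []string{"noatime", "ro"})
// merged contains "ro", "nfsvers=3", "noatime" (order unspecified).
_ = merged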

// ValidateZone returns:
// - an error in case zone is an empty string or contains only any combination of spaces and tab characters
// - nil otherwise
func ValidateZone(zone string) error {
	if strings.TrimSpace(zone) == "" {
		return fmt.Errorf("the provided %q zone is not valid, it's an empty string or contains only spaces and tab characters", zone)
	}
	return nil
}

// AccessModesContains returns whether the requested mode is contained by modes
func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}

// AccessModesContainedInAll returns whether all of the requested modes are contained by modes
func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool {
	for _, mode := range requestedModes {
		if !AccessModesContains(indexedModes, mode) {
			return false
		}
	}
	return true
}

// GetWindowsPath gets a windows path
func GetWindowsPath(path string) string {
	windowsPath := strings.Replace(path, "/", "\\", -1)
	if strings.HasPrefix(windowsPath, "\\") {
		windowsPath = "c:" + windowsPath
	}
	return windowsPath
}
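A quick sketch of the path conversion above (the input path is an arbitrary example):

// Illustrative only: a POSIX-style kubelet path converted for Windows nodes.
p := GetWindowsPath("/var/lib/kubelet/plugins/example/mounts")
// p == "c:\\var\\lib\\kubelet\\plugins\\example\\mounts"
_ = p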
@@ -74,7 +74,7 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po
			if deleteErr != nil {
				return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
			}
			// Recycler will try again and the old pod will be hopefuly deleted
			// Recycler will try again and the old pod will be hopefully deleted
			// at that time.
			return fmt.Errorf("old recycler pod found, will retry later")
		}
@@ -88,8 +88,8 @@ func TestRecyclerPod(t *testing.T) {
				// Pod gets Running and Succeeded
				newPodEvent(watch.Added, "podRecyclerSuccess", v1.PodPending, ""),
				newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"),
				newEvent(v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""),
				newEvent(v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""),
				newEvent(v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""),
				newEvent(v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""),
				newEvent(v1.EventTypeNormal, "Created container with docker id 83d929aeac82"),
				newEvent(v1.EventTypeNormal, "Started container with docker id 83d929aeac82"),
				newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodRunning, ""),
@@ -97,8 +97,8 @@ func TestRecyclerPod(t *testing.T) {
			},
			expectedEvents: []mockEvent{
				{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"},
				{v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""},
				{v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""},
				{v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""},
				{v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""},
				{v1.EventTypeNormal, "Created container with docker id 83d929aeac82"},
				{v1.EventTypeNormal, "Started container with docker id 83d929aeac82"},
			},
@@ -21,7 +21,6 @@ import (
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"strings"
	"syscall"

@@ -31,21 +30,51 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"

	"reflect"

	"hash/fnv"
	"math/rand"
	"strconv"

	"k8s.io/apimachinery/pkg/api/resource"
	utypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/volume/util/types"
)

const (
	readyFileName = "ready"
	losetupPath   = "losetup"
	// GB - GigaByte size
	GB = 1000 * 1000 * 1000
	// GIB - GibiByte size
	GIB = 1024 * 1024 * 1024

	ErrDeviceNotFound     = "device not found"
	ErrDeviceNotSupported = "device not supported"
	readyFileName         = "ready"

	// ControllerManagedAttachAnnotation is the key of the annotation on Node
	// objects that indicates attach/detach operations for the node should be
	// managed by the attach/detach controller
	ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"

	// KeepTerminatedPodVolumesAnnotation is the key of the annotation on Node
	// that decides if pod volumes are unmounted when pod is terminated
	KeepTerminatedPodVolumesAnnotation string = "volumes.kubernetes.io/keep-terminated-pod-volumes"

	// VolumeGidAnnotationKey is the key of the annotation on the PersistentVolume
	// object that specifies a supplemental GID.
	VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"

	// VolumeDynamicallyCreatedByKey is the key of the annotation on PersistentVolume
	// object created dynamically
	VolumeDynamicallyCreatedByKey = "kubernetes.io/createdby"
)

// IsReady checks for the existence of a regular file
@@ -341,200 +370,382 @@ func stringToSet(str, delimiter string) (sets.String, error) {
	return zonesSet, nil
}

// BlockVolumePathHandler defines a set of operations for handling block volume-related operations
type BlockVolumePathHandler interface {
	// MapDevice creates a symbolic link to block device under specified map path
	MapDevice(devicePath string, mapPath string, linkName string) error
	// UnmapDevice removes a symbolic link to block device under specified map path
	UnmapDevice(mapPath string, linkName string) error
	// RemovePath removes a file or directory on specified map path
	RemoveMapPath(mapPath string) error
	// IsSymlinkExist returns true if specified symbolic link exists
	IsSymlinkExist(mapPath string) (bool, error)
	// GetDeviceSymlinkRefs searches symbolic links under global map path
	GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error)
	// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath
	// corresponding to map path symlink, and then return global map path with pod uuid.
	FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error)
	// AttachFileDevice takes a path to a regular file and makes it available as an
	// attached block device.
	AttachFileDevice(path string) (string, error)
	// GetLoopDevice returns the full path to the loop device associated with the given path.
	GetLoopDevice(path string) (string, error)
	// RemoveLoopDevice removes specified loopback device
	RemoveLoopDevice(device string) error
// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
// recycle operation. The calculation and return value is either the
// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
// greater.
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
	giQty := resource.MustParse("1Gi")
	pvQty := pv.Spec.Capacity[v1.ResourceStorage]
	giSize := giQty.Value()
	pvSize := pvQty.Value()
	timeout := (pvSize / giSize) * int64(timeoutIncrement)
	if timeout < int64(minimumTimeout) {
		return int64(minimumTimeout)
	}
	return timeout
}

// NewBlockVolumePathHandler returns a new instance of BlockVolumeHandler.
func NewBlockVolumePathHandler() BlockVolumePathHandler {
	var volumePathHandler VolumePathHandler
	return volumePathHandler
// RoundUpSize calculates how many allocation units are needed to accommodate
// a volume of given size. E.g. when user wants 1500MiB volume, while AWS EBS
// allocates volumes in gibibyte-sized chunks,
// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2'
// (2 GiB is the smallest allocatable volume that can hold 1500MiB)
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

// VolumePathHandler is path related operation handlers for block volume
type VolumePathHandler struct {
// RoundUpToGB rounds up given quantity to chunks of GB
func RoundUpToGB(size resource.Quantity) int64 {
	requestBytes := size.Value()
	return RoundUpSize(requestBytes, GB)
}

// MapDevice creates a symbolic link to block device under specified map path
func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string) error {
	// Example of global map path:
	// globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid}
	// linkName: {podUid}
	//
	// Example of pod device map path:
	// podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
	// linkName: {volumeName}
	if len(devicePath) == 0 {
		return fmt.Errorf("Failed to map device to map path. devicePath is empty")
	}
	if len(mapPath) == 0 {
		return fmt.Errorf("Failed to map device to map path. mapPath is empty")
	}
	if !filepath.IsAbs(mapPath) {
		return fmt.Errorf("The map path should be absolute: map path: %s", mapPath)
	}
	glog.V(5).Infof("MapDevice: devicePath %s", devicePath)
	glog.V(5).Infof("MapDevice: mapPath %s", mapPath)
	glog.V(5).Infof("MapDevice: linkName %s", linkName)
// RoundUpToGiB rounds up given quantity to chunks of GiB
func RoundUpToGiB(size resource.Quantity) int64 {
	requestBytes := size.Value()
	return RoundUpSize(requestBytes, GIB)
}

	// Check and create mapPath
	_, err := os.Stat(mapPath)
	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("cannot validate map path: %s", mapPath)
// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
// string fits given length and cuts "dynamic" if not.
func GenerateVolumeName(clusterName, pvName string, maxLength int) string {
	prefix := clusterName + "-dynamic"
	pvLen := len(pvName)

	// cut the "<clusterName>-dynamic" to fit full pvName into maxLength
	// +1 for the '-' dash
	if pvLen+1+len(prefix) > maxLength {
		prefix = prefix[:maxLength-pvLen-1]
	}
	return prefix + "-" + pvName
}

// GetPath checks if the path from the mounter is empty.
func GetPath(mounter volume.Mounter) (string, error) {
	path := mounter.GetPath()
	if path == "" {
		return "", fmt.Errorf("Path is empty %s", reflect.TypeOf(mounter).String())
	}
	return path, nil
}

// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name
// Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name.
// However, if the PVCName ends with `-<integer>`, we will hash the prefix, and then add the integer to the hash.
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
// assuming the id values are consecutive.
func ChooseZoneForVolume(zones sets.String, pvcName string) string {
	// We create the volume in a zone determined by the name
	// Eventually the scheduler will coordinate placement into an available zone
	hash, index := getPVCNameHashAndIndexOffset(pvcName)

	// Zones.List returns zones in a consistent order (sorted)
	// We do have a potential failure case where volumes will not be properly spread,
	// if the set of zones changes during StatefulSet volume creation. However, this is
	// probably relatively unlikely because we expect the set of zones to be essentially
	// static for clusters.
	// Hopefully we can address this problem if/when we do full scheduler integration of
	// PVC placement (which could also e.g. avoid putting volumes in overloaded or
	// unhealthy zones)
	zoneSlice := zones.List()
	zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]

	glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
	return zone
}

// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects multiple zones, for multi-zone disks.
func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String {
	// We create the volume in a zone determined by the name
	// Eventually the scheduler will coordinate placement into an available zone
	hash, index := getPVCNameHashAndIndexOffset(pvcName)

	// Zones.List returns zones in a consistent order (sorted)
	// We do have a potential failure case where volumes will not be properly spread,
	// if the set of zones changes during StatefulSet volume creation. However, this is
	// probably relatively unlikely because we expect the set of zones to be essentially
	// static for clusters.
	// Hopefully we can address this problem if/when we do full scheduler integration of
	// PVC placement (which could also e.g. avoid putting volumes in overloaded or
	// unhealthy zones)
	zoneSlice := zones.List()
	replicaZones := sets.NewString()

	startingIndex := index * numZones
	for index = startingIndex; index < startingIndex+numZones; index++ {
		zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
		replicaZones.Insert(zone)
	}

	glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q",
		pvcName, replicaZones.UnsortedList(), zoneSlice)
	return replicaZones
}

func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) {
	if pvcName == "" {
		// We should always be called with a name; this shouldn't happen
		glog.Warningf("No name defined during volume create; choosing random zone")

		hash = rand.Uint32()
	} else {
		hashString := pvcName

		// Heuristic to make sure that volumes in a StatefulSet are spread across zones
		// StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id,
		// where Id is an integer index.
		// Note though that if a StatefulSet pod has multiple claims, we need them to be
		// in the same zone, because otherwise the pod will be unable to mount both volumes,
		// and will be unschedulable. So we hash _only_ the "StatefulSetName" portion when
		// it looks like `ClaimName-StatefulSetName-Id`.
		// We continue to round-robin volume names that look like `Name-Id` also; this is a useful
		// feature for users that are creating statefulset-like functionality without using statefulsets.
		lastDash := strings.LastIndexByte(pvcName, '-')
		if lastDash != -1 {
			statefulsetIDString := pvcName[lastDash+1:]
			statefulsetID, err := strconv.ParseUint(statefulsetIDString, 10, 32)
			if err == nil {
				// Offset by the statefulsetID, so we round-robin across zones
				index = uint32(statefulsetID)
				// We still hash the volume name, but only the prefix
				hashString = pvcName[:lastDash]

				// In the special case where it looks like `ClaimName-StatefulSetName-Id`,
				// hash only the StatefulSetName, so that different claims on the same StatefulSet
				// member end up in the same zone.
				// Note that StatefulSetName (and ClaimName) might themselves both have dashes.
				// We actually just take the portion after the final - of ClaimName-StatefulSetName.
				// For our purposes it doesn't much matter (just suboptimal spreading).
				lastDash := strings.LastIndexByte(hashString, '-')
				if lastDash != -1 {
					hashString = hashString[lastDash+1:]
				}

				glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index)
			}
		}

		// We hash the (base) volume name, so we don't bias towards the first N zones
		h := fnv.New32()
		h.Write([]byte(hashString))
		hash = h.Sum32()
	}

	return hash, index
}

// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
// to empty_dir
func UnmountViaEmptyDir(dir string, host volume.VolumeHost, volName string, volSpec volume.Spec, podUID utypes.UID) error {
	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)

	// Wrap EmptyDir, let it do the teardown.
	wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
	if err != nil {
		return err
	}
	if err = os.MkdirAll(mapPath, 0750); err != nil {
		return fmt.Errorf("Failed to mkdir %s, error %v", mapPath, err)
	}
	// Remove old symbolic link(or file) then create new one.
	// This should be done because current symbolic link is
	// stale across node reboot.
	linkPath := path.Join(mapPath, string(linkName))
	if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
		return err
	}
	err = os.Symlink(devicePath, linkPath)
	return err
	return wrapped.TearDownAt(dir)
}

// UnmapDevice removes a symbolic link associated to block device under specified map path
func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error {
	if len(mapPath) == 0 {
		return fmt.Errorf("Failed to unmap device from map path. mapPath is empty")
	}
	glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath)
	glog.V(5).Infof("UnmapDevice: linkName %s", linkName)
// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options
func MountOptionFromSpec(spec *volume.Spec, options ...string) []string {
	pv := spec.PersistentVolume

	// Check symbolic link exists
	linkPath := path.Join(mapPath, string(linkName))
	if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil {
		return checkErr
	} else if !islinkExist {
		glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath)
		return nil
	if pv != nil {
		// Use beta annotation first
		if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok {
			moList := strings.Split(mo, ",")
			return JoinMountOptions(moList, options)
		}

		if len(pv.Spec.MountOptions) > 0 {
			return JoinMountOptions(pv.Spec.MountOptions, options)
		}
	}
	err := os.Remove(linkPath)
	return err

	return options
}

// RemoveMapPath removes a file or directory on specified map path
func (v VolumePathHandler) RemoveMapPath(mapPath string) error {
	if len(mapPath) == 0 {
		return fmt.Errorf("Failed to remove map path. mapPath is empty")
// JoinMountOptions joins mount options eliminating duplicates
func JoinMountOptions(userOptions []string, systemOptions []string) []string {
	allMountOptions := sets.NewString()

	for _, mountOption := range userOptions {
		if len(mountOption) > 0 {
			allMountOptions.Insert(mountOption)
		}
	}
	glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath)
	err := os.RemoveAll(mapPath)
	if err != nil && !os.IsNotExist(err) {
		return err

	for _, mountOption := range systemOptions {
		allMountOptions.Insert(mountOption)
	}
	return allMountOptions.UnsortedList()
}

// ValidateZone returns:
// - an error in case zone is an empty string or contains only any combination of spaces and tab characters
// - nil otherwise
func ValidateZone(zone string) error {
	if strings.TrimSpace(zone) == "" {
		return fmt.Errorf("the provided %q zone is not valid, it's an empty string or contains only spaces and tab characters", zone)
	}
	return nil
}

// IsSymlinkExist returns true if specified file exists and the type is symbolic link.
// If file doesn't exist, or file exists but not symbolic link, return false with no error.
// On other cases, return false with error from Lstat().
func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) {
	fi, err := os.Lstat(mapPath)
	if err == nil {
		// If file exists and it's a symbolic link, return true and no error
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			return true, nil
// AccessModesContains returns whether the requested mode is contained by modes
func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
		// If file exists but it's not a symbolic link, return false and no error
		return false, nil
	}
	// If file doesn't exist, return false and no error
	if os.IsNotExist(err) {
		return false, nil
	}
	// Return error from Lstat()
	return false, err
	return false
}

// GetDeviceSymlinkRefs searches symbolic links under global map path
func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) {
	var refs []string
	files, err := ioutil.ReadDir(mapPath)
	if err != nil {
		return nil, fmt.Errorf("Directory cannot read %v", err)
	}
	for _, file := range files {
		if file.Mode()&os.ModeSymlink != os.ModeSymlink {
			continue
// AccessModesContainedInAll returns whether all of the requested modes are contained by modes
func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool {
	for _, mode := range requestedModes {
		if !AccessModesContains(indexedModes, mode) {
			return false
		}
		filename := file.Name()
		filepath, err := os.Readlink(path.Join(mapPath, filename))
	}
	return true
}

// GetWindowsPath gets a windows path
func GetWindowsPath(path string) string {
	windowsPath := strings.Replace(path, "/", "\\", -1)
	if strings.HasPrefix(windowsPath, "\\") {
		windowsPath = "c:" + windowsPath
	}
	return windowsPath
}

// GetUniquePodName returns a unique identifier to reference a pod by
func GetUniquePodName(pod *v1.Pod) types.UniquePodName {
	return types.UniquePodName(pod.UID)
}

// GetUniqueVolumeName returns a unique name representing the volume/plugin.
// Caller should ensure that volumeName is a name/ID uniquely identifying the
// actual backing device, directory, path, etc. for a particular volume.
// The returned name can be used to uniquely reference the volume, for example,
// to prevent operations (attach/detach or mount/unmount) from being triggered
// on the same volume.
func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName {
	return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName))
}

// GetUniqueVolumeNameForNonAttachableVolume returns the unique volume name
// for a non-attachable volume.
func GetUniqueVolumeNameForNonAttachableVolume(
	podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName {
	return v1.UniqueVolumeName(
		fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name()))
}

// GetUniqueVolumeNameFromSpec uses the given VolumePlugin to generate a unique
// name representing the volume defined in the specified volume spec.
// This returned name can be used to uniquely reference the actual backing
// device, directory, path, etc. referenced by the given volumeSpec.
// If the given plugin does not support the volume spec, this returns an error.
func GetUniqueVolumeNameFromSpec(
	volumePlugin volume.VolumePlugin,
	volumeSpec *volume.Spec) (v1.UniqueVolumeName, error) {
	if volumePlugin == nil {
		return "", fmt.Errorf(
			"volumePlugin should not be nil. volumeSpec.Name=%q",
			volumeSpec.Name())
	}

	volumeName, err := volumePlugin.GetVolumeName(volumeSpec)
	if err != nil || volumeName == "" {
		return "", fmt.Errorf(
			"failed to GetVolumeName from volumePlugin for volumeSpec %q err=%v",
			volumeSpec.Name(),
			err)
	}

	return GetUniqueVolumeName(
			volumePlugin.GetPluginName(),
			volumeName),
		nil
}

// IsPodTerminated checks if pod is terminated
func IsPodTerminated(pod *v1.Pod, podStatus v1.PodStatus) bool {
	return podStatus.Phase == v1.PodFailed || podStatus.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(podStatus.ContainerStatuses))
}

// notRunning returns true if every status is terminated or waiting, or the status list
// is empty.
func notRunning(statuses []v1.ContainerStatus) bool {
	for _, status := range statuses {
		if status.State.Terminated == nil && status.State.Waiting == nil {
			return false
		}
	}
	return true
}

// SplitUniqueName splits the unique name to plugin name and volume name strings. It expects the uniqueName to follow
// the format plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface,
// i.e. namespace/plugin containing exactly one '/'. This means the unique name will always be in the form of
// plugin_namespace/plugin/volume_name, see k8s.io/kubernetes/pkg/volume/plugins.go VolumePlugin interface
// description and pkg/volume/util/volumehelper/volumehelper.go GetUniqueVolumeNameFromSpec that constructs
// the unique volume names.
func SplitUniqueName(uniqueName v1.UniqueVolumeName) (string, string, error) {
	components := strings.SplitN(string(uniqueName), "/", 3)
	if len(components) != 3 {
		return "", "", fmt.Errorf("cannot split volume unique name %s to plugin/volume components", uniqueName)
	}
	pluginName := fmt.Sprintf("%s/%s", components[0], components[1])
	return pluginName, components[2], nil
}
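As a short illustration of the naming convention described above (the plugin and volume names are examples):

// Illustrative only: a unique volume name round-trips through SplitUniqueName.
unique := GetUniqueVolumeName("kubernetes.io/gce-pd", "pd-abc123")
// unique == "kubernetes.io/gce-pd/pd-abc123"
plugin, volumeName, err := SplitUniqueName(unique)
// plugin == "kubernetes.io/gce-pd", volumeName == "pd-abc123", err == nil
_, _, _ = plugin, volumeName, err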

// NewSafeFormatAndMountFromHost creates a new SafeFormatAndMount with Mounter
// and Exec taken from given VolumeHost.
func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount {
	mounter := host.GetMounter(pluginName)
	exec := host.GetExec(pluginName)
	return &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}
}

// GetVolumeMode retrieves VolumeMode from pv.
// If the volume doesn't have PersistentVolume, it's an inline volume,
// should return volumeMode as filesystem to keep existing behavior.
func GetVolumeMode(volumeSpec *volume.Spec) (v1.PersistentVolumeMode, error) {
	if volumeSpec == nil || volumeSpec.PersistentVolume == nil {
		return v1.PersistentVolumeFilesystem, nil
	}
	if volumeSpec.PersistentVolume.Spec.VolumeMode != nil {
		return *volumeSpec.PersistentVolume.Spec.VolumeMode, nil
	}
	return "", fmt.Errorf("cannot get volumeMode for volume: %v", volumeSpec.Name())
}

// GetPersistentVolumeClaimVolumeMode retrieves VolumeMode from pvc.
func GetPersistentVolumeClaimVolumeMode(claim *v1.PersistentVolumeClaim) (v1.PersistentVolumeMode, error) {
	if claim.Spec.VolumeMode != nil {
		return *claim.Spec.VolumeMode, nil
	}
	return "", fmt.Errorf("cannot get volumeMode from pvc: %v", claim.Name)
}

// CheckVolumeModeFilesystem checks VolumeMode.
// If the mode is Filesystem, return true otherwise return false.
func CheckVolumeModeFilesystem(volumeSpec *volume.Spec) (bool, error) {
	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
		volumeMode, err := GetVolumeMode(volumeSpec)
		if err != nil {
			return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err)
			return true, err
		}
		glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath)
		if filepath == devPath {
			refs = append(refs, path.Join(mapPath, filename))
		if volumeMode == v1.PersistentVolumeBlock {
			return false, nil
		}
	}
	glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs)
	return refs, nil
}

// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath
// corresponding to map path symlink, and then return global map path with pod uuid.
// ex. mapPath symlink: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX
// globalMapPath/{pod uuid}: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX
func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) {
	var globalMapPathUUID string
	// Find symbolic link named pod uuid under plugin dir
	err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) {
			glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath)
			if res, err := compareSymlinks(path, mapPath); err == nil && res {
				globalMapPathUUID = path
			}
		}
		return nil
	})
	if err != nil {
		return "", err
	}
	glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID)
	// Return path contains global map path + {pod uuid}
	return globalMapPathUUID, nil
}

func compareSymlinks(global, pod string) (bool, error) {
	devGlobal, err := os.Readlink(global)
	if err != nil {
		return false, err
	}
	devPod, err := os.Readlink(pod)
	if err != nil {
		return false, err
	}
	glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod)
	if devGlobal == devPod {
		return true, nil
	}
	return false, nil
}
	return true, nil
}
@@ -1,108 +0,0 @@
// +build linux

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
	"strings"

	"github.com/golang/glog"
)

// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
	blockDevicePath, err := v.GetLoopDevice(path)
	if err != nil && err.Error() != ErrDeviceNotFound {
		return "", err
	}

	// If no existing loop device for the path, create one
	if blockDevicePath == "" {
		glog.V(4).Infof("Creating device for path: %s", path)
		blockDevicePath, err = makeLoopDevice(path)
		if err != nil {
			return "", err
		}
	}
	return blockDevicePath, nil
}

// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
	_, err := os.Stat(path)
	if os.IsNotExist(err) {
		return "", errors.New(ErrDeviceNotFound)
	}
	if err != nil {
		return "", fmt.Errorf("not attachable: %v", err)
	}

	args := []string{"-j", path}
	cmd := exec.Command(losetupPath, args...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		glog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out)
		return "", err
	}
	return parseLosetupOutputForDevice(out)
}

func makeLoopDevice(path string) (string, error) {
	args := []string{"-f", "--show", path}
	cmd := exec.Command(losetupPath, args...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		glog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out)
		return "", err
	}
	return parseLosetupOutputForDevice(out)
}

// RemoveLoopDevice removes specified loopback device
func (v VolumePathHandler) RemoveLoopDevice(device string) error {
	args := []string{"-d", device}
	cmd := exec.Command(losetupPath, args...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		if _, err := os.Stat(device); os.IsNotExist(err) {
			return nil
		}
		glog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out)
		return err
	}
	return nil
}

func parseLosetupOutputForDevice(output []byte) (string, error) {
	if len(output) == 0 {
		return "", errors.New(ErrDeviceNotFound)
	}

	// losetup returns device in the format:
	// /dev/loop1: [0073]:148662 (/dev/sda)
	device := strings.TrimSpace(strings.SplitN(string(output), ":", 2)[0])
	if len(device) == 0 {
		return "", errors.New(ErrDeviceNotFound)
	}
	return device, nil
}
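For illustration, the parser above keeps only the device path before the first colon of losetup's output; the sample line below comes from the comment in the code:

// Illustrative only: parsing a sample losetup line.
dev, err := parseLosetupOutputForDevice([]byte("/dev/loop1: [0073]:148662 (/dev/sda)"))
// dev == "/dev/loop1", err == nil
_, _ = dev, err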

@ -22,14 +22,23 @@ import (
"testing"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
utiltesting "k8s.io/client-go/util/testing"
// util.go uses api.Codecs.LegacyCodec so import this package to do some
// resource initialization.
"hash/fnv"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/util/mount"

"reflect"
"strings"

"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"k8s.io/kubernetes/pkg/util/slice"
"k8s.io/kubernetes/pkg/volume"
)

var nodeLabels map[string]string = map[string]string{
@ -418,3 +427,664 @@ func TestDoUnmountMountPoint(t *testing.T) {
}
}
}

func TestCalculateTimeoutForVolume(t *testing.T) {
pv := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("500M"),
},
},
}

timeout := CalculateTimeoutForVolume(50, 30, pv)
if timeout != 50 {
t.Errorf("Expected 50 for timeout but got %v", timeout)
}

pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("2Gi")
timeout = CalculateTimeoutForVolume(50, 30, pv)
if timeout != 60 {
t.Errorf("Expected 60 for timeout but got %v", timeout)
}

pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("150Gi")
timeout = CalculateTimeoutForVolume(50, 30, pv)
if timeout != 4500 {
t.Errorf("Expected 4500 for timeout but got %v", timeout)
}
}
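
// Illustrative sketch (not part of this commit) of the sizing rule the test
// above exercises. The real CalculateTimeoutForVolume is defined in util.go;
// the assumed behavior is "at least the minimum timeout, otherwise
// timeoutIncrement seconds per whole GiB of capacity", which matches
// 500M -> 50, 2Gi -> 60 and 150Gi -> 4500 for the arguments (50, 30) used above.
func calculateTimeoutForVolumeSketch(minimumTimeout, timeoutIncrement int64, pv *v1.PersistentVolume) int64 {
	capacity := pv.Spec.Capacity[v1.ResourceStorage]
	sizeInGiB := capacity.Value() / (1024 * 1024 * 1024) // whole GiB, rounded down
	if timeout := sizeInGiB * timeoutIncrement; timeout > minimumTimeout {
		return timeout
	}
	return minimumTimeout
}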

func TestGenerateVolumeName(t *testing.T) {

// Normal operation, no truncate
v1 := GenerateVolumeName("kubernetes", "pv-cinder-abcde", 255)
if v1 != "kubernetes-dynamic-pv-cinder-abcde" {
t.Errorf("Expected kubernetes-dynamic-pv-cinder-abcde, got %s", v1)
}

// Truncate trailing "6789-dynamic"
prefix := strings.Repeat("0123456789", 9) // 90 characters prefix + 8 chars. of "-dynamic"
v2 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100)
expect := prefix[:84] + "-pv-cinder-abcde"
if v2 != expect {
t.Errorf("Expected %s, got %s", expect, v2)
}

// Truncate really long cluster name
prefix = strings.Repeat("0123456789", 1000) // 10000 characters prefix
v3 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100)
if v3 != expect {
t.Errorf("Expected %s, got %s", expect, v3)
}
}
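
// Illustrative sketch (not part of this commit) of the naming rule the test
// above checks. The real GenerateVolumeName is defined in util.go; the assumed
// behavior is to prepend "<clusterName>-dynamic-" to the PV name and, when the
// result would exceed maxLength, truncate the prefix rather than the PV name.
func generateVolumeNameSketch(clusterName, pvName string, maxLength int) string {
	prefix := clusterName + "-dynamic"
	if len(prefix)+len(pvName)+1 > maxLength {
		prefix = prefix[:maxLength-len(pvName)-1]
	}
	return prefix + "-" + pvName
}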
|
||||
|
||||
func TestMountOptionFromSpec(t *testing.T) {
|
||||
scenarios := map[string]struct {
|
||||
volume *volume.Spec
|
||||
expectedMountList []string
|
||||
systemOptions []string
|
||||
}{
|
||||
"volume-with-mount-options": {
|
||||
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
|
||||
},
|
||||
}),
|
||||
expectedMountList: []string{"ro", "nfsvers=3"},
|
||||
systemOptions: nil,
|
||||
},
|
||||
"volume-with-bad-mount-options": {
|
||||
volume: createVolumeSpecWithMountOption("good-mount-opts", "", v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
|
||||
},
|
||||
}),
|
||||
expectedMountList: []string{},
|
||||
systemOptions: nil,
|
||||
},
|
||||
"vol-with-sys-opts": {
|
||||
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
|
||||
},
|
||||
}),
|
||||
expectedMountList: []string{"ro", "nfsvers=3", "fsid=100", "hard"},
|
||||
systemOptions: []string{"fsid=100", "hard"},
|
||||
},
|
||||
"vol-with-sys-opts-with-dup": {
|
||||
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
|
||||
},
|
||||
}),
|
||||
expectedMountList: []string{"ro", "nfsvers=3", "fsid=100"},
|
||||
systemOptions: []string{"fsid=100", "ro"},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
mountOptions := MountOptionFromSpec(scenario.volume, scenario.systemOptions...)
|
||||
if !reflect.DeepEqual(slice.SortStrings(mountOptions), slice.SortStrings(scenario.expectedMountList)) {
|
||||
t.Errorf("for %s expected mount options : %v got %v", name, scenario.expectedMountList, mountOptions)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createVolumeSpecWithMountOption(name string, mountOptions string, spec v1.PersistentVolumeSpec) *volume.Spec {
|
||||
annotations := map[string]string{
|
||||
v1.MountOptionAnnotation: mountOptions,
|
||||
}
|
||||
objMeta := metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Annotations: annotations,
|
||||
}
|
||||
|
||||
pv := &v1.PersistentVolume{
|
||||
ObjectMeta: objMeta,
|
||||
Spec: spec,
|
||||
}
|
||||
return &volume.Spec{PersistentVolume: pv}
|
||||
}
|
||||
|
||||
func checkFnv32(t *testing.T, s string, expected int) {
h := fnv.New32()
h.Write([]byte(s))
h.Sum32()

if int(h.Sum32()) != expected {
t.Fatalf("hash of %q was %v, expected %v", s, h.Sum32(), expected)
}
}
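
// Illustrative sketch (not part of this commit) of the hash arithmetic the
// comments in the zone tests below refer to: the claim's base name ("henley"
// for "medium-henley-2") is hashed with FNV-32, the trailing ordinal is added,
// and the sum modulo the number of zones picks a zone from the sorted zone
// list. The real ChooseZoneForVolume also handles prefix stripping and the
// edge cases covered below; this helper only shows the hash + offset step.
func zoneIndexSketch(baseName string, ordinal, numZones uint32) uint32 {
	h := fnv.New32()
	h.Write([]byte(baseName))
	return (h.Sum32() + ordinal) % numZones
}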
|
||||
|
||||
func TestChooseZoneForVolume(t *testing.T) {
|
||||
checkFnv32(t, "henley", 1180403676)
|
||||
// 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection
|
||||
|
||||
// A few others
|
||||
checkFnv32(t, "henley-", 2652299129)
|
||||
checkFnv32(t, "henley-a", 1459735322)
|
||||
checkFnv32(t, "", 2166136261)
|
||||
|
||||
tests := []struct {
|
||||
Zones sets.String
|
||||
VolumeName string
|
||||
Expected string
|
||||
}{
|
||||
// Test for PVC names that don't have a dash
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley",
|
||||
Expected: "a", // hash("henley") == 0
|
||||
},
|
||||
// Tests for PVC names that end in - number, but don't look like statefulset PVCs
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-0",
|
||||
Expected: "a", // hash("henley") == 0
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-1",
|
||||
Expected: "b", // hash("henley") + 1 == 1
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-2",
|
||||
Expected: "c", // hash("henley") + 2 == 2
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-3",
|
||||
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-4",
|
||||
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
|
||||
},
|
||||
// Tests for PVC names that are edge cases
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-",
|
||||
Expected: "c", // hash("henley-") = 2652299129 === 2 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-a",
|
||||
Expected: "c", // hash("henley-a") = 1459735322 === 2 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium--1",
|
||||
Expected: "c", // hash("") + 1 == 2166136261 + 1 === 2 mod 3
|
||||
},
|
||||
// Tests for PVC names for simple StatefulSet cases
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-1",
|
||||
Expected: "b", // hash("henley") + 1 == 1
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "loud-henley-1",
|
||||
Expected: "b", // hash("henley") + 1 == 1
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "quiet-henley-2",
|
||||
Expected: "c", // hash("henley") + 2 == 2
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-2",
|
||||
Expected: "c", // hash("henley") + 2 == 2
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-3",
|
||||
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-4",
|
||||
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
|
||||
},
|
||||
// Tests for statefulsets (or claims) with dashes in the names
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-alpha-henley-2",
|
||||
Expected: "c", // hash("henley") + 2 == 2
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-beta-henley-3",
|
||||
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-gamma-henley-4",
|
||||
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
|
||||
},
|
||||
// Tests for statefulsets name ending in -
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--2",
|
||||
Expected: "a", // hash("") + 2 == 0 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--3",
|
||||
Expected: "b", // hash("") + 3 == 1 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--4",
|
||||
Expected: "c", // hash("") + 4 == 2 mod 3
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
actual := ChooseZoneForVolume(test.Zones, test.VolumeName)
|
||||
|
||||
if actual != test.Expected {
|
||||
t.Errorf("Test %v failed, expected zone %q, actual %q", test, test.Expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestChooseZonesForVolume(t *testing.T) {
|
||||
checkFnv32(t, "henley", 1180403676)
|
||||
// 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection
|
||||
|
||||
// A few others
|
||||
checkFnv32(t, "henley-", 2652299129)
|
||||
checkFnv32(t, "henley-a", 1459735322)
|
||||
checkFnv32(t, "", 2166136261)
|
||||
|
||||
tests := []struct {
|
||||
Zones sets.String
|
||||
VolumeName string
|
||||
NumZones uint32
|
||||
Expected sets.String
|
||||
}{
|
||||
// Test for PVC names that don't have a dash
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("a" /* hash("henley") == 0 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
|
||||
},
|
||||
// Tests for PVC names that end in - number, but don't look like statefulset PVCs
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-0",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("a" /* hash("henley") == 0 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-0",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-1",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-1",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-2",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-2",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-3",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-3",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 */, "b"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-4",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-4",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 */, "a"),
|
||||
},
|
||||
// Tests for PVC names that are edge cases
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("c" /* hash("henley-") = 2652299129 === 2 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("c" /* hash("henley-") = 2652299129 === 2 mod 3 = 2 */, "a"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-a",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-a",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 = 2 */, "a"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium--1",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("c" /* hash("") + 1 == 2166136261 + 1 === 2 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium--1",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("a" /* hash("") + 1 + 1(startingIndex) == 2166136261 + 1 + 1 === 3 mod 3 = 0 */, "b"),
|
||||
},
|
||||
// Tests for PVC names for simple StatefulSet cases
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-1",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
|
||||
},
|
||||
// Tests for PVC names for simple StatefulSet cases
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-1",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "loud-henley-1",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "loud-henley-1",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "quiet-henley-2",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "quiet-henley-2",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-2",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-2",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-3",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-3",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 6 mod 3 = 0 */, "b"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-4",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-4",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"),
|
||||
},
|
||||
// Tests for statefulsets (or claims) with dashes in the names
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-alpha-henley-2",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-alpha-henley-2",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-beta-henley-3",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-beta-henley-3",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 0 mod 3 */, "b"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-gamma-henley-4",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-gamma-henley-4",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"),
|
||||
},
|
||||
// Tests for statefulsets name ending in -
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--2",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("a" /* hash("") + 2 == 0 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--2",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("c" /* hash("") + 2 + 2(startingIndex) == 2 mod 3 */, "a"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--3",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("b" /* hash("") + 3 == 1 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--3",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("b" /* hash("") + 3 + 3(startingIndex) == 1 mod 3 */, "c"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--4",
|
||||
NumZones: 1,
|
||||
Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--4",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("a" /* hash("") + 4 + 4(startingIndex) == 0 mod 3 */, "b"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--4",
|
||||
NumZones: 3,
|
||||
Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */, "a", "b"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--4",
|
||||
NumZones: 4,
|
||||
Expected: sets.NewString("c" /* hash("") + 4 + 9(startingIndex) == 2 mod 3 */, "a", "b", "c"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
|
||||
VolumeName: "henley-0",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
|
||||
VolumeName: "henley-1",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("c" /* hash("henley") == 0 + 2 */, "d"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
|
||||
VolumeName: "henley-2",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("e" /* hash("henley") == 0 + 2 + 2(startingIndex) */, "f"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
|
||||
VolumeName: "henley-3",
|
||||
NumZones: 2,
|
||||
Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 4(startingIndex) */, "h"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
|
||||
VolumeName: "henley-0",
|
||||
NumZones: 3,
|
||||
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b", "c"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
|
||||
VolumeName: "henley-1",
|
||||
NumZones: 3,
|
||||
Expected: sets.NewString("d" /* hash("henley") == 0 + 1 + 2(startingIndex) */, "e", "f"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
|
||||
VolumeName: "henley-2",
|
||||
NumZones: 3,
|
||||
Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 4(startingIndex) */, "h", "i"),
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
|
||||
VolumeName: "henley-3",
|
||||
NumZones: 3,
|
||||
Expected: sets.NewString("a" /* hash("henley") == 0 + 3 + 6(startingIndex) */, "b", "c"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
actual := ChooseZonesForVolume(test.Zones, test.VolumeName, test.NumZones)
|
||||
|
||||
if !actual.Equal(test.Expected) {
|
||||
t.Errorf("Test %v failed, expected zone %#v, actual %#v", test, test.Expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateZone(t *testing.T) {
functionUnderTest := "ValidateZone"

// First part: want an error
errCases := []string{"", " "}
for _, errCase := range errCases {
if got := ValidateZone(errCase); got == nil {
t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, errCase, got, "an error")
}
}

// Second part: want no error
succCases := []string{" us-east-1a "}
for _, succCase := range succCases {
if got := ValidateZone(succCase); got != nil {
t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, succCase, got, nil)
}
}
}
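
// Illustrative sketch (not part of this commit) of the rule TestValidateZone
// exercises above: an empty or all-whitespace zone is rejected, anything else
// passes even with surrounding spaces. The real ValidateZone lives in util.go,
// and "fmt" is assumed to be imported near the top of this file (outside the
// hunk shown here).
func validateZoneSketch(zone string) error {
	if strings.TrimSpace(zone) == "" {
		return fmt.Errorf("the zone must not be empty")
	}
	return nil
}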

func TestGetWindowsPath(t *testing.T) {
tests := []struct {
path string
expectedPath string
}{
{
path: `/var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4/volumes/kubernetes.io~disk`,
expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
},
{
path: `\var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
},
{
path: `/`,
expectedPath: `c:\`,
},
{
path: ``,
expectedPath: ``,
},
}

for _, test := range tests {
result := GetWindowsPath(test.path)
if result != test.expectedPath {
t.Errorf("GetWindowsPath(%v) returned (%v), want (%v)", test.path, result, test.expectedPath)
}
}
}
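
// Illustrative sketch (not part of this commit) of the conversion the test
// above expects from GetWindowsPath: forward slashes become backslashes and an
// absolute path is rooted at "c:". The real implementation lives in util.go;
// this standalone version reproduces all four expected cases above.
func getWindowsPathSketch(path string) string {
	if path == "" {
		return ""
	}
	p := strings.Replace(path, "/", `\`, -1)
	if strings.HasPrefix(p, `\`) {
		p = "c:" + p
	}
	return p
}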

@ -1,39 +0,0 @@
// +build !linux

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"fmt"
)

// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
return "", fmt.Errorf("AttachFileDevice not supported for this build.")
}

// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
return "", fmt.Errorf("GetLoopDevice not supported for this build.")
}

// RemoveLoopDevice removes specified loopback device
func (v VolumePathHandler) RemoveLoopDevice(device string) error {
return fmt.Errorf("RemoveLoopDevice not supported for this build.")
}

@ -1,33 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)

go_library(
name = "go_default_library",
srcs = ["volumehelper.go"],
importpath = "k8s.io/kubernetes/pkg/volume/util/volumehelper",
deps = [
"//pkg/features:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
@ -1,176 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package volumehelper contains consts and helper methods used by various
|
||||
// volume components (attach/detach controller, kubelet, etc.).
|
||||
package volumehelper
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// ControllerManagedAttachAnnotation is the key of the annotation on Node
|
||||
// objects that indicates attach/detach operations for the node should be
|
||||
// managed by the attach/detach controller
|
||||
ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
|
||||
|
||||
// KeepTerminatedPodVolumesAnnotation is the key of the annotation on Node
|
||||
// that decides if pod volumes are unmounted when pod is terminated
|
||||
KeepTerminatedPodVolumesAnnotation string = "volumes.kubernetes.io/keep-terminated-pod-volumes"
|
||||
|
||||
// VolumeGidAnnotationKey is the of the annotation on the PersistentVolume
|
||||
// object that specifies a supplemental GID.
|
||||
VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"
|
||||
|
||||
// VolumeDynamicallyCreatedByKey is the key of the annotation on PersistentVolume
|
||||
// object created dynamically
|
||||
VolumeDynamicallyCreatedByKey = "kubernetes.io/createdby"
|
||||
)
|
||||
|
||||
// GetUniquePodName returns a unique identifier to reference a pod by
|
||||
func GetUniquePodName(pod *v1.Pod) types.UniquePodName {
|
||||
return types.UniquePodName(pod.UID)
|
||||
}
|
||||
|
||||
// GetUniqueVolumeName returns a unique name representing the volume/plugin.
|
||||
// Caller should ensure that volumeName is a name/ID uniquely identifying the
|
||||
// actual backing device, directory, path, etc. for a particular volume.
|
||||
// The returned name can be used to uniquely reference the volume, for example,
|
||||
// to prevent operations (attach/detach or mount/unmount) from being triggered
|
||||
// on the same volume.
|
||||
func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName {
|
||||
return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName))
|
||||
}
|
||||
|
||||
// GetUniqueVolumeNameForNonAttachableVolume returns the unique volume name
|
||||
// for a non-attachable volume.
|
||||
func GetUniqueVolumeNameForNonAttachableVolume(
|
||||
podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName {
|
||||
return v1.UniqueVolumeName(
|
||||
fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name()))
|
||||
}
|
||||
|
||||
// GetUniqueVolumeNameFromSpec uses the given VolumePlugin to generate a unique
|
||||
// name representing the volume defined in the specified volume spec.
|
||||
// This returned name can be used to uniquely reference the actual backing
|
||||
// device, directory, path, etc. referenced by the given volumeSpec.
|
||||
// If the given plugin does not support the volume spec, this returns an error.
|
||||
func GetUniqueVolumeNameFromSpec(
|
||||
volumePlugin volume.VolumePlugin,
|
||||
volumeSpec *volume.Spec) (v1.UniqueVolumeName, error) {
|
||||
if volumePlugin == nil {
|
||||
return "", fmt.Errorf(
|
||||
"volumePlugin should not be nil. volumeSpec.Name=%q",
|
||||
volumeSpec.Name())
|
||||
}
|
||||
|
||||
volumeName, err := volumePlugin.GetVolumeName(volumeSpec)
|
||||
if err != nil || volumeName == "" {
|
||||
return "", fmt.Errorf(
|
||||
"failed to GetVolumeName from volumePlugin for volumeSpec %q err=%v",
|
||||
volumeSpec.Name(),
|
||||
err)
|
||||
}
|
||||
|
||||
return GetUniqueVolumeName(
|
||||
volumePlugin.GetPluginName(),
|
||||
volumeName),
|
||||
nil
|
||||
}
|
||||
|
||||
// IsPodTerminated checks if pod is terminated
|
||||
func IsPodTerminated(pod *v1.Pod, podStatus v1.PodStatus) bool {
|
||||
return podStatus.Phase == v1.PodFailed || podStatus.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(podStatus.ContainerStatuses))
|
||||
}
|
||||
|
||||
// notRunning returns true if every status is terminated or waiting, or the status list
|
||||
// is empty.
|
||||
func notRunning(statuses []v1.ContainerStatus) bool {
|
||||
for _, status := range statuses {
|
||||
if status.State.Terminated == nil && status.State.Waiting == nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SplitUniqueName splits the unique name to plugin name and volume name strings. It expects the uniqueName to follow
// the format plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface,
// i.e. namespace/plugin containing exactly one '/'. This means the unique name will always be in the form of
// plugin_namespace/plugin/volume_name, see k8s.io/kubernetes/pkg/volume/plugins.go VolumePlugin interface
// description and pkg/volume/util/volumehelper/volumehelper.go GetUniqueVolumeNameFromSpec that constructs
// the unique volume names.
func SplitUniqueName(uniqueName v1.UniqueVolumeName) (string, string, error) {
components := strings.SplitN(string(uniqueName), "/", 3)
if len(components) != 3 {
return "", "", fmt.Errorf("cannot split volume unique name %s to plugin/volume components", uniqueName)
}
pluginName := fmt.Sprintf("%s/%s", components[0], components[1])
return pluginName, components[2], nil
}
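
// Illustrative sketch (not part of the original file): a unique name built by
// GetUniqueVolumeName round-trips through SplitUniqueName because the plugin
// name itself is namespaced with exactly one '/'. The plugin and volume names
// used here are only examples.
func exampleSplitUniqueName() {
	uniqueName := GetUniqueVolumeName("kubernetes.io/gce-pd", "my-disk")
	pluginName, volumeName, err := SplitUniqueName(uniqueName)
	fmt.Printf("%s %s %v\n", pluginName, volumeName, err)
	// Prints: kubernetes.io/gce-pd my-disk <nil>
}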
|
||||
|
||||
// NewSafeFormatAndMountFromHost creates a new SafeFormatAndMount with Mounter
|
||||
// and Exec taken from given VolumeHost.
|
||||
func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount {
|
||||
mounter := host.GetMounter(pluginName)
|
||||
exec := host.GetExec(pluginName)
|
||||
return &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}
|
||||
}
|
||||
|
||||
// GetVolumeMode retrieves VolumeMode from pv.
|
||||
// If the volume doesn't have PersistentVolume, it's an inline volume,
|
||||
// should return volumeMode as filesystem to keep existing behavior.
|
||||
func GetVolumeMode(volumeSpec *volume.Spec) (v1.PersistentVolumeMode, error) {
|
||||
if volumeSpec == nil || volumeSpec.PersistentVolume == nil {
|
||||
return v1.PersistentVolumeFilesystem, nil
|
||||
}
|
||||
if volumeSpec.PersistentVolume.Spec.VolumeMode != nil {
|
||||
return *volumeSpec.PersistentVolume.Spec.VolumeMode, nil
|
||||
}
|
||||
return "", fmt.Errorf("cannot get volumeMode for volume: %v", volumeSpec.Name())
|
||||
}
|
||||
|
||||
// GetPersistentVolumeClaimVolumeMode retrieves VolumeMode from pvc.
|
||||
func GetPersistentVolumeClaimVolumeMode(claim *v1.PersistentVolumeClaim) (v1.PersistentVolumeMode, error) {
|
||||
if claim.Spec.VolumeMode != nil {
|
||||
return *claim.Spec.VolumeMode, nil
|
||||
}
|
||||
return "", fmt.Errorf("cannot get volumeMode from pvc: %v", claim.Name)
|
||||
}
|
||||
|
||||
// CheckVolumeModeFilesystem checks VolumeMode.
|
||||
// If the mode is Filesystem, return true otherwise return false.
|
||||
func CheckVolumeModeFilesystem(volumeSpec *volume.Spec) (bool, error) {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
|
||||
volumeMode, err := GetVolumeMode(volumeSpec)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
if volumeMode == v1.PersistentVolumeBlock {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
@ -61,7 +61,7 @@ func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device discover command for path %s: %v", path, err)
glog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out)
return "", err
}
return parseLosetupOutputForDevice(out)
@ -72,7 +72,7 @@ func makeLoopDevice(path string) (string, error) {
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device create command for path %s: %v", path, err)
glog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out)
return "", err
}
return parseLosetupOutputForDevice(out)
@ -1,902 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package volume
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/util/slice"
|
||||
)
|
||||
|
||||
type testcase struct {
|
||||
// Input of the test
|
||||
name string
|
||||
existingPod *v1.Pod
|
||||
createPod *v1.Pod
|
||||
// eventSequence is list of events that are simulated during recycling. It
|
||||
// can be either event generated by a recycler pod or a state change of
|
||||
// the pod. (see newPodEvent and newEvent below).
|
||||
eventSequence []watch.Event
|
||||
|
||||
// Expected output.
|
||||
// expectedEvents is list of events that were sent to the volume that was
|
||||
// recycled.
|
||||
expectedEvents []mockEvent
|
||||
expectedError string
|
||||
}
|
||||
|
||||
func newPodEvent(eventtype watch.EventType, name string, phase v1.PodPhase, message string) watch.Event {
|
||||
return watch.Event{
|
||||
Type: eventtype,
|
||||
Object: newPod(name, phase, message),
|
||||
}
|
||||
}
|
||||
|
||||
func newEvent(eventtype, message string) watch.Event {
|
||||
return watch.Event{
|
||||
Type: watch.Added,
|
||||
Object: &v1.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
},
|
||||
Reason: "MockEvent",
|
||||
Message: message,
|
||||
Type: eventtype,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newPod(name string, phase v1.PodPhase, message string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
Name: name,
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
Phase: phase,
|
||||
Message: message,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestRecyclerPod(t *testing.T) {
|
||||
tests := []testcase{
|
||||
{
|
||||
// Test recycler success with some events
|
||||
name: "RecyclerSuccess",
|
||||
createPod: newPod("podRecyclerSuccess", v1.PodPending, ""),
|
||||
eventSequence: []watch.Event{
|
||||
// Pod gets Running and Succeeded
|
||||
newPodEvent(watch.Added, "podRecyclerSuccess", v1.PodPending, ""),
|
||||
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"),
|
||||
newEvent(v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""),
|
||||
newEvent(v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""),
|
||||
newEvent(v1.EventTypeNormal, "Created container with docker id 83d929aeac82"),
|
||||
newEvent(v1.EventTypeNormal, "Started container with docker id 83d929aeac82"),
|
||||
newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodRunning, ""),
|
||||
newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodSucceeded, ""),
|
||||
},
|
||||
expectedEvents: []mockEvent{
|
||||
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"},
|
||||
{v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""},
|
||||
{v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""},
|
||||
{v1.EventTypeNormal, "Created container with docker id 83d929aeac82"},
|
||||
{v1.EventTypeNormal, "Started container with docker id 83d929aeac82"},
|
||||
},
|
||||
expectedError: "",
|
||||
},
|
||||
{
|
||||
// Test recycler failure with some events
|
||||
name: "RecyclerFailure",
|
||||
createPod: newPod("podRecyclerFailure", v1.PodPending, ""),
|
||||
eventSequence: []watch.Event{
|
||||
// Pod gets Running and Succeeded
|
||||
newPodEvent(watch.Added, "podRecyclerFailure", v1.PodPending, ""),
|
||||
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"),
|
||||
newEvent(v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"),
|
||||
newEvent(v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". list of unattached/unmounted"),
|
||||
newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodRunning, ""),
|
||||
newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodFailed, "Pod was active on the node longer than specified deadline"),
|
||||
},
|
||||
expectedEvents: []mockEvent{
|
||||
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"},
|
||||
{v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"},
|
||||
{v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". list of unattached/unmounted"},
|
||||
},
|
||||
expectedError: "failed to recycle volume: Pod was active on the node longer than specified deadline",
|
||||
},
|
||||
{
|
||||
// Recycler pod gets deleted
|
||||
name: "RecyclerDeleted",
|
||||
createPod: newPod("podRecyclerDeleted", v1.PodPending, ""),
|
||||
eventSequence: []watch.Event{
|
||||
// Pod gets Running and Succeeded
|
||||
newPodEvent(watch.Added, "podRecyclerDeleted", v1.PodPending, ""),
|
||||
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"),
|
||||
newPodEvent(watch.Deleted, "podRecyclerDeleted", v1.PodPending, ""),
|
||||
},
|
||||
expectedEvents: []mockEvent{
|
||||
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"},
|
||||
},
|
||||
expectedError: "failed to recycle volume: recycler pod was deleted",
|
||||
},
|
||||
{
|
||||
// Another recycler pod is already running
|
||||
name: "RecyclerRunning",
|
||||
existingPod: newPod("podOldRecycler", v1.PodRunning, ""),
|
||||
createPod: newPod("podNewRecycler", v1.PodFailed, "mock message"),
|
||||
eventSequence: []watch.Event{},
|
||||
expectedError: "old recycler pod found, will retry later",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Logf("Test %q", test.name)
|
||||
client := &mockRecyclerClient{
|
||||
events: test.eventSequence,
|
||||
pod: test.existingPod,
|
||||
}
|
||||
err := internalRecycleVolumeByWatchingPodUntilCompletion(test.createPod.Name, test.createPod, client)
|
||||
receivedError := ""
|
||||
if err != nil {
|
||||
receivedError = err.Error()
|
||||
}
|
||||
if receivedError != test.expectedError {
|
||||
t.Errorf("Test %q failed, expected error %q, got %q", test.name, test.expectedError, receivedError)
|
||||
continue
|
||||
}
|
||||
if !client.deletedCalled {
|
||||
t.Errorf("Test %q failed, expected deferred client.Delete to be called on recycler pod", test.name)
|
||||
continue
|
||||
}
|
||||
for i, expectedEvent := range test.expectedEvents {
|
||||
if len(client.receivedEvents) <= i {
|
||||
t.Errorf("Test %q failed, expected event %d: %q not received", test.name, i, expectedEvent.message)
|
||||
continue
|
||||
}
|
||||
receivedEvent := client.receivedEvents[i]
|
||||
if expectedEvent.eventtype != receivedEvent.eventtype {
|
||||
t.Errorf("Test %q failed, event %d does not match: expected eventtype %q, got %q", test.name, i, expectedEvent.eventtype, receivedEvent.eventtype)
|
||||
}
|
||||
if expectedEvent.message != receivedEvent.message {
|
||||
t.Errorf("Test %q failed, event %d does not match: expected message %q, got %q", test.name, i, expectedEvent.message, receivedEvent.message)
|
||||
}
|
||||
}
|
||||
for i := len(test.expectedEvents); i < len(client.receivedEvents); i++ {
|
||||
t.Errorf("Test %q failed, unexpected event received: %s, %q", test.name, client.receivedEvents[i].eventtype, client.receivedEvents[i].message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type mockRecyclerClient struct {
|
||||
pod *v1.Pod
|
||||
deletedCalled bool
|
||||
receivedEvents []mockEvent
|
||||
events []watch.Event
|
||||
}
|
||||
|
||||
type mockEvent struct {
|
||||
eventtype, message string
|
||||
}
|
||||
|
||||
func (c *mockRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
|
||||
if c.pod == nil {
|
||||
c.pod = pod
|
||||
return c.pod, nil
|
||||
}
|
||||
// Simulate "already exists" error
|
||||
return nil, errors.NewAlreadyExists(api.Resource("pods"), pod.Name)
|
||||
}
|
||||
|
||||
func (c *mockRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
|
||||
if c.pod != nil {
|
||||
return c.pod, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("pod does not exist")
|
||||
}
|
||||
}
|
||||
|
||||
func (c *mockRecyclerClient) DeletePod(name, namespace string) error {
|
||||
c.deletedCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *mockRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
|
||||
eventCh := make(chan watch.Event, 0)
|
||||
go func() {
|
||||
for _, e := range c.events {
|
||||
eventCh <- e
|
||||
}
|
||||
}()
|
||||
return eventCh, nil
|
||||
}
|
||||
|
||||
func (c *mockRecyclerClient) Event(eventtype, message string) {
|
||||
c.receivedEvents = append(c.receivedEvents, mockEvent{eventtype, message})
|
||||
}
|
||||
|
||||
func TestCalculateTimeoutForVolume(t *testing.T) {
|
||||
pv := &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("500M"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
timeout := CalculateTimeoutForVolume(50, 30, pv)
|
||||
if timeout != 50 {
|
||||
t.Errorf("Expected 50 for timeout but got %v", timeout)
|
||||
}
|
||||
|
||||
pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("2Gi")
|
||||
timeout = CalculateTimeoutForVolume(50, 30, pv)
|
||||
if timeout != 60 {
|
||||
t.Errorf("Expected 60 for timeout but got %v", timeout)
|
||||
}
|
||||
|
||||
pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("150Gi")
|
||||
timeout = CalculateTimeoutForVolume(50, 30, pv)
|
||||
if timeout != 4500 {
|
||||
t.Errorf("Expected 4500 for timeout but got %v", timeout)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateVolumeName(t *testing.T) {
|
||||
|
||||
// Normal operation, no truncate
|
||||
v1 := GenerateVolumeName("kubernetes", "pv-cinder-abcde", 255)
|
||||
if v1 != "kubernetes-dynamic-pv-cinder-abcde" {
|
||||
t.Errorf("Expected kubernetes-dynamic-pv-cinder-abcde, got %s", v1)
|
||||
}
|
||||
|
||||
// Truncate trailing "6789-dynamic"
|
||||
prefix := strings.Repeat("0123456789", 9) // 90 characters prefix + 8 chars. of "-dynamic"
|
||||
v2 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100)
|
||||
expect := prefix[:84] + "-pv-cinder-abcde"
|
||||
if v2 != expect {
|
||||
t.Errorf("Expected %s, got %s", expect, v2)
|
||||
}
|
||||
|
||||
// Truncate really long cluster name
|
||||
prefix = strings.Repeat("0123456789", 1000) // 10000 characters prefix
|
||||
v3 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100)
|
||||
if v3 != expect {
|
||||
t.Errorf("Expected %s, got %s", expect, v3)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMountOptionFromSpec(t *testing.T) {
|
||||
scenarios := map[string]struct {
|
||||
volume *Spec
|
||||
expectedMountList []string
|
||||
systemOptions []string
|
||||
}{
|
||||
"volume-with-mount-options": {
|
||||
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
|
||||
},
|
||||
}),
|
||||
expectedMountList: []string{"ro", "nfsvers=3"},
|
||||
systemOptions: nil,
|
||||
},
|
||||
"volume-with-bad-mount-options": {
|
||||
volume: createVolumeSpecWithMountOption("good-mount-opts", "", v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
|
||||
},
|
||||
}),
|
||||
expectedMountList: []string{},
|
||||
systemOptions: nil,
|
||||
},
|
||||
"vol-with-sys-opts": {
|
||||
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
|
||||
},
|
||||
}),
|
||||
expectedMountList: []string{"ro", "nfsvers=3", "fsid=100", "hard"},
|
||||
systemOptions: []string{"fsid=100", "hard"},
|
||||
},
|
||||
"vol-with-sys-opts-with-dup": {
|
||||
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
|
||||
},
|
||||
}),
|
||||
expectedMountList: []string{"ro", "nfsvers=3", "fsid=100"},
|
||||
systemOptions: []string{"fsid=100", "ro"},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
mountOptions := MountOptionFromSpec(scenario.volume, scenario.systemOptions...)
|
||||
if !reflect.DeepEqual(slice.SortStrings(mountOptions), slice.SortStrings(scenario.expectedMountList)) {
|
||||
t.Errorf("for %s expected mount options : %v got %v", name, scenario.expectedMountList, mountOptions)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createVolumeSpecWithMountOption(name string, mountOptions string, spec v1.PersistentVolumeSpec) *Spec {
|
||||
annotations := map[string]string{
|
||||
v1.MountOptionAnnotation: mountOptions,
|
||||
}
|
||||
objMeta := metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Annotations: annotations,
|
||||
}
|
||||
|
||||
pv := &v1.PersistentVolume{
|
||||
ObjectMeta: objMeta,
|
||||
Spec: spec,
|
||||
}
|
||||
return &Spec{PersistentVolume: pv}
|
||||
}
|
||||
|
||||
func checkFnv32(t *testing.T, s string, expected int) {
|
||||
h := fnv.New32()
|
||||
h.Write([]byte(s))
|
||||
h.Sum32()
|
||||
|
||||
if int(h.Sum32()) != expected {
|
||||
t.Fatalf("hash of %q was %v, expected %v", s, h.Sum32(), expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChooseZoneForVolume(t *testing.T) {
|
||||
checkFnv32(t, "henley", 1180403676)
|
||||
// 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection
|
||||
|
||||
// A few others
|
||||
checkFnv32(t, "henley-", 2652299129)
|
||||
checkFnv32(t, "henley-a", 1459735322)
|
||||
checkFnv32(t, "", 2166136261)
|
||||
|
||||
tests := []struct {
|
||||
Zones sets.String
|
||||
VolumeName string
|
||||
Expected string
|
||||
}{
|
||||
// Test for PVC names that don't have a dash
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley",
|
||||
Expected: "a", // hash("henley") == 0
|
||||
},
|
||||
// Tests for PVC names that end in - number, but don't look like statefulset PVCs
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-0",
|
||||
Expected: "a", // hash("henley") == 0
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-1",
|
||||
Expected: "b", // hash("henley") + 1 == 1
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-2",
|
||||
Expected: "c", // hash("henley") + 2 == 2
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-3",
|
||||
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-4",
|
||||
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
|
||||
},
|
||||
// Tests for PVC names that are edge cases
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-",
|
||||
Expected: "c", // hash("henley-") = 2652299129 === 2 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "henley-a",
|
||||
Expected: "c", // hash("henley-a") = 1459735322 === 2 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium--1",
|
||||
Expected: "c", // hash("") + 1 == 2166136261 + 1 === 2 mod 3
|
||||
},
|
||||
// Tests for PVC names for simple StatefulSet cases
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-1",
|
||||
Expected: "b", // hash("henley") + 1 == 1
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "loud-henley-1",
|
||||
Expected: "b", // hash("henley") + 1 == 1
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "quiet-henley-2",
|
||||
Expected: "c", // hash("henley") + 2 == 2
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-2",
|
||||
Expected: "c", // hash("henley") + 2 == 2
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-3",
|
||||
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley-4",
|
||||
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
|
||||
},
|
||||
// Tests for statefulsets (or claims) with dashes in the names
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-alpha-henley-2",
|
||||
Expected: "c", // hash("henley") + 2 == 2
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-beta-henley-3",
|
||||
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-gamma-henley-4",
|
||||
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
|
||||
},
|
||||
// Tests for statefulsets name ending in -
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--2",
|
||||
Expected: "a", // hash("") + 2 == 0 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--3",
|
||||
Expected: "b", // hash("") + 3 == 1 mod 3
|
||||
},
|
||||
{
|
||||
Zones: sets.NewString("a", "b", "c"),
|
||||
VolumeName: "medium-henley--4",
|
||||
Expected: "c", // hash("") + 4 == 2 mod 3
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
actual := ChooseZoneForVolume(test.Zones, test.VolumeName)
|
||||
|
||||
if actual != test.Expected {
|
||||
t.Errorf("Test %v failed, expected zone %q, actual %q", test, test.Expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestChooseZonesForVolume(t *testing.T) {
|
||||
checkFnv32(t, "henley", 1180403676)
|
||||
// 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection
|
||||
|
||||
// A few others
|
||||
checkFnv32(t, "henley-", 2652299129)
|
||||
checkFnv32(t, "henley-a", 1459735322)
|
||||
checkFnv32(t, "", 2166136261)

	tests := []struct {
		Zones sets.String
		VolumeName string
		NumZones uint32
		Expected sets.String
	}{
		// Test for PVC names that don't have a dash
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley",
			NumZones: 1,
			Expected: sets.NewString("a" /* hash("henley") == 0 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley",
			NumZones: 2,
			Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
		},
		// Tests for PVC names that end in - number, but don't look like statefulset PVCs
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-0",
			NumZones: 1,
			Expected: sets.NewString("a" /* hash("henley") == 0 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-0",
			NumZones: 2,
			Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-1",
			NumZones: 1,
			Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-1",
			NumZones: 2,
			Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-2",
			NumZones: 1,
			Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-2",
			NumZones: 2,
			Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-3",
			NumZones: 1,
			Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-3",
			NumZones: 2,
			Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 */, "b"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-4",
			NumZones: 1,
			Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-4",
			NumZones: 2,
			Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 */, "a"),
		},
		// Tests for PVC names that are edge cases
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-",
			NumZones: 1,
			Expected: sets.NewString("c" /* hash("henley-") = 2652299129 === 2 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-",
			NumZones: 2,
			Expected: sets.NewString("c" /* hash("henley-") = 2652299129 === 2 mod 3 = 2 */, "a"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-a",
			NumZones: 1,
			Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "henley-a",
			NumZones: 2,
			Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 = 2 */, "a"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium--1",
			NumZones: 1,
			Expected: sets.NewString("c" /* hash("") + 1 == 2166136261 + 1 === 2 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium--1",
			NumZones: 2,
			Expected: sets.NewString("a" /* hash("") + 1 + 1(startingIndex) == 2166136261 + 1 + 1 === 3 mod 3 = 0 */, "b"),
		},
		// Tests for PVC names for simple StatefulSet cases
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley-1",
			NumZones: 1,
			Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
		},
		// Tests for PVC names for simple StatefulSet cases
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley-1",
			NumZones: 2,
			Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "loud-henley-1",
			NumZones: 1,
			Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "loud-henley-1",
			NumZones: 2,
			Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "quiet-henley-2",
			NumZones: 1,
			Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "quiet-henley-2",
			NumZones: 2,
			Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley-2",
			NumZones: 1,
			Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley-2",
			NumZones: 2,
			Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley-3",
			NumZones: 1,
			Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley-3",
			NumZones: 2,
			Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 6 mod 3 = 0 */, "b"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley-4",
			NumZones: 1,
			Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley-4",
			NumZones: 2,
			Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"),
		},
		// Tests for statefulsets (or claims) with dashes in the names
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-alpha-henley-2",
			NumZones: 1,
			Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-alpha-henley-2",
			NumZones: 2,
			Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-beta-henley-3",
			NumZones: 1,
			Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-beta-henley-3",
			NumZones: 2,
			Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 0 mod 3 */, "b"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-gamma-henley-4",
			NumZones: 1,
			Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-gamma-henley-4",
			NumZones: 2,
			Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"),
		},
		// Tests for statefulsets name ending in -
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley--2",
			NumZones: 1,
			Expected: sets.NewString("a" /* hash("") + 2 == 0 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley--2",
			NumZones: 2,
			Expected: sets.NewString("c" /* hash("") + 2 + 2(startingIndex) == 2 mod 3 */, "a"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley--3",
			NumZones: 1,
			Expected: sets.NewString("b" /* hash("") + 3 == 1 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley--3",
			NumZones: 2,
			Expected: sets.NewString("b" /* hash("") + 3 + 3(startingIndex) == 1 mod 3 */, "c"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley--4",
			NumZones: 1,
			Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley--4",
			NumZones: 2,
			Expected: sets.NewString("a" /* hash("") + 4 + 4(startingIndex) == 0 mod 3 */, "b"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley--4",
			NumZones: 3,
			Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */, "a", "b"),
		},
		{
			Zones: sets.NewString("a", "b", "c"),
			VolumeName: "medium-henley--4",
			NumZones: 4,
			Expected: sets.NewString("c" /* hash("") + 4 + 9(startingIndex) == 2 mod 3 */, "a", "b", "c"),
		},
		{
			Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
			VolumeName: "henley-0",
			NumZones: 2,
			Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
		},
		{
			Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
			VolumeName: "henley-1",
			NumZones: 2,
			Expected: sets.NewString("c" /* hash("henley") == 0 + 2 */, "d"),
		},
		{
			Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
			VolumeName: "henley-2",
			NumZones: 2,
			Expected: sets.NewString("e" /* hash("henley") == 0 + 2 + 2(startingIndex) */, "f"),
		},
		{
			Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
			VolumeName: "henley-3",
			NumZones: 2,
			Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 4(startingIndex) */, "h"),
		},
		{
			Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
			VolumeName: "henley-0",
			NumZones: 3,
			Expected: sets.NewString("a" /* hash("henley") == 0 */, "b", "c"),
		},
		{
			Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
			VolumeName: "henley-1",
			NumZones: 3,
			Expected: sets.NewString("d" /* hash("henley") == 0 + 1 + 2(startingIndex) */, "e", "f"),
		},
		{
			Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
			VolumeName: "henley-2",
			NumZones: 3,
			Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 4(startingIndex) */, "h", "i"),
		},
		{
			Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
			VolumeName: "henley-3",
			NumZones: 3,
			Expected: sets.NewString("a" /* hash("henley") == 0 + 3 + 6(startingIndex) */, "b", "c"),
		},
	}

	for _, test := range tests {
		actual := ChooseZonesForVolume(test.Zones, test.VolumeName, test.NumZones)

		if !actual.Equal(test.Expected) {
			t.Errorf("Test %v failed, expected zone %#v, actual %#v", test, test.Expected, actual)
		}
	}
}
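
// The "(startingIndex)" annotations above all reduce to one rule: reuse the
// hash and ordinal from the single-zone heuristic, then take NumZones
// consecutive entries from the sorted zone list, starting at offset
// hash + ordinal*NumZones and wrapping modulo len(zones). This is again only
// a hedged sketch built on the hypothetical sketchHashAndOrdinal helper
// above; the real ChooseZonesForVolume may differ in logging and validation.
func sketchChooseZones(zones sets.String, pvcName string, numZones uint32) sets.String {
	hash, ordinal := sketchHashAndOrdinal(pvcName)
	zoneSlice := zones.List() // sorted
	chosen := sets.NewString()
	start := ordinal * numZones
	for i := start; i < start+numZones; i++ {
		chosen.Insert(zoneSlice[(hash+i)%uint32(len(zoneSlice))])
	}
	return chosen
}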

func TestValidateZone(t *testing.T) {
	functionUnderTest := "ValidateZone"

	// First part: want an error
	errCases := []string{"", " "}
	for _, errCase := range errCases {
		if got := ValidateZone(errCase); got == nil {
			t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, errCase, got, "an error")
		}
	}

	// Second part: want no error
	succCases := []string{" us-east-1a "}
	for _, succCase := range succCases {
		if got := ValidateZone(succCase); got != nil {
			t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, succCase, got, nil)
		}
	}
}
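
// A minimal sketch of the behaviour these cases pin down: empty or
// whitespace-only zone names are rejected, anything else passes. The helper
// name and error text are made up (the real ValidateZone message may differ);
// it assumes "fmt" and "strings" are imported.
func sketchValidateZone(zone string) error {
	if strings.TrimSpace(zone) == "" {
		return fmt.Errorf("zone %q is not valid: it is empty or contains only spaces", zone)
	}
	return nil
}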

func TestGetWindowsPath(t *testing.T) {
	tests := []struct {
		path string
		expectedPath string
	}{
		{
			path: `/var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4/volumes/kubernetes.io~disk`,
			expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
		},
		{
			path: `\var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
			expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
		},
		{
			path: `/`,
			expectedPath: `c:\`,
		},
		{
			path: ``,
			expectedPath: ``,
		},
	}

	for _, test := range tests {
		result := GetWindowsPath(test.path)
		if result != test.expectedPath {
			t.Errorf("GetWindowsPath(%v) returned (%v), want (%v)", test.path, result, test.expectedPath)
		}
	}
}
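
// A minimal sketch of the conversion these cases describe: forward slashes
// become backslashes, and a path that then starts with a separator gets the
// "c:" drive prefixed. Illustration only: the helper name is made up, it
// assumes "strings" is imported, and the real GetWindowsPath may handle more
// cases.
func sketchGetWindowsPath(path string) string {
	windowsPath := strings.Replace(path, "/", `\`, -1)
	if strings.HasPrefix(windowsPath, `\`) {
		windowsPath = "c:" + windowsPath
	}
	return windowsPath
}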