Merge pull request #25457 from saad-ali/expectedStateOfWorldDataStructure

Automatic merge from submit-queue

Attach Detach Controller Business Logic

This PR adds the meat of the attach/detach controller proposed in #20262.

The PR splits the in-memory cache into a desired and actual state of the world.
This commit is contained in:
k8s-merge-robot 2016-05-26 00:41:54 -07:00
commit bda0dc88aa
26 changed files with 3905 additions and 1074 deletions

View File

@ -197,9 +197,13 @@ func Run(s *options.CMServer) error {
func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig *restclient.Config, stop <-chan struct{}) error {
podInformer := informers.CreateSharedPodIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pod-informer")), ResyncPeriod(s)())
nodeInformer := informers.CreateSharedNodeIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-informer")), ResyncPeriod(s)())
pvcInformer := informers.CreateSharedPVCIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pvc-informer")), ResyncPeriod(s)())
pvInformer := informers.CreateSharedPVIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pv-informer")), ResyncPeriod(s)())
informers := map[reflect.Type]framework.SharedIndexInformer{}
informers[reflect.TypeOf(&api.Pod{})] = podInformer
informers[reflect.TypeOf(&api.Node{})] = nodeInformer
informers[reflect.TypeOf(&api.PersistentVolumeClaim{})] = pvcInformer
informers[reflect.TypeOf(&api.PersistentVolume{})] = pvInformer
go endpointcontroller.NewEndpointController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller"))).
Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop)
@ -391,9 +395,21 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
volumeController.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
go volume.NewAttachDetachController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "attachdetach-controller")), podInformer, nodeInformer, ResyncPeriod(s)()).
Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
attachDetachController, attachDetachControllerErr :=
volume.NewAttachDetachController(
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "attachdetach-controller")),
podInformer,
nodeInformer,
pvcInformer,
pvInformer,
cloud,
ProbeAttachableVolumePlugins(s.VolumeConfiguration))
if attachDetachControllerErr != nil {
glog.Fatalf("Failed to start attach/detach controller: %v", attachDetachControllerErr)
} else {
go attachDetachController.Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
var rootCA []byte

View File

@ -81,6 +81,7 @@ func NewCMServer() *CMServer {
MinimumTimeoutHostPath: 60,
IncrementTimeoutHostPath: 30,
},
FlexVolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
},
ContentType: "application/vnd.kubernetes.protobuf",
KubeAPIQPS: 20.0,
@ -122,6 +123,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.BoolVar(&s.VolumeConfiguration.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.VolumeConfiguration.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
fs.StringVar(&s.VolumeConfiguration.FlexVolumePluginDir, "flex-volume-plugin-dir", s.VolumeConfiguration.FlexVolumePluginDir, "Full path of the directory in which the flex volume plugin should search for additional third party volume plugins.")
fs.Int32Var(&s.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.")
fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.")
fs.DurationVar(&s.DeploymentControllerSyncPeriod.Duration, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod.Duration, "Period for syncing the deployments.")

View File

@ -37,6 +37,7 @@ import (
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/aws_ebs"
"k8s.io/kubernetes/pkg/volume/cinder"
"k8s.io/kubernetes/pkg/volume/flexvolume"
"k8s.io/kubernetes/pkg/volume/gce_pd"
"k8s.io/kubernetes/pkg/volume/host_path"
"k8s.io/kubernetes/pkg/volume/nfs"
@ -45,6 +46,22 @@ import (
"github.com/golang/glog"
)
// ProbeAttachableVolumePlugins collects all volume plugins for the attach/
// detach controller. VolumeConfiguration is used to get FlexVolumePluginDir
// which specifies the directory to search for additional third party volume
// plugins.
// The list of plugins is manually compiled. This code and the plugin
// initialization code for kubelet really, really need a thorough refactor.
func ProbeAttachableVolumePlugins(config componentconfig.VolumeConfiguration) []volume.VolumePlugin {
	allPlugins := []volume.VolumePlugin{}
	// Only plugins that support attach/detach are registered here; flexvolume
	// additionally probes the configured third-party plugin directory.
	allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, flexvolume.ProbeVolumePlugins(config.FlexVolumePluginDir)...)
	return allPlugins
}
// ProbeRecyclableVolumePlugins collects all persistent volume plugins into an easy to use list.
func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []volume.VolumePlugin {
allPlugins := []volume.VolumePlugin{}

View File

@ -73,6 +73,7 @@ kube-controller-manager
--deleting-pods-qps=0.1: Number of nodes per second on which pods are deleted in case of node failure.
--deployment-controller-sync-period=30s: Period for syncing the deployments.
--enable-hostpath-provisioner[=false]: Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.
--flex-volume-plugin-dir="/usr/libexec/kubernetes/kubelet-plugins/volume/exec/": Full path of the directory in which the flex volume plugin should search for additional third party volume plugins.
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--horizontal-pod-autoscaler-sync-period=30s: The period for syncing the number of pods in horizontal pod autoscaler.
--kube-api-burst=30: Burst to use while talking with kubernetes apiserver

View File

@ -146,6 +146,7 @@ federated-api-qps
file-check-frequency
file-suffix
file_content_in_loop
flex-volume-plugin-dir
forward-services
framework-name
framework-store-uri

View File

@ -359,5 +359,6 @@ func DeepCopy_componentconfig_VolumeConfiguration(in VolumeConfiguration, out *V
if err := DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration(in.PersistentVolumeRecyclerConfiguration, &out.PersistentVolumeRecyclerConfiguration, c); err != nil {
return err
}
out.FlexVolumePluginDir = in.FlexVolumePluginDir
return nil
}

View File

@ -8709,14 +8709,14 @@ func (x *VolumeConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [2]bool
var yyq2 [3]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(2)
r.EncodeArrayStart(3)
} else {
yynn2 = 2
yynn2 = 3
for _, b := range yyq2 {
if b {
yynn2++
@ -8755,6 +8755,25 @@ func (x *VolumeConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
yy9 := &x.PersistentVolumeRecyclerConfiguration
yy9.CodecEncodeSelf(e)
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
yym12 := z.EncBinary()
_ = yym12
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir))
}
} else {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("flexVolumePluginDir"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym13 := z.EncBinary()
_ = yym13
if false {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir))
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
@ -8829,6 +8848,12 @@ func (x *VolumeConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder
yyv5 := &x.PersistentVolumeRecyclerConfiguration
yyv5.CodecDecodeSelf(d)
}
case "flexVolumePluginDir":
if r.TryDecodeAsNil() {
x.FlexVolumePluginDir = ""
} else {
x.FlexVolumePluginDir = string(r.DecodeString())
}
default:
z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3
@ -8840,16 +8865,16 @@ func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decod
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj6 int
var yyb6 bool
var yyhl6 bool = l >= 0
yyj6++
if yyhl6 {
yyb6 = yyj6 > l
var yyj7 int
var yyb7 bool
var yyhl7 bool = l >= 0
yyj7++
if yyhl7 {
yyb7 = yyj7 > l
} else {
yyb6 = r.CheckBreak()
yyb7 = r.CheckBreak()
}
if yyb6 {
if yyb7 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -8859,13 +8884,13 @@ func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decod
} else {
x.EnableHostPathProvisioning = bool(r.DecodeBool())
}
yyj6++
if yyhl6 {
yyb6 = yyj6 > l
yyj7++
if yyhl7 {
yyb7 = yyj7 > l
} else {
yyb6 = r.CheckBreak()
yyb7 = r.CheckBreak()
}
if yyb6 {
if yyb7 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -8873,21 +8898,37 @@ func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decod
if r.TryDecodeAsNil() {
x.PersistentVolumeRecyclerConfiguration = PersistentVolumeRecyclerConfiguration{}
} else {
yyv8 := &x.PersistentVolumeRecyclerConfiguration
yyv8.CodecDecodeSelf(d)
yyv9 := &x.PersistentVolumeRecyclerConfiguration
yyv9.CodecDecodeSelf(d)
}
yyj7++
if yyhl7 {
yyb7 = yyj7 > l
} else {
yyb7 = r.CheckBreak()
}
if yyb7 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
x.FlexVolumePluginDir = ""
} else {
x.FlexVolumePluginDir = string(r.DecodeString())
}
for {
yyj6++
if yyhl6 {
yyb6 = yyj6 > l
yyj7++
if yyhl7 {
yyb7 = yyj7 > l
} else {
yyb6 = r.CheckBreak()
yyb7 = r.CheckBreak()
}
if yyb6 {
if yyb7 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj6-1, "")
z.DecStructFieldNotFound(yyj7-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}

View File

@ -564,6 +564,9 @@ type VolumeConfiguration struct {
EnableHostPathProvisioning bool `json:"enableHostPathProvisioning"`
// persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins.
PersistentVolumeRecyclerConfiguration PersistentVolumeRecyclerConfiguration `json:"persitentVolumeRecyclerConfiguration"`
// volumePluginDir is the full path of the directory in which the flex
// volume plugin should search for additional third party volume plugins
FlexVolumePluginDir string `json:"flexVolumePluginDir"`
}
type PersistentVolumeRecyclerConfiguration struct {

View File

@ -82,3 +82,39 @@ func CreateSharedNodeIndexInformer(client clientset.Interface, resyncPeriod time
return sharedIndexInformer
}
// CreateSharedPVCIndexInformer returns a SharedIndexInformer that lists and watches all PVCs
func CreateSharedPVCIndexInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
	// List/watch PVCs across all namespaces.
	listWatcher := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return client.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
		},
	}
	return framework.NewSharedIndexInformer(
		listWatcher,
		&api.PersistentVolumeClaim{},
		resyncPeriod,
		cache.Indexers{})
}
// CreateSharedPVIndexInformer returns a SharedIndexInformer that lists and watches all PVs
func CreateSharedPVIndexInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
	// PVs are cluster-scoped, so no namespace is involved.
	listWatcher := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return client.Core().PersistentVolumes().List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return client.Core().PersistentVolumes().Watch(options)
		},
	}
	return framework.NewSharedIndexInformer(
		listWatcher,
		&api.PersistentVolume{},
		resyncPeriod,
		cache.Indexers{})
}

View File

@ -19,13 +19,45 @@ limitations under the License.
package volume
import (
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/volume/attacherdetacher"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/controller/volume/reconciler"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/volume"
)
const (
	// ControllerManagedAnnotation is the key of the annotation on Node objects
	// that indicates attach/detach operations for the node should be managed
	// by the attach/detach controller
	ControllerManagedAnnotation string = "volumes.kubernetes.io/controller-managed-attach"

	// SafeToDetachAnnotation is the annotation prefix added to the Node object
	// by kubelet in the format
	// "volumes.kubernetes.io/safetodetach-{volumename}" to indicate the volume
	// has been unmounted and is safe to detach.
	SafeToDetachAnnotation string = "volumes.kubernetes.io/safetodetach-"

	// reconcilerLoopPeriod is the amount of time the reconciler loop waits
	// between successive executions
	reconcilerLoopPeriod time.Duration = 100 * time.Millisecond

	// reconcilerMaxSafeToDetachDuration is the maximum amount of time the
	// attach detach controller will wait for a volume to be safely detached
	// from its node. Once this time has expired, the controller will assume the
	// node or kubelet are unresponsive and will detach the volume anyway.
	reconcilerMaxSafeToDetachDuration time.Duration = 10 * time.Minute
)
// AttachDetachController defines the operations supported by this controller.
@ -33,49 +65,34 @@ type AttachDetachController interface {
Run(stopCh <-chan struct{})
}
type attachDetachController struct {
// internalPodInformer is the shared pod informer used to fetch and store
// pod objects from the API server. It is shared with other controllers and
// therefore the pod objects in its store should be treated as immutable.
internalPodInformer framework.SharedInformer
// selfCreatedPodInformer is true if the internalPodInformer was created
// during initialization, not passed in.
selfCreatedPodInformer bool
// internalNodeInformer is the shared node informer used to fetch and store
// node objects from the API server. It is shared with other controllers
// and therefore the node objects in its store should be treated as
// immutable.
internalNodeInformer framework.SharedInformer
// selfCreatedNodeInformer is true if the internalNodeInformer was created
// during initialization, not passed in.
selfCreatedNodeInformer bool
}
// NewAttachDetachController returns a new instance of AttachDetachController.
func NewAttachDetachController(
kubeClient internalclientset.Interface,
podInformer framework.SharedInformer,
nodeInformer framework.SharedInformer,
resyncPeriod time.Duration) AttachDetachController {
selfCreatedPodInformer := false
selfCreatedNodeInformer := false
if podInformer == nil {
podInformer = informers.CreateSharedPodInformer(kubeClient, resyncPeriod)
selfCreatedPodInformer = true
}
if nodeInformer == nil {
nodeInformer = informers.CreateSharedNodeIndexInformer(kubeClient, resyncPeriod)
selfCreatedNodeInformer = true
}
pvcInformer framework.SharedInformer,
pvInformer framework.SharedInformer,
cloud cloudprovider.Interface,
plugins []volume.VolumePlugin) (AttachDetachController, error) {
// TODO: The default resyncPeriod for shared informers is 12 hours, this is
// unacceptable for the attach/detach controller. For example, if a pod is
// skipped because the node it is scheduled to didn't set its annotation in
// time, we don't want to have to wait 12hrs before processing the pod
// again.
// Luckily https://github.com/kubernetes/kubernetes/issues/23394 is being
// worked on and will split resync in to resync and relist. Once that
// happens the resync period can be set to something much faster (30
// seconds).
// If that issue is not resolved in time, then this controller will have to
// consider some unappealing alternate options: use a non-shared informer
// and set a faster resync period even if it causes relist, or requeue
// dropped pods so they are continuously processed until it is accepted or
// deleted (probably can't do this with sharedInformer), etc.
adc := &attachDetachController{
internalPodInformer: podInformer,
selfCreatedPodInformer: selfCreatedPodInformer,
internalNodeInformer: nodeInformer,
selfCreatedNodeInformer: selfCreatedNodeInformer,
kubeClient: kubeClient,
pvcInformer: pvcInformer,
pvInformer: pvInformer,
cloud: cloud,
}
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
@ -90,46 +107,426 @@ func NewAttachDetachController(
DeleteFunc: adc.nodeDelete,
})
return adc
if err := adc.volumePluginMgr.InitPlugins(plugins, adc); err != nil {
return nil, fmt.Errorf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err)
}
adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)
adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr)
adc.attacherDetacher = attacherdetacher.NewAttacherDetacher(&adc.volumePluginMgr)
adc.reconciler = reconciler.NewReconciler(
reconcilerLoopPeriod,
reconcilerMaxSafeToDetachDuration,
adc.desiredStateOfWorld,
adc.actualStateOfWorld,
adc.attacherDetacher)
return adc, nil
}
type attachDetachController struct {
// kubeClient is the kube API client used by volumehost to communicate with
// the API server.
kubeClient internalclientset.Interface
// pvcInformer is the shared PVC informer used to fetch and store PVC
// objects from the API server. It is shared with other controllers and
// therefore the PVC objects in its store should be treated as immutable.
pvcInformer framework.SharedInformer
// pvInformer is the shared PV informer used to fetch and store PV objects
// from the API server. It is shared with other controllers and therefore
// the PV objects in its store should be treated as immutable.
pvInformer framework.SharedInformer
// cloud provider used by volume host
cloud cloudprovider.Interface
// volumePluginMgr used to initialize and fetch volume plugins
volumePluginMgr volume.VolumePluginMgr
// desiredStateOfWorld is a data structure containing the desired state of
// the world according to this controller: i.e. what nodes the controller
// is managing, what volumes it wants be attached to these nodes, and which
// pods are scheduled to those nodes referencing the volumes.
// The data structure is populated by the controller using a stream of node
// and pod API server objects fetched by the informers.
desiredStateOfWorld cache.DesiredStateOfWorld
// actualStateOfWorld is a data structure containing the actual state of
// the world according to this controller: i.e. which volumes are attached
// to which nodes.
// The data structure is populated upon successful completion of attach and
// detach actions triggered by the controller and a periodic sync with
// storage providers for the "true" state of the world.
actualStateOfWorld cache.ActualStateOfWorld
// attacherDetacher is used to start asynchronous attach and operations
attacherDetacher attacherdetacher.AttacherDetacher
// reconciler is used to run an asynchronous periodic loop to reconcile the
// desiredStateOfWorld with the actualStateOfWorld by triggering attach
// detach operations using the attacherDetacher.
reconciler reconciler.Reconciler
}
func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
defer runtime.HandleCrash()
glog.Infof("Starting Attach Detach Controller")
// Start self-created shared informers
if adc.selfCreatedPodInformer {
go adc.internalPodInformer.Run(stopCh)
}
if adc.selfCreatedNodeInformer {
go adc.internalNodeInformer.Run(stopCh)
}
go adc.reconciler.Run(stopCh)
<-stopCh
glog.Infof("Shutting down Attach Detach Controller")
}
func (adc *attachDetachController) podAdd(obj interface{}) {
// No op for now
pod, ok := obj.(*api.Pod)
if pod == nil || !ok {
return
}
if pod.Spec.NodeName == "" {
// Ignore pods without NodeName, indicating they are not scheduled.
return
}
adc.processPodVolumes(pod, true /* addVolumes */)
}
func (adc *attachDetachController) podUpdate(oldObj, newObj interface{}) {
// No op for now
// The flow for update is the same as add.
adc.podAdd(newObj)
}
func (adc *attachDetachController) podDelete(obj interface{}) {
// No op for now
pod, ok := obj.(*api.Pod)
if pod == nil || !ok {
return
}
adc.processPodVolumes(pod, false /* addVolumes */)
}
func (adc *attachDetachController) nodeAdd(obj interface{}) {
// No op for now
node, ok := obj.(*api.Node)
if node == nil || !ok {
return
}
nodeName := node.Name
if _, exists := node.Annotations[ControllerManagedAnnotation]; exists {
// Node specifies annotation indicating it should be managed by attach
// detach controller. Add it to desired state of world.
adc.desiredStateOfWorld.AddNode(nodeName)
}
adc.processSafeToDetachAnnotations(nodeName, node.Annotations)
}
func (adc *attachDetachController) nodeUpdate(oldObj, newObj interface{}) {
// No op for now
// The flow for update is the same as add.
adc.nodeAdd(newObj)
}
func (adc *attachDetachController) nodeDelete(obj interface{}) {
// No op for now
node, ok := obj.(*api.Node)
if node == nil || !ok {
return
}
nodeName := node.Name
if err := adc.desiredStateOfWorld.DeleteNode(nodeName); err != nil {
glog.V(10).Infof("%v", err)
}
adc.processSafeToDetachAnnotations(nodeName, node.Annotations)
}
// processPodVolumes processes the volumes in the given pod and adds them to the
// desired state of the world if addVolumes is true, otherwise it removes them.
// Pods scheduled to nodes not managed by this controller are skipped.
func (adc *attachDetachController) processPodVolumes(
	pod *api.Pod, addVolumes bool) {
	if pod == nil {
		return
	}
	// len() is never negative; "== 0" replaces the original "<= 0".
	if len(pod.Spec.Volumes) == 0 {
		return
	}
	if !adc.desiredStateOfWorld.NodeExists(pod.Spec.NodeName) {
		// If the node the pod is scheduled to does not exist in the desired
		// state of the world data structure, that indicates the node is not
		// yet managed by the controller. Therefore, ignore the pod.
		// If the node is added to the list of managed nodes in the future,
		// future adds and updates to the pod will be processed.
		glog.V(10).Infof(
			"Skipping processing of pod %q/%q: it is scheduled to node %q which is not managed by the controller.",
			pod.Namespace,
			pod.Name,
			pod.Spec.NodeName)
		return
	}

	// Process volume spec for each volume defined in pod
	for _, podVolume := range pod.Spec.Volumes {
		volumeSpec, err := adc.createVolumeSpec(podVolume, pod.Namespace)
		if err != nil {
			glog.V(10).Infof(
				"Error processing volume %q for pod %q/%q: %v",
				podVolume.Name,
				pod.Namespace,
				pod.Name,
				err)
			continue
		}

		// Only volumes whose plugin implements the attacher interface are
		// tracked by this controller.
		attachableVolumePlugin, err :=
			adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
		if err != nil || attachableVolumePlugin == nil {
			glog.V(10).Infof(
				"Skipping volume %q for pod %q/%q: it does not implement attacher interface. err=%v",
				podVolume.Name,
				pod.Namespace,
				pod.Name,
				err)
			continue
		}

		if addVolumes {
			// Add volume to desired state of world
			_, err := adc.desiredStateOfWorld.AddPod(
				getUniquePodName(pod), volumeSpec, pod.Spec.NodeName)
			if err != nil {
				glog.V(10).Infof(
					"Failed to add volume %q for pod %q/%q to desiredStateOfWorld. %v",
					podVolume.Name,
					pod.Namespace,
					pod.Name,
					err)
			}
		} else {
			// Remove volume from desired state of world
			uniqueVolumeName, err := attachableVolumePlugin.GetUniqueVolumeName(volumeSpec)
			if err != nil {
				glog.V(10).Infof(
					"Failed to delete volume %q for pod %q/%q from desiredStateOfWorld. GetUniqueVolumeName failed with %v",
					podVolume.Name,
					pod.Namespace,
					pod.Name,
					err)
				continue
			}
			adc.desiredStateOfWorld.DeletePod(
				getUniquePodName(pod), uniqueVolumeName, pod.Spec.NodeName)
		}
	}
}
// createVolumeSpec creates and returns a mutable volume.Spec object for the
// specified volume. It dereferences any PVC to get the PV object, if needed.
func (adc *attachDetachController) createVolumeSpec(
	podVolume api.Volume, podNamespace string) (*volume.Spec, error) {
	if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
		// If podVolume is a PVC, fetch the real PV behind the claim
		pvName, pvcUID, err := adc.getPVCFromCacheExtractPV(
			podNamespace, pvcSource.ClaimName)
		if err != nil {
			return nil, fmt.Errorf("error processing PVC %q: %v", pvcSource.ClaimName, err)
		}

		// Fetch actual PV object
		volumeSpec, err := adc.getPVSpecFromCache(
			pvName, pvcSource.ReadOnly, pvcUID)
		if err != nil {
			return nil, fmt.Errorf("error processing PVC %q: %v", pvcSource.ClaimName, err)
		}

		return volumeSpec, nil
	}

	// Do not return the original volume object, since it's from the shared
	// informer it may be mutated by another consumer.
	clonedPodVolumeObj, err := api.Scheme.DeepCopy(podVolume)
	if err != nil || clonedPodVolumeObj == nil {
		return nil, fmt.Errorf("failed to deep copy %q volume object", podVolume.Name)
	}

	clonedPodVolume, ok := clonedPodVolumeObj.(api.Volume)
	if !ok {
		return nil, fmt.Errorf("failed to cast clonedPodVolume %#v to api.Volume", clonedPodVolumeObj)
	}

	return volume.NewSpecFromVolume(&clonedPodVolume), nil
}
// getPVCFromCacheExtractPV fetches the PVC object with the given namespace and
// name from the shared internal PVC store, extracts the name of the PV it is
// pointing to, and returns it along with the PVC's UID.
// This method returns an error if a PVC object does not exist in the cache
// with the given namespace/name.
// This method returns an error if the PVC object's phase is not "Bound".
func (adc *attachDetachController) getPVCFromCacheExtractPV(
	namespace string, name string) (string, types.UID, error) {
	key := name
	if len(namespace) > 0 {
		key = namespace + "/" + name
	}

	pvcObj, exists, err := adc.pvcInformer.GetStore().Get(key)
	if pvcObj == nil || !exists || err != nil {
		return "", "", fmt.Errorf(
			"failed to find PVC %q in PVCInformer cache. %v",
			key,
			err)
	}

	pvc, ok := pvcObj.(*api.PersistentVolumeClaim)
	// Fix: the original condition was "ok || pvc == nil", which treated every
	// *successful* type assertion as a failure; the failure case is "!ok".
	if !ok || pvc == nil {
		return "", "", fmt.Errorf(
			"failed to cast %q object %#v to PersistentVolumeClaim",
			key,
			pvcObj)
	}

	if pvc.Status.Phase != api.ClaimBound || pvc.Spec.VolumeName == "" {
		return "", "", fmt.Errorf(
			"PVC %q has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)",
			key,
			pvc.Status.Phase,
			pvc.Spec.VolumeName)
	}

	return pvc.Spec.VolumeName, pvc.UID, nil
}
// getPVSpecFromCache fetches the PV object with the given name from the shared
// internal PV store and returns a volume.Spec representing it.
// This method returns an error if a PV object does not exist in the cache with
// the given name.
// This method deep copies the PV object so the caller may use the returned
// volume.Spec object without worrying about it mutating unexpectedly.
func (adc *attachDetachController) getPVSpecFromCache(
	name string,
	pvcReadOnly bool,
	expectedClaimUID types.UID) (*volume.Spec, error) {
	pvObj, exists, err := adc.pvInformer.GetStore().Get(name)
	if pvObj == nil || !exists || err != nil {
		return nil, fmt.Errorf(
			"failed to find PV %q in PVInformer cache. %v", name, err)
	}

	pv, ok := pvObj.(*api.PersistentVolume)
	// Fix: the original condition was "ok || pv == nil", which treated every
	// *successful* type assertion as a failure; the failure case is "!ok".
	if !ok || pv == nil {
		return nil, fmt.Errorf(
			"failed to cast %q object %#v to PersistentVolume", name, pvObj)
	}

	if pv.Spec.ClaimRef == nil {
		return nil, fmt.Errorf(
			"found PV object %q but it has a nil pv.Spec.ClaimRef indicating it is not yet bound to the claim",
			name)
	}

	// Guard against a stale cache entry: the PV must still be bound to the
	// claim the caller resolved it from.
	if pv.Spec.ClaimRef.UID != expectedClaimUID {
		return nil, fmt.Errorf(
			"found PV object %q but its pv.Spec.ClaimRef.UID (%q) does not point to claim.UID (%q)",
			name,
			pv.Spec.ClaimRef.UID,
			expectedClaimUID)
	}

	// Do not return the object from the informer, since the store is shared it
	// may be mutated by another consumer.
	clonedPVObj, err := api.Scheme.DeepCopy(pv)
	if err != nil || clonedPVObj == nil {
		return nil, fmt.Errorf("failed to deep copy %q PV object", name)
	}

	clonedPV, ok := clonedPVObj.(api.PersistentVolume)
	if !ok {
		// Fix: report the value that actually failed to cast (clonedPVObj),
		// not the original pvObj.
		return nil, fmt.Errorf(
			"failed to cast %q clonedPV %#v to PersistentVolume", name, clonedPVObj)
	}

	return volume.NewSpecFromPersistentVolume(&clonedPV, pvcReadOnly), nil
}
// processSafeToDetachAnnotations processes the "safe to detach" annotations for
// the given node. Annotations referring to volumes the controller is not aware
// of are collected for deletion. For volumes it is aware of, it marks them safe
// to detach in the "actual state of world" data structure.
func (adc *attachDetachController) processSafeToDetachAnnotations(
	nodeName string, annotations map[string]string) {
	var annotationsToRemove []string
	for key := range annotations {
		// Annotation keys are matched case-insensitively.
		lowerKey := strings.ToLower(key)
		if !strings.HasPrefix(lowerKey, SafeToDetachAnnotation) {
			continue
		}
		// The volume name is whatever follows the annotation prefix.
		volumeName := strings.TrimPrefix(lowerKey, SafeToDetachAnnotation)
		// If volume exists in "actual state of world" mark it as safe to
		// detach; otherwise schedule the stale annotation for removal.
		if err := adc.actualStateOfWorld.MarkVolumeNodeSafeToDetach(volumeName, nodeName); err != nil {
			annotationsToRemove = append(annotationsToRemove, lowerKey)
		}
	}

	// TODO: Call out to API server to delete annotationsToRemove from Node
}
// getUniquePodName returns a unique name to reference the pod by in memory
// caches (namespace/name form via types.NamespacedName).
func getUniquePodName(pod *api.Pod) string {
	namespacedName := types.NamespacedName{
		Namespace: pod.Namespace,
		Name:      pod.Name,
	}
	return namespacedName.String()
}
// VolumeHost implementation
// This is an unfortunate requirement of the current factoring of volume plugin
// initializing code. It requires kubelet specific methods used by the mounting
// code to be implemented by all initializers even if the initializer does not
// do mounting (like this attach/detach controller).
// Issue kubernetes/kubernetes/issues/14217 to fix this.
// All mount-related methods below are deliberate no-ops (empty string / nil /
// error) because this controller only attaches and detaches volumes.

// GetPluginDir is a no-op; the controller never mounts, so there is no
// per-plugin directory.
func (adc *attachDetachController) GetPluginDir(podUID string) string {
	return ""
}

// GetPodVolumeDir is a no-op for the same reason.
func (adc *attachDetachController) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
	return ""
}

// GetPodPluginDir is a no-op for the same reason.
func (adc *attachDetachController) GetPodPluginDir(podUID types.UID, pluginName string) string {
	return ""
}

// GetKubeClient returns the API client plugins use to talk to the API server.
func (adc *attachDetachController) GetKubeClient() internalclientset.Interface {
	return adc.kubeClient
}

// NewWrapperMounter always fails: mounting is not supported by this host.
func (adc *attachDetachController) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	return nil, fmt.Errorf("NewWrapperMounter not supported by Attach/Detach controller's VolumeHost implementation")
}

// NewWrapperUnmounter always fails: unmounting is not supported by this host.
func (adc *attachDetachController) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
	return nil, fmt.Errorf("NewWrapperUnmounter not supported by Attach/Detach controller's VolumeHost implementation")
}

// GetCloudProvider returns the cloud provider used for attach/detach calls.
func (adc *attachDetachController) GetCloudProvider() cloudprovider.Interface {
	return adc.cloud
}

// GetMounter is a no-op; this host performs no mounts.
func (adc *attachDetachController) GetMounter() mount.Interface {
	return nil
}

// GetWriter is a no-op; this host performs no file writes.
func (adc *attachDetachController) GetWriter() io.Writer {
	return nil
}

// GetHostName is a no-op; the controller is not tied to a single host.
func (adc *attachDetachController) GetHostName() string {
	return ""
}

View File

@ -0,0 +1,114 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
)
// Test_NewAttachDetachController_Positive verifies that the controller
// constructor succeeds when handed a full set of shared informers.
func Test_NewAttachDetachController_Positive(t *testing.T) {
	// Arrange
	client := createTestClient()
	syncPeriod := 5 * time.Minute

	// Act
	_, err := NewAttachDetachController(
		client,
		informers.CreateSharedPodIndexInformer(client, syncPeriod),
		informers.CreateSharedNodeIndexInformer(client, syncPeriod),
		informers.CreateSharedPVCIndexInformer(client, syncPeriod),
		informers.CreateSharedPVIndexInformer(client, syncPeriod),
		nil, /* cloud */
		nil /* plugins */)

	// Assert
	if err != nil {
		t.Fatalf("Run failed with error. Expected: <no error> Actual: <%v>", err)
	}
}
// createTestClient returns a fake clientset whose pod "list" reactor serves
// five synthetic running pods, each mounting a single GCE PD volume, and whose
// watch reactor is backed by a fake watcher.
func createTestClient() *fake.Clientset {
	client := &fake.Clientset{}
	client.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		const (
			podNamePrefix = "mypod"
			namespace     = "mynamespace"
		)
		podList := &api.PodList{}
		for i := 0; i < 5; i++ {
			name := fmt.Sprintf("%s-%d", podNamePrefix, i)
			podList.Items = append(podList.Items, api.Pod{
				Status: api.PodStatus{
					Phase: api.PodRunning,
				},
				ObjectMeta: api.ObjectMeta{
					Name:      name,
					Namespace: namespace,
					Labels: map[string]string{
						"name": name,
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  "containerName",
							Image: "containerImage",
							VolumeMounts: []api.VolumeMount{
								{
									Name:      "volumeMountName",
									ReadOnly:  false,
									MountPath: "/mnt",
								},
							},
						},
					},
					Volumes: []api.Volume{
						{
							Name: "volumeName",
							VolumeSource: api.VolumeSource{
								GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
									PDName:   "pdName",
									FSType:   "ext4",
									ReadOnly: false,
								},
							},
						},
					},
				},
			})
		}
		return true, podList, nil
	})
	client.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil))
	return client
}

View File

@ -0,0 +1,183 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package attacherdetacher implements interfaces that enable triggering attach
// and detach operations on volumes.
package attacherdetacher
import (
"fmt"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/util/goroutinemap"
"k8s.io/kubernetes/pkg/volume"
)
// AttacherDetacher defines a set of operations for attaching or detaching a
// volume from a node.
type AttacherDetacher interface {
	// Spawns a new goroutine to execute volume-specific logic to attach the
	// volume to the node specified in the volumeToAttach.
	// Once attachment completes successfully, the actualStateOfWorld is updated
	// to indicate the volume is attached to the node.
	// If there is an error indicating the volume is already attached to the
	// specified node, attachment is assumed to be successful (plugins are
	// responsible for implementing this behavior).
	// All other errors are logged and the goroutine terminates without updating
	// actualStateOfWorld (caller is responsible for retrying as needed).
	AttachVolume(volumeToAttach *cache.VolumeToAttach, actualStateOfWorld cache.ActualStateOfWorld) error
	// Spawns a new goroutine to execute volume-specific logic to detach the
	// volume from the node specified in volumeToDetach.
	// Once detachment completes successfully, the actualStateOfWorld is updated
	// to remove the volume/node combo.
	// If there is an error indicating the volume is already detached from the
	// specified node, detachment is assumed to be successful (plugins are
	// responsible for implementing this behavior).
	// All other errors are logged and the goroutine terminates without updating
	// actualStateOfWorld (caller is responsible for retrying as needed).
	DetachVolume(volumeToDetach *cache.AttachedVolume, actualStateOfWorld cache.ActualStateOfWorld) error
}
// NewAttacherDetacher returns a new instance of AttacherDetacher backed by the
// given plugin manager and an empty pending-operations map.
func NewAttacherDetacher(volumePluginMgr *volume.VolumePluginMgr) AttacherDetacher {
	return &attacherDetacher{
		volumePluginMgr:   volumePluginMgr,
		pendingOperations: goroutinemap.NewGoRoutineMap(),
	}
}
// attacherDetacher is the default AttacherDetacher implementation. It serializes
// operations per volume via pendingOperations.
type attacherDetacher struct {
	// volumePluginMgr is the volume plugin manager used to create volume
	// plugin objects.
	volumePluginMgr *volume.VolumePluginMgr
	// pendingOperations keeps track of pending attach and detach operations so
	// multiple operations are not started on the same volume
	pendingOperations goroutinemap.GoRoutineMap
}
// AttachVolume builds the attach closure for the given volume/node and hands it
// to the pending-operations map, which runs it in a new goroutine keyed by
// volume name (so at most one operation per volume is in flight).
func (ad *attacherDetacher) AttachVolume(
	volumeToAttach *cache.VolumeToAttach,
	actualStateOfWorld cache.ActualStateOfWorld) error {
	attachFunc, err := ad.generateAttachVolumeFunc(volumeToAttach, actualStateOfWorld)
	if err != nil {
		return err
	}

	return ad.pendingOperations.Run(volumeToAttach.VolumeName, attachFunc)
}
// DetachVolume builds the detach closure for the given attached volume and hands
// it to the pending-operations map, which runs it in a new goroutine keyed by
// volume name (so at most one operation per volume is in flight).
func (ad *attacherDetacher) DetachVolume(
	volumeToDetach *cache.AttachedVolume,
	actualStateOfWorld cache.ActualStateOfWorld) error {
	detachFunc, err := ad.generateDetachVolumeFunc(volumeToDetach, actualStateOfWorld)
	if err != nil {
		return err
	}

	return ad.pendingOperations.Run(volumeToDetach.VolumeName, detachFunc)
}
// generateAttachVolumeFunc resolves the attachable plugin and attacher for the
// given volume up front (failing fast on lookup errors), and returns a closure
// that performs the attach and, on success, records the attachment in
// actualStateOfWorld.
func (ad *attacherDetacher) generateAttachVolumeFunc(
	volumeToAttach *cache.VolumeToAttach,
	actualStateOfWorld cache.ActualStateOfWorld) (func() error, error) {
	// Get attacher plugin
	plugin, findErr := ad.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec)
	if findErr != nil || plugin == nil {
		return nil, fmt.Errorf(
			"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
			volumeToAttach.VolumeSpec.Name(),
			findErr)
	}

	attacher, attacherErr := plugin.NewAttacher()
	if attacherErr != nil {
		return nil, fmt.Errorf(
			"failed to get NewAttacher from volumeSpec for volume %q err=%v",
			volumeToAttach.VolumeSpec.Name(),
			attacherErr)
	}

	return func() error {
		// Execute attach
		if attachErr := attacher.Attach(volumeToAttach.VolumeSpec, volumeToAttach.NodeName); attachErr != nil {
			// On failure, just log and exit. The controller will retry
			glog.Errorf("Attach operation for %q failed with: %v", volumeToAttach.VolumeName, attachErr)
			return attachErr
		}

		// Update actual state of world
		if _, addErr := actualStateOfWorld.AddVolumeNode(volumeToAttach.VolumeSpec, volumeToAttach.NodeName); addErr != nil {
			// On failure, just log and exit. The controller will retry
			glog.Errorf("Attach operation for %q succeeded but updating actualStateOfWorld failed with: %v", volumeToAttach.VolumeName, addErr)
			return addErr
		}

		return nil
	}, nil
}
// generateDetachVolumeFunc resolves the attachable plugin, the device name, and
// the detacher for the given attached volume up front (failing fast on lookup
// errors), and returns a closure that performs the detach and, on success,
// removes the volume/node combo from actualStateOfWorld.
func (ad *attacherDetacher) generateDetachVolumeFunc(
	volumeToDetach *cache.AttachedVolume,
	actualStateOfWorld cache.ActualStateOfWorld) (func() error, error) {
	// Get attacher plugin
	attachableVolumePlugin, err := ad.volumePluginMgr.FindAttachablePluginBySpec(volumeToDetach.VolumeSpec)
	if err != nil || attachableVolumePlugin == nil {
		return nil, fmt.Errorf(
			"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
			volumeToDetach.VolumeSpec.Name(),
			err)
	}

	deviceName, err := attachableVolumePlugin.GetDeviceName(volumeToDetach.VolumeSpec)
	if err != nil {
		// BUGFIX: the error message previously named GetUniqueVolumeName, but the
		// failing call here is GetDeviceName.
		return nil, fmt.Errorf(
			"failed to GetDeviceName from AttachablePlugin for volumeSpec %q err=%v",
			volumeToDetach.VolumeSpec.Name(),
			err)
	}

	volumeDetacher, err := attachableVolumePlugin.NewDetacher()
	if err != nil {
		return nil, fmt.Errorf(
			"failed to get NewDetacher from volumeSpec for volume %q err=%v",
			volumeToDetach.VolumeSpec.Name(),
			err)
	}

	return func() error {
		// Execute detach
		detachErr := volumeDetacher.Detach(deviceName, volumeToDetach.NodeName)
		if detachErr != nil {
			// On failure, just log and exit. The controller will retry
			glog.Errorf("Detach operation for %q failed with: %v", volumeToDetach.VolumeName, detachErr)
			return detachErr
		}

		// TODO: Reset "safe to detach" annotation on Node

		// Update actual state of world
		actualStateOfWorld.DeleteVolumeNode(volumeToDetach.VolumeName, volumeToDetach.NodeName)

		return nil
	}, nil
}

View File

@ -0,0 +1,314 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the attach/detach controller
to keep track of volumes, the nodes they are attached to, and the pods that
reference them.
*/
package cache
import (
"fmt"
"sync"
"time"
"k8s.io/kubernetes/pkg/volume"
)
// ActualStateOfWorld defines a set of thread-safe operations supported on
// the attach/detach controller's actual state of the world cache.
// This cache contains volumes->nodes i.e. a set of all volumes and the nodes
// the attach/detach controller believes are successfully attached.
type ActualStateOfWorld interface {
	// AddVolumeNode adds the given volume and node to the underlying store
	// indicating the specified volume is attached to the specified node.
	// A unique volumeName is generated from the volumeSpec and returned on
	// success.
	// If the volume/node combo already exists, this is a no-op (except that any
	// previous safe-to-detach / detach-requested state is reset).
	// If volumeSpec is not an attachable volume plugin, an error is returned.
	// If no volume with the name volumeName exists in the store, the volume is
	// added.
	// If no node with the name nodeName exists in list of attached nodes for
	// the specified volume, the node is added.
	AddVolumeNode(volumeSpec *volume.Spec, nodeName string) (string, error)

	// MarkVolumeNodeSafeToDetach marks the given volume as safe to detach from
	// the given node.
	// If no volume with the name volumeName exists in the store, an error is
	// returned.
	// If no node with the name nodeName exists in list of attached nodes for
	// the specified volume, an error is returned.
	MarkVolumeNodeSafeToDetach(volumeName, nodeName string) error

	// MarkDesireToDetach returns the difference between the current time and
	// the DetachRequestedTime for the given volume/node combo. If the
	// DetachRequestedTime is zero, it is set to the current time.
	// If no volume with the name volumeName exists in the store, an error is
	// returned.
	// If no node with the name nodeName exists in list of attached nodes for
	// the specified volume, an error is returned.
	MarkDesireToDetach(volumeName, nodeName string) (time.Duration, error)

	// DeleteVolumeNode removes the given volume and node from the underlying
	// store indicating the specified volume is no longer attached to the
	// specified node.
	// If the volume/node combo does not exist, this is a no-op.
	// If after deleting the node, the specified volume contains no other child
	// nodes, the volume is also deleted.
	DeleteVolumeNode(volumeName, nodeName string)

	// VolumeNodeExists returns true if the specified volume/node combo exists
	// in the underlying store indicating the specified volume is attached to
	// the specified node.
	VolumeNodeExists(volumeName, nodeName string) bool

	// GetAttachedVolumes generates and returns a list of volumes/node pairs
	// reflecting which volumes are attached to which nodes based on the
	// current actual state of the world.
	GetAttachedVolumes() []AttachedVolume
}
// AttachedVolume represents a volume that is attached to a node.
type AttachedVolume struct {
	// VolumeName is the unique identifier for the volume that is attached.
	VolumeName string

	// VolumeSpec is the volume spec containing the specification for the
	// volume that is attached.
	VolumeSpec *volume.Spec

	// NodeName is the identifier for the node that the volume is attached to.
	NodeName string

	// SafeToDetach indicates that this volume has been unmounted from the
	// node and is safe to detach.
	// The value is set by MarkVolumeNodeSafeToDetach(...) and reset on
	// AddVolumeNode(...) calls.
	SafeToDetach bool

	// DetachRequestedTime is used to capture the desire to detach this volume.
	// When the volume is newly created this value is set to time zero.
	// It is set to current time, when MarkDesireToDetach(...) is called, if it
	// was previously set to zero (otherwise its value remains the same).
	// It is reset to zero on AddVolumeNode(...) calls.
	DetachRequestedTime time.Time
}
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld with an
// empty attached-volumes map, backed by the given plugin manager.
func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
	return &actualStateOfWorld{
		attachedVolumes: make(map[string]attachedVolume),
		volumePluginMgr: volumePluginMgr,
	}
}
// actualStateOfWorld is the default ActualStateOfWorld implementation. All
// access to attachedVolumes is guarded by the embedded RWMutex; because a
// mutex is embedded, values of this type must not be copied.
type actualStateOfWorld struct {
	// attachedVolumes is a map containing the set of volumes the attach/detach
	// controller believes to be successfully attached to the nodes it is
	// managing. The key in this map is the name of the volume and the value is
	// an object containing more information about the attached volume.
	attachedVolumes map[string]attachedVolume

	// volumePluginMgr is the volume plugin manager used to create volume
	// plugin objects.
	volumePluginMgr *volume.VolumePluginMgr

	sync.RWMutex
}
// The attachedVolume object represents a volume the attach/detach controller
// believes to be successfully attached to a node it is managing.
type attachedVolume struct {
	// volumeName contains the unique identifier for this volume.
	volumeName string

	// spec is the volume spec containing the specification for this volume.
	// Used to generate the volume plugin object, and passed to attach/detach
	// methods.
	spec *volume.Spec

	// nodesAttachedTo is a map containing the set of nodes this volume has
	// successfully been attached to. The key in this map is the name of the
	// node and the value is a node object containing more information about
	// the node.
	nodesAttachedTo map[string]nodeAttachedTo
}
// The nodeAttachedTo object represents a node that a volume is attached to,
// along with the per-node detach bookkeeping for that attachment.
type nodeAttachedTo struct {
	// nodeName contains the name of this node.
	nodeName string

	// safeToDetach indicates that this node/volume combo has been unmounted
	// by the node and is safe to detach
	safeToDetach bool

	// detachRequestedTime used to capture the desire to detach this volume
	detachRequestedTime time.Time
}
// AddVolumeNode records that volumeSpec is attached to nodeName, creating the
// volume and node entries as needed, and returns the plugin-generated unique
// volume name. Re-adding an existing node resets its safe-to-detach and
// detach-requested state.
func (asw *actualStateOfWorld) AddVolumeNode(volumeSpec *volume.Spec, nodeName string) (string, error) {
	asw.Lock()
	defer asw.Unlock()

	attachablePlugin, err := asw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
	if err != nil || attachablePlugin == nil {
		return "", fmt.Errorf(
			"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
			volumeSpec.Name(),
			err)
	}

	volumeName, err := attachablePlugin.GetUniqueVolumeName(volumeSpec)
	if err != nil {
		return "", fmt.Errorf(
			"failed to GetUniqueVolumeName from AttachablePlugin for volumeSpec %q err=%v",
			volumeSpec.Name(),
			err)
	}

	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
	if !volumeExists {
		volumeObj = attachedVolume{
			volumeName:      volumeName,
			spec:            volumeSpec,
			nodesAttachedTo: make(map[string]nodeAttachedTo),
		}
		asw.attachedVolumes[volumeName] = volumeObj
	}

	nodeObj, nodeExists := volumeObj.nodesAttachedTo[nodeName]
	// Create the node entry if missing; if present but carrying stale
	// safe-to-detach or detach-requested state, overwrite it with a clean entry.
	needsReset := nodeObj.safeToDetach || !nodeObj.detachRequestedTime.IsZero()
	if !nodeExists || needsReset {
		volumeObj.nodesAttachedTo[nodeName] = nodeAttachedTo{
			nodeName:            nodeName,
			safeToDetach:        false,
			detachRequestedTime: time.Time{},
		}
	}

	return volumeName, nil
}
// MarkVolumeNodeSafeToDetach sets the safe-to-detach flag for the given
// volume/node combo, returning an error if either the volume or the node is
// not present in the cache.
func (asw *actualStateOfWorld) MarkVolumeNodeSafeToDetach(
	volumeName, nodeName string) error {
	asw.Lock()
	defer asw.Unlock()

	volumeObj, ok := asw.attachedVolumes[volumeName]
	if !ok {
		return fmt.Errorf(
			"failed to MarkVolumeNodeSafeToDetach(volumeName=%q, nodeName=%q) volumeName does not exist",
			volumeName,
			nodeName)
	}

	nodeObj, nodeFound := volumeObj.nodesAttachedTo[nodeName]
	if !nodeFound {
		return fmt.Errorf(
			"failed to MarkVolumeNodeSafeToDetach(volumeName=%q, nodeName=%q) nodeName does not exist",
			volumeName,
			nodeName)
	}

	// Set the flag and write the entry back — the map stores values, not pointers.
	nodeObj.safeToDetach = true
	volumeObj.nodesAttachedTo[nodeName] = nodeObj

	return nil
}
// MarkDesireToDetach stamps detachRequestedTime with the current time for the
// given volume/node combo, if it was not already set, and returns how long ago
// the detach was first requested. Returns an error if either the volume or the
// node is not present in the cache.
func (asw *actualStateOfWorld) MarkDesireToDetach(
	volumeName, nodeName string) (time.Duration, error) {
	asw.Lock()
	defer asw.Unlock()

	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
	if !volumeExists {
		// BUGFIX: error previously named MarkVolumeNodeSafeToDetach (copy-paste).
		return 0, fmt.Errorf(
			"failed to MarkDesireToDetach(volumeName=%q, nodeName=%q) volumeName does not exist",
			volumeName,
			nodeName)
	}

	nodeObj, nodeExists := volumeObj.nodesAttachedTo[nodeName]
	if !nodeExists {
		// BUGFIX: error previously named MarkVolumeNodeSafeToDetach (copy-paste).
		return 0, fmt.Errorf(
			"failed to MarkDesireToDetach(volumeName=%q, nodeName=%q) nodeName does not exist",
			volumeName,
			nodeName)
	}

	// Only set the timestamp on the first request; later calls keep the
	// original time so the returned duration keeps growing.
	if nodeObj.detachRequestedTime.IsZero() {
		nodeObj.detachRequestedTime = time.Now()
		volumeObj.nodesAttachedTo[nodeName] = nodeObj
	}

	return time.Since(volumeObj.nodesAttachedTo[nodeName].detachRequestedTime), nil
}
// DeleteVolumeNode removes the node from the volume's attached-nodes set; if
// that leaves the volume with no attached nodes, the volume entry itself is
// dropped. Missing volume or node is a no-op.
func (asw *actualStateOfWorld) DeleteVolumeNode(volumeName, nodeName string) {
	asw.Lock()
	defer asw.Unlock()

	volumeObj, ok := asw.attachedVolumes[volumeName]
	if !ok {
		return
	}

	if _, nodeFound := volumeObj.nodesAttachedTo[nodeName]; nodeFound {
		delete(volumeObj.nodesAttachedTo, nodeName)
	}

	// Drop the volume entirely once no nodes remain attached to it.
	if len(volumeObj.nodesAttachedTo) == 0 {
		delete(asw.attachedVolumes, volumeName)
	}
}
// VolumeNodeExists reports whether the given volume/node combo is present in
// the cache, i.e. whether the controller believes the volume is attached to
// that node.
func (asw *actualStateOfWorld) VolumeNodeExists(volumeName, nodeName string) bool {
	asw.RLock()
	defer asw.RUnlock()

	volumeObj, ok := asw.attachedVolumes[volumeName]
	if !ok {
		return false
	}
	_, nodeExists := volumeObj.nodesAttachedTo[nodeName]
	return nodeExists
}
// GetAttachedVolumes flattens the volume->nodes map into one AttachedVolume
// entry per volume/node pair, snapshotting the current state under a read lock.
func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
	asw.RLock()
	defer asw.RUnlock()

	result := make([]AttachedVolume, 0, len(asw.attachedVolumes))
	for volumeName, volumeObj := range asw.attachedVolumes {
		for nodeName, nodeObj := range volumeObj.nodesAttachedTo {
			result = append(result, AttachedVolume{
				VolumeName:          volumeName,
				VolumeSpec:          volumeObj.spec,
				NodeName:            nodeName,
				SafeToDetach:        nodeObj.safeToDetach,
				DetachRequestedTime: nodeObj.detachRequestedTime,
			})
		}
	}
	return result
}

View File

@ -0,0 +1,682 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"testing"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing"
)
// Test_AddVolumeNode_Positive_NewVolumeNewNode verifies that adding a brand-new
// volume/node pair creates exactly one attached-volume entry.
func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) {
	// Arrange
	// FIX: drop redundant double parentheses around t.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"

	// Act
	generatedVolumeName, err := asw.AddVolumeNode(volumeSpec, nodeName)

	// Assert
	if err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
	}

	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName, nodeName)
	if !volumeNodeComboExists {
		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName, nodeName)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}

	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_AddVolumeNode_Positive_ExistingVolumeNewNode verifies that attaching the
// same volume to a second node yields the same generated name and two entries.
func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) {
	// Arrange
	// FIX: drop redundant double parentheses around t.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	node1Name := "node1-name"
	node2Name := "node2-name"

	// Act
	generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name)
	generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name)

	// Assert
	if add1Err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
	}
	if add2Err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
	}
	if generatedVolumeName1 != generatedVolumeName2 {
		t.Fatalf(
			"Generated volume names for the same volume should be the same but they are not: %q and %q",
			generatedVolumeName1,
			generatedVolumeName2)
	}

	volumeNode1ComboExists := asw.VolumeNodeExists(generatedVolumeName1, node1Name)
	if !volumeNode1ComboExists {
		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName1, node1Name)
	}
	volumeNode2ComboExists := asw.VolumeNodeExists(generatedVolumeName1, node2Name)
	if !volumeNode2ComboExists {
		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName1, node2Name)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 2 {
		t.Fatalf("len(attachedVolumes) Expected: <2> Actual: <%v>", len(attachedVolumes))
	}

	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, node1Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, node2Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_AddVolumeNode_Positive_ExistingVolumeExistingNode verifies that re-adding
// an identical volume/node pair is idempotent: same name, still one entry.
func Test_AddVolumeNode_Positive_ExistingVolumeExistingNode(t *testing.T) {
	// Arrange
	// FIX: drop redundant double parentheses around t.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"

	// Act
	generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, nodeName)
	generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, nodeName)

	// Assert
	if add1Err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
	}
	if add2Err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
	}
	if generatedVolumeName1 != generatedVolumeName2 {
		t.Fatalf(
			"Generated volume names for the same volume should be the same but they are not: %q and %q",
			generatedVolumeName1,
			generatedVolumeName2)
	}

	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName1, nodeName)
	if !volumeNodeComboExists {
		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName1, nodeName)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}

	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists verifies that deleting
// the only node of a volume removes the volume entry entirely.
func Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists(t *testing.T) {
	// Arrange
	// FIX: drop redundant double parentheses around t.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}

	// Act
	asw.DeleteVolumeNode(generatedVolumeName, nodeName)

	// Assert
	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName, nodeName)
	if volumeNodeComboExists {
		t.Fatalf("%q/%q volume/node combo exists, it should not.", generatedVolumeName, nodeName)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 0 {
		t.Fatalf("len(attachedVolumes) Expected: <0> Actual: <%v>", len(attachedVolumes))
	}
}
// Test_DeleteVolumeNode_Positive_VolumeDoesntExistNodeDoesntExist verifies that
// deleting a nonexistent volume/node pair is a silent no-op.
func Test_DeleteVolumeNode_Positive_VolumeDoesntExistNodeDoesntExist(t *testing.T) {
	// Arrange
	// FIX: drop redundant double parentheses around t.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	nodeName := "node-name"

	// Act
	asw.DeleteVolumeNode(volumeName, nodeName)

	// Assert
	volumeNodeComboExists := asw.VolumeNodeExists(volumeName, nodeName)
	if volumeNodeComboExists {
		t.Fatalf("%q/%q volume/node combo exists, it should not.", volumeName, nodeName)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 0 {
		t.Fatalf("len(attachedVolumes) Expected: <0> Actual: <%v>", len(attachedVolumes))
	}
}
// Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted verifies that deleting one
// of two nodes for a volume keeps the volume and the remaining node.
func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) {
	// Arrange
	// FIX: drop redundant double parentheses around t.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	node1Name := "node1-name"
	node2Name := "node2-name"
	generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name)
	if add1Err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
	}
	generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name)
	if add2Err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
	}
	if generatedVolumeName1 != generatedVolumeName2 {
		t.Fatalf(
			"Generated volume names for the same volume should be the same but they are not: %q and %q",
			generatedVolumeName1,
			generatedVolumeName2)
	}

	// Act
	asw.DeleteVolumeNode(generatedVolumeName1, node1Name)

	// Assert
	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName1, node1Name)
	if volumeNodeComboExists {
		t.Fatalf("%q/%q volume/node combo exists, it should not.", generatedVolumeName1, node1Name)
	}

	volumeNodeComboExists = asw.VolumeNodeExists(generatedVolumeName1, node2Name)
	if !volumeNodeComboExists {
		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName1, node2Name)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}

	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, node2Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_VolumeNodeExists_Positive_VolumeExistsNodeExists verifies that
// VolumeNodeExists returns true for a previously added volume/node pair.
func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
	// Arrange
	// FIX: drop redundant double parentheses around t.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}

	// Act
	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName, nodeName)

	// Assert
	if !volumeNodeComboExists {
		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName, nodeName)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}

	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist verifies that
// VolumeNodeExists returns false for a node the volume is not attached to,
// while the existing attachment is untouched.
func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) {
	// Arrange
	// FIX: drop redundant double parentheses around t.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	node1Name := "node1-name"
	node2Name := "node2-name"
	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, node1Name)
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}

	// Act
	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName, node2Name)

	// Assert
	if volumeNodeComboExists {
		t.Fatalf("%q/%q volume/node combo exists, it should not.", generatedVolumeName, node2Name)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}

	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, node1Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_VolumeNodeExists_Positive_VolumeAndNodeDontExist verifies that
// VolumeNodeExists returns false on an empty cache.
func Test_VolumeNodeExists_Positive_VolumeAndNodeDontExist(t *testing.T) {
	// Arrange
	// FIX: drop redundant double parentheses around t.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	nodeName := "node-name"

	// Act
	volumeNodeComboExists := asw.VolumeNodeExists(volumeName, nodeName)

	// Assert
	if volumeNodeComboExists {
		t.Fatalf("%q/%q volume/node combo exists, it should not.", volumeName, nodeName)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 0 {
		t.Fatalf("len(attachedVolumes) Expected: <0> Actual: <%v>", len(attachedVolumes))
	}
}
// Test_GetAttachedVolumes_Positive_NoVolumesOrNodes verifies that
// GetAttachedVolumes returns an empty list on an empty cache.
func Test_GetAttachedVolumes_Positive_NoVolumesOrNodes(t *testing.T) {
	// Arrange
	// FIX: drop redundant double parentheses around t.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)

	// Act
	attachedVolumes := asw.GetAttachedVolumes()

	// Assert
	if len(attachedVolumes) != 0 {
		t.Fatalf("len(attachedVolumes) Expected: <0> Actual: <%v>", len(attachedVolumes))
	}
}
// Test_GetAttachedVolumes_Positive_OneVolumeOneNode verifies that a single
// AddVolumeNode call is reflected by exactly one entry in GetAttachedVolumes.
func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) {
	// Arrange: attach one volume to one node.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	generatedVolumeName, err := asw.AddVolumeNode(volumeSpec, nodeName)
	if err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	attachedVolumes := asw.GetAttachedVolumes()

	// Assert
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes verifies that two
// distinct volume/node attachments each show up in GetAttachedVolumes.
func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
	// Arrange: attach two different volumes to two different nodes.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volume1Name := "volume1-name"
	volume1Spec := controllervolumetesting.GetTestVolumeSpec(volume1Name, volume1Name)
	node1Name := "node1-name"
	generatedVolumeName1, err1 := asw.AddVolumeNode(volume1Spec, node1Name)
	if err1 != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err1)
	}
	volume2Name := "volume2-name"
	volume2Spec := controllervolumetesting.GetTestVolumeSpec(volume2Name, volume2Name)
	node2Name := "node2-name"
	generatedVolumeName2, err2 := asw.AddVolumeNode(volume2Spec, node2Name)
	if err2 != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err2)
	}

	// Act
	attachedVolumes := asw.GetAttachedVolumes()

	// Assert: both attachments are present, neither marked for detach.
	if len(attachedVolumes) != 2 {
		t.Fatalf("len(attachedVolumes) Expected: <2> Actual: <%v>", len(attachedVolumes))
	}
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volume1Name, node1Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName2, volume2Name, node2Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes verifies that attaching
// the same volume to two nodes produces two entries that share the same
// generated volume name.
func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) {
	// Arrange: attach the same volume spec to node1 and node2.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	node1Name := "node1-name"
	generatedVolumeName1, err1 := asw.AddVolumeNode(volumeSpec, node1Name)
	if err1 != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err1)
	}
	node2Name := "node2-name"
	generatedVolumeName2, err2 := asw.AddVolumeNode(volumeSpec, node2Name)
	if err2 != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err2)
	}
	// The generated name is derived from the spec, so both calls must agree.
	if generatedVolumeName1 != generatedVolumeName2 {
		t.Fatalf(
			"Generated volume names for the same volume should be the same but they are not: %q and %q",
			generatedVolumeName1,
			generatedVolumeName2)
	}

	// Act
	attachedVolumes := asw.GetAttachedVolumes()

	// Assert: one entry per node, same volume name.
	if len(attachedVolumes) != 2 {
		t.Fatalf("len(attachedVolumes) Expected: <2> Actual: <%v>", len(attachedVolumes))
	}
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, node1Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, node2Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_MarkVolumeNodeSafeToDetach_Positive_NotMarked verifies the default
// state of a new attachment: SafeToDetach is false when never marked.
func Test_MarkVolumeNodeSafeToDetach_Positive_NotMarked(t *testing.T) {
	// Arrange: add a volume/node pair; never call MarkVolumeNodeSafeToDetach.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	generatedVolumeName, err := asw.AddVolumeNode(volumeSpec, nodeName)
	if err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act: intentionally no action -- testing the default value.

	// Assert
	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_MarkVolumeNodeSafeToDetach_Positive_Marked verifies that marking an
// existing attachment safe-to-detach succeeds and is reflected in
// GetAttachedVolumes.
func Test_MarkVolumeNodeSafeToDetach_Positive_Marked(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	generatedVolumeName, err := asw.AddVolumeNode(volumeSpec, nodeName)
	if err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	markErr := asw.MarkVolumeNodeSafeToDetach(generatedVolumeName, nodeName)

	// Assert: the flag must now be set on the (single) attachment.
	if markErr != nil {
		t.Fatalf("MarkVolumeNodeSafeToDetach failed. Expected <no error> Actual: <%v>", markErr)
	}
	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, true /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_MarkVolumeNodeSafeToDetach_Positive_MarkedAddVolumeNodeReset verifies
// that re-adding an existing volume/node pair resets the SafeToDetach flag
// back to false.
func Test_MarkVolumeNodeSafeToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}

	// Act: mark safe-to-detach, then re-add the same volume/node pair.
	markErr := asw.MarkVolumeNodeSafeToDetach(generatedVolumeName, nodeName)
	generatedVolumeName, addErr = asw.AddVolumeNode(volumeSpec, nodeName)

	// Assert: both operations succeed and the flag is back to its default.
	if markErr != nil {
		t.Fatalf("MarkVolumeNodeSafeToDetach failed. Expected <no error> Actual: <%v>", markErr)
	}
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}
	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_MarkVolumeNodeSafeToDetach_Positive_MarkedVerifyDetachRequestedTimePerserved
// verifies that MarkVolumeNodeSafeToDetach does not reset a previously set
// DetachRequestedTime.
// (Note: "Perserved" in the test name is a historical typo.)
func Test_MarkVolumeNodeSafeToDetach_Positive_MarkedVerifyDetachRequestedTimePerserved(t *testing.T) {
	// Arrange: add the attachment and record a detach-requested timestamp.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	generatedVolumeName, err := asw.AddVolumeNode(volumeSpec, nodeName)
	if err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
	}
	if _, err := asw.MarkDesireToDetach(generatedVolumeName, nodeName); err != nil {
		t.Fatalf("MarkDesireToDetach failed. Expected: <no error> Actual: <%v>", err)
	}
	expectedDetachRequestedTime := asw.GetAttachedVolumes()[0].DetachRequestedTime

	// Act
	markErr := asw.MarkVolumeNodeSafeToDetach(generatedVolumeName, nodeName)

	// Assert: flag set, timestamp unchanged.
	if markErr != nil {
		t.Fatalf("MarkVolumeNodeSafeToDetach failed. Expected <no error> Actual: <%v>", markErr)
	}
	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, true /* expectedSafeToDetach */, true /* expectNonZeroDetachRequestedTime */)
	if !expectedDetachRequestedTime.Equal(attachedVolumes[0].DetachRequestedTime) {
		t.Fatalf("DetachRequestedTime changed. Expected: <%v> Actual: <%v>", expectedDetachRequestedTime, attachedVolumes[0].DetachRequestedTime)
	}
}
// Test_MarkDesireToDetach_Positive_NotMarked verifies the default state of a
// new attachment: DetachRequestedTime is zero when detach was never requested.
func Test_MarkDesireToDetach_Positive_NotMarked(t *testing.T) {
	// Arrange: add a volume/node pair; never call MarkDesireToDetach.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	generatedVolumeName, err := asw.AddVolumeNode(volumeSpec, nodeName)
	if err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act: intentionally no action -- testing the default value.

	// Assert
	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_MarkDesireToDetach_Positive_Marked verifies that MarkDesireToDetach on
// an existing attachment succeeds and sets a non-zero DetachRequestedTime.
func Test_MarkDesireToDetach_Positive_Marked(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	generatedVolumeName, err := asw.AddVolumeNode(volumeSpec, nodeName)
	if err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	_, markErr := asw.MarkDesireToDetach(generatedVolumeName, nodeName)

	// Assert: call succeeds and the timestamp is now set.
	if markErr != nil {
		t.Fatalf("MarkDesireToDetach failed. Expected: <no error> Actual: <%v>", markErr)
	}
	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, true /* expectNonZeroDetachRequestedTime */)
}
// Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset verifies that
// re-adding an existing volume/node pair resets DetachRequestedTime back to
// the zero value.
func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}

	// Act: request detach, then re-add the same volume/node pair.
	_, markErr := asw.MarkDesireToDetach(generatedVolumeName, nodeName)
	generatedVolumeName, addErr = asw.AddVolumeNode(volumeSpec, nodeName)

	// Assert: both operations succeed and the timestamp is back to zero.
	if markErr != nil {
		t.Fatalf("MarkDesireToDetach failed. Expected: <no error> Actual: <%v>", markErr)
	}
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}
	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
}
// Test_MarkDesireToDetach_Positive_MarkedVerifySafeToDetachPreserved verifies
// that MarkDesireToDetach does not clear a previously set SafeToDetach flag.
func Test_MarkDesireToDetach_Positive_MarkedVerifySafeToDetachPreserved(t *testing.T) {
	// Arrange: add the attachment and mark it safe to detach first.
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	generatedVolumeName, err := asw.AddVolumeNode(volumeSpec, nodeName)
	if err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
	}
	if safeErr := asw.MarkVolumeNodeSafeToDetach(generatedVolumeName, nodeName); safeErr != nil {
		t.Fatalf("MarkVolumeNodeSafeToDetach failed. Expected <no error> Actual: <%v>", safeErr)
	}

	// Act
	_, markErr := asw.MarkDesireToDetach(generatedVolumeName, nodeName)

	// Assert: both flags are set on the single attachment.
	if markErr != nil {
		t.Fatalf("MarkDesireToDetach failed. Expected: <no error> Actual: <%v>", markErr)
	}
	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, true /* expectedSafeToDetach */, true /* expectNonZeroDetachRequestedTime */)
}
// verifyAttachedVolume fails the test unless attachedVolumes contains at
// least one entry whose VolumeName, VolumeSpec name, NodeName, SafeToDetach
// flag, and DetachRequestedTime zero-ness all match the expected values.
func verifyAttachedVolume(
	t *testing.T,
	attachedVolumes []AttachedVolume,
	expectedVolumeName,
	expectedVolumeSpecName,
	expectedNodeName string,
	expectedSafeToDetach,
	expectNonZeroDetachRequestedTime bool) {
	for _, av := range attachedVolumes {
		// Skip entries that differ in any expected field.
		if av.VolumeName != expectedVolumeName ||
			av.VolumeSpec.Name() != expectedVolumeSpecName ||
			av.NodeName != expectedNodeName ||
			av.SafeToDetach != expectedSafeToDetach ||
			av.DetachRequestedTime.IsZero() == expectNonZeroDetachRequestedTime {
			continue
		}
		return // found a fully matching entry
	}
	t.Fatalf(
		"attachedVolumes (%v) should contain the volume/node combo %q/%q with SafeToDetach=%v and NonZeroDetachRequestedTime=%v. It does not.",
		attachedVolumes,
		expectedVolumeName,
		expectedNodeName,
		expectedSafeToDetach,
		expectNonZeroDetachRequestedTime)
}
// t.Logf("attachedVolumes: %v", asw.GetAttachedVolumes()) // TEMP

View File

@ -1,412 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements a data structure used by the attach/detach controller
to keep track of volumes, the nodes they are attached to, and the pods that
reference them. It is thread-safe.
*/
package cache
import (
"fmt"
"sync"
)
// AttachDetachVolumeCache defines the set of operations the volume cache
// supports.
type AttachDetachVolumeCache interface {
	// AddVolume adds the given volume to the list of volumes managed by the
	// attach detach controller.
	// If the volume already exists, this is a no-op.
	AddVolume(volumeName string)

	// AddNode adds the given node to the list of nodes the specified volume is
	// attached to.
	// If no volume with the name volumeName exists in the list of managed
	// volumes, an error is returned.
	// If the node already exists for the specified volume, this is a no-op.
	AddNode(nodeName, volumeName string) error

	// AddPod adds the given pod to the list of pods that are scheduled to
	// the specified node and referencing the specified volume.
	// If no node with the name nodeName exists in the list of attached nodes,
	// an error is returned.
	// If no volume with the name volumeName exists in the list of managed
	// volumes, an error is returned.
	// If the pod already exists for the specified volume, this is a no-op.
	AddPod(podName, nodeName, volumeName string) error

	// DeleteVolume removes the given volume from the list of volumes managed
	// by the attach detach controller.
	// If no volume with the name volumeName exists in the list of managed
	// volumes, an error is returned.
	// All attachedNodes must be deleted from the volume before it is deleted.
	// If the specified volume contains 1 or more attachedNodes, an error is
	// returned.
	DeleteVolume(volumeName string) error

	// DeleteNode removes the given node from the list of nodes the specified
	// volume is attached to.
	// If no node with the name nodeName exists in the list of attached nodes,
	// an error is returned.
	// If no volume with the name volumeName exists in the list of managed
	// volumes, an error is returned.
	// All scheduledPods must be deleted from the node before it is deleted.
	// If the specified node contains 1 or more scheduledPods, an error is
	// returned.
	DeleteNode(nodeName, volumeName string) error

	// DeletePod removes the given pod from the list of pods that are scheduled
	// to the specified node and referencing the specified volume.
	// If no pod with the name podName exists for the specified volume/node, an
	// error is returned.
	// If no node with the name nodeName exists in the list of attached nodes,
	// an error is returned.
	// If no volume with the name volumeName exists in the list of managed
	// volumes, an error is returned.
	DeletePod(podName, nodeName, volumeName string) error

	// VolumeExists returns true if the volume with the specified name exists
	// in the list of volumes managed by the attach detach controller.
	VolumeExists(volumeName string) bool

	// NodeExists returns true if the node with the specified name exists in
	// the list of nodes the specified volume is attached to.
	// If no volume with the name volumeName exists in the list of managed
	// volumes, an error is returned.
	NodeExists(nodeName, volumeName string) (bool, error)

	// PodExists returns true if the pod with the specified name exists in the
	// list of pods that are scheduled to the specified node and referencing
	// the specified volume.
	// If no node with the name nodeName exists in the list of attached nodes,
	// an error is returned.
	// If no volume with the name volumeName exists in the list of managed
	// volumes, an error is returned.
	PodExists(podName, nodeName, volumeName string) (bool, error)
}

// NewAttachDetachVolumeCache returns a new instance of the
// AttachDetachVolumeCache.
func NewAttachDetachVolumeCache() AttachDetachVolumeCache {
	return &attachDetachVolumeCache{
		volumesManaged: make(map[string]volume),
	}
}

// attachDetachVolumeCache is the default, mutex-guarded implementation of
// AttachDetachVolumeCache.
type attachDetachVolumeCache struct {
	// volumesManaged is a map containing the set of volumes managed by the
	// attach/detach controller. The key in this map is the name of the unique
	// volume identifier and the value is a volume object containing more
	// information about the volume.
	volumesManaged map[string]volume
	// RWMutex guards all access to volumesManaged and its nested maps.
	sync.RWMutex
}

// The volume object represents a volume that is being tracked by the attach
// detach controller.
type volume struct {
	// name contains the unique identifier for this volume.
	name string
	// attachedNodes is a map containing the set of nodes this volume has
	// successfully been attached to. The key in this map is the name of the
	// node and the value is a node object containing more information about
	// the node.
	attachedNodes map[string]node
}

// The node object represents a node that a volume is attached to.
type node struct {
	// name contains the name of this node.
	name string
	// scheduledPods is a map containing the set of pods that are scheduled to
	// this node and referencing the underlying volume. The key in the map is
	// the name of the pod and the value is a pod object containing more
	// information about the pod.
	scheduledPods map[string]pod
}

// The pod object represents a pod that is scheduled to a node and referencing
// the underlying volume.
type pod struct {
	// name contains the name of this pod.
	name string
}

// AddVolume adds the given volume to the list of volumes managed by the attach
// detach controller.
// If the volume already exists, this is a no-op.
func (vc *attachDetachVolumeCache) AddVolume(volumeName string) {
	vc.Lock()
	defer vc.Unlock()
	if _, exists := vc.volumesManaged[volumeName]; !exists {
		vc.volumesManaged[volumeName] = volume{
			name:          volumeName,
			attachedNodes: make(map[string]node),
		}
	}
}

// AddNode adds the given node to the list of nodes the specified volume is
// attached to.
// If no volume with the name volumeName exists in the list of managed volumes,
// an error is returned.
// If the node already exists for the specified volume, this is a no-op.
func (vc *attachDetachVolumeCache) AddNode(nodeName, volumeName string) error {
	vc.Lock()
	defer vc.Unlock()
	vol, volExists := vc.volumesManaged[volumeName]
	if !volExists {
		return fmt.Errorf(
			"failed to add node %q to volume %q--no volume with that name exists in the list of managed volumes",
			nodeName,
			volumeName)
	}
	if _, nodeExists := vol.attachedNodes[nodeName]; !nodeExists {
		vc.volumesManaged[volumeName].attachedNodes[nodeName] = node{
			name:          nodeName,
			scheduledPods: make(map[string]pod),
		}
	}
	return nil
}

// AddPod adds the given pod to the list of pods that are scheduled to the
// specified node and referencing the specified volume.
// If no node with the name nodeName exists in the list of attached nodes,
// an error is returned.
// If no volume with the name volumeName exists in the list of managed
// volumes, an error is returned.
// If the pod already exists for the specified volume, this is a no-op.
func (vc *attachDetachVolumeCache) AddPod(podName, nodeName, volumeName string) error {
	vc.Lock()
	defer vc.Unlock()
	volObj, volExists := vc.volumesManaged[volumeName]
	if !volExists {
		return fmt.Errorf(
			"failed to add pod %q to node %q volume %q--no volume with that name exists in the list of managed volumes",
			podName,
			nodeName,
			volumeName)
	}
	nodeObj, nodeExists := volObj.attachedNodes[nodeName]
	if !nodeExists {
		return fmt.Errorf(
			"failed to add pod %q to node %q volume %q--no node with that name exists in the list of attached nodes for that volume",
			podName,
			nodeName,
			volumeName)
	}
	if _, podExists := nodeObj.scheduledPods[podName]; !podExists {
		vc.volumesManaged[volumeName].attachedNodes[nodeName].scheduledPods[podName] =
			pod{
				name: podName,
			}
	}
	return nil
}

// DeleteVolume removes the given volume from the list of volumes managed by
// the attach detach controller.
// If no volume with the name volumeName exists in the list of managed volumes,
// an error is returned.
// All attachedNodes must be deleted from the volume before it is deleted.
// If the specified volume contains 1 or more attachedNodes, an error is
// returned.
func (vc *attachDetachVolumeCache) DeleteVolume(volumeName string) error {
	vc.Lock()
	defer vc.Unlock()
	volObj, volExists := vc.volumesManaged[volumeName]
	if !volExists {
		return fmt.Errorf(
			"failed to delete volume %q--no volume with that name exists in the list of managed volumes",
			volumeName)
	}
	if len(volObj.attachedNodes) > 0 {
		return fmt.Errorf(
			"failed to remove volume %q from list of managed volumes--the volume still contains %v nodes in its list of attached nodes",
			volumeName,
			len(volObj.attachedNodes))
	}
	delete(vc.volumesManaged, volumeName)
	return nil
}

// DeleteNode removes the given node from the list of nodes the specified
// volume is attached to.
// If no node with the name nodeName exists in the list of attached nodes, an
// error is returned.
// If no volume with the name volumeName exists in the list of managed
// volumes, an error is returned.
// All scheduledPods must be deleted from the node before it is deleted.
// If the specified node contains 1 or more scheduledPods, an error is
// returned.
func (vc *attachDetachVolumeCache) DeleteNode(nodeName, volumeName string) error {
	vc.Lock()
	defer vc.Unlock()
	volObj, volExists := vc.volumesManaged[volumeName]
	if !volExists {
		return fmt.Errorf(
			"failed to delete node %q from volume %q--no volume with that name exists in the list of managed volumes",
			nodeName,
			volumeName)
	}
	nodeObj, nodeExists := volObj.attachedNodes[nodeName]
	if !nodeExists {
		// Fixed doubled article ("the that") in the original message.
		return fmt.Errorf(
			"failed to delete node %q from volume %q--no node with that name exists in the list of attached nodes for that volume",
			nodeName,
			volumeName)
	}
	if len(nodeObj.scheduledPods) > 0 {
		return fmt.Errorf(
			"failed to remove node %q from volume %q--the node still contains %v pods in its list of scheduled pods",
			nodeName,
			volumeName,
			len(nodeObj.scheduledPods))
	}
	delete(vc.volumesManaged[volumeName].attachedNodes, nodeName)
	return nil
}

// DeletePod removes the given pod from the list of pods that are scheduled
// to the specified node and referencing the specified volume.
// If no pod with the name podName exists for the specified volume/node, an
// error is returned.
// If no node with the name nodeName exists in the list of attached nodes,
// an error is returned.
// If no volume with the name volumeName exists in the list of managed
// volumes, an error is returned.
func (vc *attachDetachVolumeCache) DeletePod(podName, nodeName, volumeName string) error {
	vc.Lock()
	defer vc.Unlock()
	volObj, volExists := vc.volumesManaged[volumeName]
	if !volExists {
		return fmt.Errorf(
			"failed to delete pod %q from node %q volume %q--no volume with that name exists in the list of managed volumes",
			podName,
			nodeName,
			volumeName)
	}
	nodeObj, nodeExists := volObj.attachedNodes[nodeName]
	if !nodeExists {
		return fmt.Errorf(
			"failed to delete pod %q from node %q volume %q--no node with that name exists in the list of attached nodes for that volume",
			podName,
			nodeName,
			volumeName)
	}
	if _, podExists := nodeObj.scheduledPods[podName]; !podExists {
		return fmt.Errorf(
			"failed to delete pod %q from node %q volume %q--no pod with that name exists in the list of scheduled pods under that node/volume",
			podName,
			nodeName,
			volumeName)
	}
	delete(vc.volumesManaged[volumeName].attachedNodes[nodeName].scheduledPods, podName)
	return nil
}

// VolumeExists returns true if the volume with the specified name exists in
// the list of volumes managed by the attach detach controller.
func (vc *attachDetachVolumeCache) VolumeExists(volumeName string) bool {
	vc.RLock()
	defer vc.RUnlock()
	_, volExists := vc.volumesManaged[volumeName]
	return volExists
}

// NodeExists returns true if the node with the specified name exists in the
// list of nodes the specified volume is attached to.
// If no volume with the name volumeName exists in the list of managed
// volumes, an error is returned.
func (vc *attachDetachVolumeCache) NodeExists(nodeName, volumeName string) (bool, error) {
	vc.RLock()
	defer vc.RUnlock()
	volObj, volExists := vc.volumesManaged[volumeName]
	if !volExists {
		return false,
			fmt.Errorf(
				"failed to check if node %q exists under volume %q--no volume with that name exists in the list of managed volumes",
				nodeName,
				volumeName)
	}
	_, nodeExists := volObj.attachedNodes[nodeName]
	return nodeExists, nil
}

// PodExists returns true if the pod with the specified name exists in the list
// of pods that are scheduled to the specified node and referencing the
// specified volume.
// If no node with the name nodeName exists in the list of attached nodes, an
// error is returned.
// If no volume with the name volumeName exists in the list of managed volumes,
// an error is returned.
func (vc *attachDetachVolumeCache) PodExists(podName, nodeName, volumeName string) (bool, error) {
	vc.RLock()
	defer vc.RUnlock()
	volObj, volExists := vc.volumesManaged[volumeName]
	if !volExists {
		// Fixed copy-paste from NodeExists: this message now describes the
		// pod-existence check and includes the pod name.
		return false,
			fmt.Errorf(
				"failed to check if pod %q exists under node %q volume %q--no volume with that name exists in the list of managed volumes",
				podName,
				nodeName,
				volumeName)
	}
	nodeObj, nodeExists := volObj.attachedNodes[nodeName]
	if !nodeExists {
		return false, fmt.Errorf(
			"failed to check if pod %q exists under node %q volume %q--no node with that name exists in the list of attached nodes for that volume",
			podName,
			nodeName,
			volumeName)
	}
	_, podExists := nodeObj.scheduledPods[podName]
	return podExists, nil
}

View File

@ -1,579 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import "testing"
// Test_AddVolume_Positive_NewVolume verifies that a newly added volume is
// reported by VolumeExists.
func Test_AddVolume_Positive_NewVolume(t *testing.T) {
	// Arrange
	vc := NewAttachDetachVolumeCache()
	volumeName := "volume-name"

	// Act
	vc.AddVolume(volumeName)

	// Assert
	if !vc.VolumeExists(volumeName) {
		t.Fatalf("Added volume %q does not exist, it should.", volumeName)
	}
}
// Test_AddVolume_Positive_ExistingVolume verifies that adding a volume twice
// is a no-op and the volume still exists afterwards.
func Test_AddVolume_Positive_ExistingVolume(t *testing.T) {
	// Arrange: volume already present.
	vc := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	vc.AddVolume(volumeName)

	// Act: add the same volume again.
	vc.AddVolume(volumeName)

	// Assert
	if !vc.VolumeExists(volumeName) {
		t.Fatalf("Added volume %q does not exist, it should.", volumeName)
	}
}
// Test_AddNode_Positive_NewNodeVolumeExists verifies that a node can be added
// to an existing volume and is then reported by NodeExists.
func Test_AddNode_Positive_NewNodeVolumeExists(t *testing.T) {
	// Arrange
	vc := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	vc.AddVolume(volumeName)

	// Act
	if nodeErr := vc.AddNode(nodeName, volumeName); nodeErr != nil {
		t.Fatalf("AddNode failed. Expected: <no error> Actual: <%v>", nodeErr)
	}

	// Assert
	nodeExists, nodeExistsErr := vc.NodeExists(nodeName, volumeName)
	if nodeExistsErr != nil {
		t.Fatalf("NodeExists failed. Expected: <no error> Actual: <%v>", nodeExistsErr)
	}
	if !nodeExists {
		t.Fatalf("Added node %q does not exist, it should.", nodeName)
	}
}
// Test_AddNode_Positive_NodeExistsVolumeExists verifies that adding the same
// node twice to a volume is a no-op and the node still exists afterwards.
func Test_AddNode_Positive_NodeExistsVolumeExists(t *testing.T) {
	// Arrange: volume with the node already attached.
	vc := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	vc.AddVolume(volumeName)
	if firstErr := vc.AddNode(nodeName, volumeName); firstErr != nil {
		t.Fatalf("First call to AddNode failed. Expected: <no error> Actual: <%v>", firstErr)
	}

	// Act: add the same node again.
	secondErr := vc.AddNode(nodeName, volumeName)

	// Assert
	if secondErr != nil {
		t.Fatalf("Second call to AddNode failed. Expected: <no error> Actual: <%v>", secondErr)
	}
	nodeExists, nodeExistsErr := vc.NodeExists(nodeName, volumeName)
	if nodeExistsErr != nil {
		t.Fatalf("NodeExists failed. Expected: <no error> Actual: <%v>", nodeExistsErr)
	}
	if !nodeExists {
		t.Fatalf("Added node %q does not exist, it should.", nodeName)
	}
}
// Test_AddNode_Negative_NewNodeVolumeDoesntExists verifies that AddNode and
// NodeExists both fail when the target volume was never added.
func Test_AddNode_Negative_NewNodeVolumeDoesntExists(t *testing.T) {
	// Arrange: cache intentionally left without the volume.
	vc := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"

	// Act
	nodeErr := vc.AddNode(nodeName, volumeName)

	// Assert: both the add and the subsequent existence check must error.
	if nodeErr == nil {
		t.Fatalf("AddNode did not fail. Expected: <\"failed to add node...no volume with that name exists in the list of managed volumes\"> Actual: <no error>")
	}
	nodeExists, nodeExistsErr := vc.NodeExists(nodeName, volumeName)
	if nodeExistsErr == nil {
		t.Fatalf("NodeExists did not fail. Expected: <failed to check if node...no volume with that name exists in the list of managed volumes> Actual: <no error>")
	}
	if nodeExists {
		t.Fatalf("Added node %q exists, it should not.", nodeName)
	}
}
// Test_AddPod_Positive_NewPodNodeExistsVolumeExists verifies that a pod can be
// added under an existing volume/node pair and is then reported by PodExists.
func Test_AddPod_Positive_NewPodNodeExistsVolumeExists(t *testing.T) {
	// Arrange: volume and node already in the cache.
	vc := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	podName := "pod-name"
	vc.AddVolume(volumeName)
	if nodeErr := vc.AddNode(nodeName, volumeName); nodeErr != nil {
		t.Fatalf("AddNode failed. Expected: <no error> Actual: <%v>", nodeErr)
	}

	// Act
	podErr := vc.AddPod(podName, nodeName, volumeName)

	// Assert
	if podErr != nil {
		t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podErr)
	}
	podExists, podExistsErr := vc.PodExists(podName, nodeName, volumeName)
	if podExistsErr != nil {
		t.Fatalf("PodExists failed. Expected: <no error> Actual: <%v>", podExistsErr)
	}
	if !podExists {
		t.Fatalf("Added pod %q does not exist, it should.", podName)
	}
}
// Test_AddPod_Positive_PodExistsNodeExistsVolumeExists verifies that adding
// the same pod twice is a no-op and the pod still exists afterwards.
func Test_AddPod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
	// Arrange: volume, node, and pod already in the cache.
	vc := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	podName := "pod-name"
	vc.AddVolume(volumeName)
	if nodeErr := vc.AddNode(nodeName, volumeName); nodeErr != nil {
		t.Fatalf("AddNode failed. Expected: <no error> Actual: <%v>", nodeErr)
	}
	if firstPodErr := vc.AddPod(podName, nodeName, volumeName); firstPodErr != nil {
		t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", firstPodErr)
	}

	// Act: add the same pod again.
	secondPodErr := vc.AddPod(podName, nodeName, volumeName)

	// Assert
	if secondPodErr != nil {
		t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", secondPodErr)
	}
	podExists, podExistsErr := vc.PodExists(podName, nodeName, volumeName)
	if podExistsErr != nil {
		t.Fatalf("PodExists failed. Expected: <no error> Actual: <%v>", podExistsErr)
	}
	if !podExists {
		t.Fatalf("Added pod %q does not exist, it should.", podName)
	}
}
// Verifies AddPod fails when the volume exists but the node was never added,
// and that PodExists also fails in that state.
func Test_AddPod_Negative_NewPodNodeDoesntExistsVolumeExists(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	podName := "pod-name"
	cache.AddVolume(volumeName)

	// Act
	addPodErr := cache.AddPod(podName, nodeName, volumeName)

	// Assert
	if addPodErr == nil {
		t.Fatalf("AddPod did not fail. Expected: <\"failed to add pod...no node with that name exists in the list of attached nodes for that volume\"> Actual: <no error>")
	}

	exists, existsErr := cache.PodExists(podName, nodeName, volumeName)
	if existsErr == nil {
		t.Fatalf("PodExists did not fail. Expected: <\"failed to check if pod exists...no node with that name exists in the list of attached nodes for that volume\"> Actual: <no error>")
	}
	if exists {
		t.Fatalf("Added pod %q exists, it should not.", podName)
	}
}
// Verifies AddPod fails when neither the volume nor the node has been added,
// and that PodExists also fails in that state.
func Test_AddPod_Negative_NewPodNodeDoesntExistsVolumeDoesntExists(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	podName := "pod-name"

	// Act
	addPodErr := cache.AddPod(podName, nodeName, volumeName)

	// Assert
	if addPodErr == nil {
		t.Fatalf("AddPod did not fail. Expected: <\"failed to add pod...no volume with that name exists in the list of managed volumes\"> Actual: <no error>")
	}

	exists, existsErr := cache.PodExists(podName, nodeName, volumeName)
	if existsErr == nil {
		t.Fatalf("PodExists did not fail. Expected: <\"failed to check if node...no volume with that name exists in the list of managed volumes\"> Actual: <no error>")
	}
	if exists {
		t.Fatalf("Added pod %q exists, it should not.", podName)
	}
}
// Verifies VolumeExists reports false for a volume that was never added.
func Test_VolumeExists_Positive_NonExistantVolume(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	notAddedVolumeName := "volume-not-added-name"

	// Act
	exists := cache.VolumeExists(notAddedVolumeName)

	// Assert
	if exists {
		t.Fatalf("Not added volume %q exists, it should not.", notAddedVolumeName)
	}
}
// Verifies NodeExists returns false without error when the volume exists but
// the queried node was never added to it.
func Test_NodeExists_Positive_NonExistantNodeVolumeExists(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	notAddedNodeName := "node-not-added-name"
	cache.AddVolume(volumeName)

	// Act
	exists, existsErr := cache.NodeExists(notAddedNodeName, volumeName)

	// Assert
	if existsErr != nil {
		t.Fatalf("NodeExists failed. Expected: <no error> Actual: <%v>", existsErr)
	}
	if exists {
		t.Fatalf("Not added node %q exists, it should not.", notAddedNodeName)
	}
}
// Verifies NodeExists returns an error when the volume itself was never added.
func Test_NodeExists_Negative_NonExistantNodeVolumeDoesntExist(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	notAddedNodeName := "node-not-added-name"

	// Act
	exists, existsErr := cache.NodeExists(notAddedNodeName, volumeName)

	// Assert
	if existsErr == nil {
		t.Fatalf("NodeExists did not fail. Expected: <failed to check if node...no volume with that name exists in the list of managed volumes> Actual: <no error>")
	}
	if exists {
		t.Fatalf("Added node %q exists, it should not.", notAddedNodeName)
	}
}
// Verifies PodExists returns false without error when the volume and node
// exist but the queried pod was never added.
func Test_PodExists_Positive_NonExistantPodNodeExistsVolumeExists(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	notAddedPodName := "pod-not-added-name"
	cache.AddVolume(volumeName)
	if addNodeErr := cache.AddNode(nodeName, volumeName); addNodeErr != nil {
		t.Fatalf("AddNode for node %q failed. Expected: <no error> Actual: <%v>", nodeName, addNodeErr)
	}

	// Act
	exists, existsErr := cache.PodExists(notAddedPodName, nodeName, volumeName)

	// Assert
	if existsErr != nil {
		t.Fatalf("PodExists failed. Expected: <no error> Actual: <%v>", existsErr)
	}
	if exists {
		t.Fatalf("Not added pod %q exists, it should not.", notAddedPodName)
	}
}
// Verifies PodExists returns an error when the volume exists but the node was
// never added to it.
func Test_PodExists_Negative_NonExistantPodNodeDoesntExistVolumeExists(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	notAddedPodName := "pod-not-added-name"
	cache.AddVolume(volumeName)

	// Act
	exists, existsErr := cache.PodExists(notAddedPodName, nodeName, volumeName)

	// Assert
	if existsErr == nil {
		t.Fatalf("PodExists did not fail. Expected: <\"failed to check if pod exists...no node with that name exists in the list of attached nodes for that volume\"> Actual: <no error>")
	}
	if exists {
		t.Fatalf("Added pod %q exists, it should not.", notAddedPodName)
	}
}
// Verifies PodExists returns an error when the volume itself was never added.
func Test_PodExists_Negative_NonExistantPodNodeDoesntExistVolumeDoesntExist(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	notAddedPodName := "pod-not-added-name"

	// Act
	exists, existsErr := cache.PodExists(notAddedPodName, nodeName, volumeName)

	// Assert
	if existsErr == nil {
		t.Fatalf("PodExists did not fail. Expected: <\"failed to check if node...no volume with that name exists in the list of managed volumes\"> Actual: <no error>")
	}
	if exists {
		t.Fatalf("Added pod %q exists, it should not.", notAddedPodName)
	}
}
// Verifies DeleteVolume removes a previously added volume.
func Test_DeleteVolume_Positive_VolumeExists(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	cache.AddVolume(volumeName)

	// Act
	deleteErr := cache.DeleteVolume(volumeName)

	// Assert
	if deleteErr != nil {
		t.Fatalf("DeleteVolume failed. Expected: <no error> Actual: <%v>", deleteErr)
	}
	if cache.VolumeExists(volumeName) {
		t.Fatalf("Deleted volume %q still exists, it should not.", volumeName)
	}
}
// Verifies DeleteVolume returns an error for a volume that was never added.
func Test_DeleteVolume_Negative_VolumeDoesntExists(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	notAddedVolumeName := "volume-not-added-name"

	// Act
	deleteErr := cache.DeleteVolume(notAddedVolumeName)

	// Assert
	if deleteErr == nil {
		t.Fatalf("DeleteVolume did not fail. Expected: <\"failed to delete volume...no volume with that name exists in the list of managed volumes\"> Actual: <no error>")
	}
	if cache.VolumeExists(notAddedVolumeName) {
		t.Fatalf("Not added volume %q exists, it should not.", notAddedVolumeName)
	}
}
// Verifies DeleteNode removes a node that was added under an existing volume.
func Test_DeleteNode_Positive_NodeExistsVolumeExists(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	cache.AddVolume(volumeName)
	if addNodeErr := cache.AddNode(nodeName, volumeName); addNodeErr != nil {
		t.Fatalf("AddNode failed. Expected: <no error> Actual: <%v>", addNodeErr)
	}

	// Act
	deleteErr := cache.DeleteNode(nodeName, volumeName)

	// Assert
	if deleteErr != nil {
		t.Fatalf("DeleteNode failed. Expected: <no error> Actual: <%v>", deleteErr)
	}

	exists, existsErr := cache.NodeExists(nodeName, volumeName)
	if existsErr != nil {
		t.Fatalf("NodeExists failed. Expected: <no error> Actual: <%v>", existsErr)
	}
	if exists {
		t.Fatalf("Deleted node %q still exists, it should not.", nodeName)
	}
}
// Verifies DeleteNode fails when the volume exists but the node was never
// added, and that NodeExists still reports the node as absent.
func Test_DeleteNode_Negative_NodeDoesntExistVolumeExists(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	notAddedNodeName := "node-not-added-name"
	cache.AddVolume(volumeName)

	// Act
	deleteErr := cache.DeleteNode(notAddedNodeName, volumeName)

	// Assert
	if deleteErr == nil {
		t.Fatalf("DeleteNode did not fail. Expected: <\"failed to delete node...no node with the that name exists in the list of attached nodes for that volume\"> Actual: <no error>")
	}

	exists, existsErr := cache.NodeExists(notAddedNodeName, volumeName)
	if existsErr != nil {
		t.Fatalf("NodeExists failed. Expected: <no error> Actual: <%v>", existsErr)
	}
	if exists {
		t.Fatalf("Not added node %q exists, it should not.", notAddedNodeName)
	}
}
// Verifies DeleteNode and NodeExists both fail when the volume itself was
// never added to the cache.
func Test_DeleteNode_Negative_NodeDoesntExistVolumeDoesntExist(t *testing.T) {
	// Arrange
	vc := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	notAddedNodeName := "node-not-added-name"

	// Act
	deleteNodeErr := vc.DeleteNode(notAddedNodeName, volumeName)

	// Assert
	if deleteNodeErr == nil {
		t.Fatalf("DeleteNode did not fail. Expected: <\"failed to delete node...no volume with that name exists in the list of managed volumes\"> Actual: <no error>")
	}

	notAddedNodeExists, notAddedNodeExistsErr := vc.NodeExists(notAddedNodeName, volumeName)
	if notAddedNodeExistsErr == nil {
		// Fixed: the original message had `<\failed` (a stray form-feed escape)
		// instead of an escaped opening quote `<\"failed`.
		t.Fatalf("NodeExists did not fail. Expected: <\"failed to check if node...no volume with that name exists in the list of managed volumes\"> Actual: <no error>")
	}
	if notAddedNodeExists {
		t.Fatalf("Not added node %q exists, it should not.", notAddedNodeName)
	}
}
// Verifies DeletePod removes a pod that was added under an existing
// node/volume pair.
func Test_DeletePod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
	// Arrange
	vc := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	podName := "pod-name"
	vc.AddVolume(volumeName)
	nodeErr := vc.AddNode(nodeName, volumeName)
	if nodeErr != nil {
		t.Fatalf("AddNode failed. Expected: <no error> Actual: <%v>", nodeErr)
	}
	podErr := vc.AddPod(podName, nodeName, volumeName)
	if podErr != nil {
		t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podErr)
	}

	// Act
	deletePodErr := vc.DeletePod(podName, nodeName, volumeName)

	// Assert
	if deletePodErr != nil {
		// Fixed: the original passed podName to %v here, hiding the actual error.
		t.Fatalf("DeletePod failed. Expected: <no error> Actual: <%v>", deletePodErr)
	}

	podExists, podExistsErr := vc.PodExists(podName, nodeName, volumeName)
	if podExistsErr != nil {
		t.Fatalf("PodExists failed. Expected: <no error> Actual: <%v>", podExistsErr)
	}
	if podExists {
		t.Fatalf("Deleted pod %q still exists, it should not.", podName)
	}
}
// Verifies DeletePod fails when the node/volume pair exists but the pod was
// never added.
func Test_DeletePod_Positive_PodDoesntExistNodeExistsVolumeExists(t *testing.T) {
	// Arrange
	cache := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	podName := "pod-name"
	cache.AddVolume(volumeName)
	if addNodeErr := cache.AddNode(nodeName, volumeName); addNodeErr != nil {
		t.Fatalf("AddNode failed. Expected: <no error> Actual: <%v>", addNodeErr)
	}

	// Act
	deleteErr := cache.DeletePod(podName, nodeName, volumeName)

	// Assert
	if deleteErr == nil {
		t.Fatalf("DeletePod did not fail. Expected: <\"failed to delete pod...no pod with that name exists in the list of scheduled pods under that node/volume\"> Actual: <no error>")
	}

	exists, existsErr := cache.PodExists(podName, nodeName, volumeName)
	if existsErr != nil {
		t.Fatalf("PodExists failed. Expected: <no error> Actual: <%v>", existsErr)
	}
	if exists {
		t.Fatalf("Deleted pod %q still exists, it should not.", podName)
	}
}
// Verifies DeletePod and PodExists both fail when the volume exists but the
// node was never added to it.
func Test_DeletePod_Positive_PodDoesntExistNodeDoesntExistVolumeExists(t *testing.T) {
	// Arrange
	vc := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	podName := "pod-name"
	vc.AddVolume(volumeName)

	// Act
	deletePodErr := vc.DeletePod(podName, nodeName, volumeName)

	// Assert
	if deletePodErr == nil {
		t.Fatalf("DeletePod did not fail. Expected: <\"failed to delete pod...no node with that name exists in the list of attached nodes for that volume\"> Actual: <no error>")
	}

	podExists, podExistsErr := vc.PodExists(podName, nodeName, volumeName)
	if podExistsErr == nil {
		// Fixed: the original message had `<\failed` (a stray form-feed escape)
		// instead of an escaped opening quote `<\"failed`.
		t.Fatalf("PodExists did not fail. Expected: <\"failed to check if pod...no node with that name exists in the list of attached nodes for that volume\"> Actual: <no error>")
	}
	if podExists {
		t.Fatalf("Deleted pod %q still exists, it should not.", podName)
	}
}
// Verifies DeletePod and PodExists both fail when the volume itself was never
// added to the cache.
func Test_DeletePod_Positive_PodDoesntExistNodeDoesntExistVolumeDoesntExist(t *testing.T) {
	// Arrange
	vc := NewAttachDetachVolumeCache()
	volumeName := "volume-name"
	nodeName := "node-name"
	podName := "pod-name"

	// Act
	deletePodErr := vc.DeletePod(podName, nodeName, volumeName)

	// Assert
	if deletePodErr == nil {
		t.Fatalf("DeletePod did not fail. Expected: <\"failed to delete pod...no volume with that name exists in the list of managed volumes\"> Actual: <no error>")
	}

	podExists, podExistsErr := vc.PodExists(podName, nodeName, volumeName)
	if podExistsErr == nil {
		// Fixed: the original message had `<\failed` (a stray form-feed escape)
		// instead of an escaped opening quote `<\"failed`.
		t.Fatalf("PodExists did not fail. Expected: <\"failed to check if pod...no volume with that name exists in the list of managed volumes\"> Actual: <no error>")
	}
	if podExists {
		t.Fatalf("Deleted pod %q still exists, it should not.", podName)
	}
}
/*
t.Fatalf("%q", notAddedNodeExistsErr)
*/

View File

@ -0,0 +1,301 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the attach/detach controller
to keep track of volumes, the nodes they are attached to, and the pods that
reference them.
*/
package cache
import (
"fmt"
"sync"
"k8s.io/kubernetes/pkg/volume"
)
// DesiredStateOfWorld defines a set of thread-safe operations supported on
// the attach/detach controller's desired state of the world cache.
// This cache contains nodes->volumes->pods where nodes are all the nodes
// managed by the attach/detach controller, volumes are all the volumes that
// should be attached to the specified node, and pods are the pods that
// reference the volume and are scheduled to that node.
type DesiredStateOfWorld interface {
	// AddNode adds the given node to the list of nodes managed by the attach/
	// detach controller.
	// If the node already exists this is a no-op.
	AddNode(nodeName string)

	// AddPod adds the given pod to the list of pods that reference the
	// specified volume and is scheduled to the specified node.
	// A unique volumeName is generated from the volumeSpec and returned on
	// success.
	// If the pod already exists under the specified volume, this is a no-op.
	// If volumeSpec is not an attachable volume plugin, an error is returned.
	// If no volume with the name volumeName exists in the list of volumes that
	// should be attached to the specified node, the volume is implicitly added.
	// If no node with the name nodeName exists in list of nodes managed by the
	// attach/detach controller, an error is returned.
	AddPod(podName string, volumeSpec *volume.Spec, nodeName string) (string, error)

	// DeleteNode removes the given node from the list of nodes managed by the
	// attach/detach controller.
	// If the node does not exist this is a no-op.
	// If the node exists but has 1 or more child volumes, an error is returned.
	DeleteNode(nodeName string) error

	// DeletePod removes the given pod from the list of pods that reference the
	// specified volume and are scheduled to the specified node.
	// If no pod exists in the list of pods that reference the specified volume
	// and are scheduled to the specified node, this is a no-op.
	// If a node with the name nodeName does not exist in the list of nodes
	// managed by the attach/detach controller, this is a no-op.
	// If no volume with the name volumeName exists in the list of managed
	// volumes under the specified node, this is a no-op.
	// If after deleting the pod, the specified volume contains no other child
	// pods, the volume is also deleted.
	DeletePod(podName, volumeName, nodeName string)

	// NodeExists returns true if the node with the specified name exists in
	// the list of nodes managed by the attach/detach controller.
	NodeExists(nodeName string) bool

	// VolumeExists returns true if the volume with the specified name exists
	// in the list of volumes that should be attached to the specified node by
	// the attach detach controller.
	VolumeExists(volumeName, nodeName string) bool

	// GetVolumesToAttach generates and returns a list of volumes to attach
	// and the nodes they should be attached to based on the current desired
	// state of the world.
	GetVolumesToAttach() []VolumeToAttach
}
// VolumeToAttach represents a volume that should be attached to a node.
// It is the externally-visible projection of the internal volumeToAttach
// record, returned by DesiredStateOfWorld.GetVolumesToAttach.
type VolumeToAttach struct {
	// VolumeName is the unique identifier for the volume that should be
	// attached.
	VolumeName string

	// VolumeSpec is a volume spec containing the specification for the volume
	// that should be attached.
	VolumeSpec *volume.Spec

	// NodeName is the identifier for the node that the volume should be
	// attached to.
	NodeName string
}
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld backed
// by an empty node map and the given volume plugin manager.
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld {
	dsw := &desiredStateOfWorld{
		volumePluginMgr: volumePluginMgr,
		nodesManaged:    map[string]nodeManaged{},
	}
	return dsw
}
// desiredStateOfWorld is the default implementation of the DesiredStateOfWorld
// interface.
type desiredStateOfWorld struct {
	// nodesManaged is a map containing the set of nodes managed by the attach/
	// detach controller. The key in this map is the name of the node and the
	// value is a node object containing more information about the node.
	nodesManaged map[string]nodeManaged

	// volumePluginMgr is the volume plugin manager used to create volume
	// plugin objects.
	volumePluginMgr *volume.VolumePluginMgr

	// Embedded RWMutex guarding all reads and writes to nodesManaged; mutating
	// methods take the write lock, query methods take the read lock.
	sync.RWMutex
}
// nodeManaged represents a node that is being managed by the attach/detach
// controller.
type nodeManaged struct {
	// nodeName contains the name of this node.
	nodeName string

	// volumesToAttach is a map containing the set of volumes that should be
	// attached to this node. The key in the map is the name of the volume and
	// the value is a volumeToAttach object containing more information about
	// the volume.
	volumesToAttach map[string]volumeToAttach
}
// volumeToAttach represents a volume that should be attached to a node.
type volumeToAttach struct {
	// volumeName contains the unique identifier for this volume.
	volumeName string

	// spec is the volume spec containing the specification for this volume.
	// Used to generate the volume plugin object, and passed to attach/detach
	// methods.
	spec *volume.Spec

	// scheduledPods is a map containing the set of pods that reference this
	// volume and are scheduled to the underlying node. The key in the map is
	// the name of the pod and the value is a pod object containing more
	// information about the pod.
	scheduledPods map[string]pod
}
// pod represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type pod struct {
	// podName contains the name of this pod.
	podName string
}
// AddNode registers nodeName with the controller; a second call for the same
// name leaves the existing entry (and its volumes) untouched.
func (dsw *desiredStateOfWorld) AddNode(nodeName string) {
	dsw.Lock()
	defer dsw.Unlock()

	// No-op if the node is already tracked.
	if _, exists := dsw.nodesManaged[nodeName]; exists {
		return
	}
	dsw.nodesManaged[nodeName] = nodeManaged{
		nodeName:        nodeName,
		volumesToAttach: map[string]volumeToAttach{},
	}
}
// AddPod records that podName, scheduled to nodeName, references the volume
// described by volumeSpec. The volume is implicitly created under the node if
// missing. Returns the unique volume name generated from the spec, or an
// error if the node is unknown or the spec has no attachable plugin.
func (dsw *desiredStateOfWorld) AddPod(podName string, volumeSpec *volume.Spec, nodeName string) (string, error) {
	dsw.Lock()
	defer dsw.Unlock()

	node, nodeExists := dsw.nodesManaged[nodeName]
	if !nodeExists {
		return "", fmt.Errorf(
			"no node with the name %q exists in the list of managed nodes",
			nodeName)
	}

	attachableVolumePlugin, err := dsw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
	if err != nil || attachableVolumePlugin == nil {
		return "", fmt.Errorf(
			"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
			volumeSpec.Name(),
			err)
	}

	volumeName, err := attachableVolumePlugin.GetUniqueVolumeName(volumeSpec)
	if err != nil {
		return "", fmt.Errorf(
			"failed to GetUniqueVolumeName from AttachablePlugin for volumeSpec %q err=%v",
			volumeSpec.Name(),
			err)
	}

	// Implicitly add the volume under the node if it is not yet tracked.
	vol, volumeExists := node.volumesToAttach[volumeName]
	if !volumeExists {
		vol = volumeToAttach{
			volumeName:    volumeName,
			spec:          volumeSpec,
			scheduledPods: map[string]pod{},
		}
		node.volumesToAttach[volumeName] = vol
	}

	// Adding an already-present pod is a no-op.
	if _, podExists := vol.scheduledPods[podName]; !podExists {
		vol.scheduledPods[podName] = pod{podName: podName}
	}

	return volumeName, nil
}
// DeleteNode removes nodeName from the managed set. Deleting an unknown node
// is a no-op; deleting a node that still has volumes to attach is an error.
func (dsw *desiredStateOfWorld) DeleteNode(nodeName string) error {
	dsw.Lock()
	defer dsw.Unlock()

	node, exists := dsw.nodesManaged[nodeName]
	if !exists {
		return nil
	}

	if volumeCount := len(node.volumesToAttach); volumeCount > 0 {
		return fmt.Errorf(
			"failed to delete node %q from list of nodes managed by attach/detach controller--the node still contains %v volumes in its list of volumes to attach",
			nodeName,
			volumeCount)
	}

	delete(dsw.nodesManaged, nodeName)
	return nil
}
// DeletePod removes podName from the given volume under the given node. Any
// missing node, volume, or pod makes this a no-op. When the last pod is
// removed from a volume, the volume entry itself is removed from the node.
func (dsw *desiredStateOfWorld) DeletePod(podName, volumeName, nodeName string) {
	dsw.Lock()
	defer dsw.Unlock()

	node, nodeExists := dsw.nodesManaged[nodeName]
	if !nodeExists {
		return
	}
	vol, volumeExists := node.volumesToAttach[volumeName]
	if !volumeExists {
		return
	}
	if _, podExists := vol.scheduledPods[podName]; !podExists {
		return
	}

	delete(vol.scheduledPods, podName)

	// Drop the volume once no pods reference it anymore.
	if len(vol.scheduledPods) == 0 {
		delete(node.volumesToAttach, volumeName)
	}
}
// NodeExists reports whether nodeName is currently tracked by the controller.
func (dsw *desiredStateOfWorld) NodeExists(nodeName string) bool {
	dsw.RLock()
	defer dsw.RUnlock()

	_, exists := dsw.nodesManaged[nodeName]
	return exists
}
// VolumeExists reports whether volumeName should be attached to nodeName
// according to the desired state. Unknown nodes yield false.
func (dsw *desiredStateOfWorld) VolumeExists(volumeName, nodeName string) bool {
	dsw.RLock()
	defer dsw.RUnlock()

	node, nodeExists := dsw.nodesManaged[nodeName]
	if !nodeExists {
		return false
	}
	_, volumeExists := node.volumesToAttach[volumeName]
	return volumeExists
}
// GetVolumesToAttach flattens the node->volume map into a slice of
// VolumeToAttach records, one per (node, volume) pair.
func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach {
	dsw.RLock()
	defer dsw.RUnlock()

	result := make([]VolumeToAttach, 0, len(dsw.nodesManaged))
	for nodeName, node := range dsw.nodesManaged {
		for volumeName, vol := range node.volumesToAttach {
			result = append(result, VolumeToAttach{
				NodeName:   nodeName,
				VolumeName: volumeName,
				VolumeSpec: vol.spec,
			})
		}
	}
	return result
}

View File

@ -0,0 +1,974 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"testing"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing"
)
// Verifies AddNode registers a new node and that no volumes are scheduled for
// attachment afterwards.
func Test_AddNode_Positive_NewNode(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	world := NewDesiredStateOfWorld(volumePluginMgr)
	nodeName := "node-name"

	// Act
	world.AddNode(nodeName)

	// Assert
	if !world.NodeExists(nodeName) {
		t.Fatalf("Added node %q does not exist, it should.", nodeName)
	}

	volumesToAttach := world.GetVolumesToAttach()
	if len(volumesToAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach))
	}
}
// Verifies AddNode is a no-op for a node that was already added.
// NOTE(review): despite the name, this test exercises an existing *node*, not
// an existing volume — presumably a copy/paste of the name; verify intent.
func Test_AddNode_Positive_ExistingVolume(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	world := NewDesiredStateOfWorld(volumePluginMgr)
	nodeName := "node-name"
	world.AddNode(nodeName)

	// Act
	world.AddNode(nodeName)

	// Assert
	if !world.NodeExists(nodeName) {
		t.Fatalf("Added node %q does not exist, it should.", nodeName)
	}

	volumesToAttach := world.GetVolumesToAttach()
	if len(volumesToAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach))
	}
}
// Verifies calling AddNode twice for the same node keeps the node present and
// schedules no volumes for attachment.
func Test_AddNode_Positive_ExistingNode(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	world := NewDesiredStateOfWorld(volumePluginMgr)
	nodeName := "node-name"

	// Act
	world.AddNode(nodeName)

	// Assert
	if !world.NodeExists(nodeName) {
		t.Fatalf("Added node %q does not exist, it should.", nodeName)
	}

	// Act
	world.AddNode(nodeName)

	// Assert
	if !world.NodeExists(nodeName) {
		t.Fatalf("Added node %q does not exist, it should.", nodeName)
	}

	volumesToAttach := world.GetVolumesToAttach()
	if len(volumesToAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach))
	}
}
// Verifies AddPod implicitly creates the volume under an existing node and
// that the volume then shows up in GetVolumesToAttach.
func Test_AddPod_Positive_NewPodNodeExistsVolumeDoesntExist(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	world := NewDesiredStateOfWorld(volumePluginMgr)
	podName := "pod-name"
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	world.AddNode(nodeName)
	if world.VolumeExists(volumeName, nodeName) {
		t.Fatalf(
			"Volume %q/node %q should not exist, but it does.",
			volumeName,
			nodeName)
	}

	// Act
	generatedVolumeName, addPodErr := world.AddPod(podName, volumeSpec, nodeName)

	// Assert
	if addPodErr != nil {
		t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", addPodErr)
	}
	if !world.VolumeExists(generatedVolumeName, nodeName) {
		t.Fatalf(
			"Added pod %q to volume %q/node %q. Volume does not exist, it should.",
			podName,
			generatedVolumeName,
			nodeName)
	}

	volumesToAttach := world.GetVolumesToAttach()
	if len(volumesToAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach))
	}
	verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, volumeName)
}
// Verifies that adding a second pod referencing the same volume on the same
// node reuses the existing volume entry (still exactly one volume to attach).
func Test_AddPod_Positive_NewPodNodeExistsVolumeExists(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	world := NewDesiredStateOfWorld(volumePluginMgr)
	pod1Name := "pod1-name"
	pod2Name := "pod2-name"
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	world.AddNode(nodeName)
	if world.VolumeExists(volumeName, nodeName) {
		t.Fatalf(
			"Volume %q/node %q should not exist, but it does.",
			volumeName,
			nodeName)
	}

	// Act
	generatedVolumeName, addPodErr := world.AddPod(pod1Name, volumeSpec, nodeName)

	// Assert
	if addPodErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod1Name,
			addPodErr)
	}
	if !world.VolumeExists(generatedVolumeName, nodeName) {
		t.Fatalf(
			"Added pod %q to volume %q/node %q. Volume does not exist, it should.",
			pod1Name,
			generatedVolumeName,
			nodeName)
	}

	// Act
	generatedVolumeName, addPodErr = world.AddPod(pod2Name, volumeSpec, nodeName)

	// Assert
	if addPodErr != nil {
		t.Fatalf("AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod2Name,
			addPodErr)
	}
	if !world.VolumeExists(generatedVolumeName, nodeName) {
		t.Fatalf(
			"Added pod %q to volume %q/node %q. Volume does not exist, it should.",
			pod1Name,
			generatedVolumeName,
			nodeName)
	}

	volumesToAttach := world.GetVolumesToAttach()
	if len(volumesToAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach))
	}
	verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, volumeName)
}
// Verifies that re-adding the same pod to the same volume/node pair is a
// no-op: still one volume to attach, no error.
func Test_AddPod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	world := NewDesiredStateOfWorld(volumePluginMgr)
	podName := "pod-name"
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	world.AddNode(nodeName)
	if world.VolumeExists(volumeName, nodeName) {
		t.Fatalf(
			"Volume %q/node %q should not exist, but it does.",
			volumeName,
			nodeName)
	}

	// Act
	generatedVolumeName, addPodErr := world.AddPod(podName, volumeSpec, nodeName)

	// Assert
	if addPodErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			podName,
			addPodErr)
	}
	if !world.VolumeExists(generatedVolumeName, nodeName) {
		t.Fatalf(
			"Added pod %q to volume %q/node %q. Volume does not exist, it should.",
			podName,
			generatedVolumeName,
			nodeName)
	}

	// Act
	generatedVolumeName, addPodErr = world.AddPod(podName, volumeSpec, nodeName)

	// Assert
	if addPodErr != nil {
		t.Fatalf("AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			podName,
			addPodErr)
	}
	if !world.VolumeExists(generatedVolumeName, nodeName) {
		t.Fatalf(
			"Added pod %q to volume %q/node %q. Volume does not exist, it should.",
			podName,
			generatedVolumeName,
			nodeName)
	}

	volumesToAttach := world.GetVolumesToAttach()
	if len(volumesToAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach))
	}
	verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, volumeName)
}
// Verifies AddPod fails when the target node was never added, leaving the
// desired state empty.
func Test_AddPod_Negative_NewPodNodeDoesntExistVolumeDoesntExist(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	world := NewDesiredStateOfWorld(volumePluginMgr)
	podName := "pod-name"
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	if world.VolumeExists(volumeName, nodeName) {
		t.Fatalf(
			"Volume %q/node %q should not exist, but it does.",
			volumeName,
			nodeName)
	}

	// Act
	_, addPodErr := world.AddPod(podName, volumeSpec, nodeName)

	// Assert
	if addPodErr == nil {
		t.Fatalf("AddPod did not fail. Expected: <\"failed to add pod...no node with that name exists in the list of managed nodes\"> Actual: <no error>")
	}
	if world.VolumeExists(volumeName, nodeName) {
		t.Fatalf(
			"Volume %q/node %q should not exist, but it does.",
			volumeName,
			nodeName)
	}

	volumesToAttach := world.GetVolumesToAttach()
	if len(volumesToAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach))
	}
}
// Verifies DeleteNode removes a node that has no child volumes.
func Test_DeleteNode_Positive_NodeExists(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	world := NewDesiredStateOfWorld(volumePluginMgr)
	nodeName := "node-name"
	world.AddNode(nodeName)

	// Act
	deleteErr := world.DeleteNode(nodeName)

	// Assert
	if deleteErr != nil {
		t.Fatalf("DeleteNode failed. Expected: <no error> Actual: <%v>", deleteErr)
	}
	if world.NodeExists(nodeName) {
		t.Fatalf("Deleted node %q still exists, it should not.", nodeName)
	}

	volumesToAttach := world.GetVolumesToAttach()
	if len(volumesToAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach))
	}
}
// Verifies DeleteNode is a no-op (no error) for a node that was never added.
func Test_DeleteNode_Positive_NodeDoesntExist(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	world := NewDesiredStateOfWorld(volumePluginMgr)
	notAddedNodeName := "node-not-added-name"

	// Act
	deleteErr := world.DeleteNode(notAddedNodeName)

	// Assert
	if deleteErr != nil {
		t.Fatalf("DeleteNode failed. Expected: <no error> Actual: <%v>", deleteErr)
	}
	if world.NodeExists(notAddedNodeName) {
		t.Fatalf("Deleted node %q still exists, it should not.", notAddedNodeName)
	}

	volumesToAttach := world.GetVolumesToAttach()
	if len(volumesToAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach))
	}
}
// Verifies DeleteNode refuses to remove a node that still has a volume to
// attach; the node and its volume remain intact.
func Test_DeleteNode_Negative_NodeExistsHasChildVolumes(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	world := NewDesiredStateOfWorld(volumePluginMgr)
	nodeName := "node-name"
	world.AddNode(nodeName)
	podName := "pod-name"
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	generatedVolumeName, addPodErr := world.AddPod(podName, volumeSpec, nodeName)
	if addPodErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			podName,
			addPodErr)
	}

	// Act
	deleteErr := world.DeleteNode(nodeName)

	// Assert
	if deleteErr == nil {
		t.Fatalf("DeleteNode did not fail. Expected: <\"\"> Actual: <no error>")
	}
	if !world.NodeExists(nodeName) {
		t.Fatalf("Node %q no longer exists, it should.", nodeName)
	}

	volumesToAttach := world.GetVolumesToAttach()
	if len(volumesToAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach))
	}
	verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, volumeName)
}
// Test_DeletePod_Positive_PodExistsNodeExistsVolumeExists verifies that
// deleting the only pod referencing a volume also removes the volume from the
// desired state of the world.
func Test_DeletePod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
	// Arrange: one node, one pod, one volume.
	pluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(pluginMgr)
	pod := "pod-name"
	volume := "volume-name"
	spec := controllervolumetesting.GetTestVolumeSpec(volume, volume)
	node := "node-name"
	dsw.AddNode(node)
	generatedName, addErr := dsw.AddPod(pod, spec, node)
	if addErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod,
			addErr)
	}
	if !dsw.VolumeExists(generatedName, node) {
		t.Fatalf(
			"Added pod %q to volume %q/node %q. Volume does not exist, it should.",
			pod,
			generatedName,
			node)
	}

	// Act
	dsw.DeletePod(pod, generatedName, node)

	// Assert
	if dsw.VolumeExists(generatedName, node) {
		t.Fatalf(
			"Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
			pod,
			generatedName,
			node)
	}
	if toAttach := dsw.GetVolumesToAttach(); len(toAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(toAttach))
	}
}
// Test_DeletePod_Positive_2PodsExistNodeExistsVolumesExist verifies that when
// two pods reference the same volume, deleting one of them leaves the volume
// in the desired state of the world.
func Test_DeletePod_Positive_2PodsExistNodeExistsVolumesExist(t *testing.T) {
	// Arrange: two pods sharing one volume on a single node.
	pluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(pluginMgr)
	firstPod := "pod1-name"
	secondPod := "pod2-name"
	volume := "volume-name"
	spec := controllervolumetesting.GetTestVolumeSpec(volume, volume)
	node := "node-name"
	dsw.AddNode(node)
	generatedName1, addErr1 := dsw.AddPod(firstPod, spec, node)
	if addErr1 != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			firstPod,
			addErr1)
	}
	generatedName2, addErr2 := dsw.AddPod(secondPod, spec, node)
	if addErr2 != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			secondPod,
			addErr2)
	}
	if generatedName1 != generatedName2 {
		t.Fatalf(
			"Generated volume names for the same volume should be the same but they are not: %q and %q",
			generatedName1,
			generatedName2)
	}
	if !dsw.VolumeExists(generatedName1, node) {
		t.Fatalf(
			"Volume %q does not exist under node %q, it should.",
			generatedName1,
			node)
	}

	// Act: delete only the first pod.
	dsw.DeletePod(firstPod, generatedName1, node)

	// Assert: the volume survives because the second pod still references it.
	if !dsw.VolumeExists(generatedName1, node) {
		t.Fatalf(
			"Volume %q under node %q should still exist, but it does not.",
			generatedName1,
			node)
	}
	toAttach := dsw.GetVolumesToAttach()
	if len(toAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(toAttach))
	}
	verifyVolumeToAttach(t, toAttach, node, generatedName1, volume)
}
// Test_DeletePod_Positive_PodDoesNotExist verifies that deleting a pod that
// never referenced a volume does not remove the volume added by another pod.
func Test_DeletePod_Positive_PodDoesNotExist(t *testing.T) {
	// Arrange: pod1 references the volume; pod2 does not exist in the dsw.
	pluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(pluginMgr)
	existingPod := "pod1-name"
	absentPod := "pod2-name"
	volume := "volume-name"
	spec := controllervolumetesting.GetTestVolumeSpec(volume, volume)
	node := "node-name"
	dsw.AddNode(node)
	generatedName, addErr := dsw.AddPod(existingPod, spec, node)
	if addErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			existingPod,
			addErr)
	}
	if !dsw.VolumeExists(generatedName, node) {
		t.Fatalf(
			"Added pod %q to volume %q/node %q. Volume does not exist, it should.",
			existingPod,
			generatedName,
			node)
	}

	// Act: attempt to delete the never-added pod.
	dsw.DeletePod(absentPod, generatedName, node)

	// Assert: the volume is untouched.
	if !dsw.VolumeExists(generatedName, node) {
		t.Fatalf(
			"Volume %q/node %q does not exist, it should.",
			generatedName,
			node)
	}
	toAttach := dsw.GetVolumesToAttach()
	if len(toAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(toAttach))
	}
	verifyVolumeToAttach(t, toAttach, node, generatedName, volume)
}
// Test_DeletePod_Positive_NodeDoesNotExist verifies that deleting a pod while
// naming a node that was never added leaves the volume attached to the real
// node untouched.
func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) {
	// Arrange: pod/volume live on node1; node2 is never added.
	pluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(pluginMgr)
	pod := "pod-name"
	volume := "volume-name"
	spec := controllervolumetesting.GetTestVolumeSpec(volume, volume)
	realNode := "node1-name"
	dsw.AddNode(realNode)
	generatedName, addErr := dsw.AddPod(pod, spec, realNode)
	if addErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod,
			addErr)
	}
	if !dsw.VolumeExists(generatedName, realNode) {
		t.Fatalf(
			"Added pod %q to volume %q/node %q. Volume does not exist, it should.",
			pod,
			generatedName,
			realNode)
	}
	phantomNode := "node2-name"

	// Act: delete the pod but reference the nonexistent node.
	dsw.DeletePod(pod, generatedName, phantomNode)

	// Assert
	if !dsw.VolumeExists(generatedName, realNode) {
		t.Fatalf(
			"Volume %q/node %q does not exist, it should.",
			generatedName,
			realNode)
	}
	if dsw.VolumeExists(generatedName, phantomNode) {
		t.Fatalf(
			"node %q exists, it should not.",
			phantomNode)
	}
	toAttach := dsw.GetVolumesToAttach()
	if len(toAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(toAttach))
	}
	verifyVolumeToAttach(t, toAttach, realNode, generatedName, volume)
}
// Test_DeletePod_Positive_VolumeDoesNotExist verifies that deleting a pod
// while naming a volume that was never added leaves the pod's real volume
// untouched.
func Test_DeletePod_Positive_VolumeDoesNotExist(t *testing.T) {
	// Arrange: the pod references volume1; volume2 is never added.
	pluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(pluginMgr)
	pod := "pod-name"
	realVolume := "volume1-name"
	realVolumeSpec := controllervolumetesting.GetTestVolumeSpec(realVolume, realVolume)
	node := "node-name"
	dsw.AddNode(node)
	generatedName, addErr := dsw.AddPod(pod, realVolumeSpec, node)
	if addErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod,
			addErr)
	}
	if !dsw.VolumeExists(generatedName, node) {
		t.Fatalf(
			"Added pod %q to volume %q/node %q. Volume does not exist, it should.",
			pod,
			generatedName,
			node)
	}
	phantomVolume := "volume2-name"

	// Act: delete the pod but reference the nonexistent volume.
	dsw.DeletePod(pod, phantomVolume, node)

	// Assert
	if !dsw.VolumeExists(generatedName, node) {
		t.Fatalf(
			"Volume %q/node %q does not exist, it should.",
			generatedName,
			node)
	}
	if dsw.VolumeExists(phantomVolume, node) {
		t.Fatalf(
			"volume %q exists, it should not.",
			phantomVolume)
	}
	toAttach := dsw.GetVolumesToAttach()
	if len(toAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(toAttach))
	}
	verifyVolumeToAttach(t, toAttach, node, generatedName, realVolume)
}
// Test_NodeExists_Positive_NodeExists verifies that NodeExists reports true
// for a node that was added to the desired state of the world.
//
// NOTE: the two test names below were previously swapped relative to their
// bodies (the "NodeExists" test exercised a never-added node and vice versa);
// they are renamed here so each name matches the scenario it tests. The test
// bodies are unchanged.
func Test_NodeExists_Positive_NodeExists(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	nodeName := "node-name"
	dsw.AddNode(nodeName)

	// Act
	nodeExists := dsw.NodeExists(nodeName)

	// Assert
	if !nodeExists {
		t.Fatalf("Node %q does not exist, it should.", nodeName)
	}
	volumesToAttach := dsw.GetVolumesToAttach()
	if len(volumesToAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach))
	}
}

// Test_NodeExists_Positive_NodeDoesntExist verifies that NodeExists reports
// false for a node that was never added.
func Test_NodeExists_Positive_NodeDoesntExist(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	notAddedNodeName := "node-not-added-name"

	// Act
	notAddedNodeExists := dsw.NodeExists(notAddedNodeName)

	// Assert
	if notAddedNodeExists {
		t.Fatalf("Node %q exists, it should not.", notAddedNodeName)
	}
	volumesToAttach := dsw.GetVolumesToAttach()
	if len(volumesToAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach))
	}
}
// Test_VolumeExists_Positive_VolumeExistsNodeExists verifies that
// VolumeExists reports true for a volume added (via AddPod) to an existing
// node.
func Test_VolumeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	nodeName := "node-name"
	dsw.AddNode(nodeName)
	podName := "pod-name"
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	// Check the AddPod error instead of discarding it (consistent with the
	// sibling tests); a failed AddPod would otherwise surface as a confusing
	// VolumeExists failure below.
	generatedVolumeName, podAddErr := dsw.AddPod(podName, volumeSpec, nodeName)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			podName,
			podAddErr)
	}

	// Act
	volumeExists := dsw.VolumeExists(generatedVolumeName, nodeName)

	// Assert
	if !volumeExists {
		t.Fatalf("Volume %q does not exist, it should.", generatedVolumeName)
	}
	volumesToAttach := dsw.GetVolumesToAttach()
	if len(volumesToAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach))
	}
	verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, volumeName)
}
// Test_VolumeExists_Positive_VolumeDoesntExistNodeExists verifies that
// VolumeExists reports false for a volume never added to an existing node,
// while a different volume on that node is unaffected.
func Test_VolumeExists_Positive_VolumeDoesntExistNodeExists(t *testing.T) {
	// Arrange: node carries volume1; volume2 is never added.
	pluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(pluginMgr)
	node := "node-name"
	dsw.AddNode(node)
	pod := "pod-name"
	addedVolume := "volume1-name"
	addedVolumeSpec := controllervolumetesting.GetTestVolumeSpec(addedVolume, addedVolume)
	generatedName, addErr := dsw.AddPod(pod, addedVolumeSpec, node)
	if addErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod,
			addErr)
	}
	missingVolume := "volume2-name"

	// Act
	missingVolumeExists := dsw.VolumeExists(missingVolume, node)

	// Assert
	if missingVolumeExists {
		t.Fatalf("Volume %q exists, it should not.", missingVolume)
	}
	toAttach := dsw.GetVolumesToAttach()
	if len(toAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(toAttach))
	}
	verifyVolumeToAttach(t, toAttach, node, generatedName, addedVolume)
}
// Test_VolumeExists_Positive_VolumeDoesntExistNodeDoesntExists verifies that
// VolumeExists reports false when neither the volume nor the node was added.
func Test_VolumeExists_Positive_VolumeDoesntExistNodeDoesntExists(t *testing.T) {
	// Arrange: completely empty desired state of the world.
	pluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(pluginMgr)
	node := "node-name"
	volume := "volume-name"

	// Act
	exists := dsw.VolumeExists(volume, node)

	// Assert
	if exists {
		t.Fatalf("Volume %q exists, it should not.", volume)
	}
	if toAttach := dsw.GetVolumesToAttach(); len(toAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(toAttach))
	}
}
// Test_GetVolumesToAttach_Positive_NoNodes verifies that an empty desired
// state of the world yields no volumes to attach.
func Test_GetVolumesToAttach_Positive_NoNodes(t *testing.T) {
	// Arrange
	pluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(pluginMgr)

	// Act
	toAttach := dsw.GetVolumesToAttach()

	// Assert
	if len(toAttach) > 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(toAttach))
	}
}
// Test_GetVolumesToAttach_Positive_TwoNodes verifies that nodes without any
// pods contribute no volumes to attach.
func Test_GetVolumesToAttach_Positive_TwoNodes(t *testing.T) {
	// Arrange: two nodes, no pods.
	pluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(pluginMgr)
	for _, node := range []string{"node1-name", "node2-name"} {
		dsw.AddNode(node)
	}

	// Act
	toAttach := dsw.GetVolumesToAttach()

	// Assert
	if len(toAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(toAttach))
	}
}
// Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach verifies that two
// nodes with one pod/volume each produce exactly two volumes to attach.
func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
	// Arrange: node1/pod1/volume1 and node2/pod2/volume2.
	pluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(pluginMgr)
	firstNode := "node1-name"
	firstPod := "pod1-name"
	firstVolume := "volume1-name"
	firstSpec := controllervolumetesting.GetTestVolumeSpec(firstVolume, firstVolume)
	dsw.AddNode(firstNode)
	generatedName1, addErr := dsw.AddPod(firstPod, firstSpec, firstNode)
	if addErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			firstPod,
			addErr)
	}
	secondNode := "node2-name"
	secondPod := "pod2-name"
	secondVolume := "volume2-name"
	secondSpec := controllervolumetesting.GetTestVolumeSpec(secondVolume, secondVolume)
	dsw.AddNode(secondNode)
	generatedName2, addErr2 := dsw.AddPod(secondPod, secondSpec, secondNode)
	if addErr2 != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			secondPod,
			addErr2)
	}

	// Act
	toAttach := dsw.GetVolumesToAttach()

	// Assert
	if len(toAttach) != 2 {
		t.Fatalf("len(volumesToAttach) Expected: <2> Actual: <%v>", len(toAttach))
	}
	verifyVolumeToAttach(t, toAttach, firstNode, generatedName1, firstVolume)
	verifyVolumeToAttach(t, toAttach, secondNode, generatedName2, secondVolume)
}
// Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod verifies
// that a second pod referencing an already-added volume does not create a
// duplicate entry in the volumes-to-attach list.
func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	node1Name := "node1-name"
	pod1Name := "pod1-name"
	volume1Name := "volume1-name"
	volume1Spec := controllervolumetesting.GetTestVolumeSpec(volume1Name, volume1Name)
	dsw.AddNode(node1Name)
	generatedVolume1Name, podAddErr := dsw.AddPod(pod1Name, volume1Spec, node1Name)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod1Name,
			podAddErr)
	}
	node2Name := "node2-name"
	pod2Name := "pod2-name"
	volume2Name := "volume2-name"
	volume2Spec := controllervolumetesting.GetTestVolumeSpec(volume2Name, volume2Name)
	dsw.AddNode(node2Name)
	generatedVolume2Name, podAddErr := dsw.AddPod(pod2Name, volume2Spec, node2Name)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod2Name,
			podAddErr)
	}
	// Add a third pod referencing volume2. (The previous version called
	// AddPod twice here — once with the result discarded — which was
	// redundant; a single checked call exercises the same path.)
	pod3Name := "pod3-name"
	_, podAddErr = dsw.AddPod(pod3Name, volume2Spec, node2Name)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod3Name,
			podAddErr)
	}

	// Act
	volumesToAttach := dsw.GetVolumesToAttach()

	// Assert: still only two distinct volume/node pairs.
	if len(volumesToAttach) != 2 {
		t.Fatalf("len(volumesToAttach) Expected: <2> Actual: <%v>", len(volumesToAttach))
	}
	verifyVolumeToAttach(t, volumesToAttach, node1Name, generatedVolume1Name, volume1Name)
	verifyVolumeToAttach(t, volumesToAttach, node2Name, generatedVolume2Name, volume2Name)
}
// Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes verifies the
// volumes-to-attach list across two nodes: volume1 and volume3 on node1,
// volume2 on node2 shared by two pods — three entries in total.
func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
	// Arrange
	pluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(pluginMgr)
	firstNode := "node1-name"
	firstPod := "pod1-name"
	firstVolume := "volume1-name"
	firstSpec := controllervolumetesting.GetTestVolumeSpec(firstVolume, firstVolume)
	dsw.AddNode(firstNode)
	generatedName1, addErr := dsw.AddPod(firstPod, firstSpec, firstNode)
	if addErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			firstPod,
			addErr)
	}
	secondNode := "node2-name"
	podTwoA := "pod2a-name"
	secondVolume := "volume2-name"
	secondSpec := controllervolumetesting.GetTestVolumeSpec(secondVolume, secondVolume)
	dsw.AddNode(secondNode)
	generatedName2a, addErr2a := dsw.AddPod(podTwoA, secondSpec, secondNode)
	if addErr2a != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			podTwoA,
			addErr2a)
	}
	podTwoB := "pod2b-name"
	generatedName2b, addErr2b := dsw.AddPod(podTwoB, secondSpec, secondNode)
	if addErr2b != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			podTwoB,
			addErr2b)
	}
	if generatedName2a != generatedName2b {
		t.Fatalf(
			"Generated volume names for the same volume should be the same but they are not: %q and %q",
			generatedName2a,
			generatedName2b)
	}
	thirdPod := "pod3-name"
	thirdVolume := "volume3-name"
	thirdSpec := controllervolumetesting.GetTestVolumeSpec(thirdVolume, thirdVolume)
	generatedName3, addErr3 := dsw.AddPod(thirdPod, thirdSpec, firstNode)
	if addErr3 != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			thirdPod,
			addErr3)
	}

	// Act
	toAttach := dsw.GetVolumesToAttach()

	// Assert
	if len(toAttach) != 3 {
		t.Fatalf("len(volumesToAttach) Expected: <3> Actual: <%v>", len(toAttach))
	}
	verifyVolumeToAttach(t, toAttach, firstNode, generatedName1, firstVolume)
	verifyVolumeToAttach(t, toAttach, secondNode, generatedName2a, secondVolume)
	verifyVolumeToAttach(t, toAttach, firstNode, generatedName3, thirdVolume)
}
// verifyVolumeToAttach fails the test unless volumesToAttach contains an
// entry whose node name, volume name, and volume spec name all match the
// expected values.
func verifyVolumeToAttach(
	t *testing.T,
	volumesToAttach []VolumeToAttach,
	expectedNodeName,
	expectedVolumeName,
	expectedVolumeSpecName string) {
	for _, candidate := range volumesToAttach {
		nodeMatches := candidate.NodeName == expectedNodeName
		nameMatches := candidate.VolumeName == expectedVolumeName
		specMatches := candidate.VolumeSpec.Name() == expectedVolumeSpecName
		if nodeMatches && nameMatches && specMatches {
			return
		}
	}
	t.Fatalf("volumesToAttach (%v) should contain %q/%q. It does not.", volumesToAttach, expectedVolumeName, expectedNodeName)
}

View File

@ -0,0 +1,118 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package reconciler implements interfaces that attempt to reconcile the
// desired state of the world with the actual state of the world by triggering
// actions.
package reconciler
import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/controller/volume/attacherdetacher"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/util/wait"
)
// Reconciler runs a periodic loop to reconcile the desired state of the world
// with the actual state of the world by triggering attach detach operations.
type Reconciler interface {
	// Run starts the reconciliation loop, which executes periodically, checks
	// if volumes that should be attached are attached and volumes that should
	// be detached are detached. If not, it will trigger attach/detach
	// operations to rectify. It blocks until stopCh is closed.
	Run(stopCh <-chan struct{})
}
// NewReconciler returns a new instance of Reconciler that waits loopPeriod
// between successive executions.
// loopPeriod is the amount of time the reconciler loop waits between
// successive executions.
// maxSafeToDetachDuration is the max amount of time the reconciler will wait
// for a volume to be marked safe to detach; after this it will detach the
// volume anyway, assuming the node is unavailable. If during this time the
// volume becomes used by a new pod, the detach request will be aborted and
// the timer cleared.
func NewReconciler(
	loopPeriod time.Duration,
	maxSafeToDetachDuration time.Duration,
	desiredStateOfWorld cache.DesiredStateOfWorld,
	actualStateOfWorld cache.ActualStateOfWorld,
	attacherDetacher attacherdetacher.AttacherDetacher) Reconciler {
	return &reconciler{
		loopPeriod:              loopPeriod,
		maxSafeToDetachDuration: maxSafeToDetachDuration,
		desiredStateOfWorld:     desiredStateOfWorld,
		actualStateOfWorld:      actualStateOfWorld,
		attacherDetacher:        attacherDetacher,
	}
}
// reconciler is the default implementation of the Reconciler interface.
type reconciler struct {
	// loopPeriod is how long the loop waits between successive executions.
	loopPeriod time.Duration
	// maxSafeToDetachDuration is the longest the reconciler waits for a
	// volume to become safe to detach before detaching it anyway.
	maxSafeToDetachDuration time.Duration
	// desiredStateOfWorld lists the volumes that should be attached.
	desiredStateOfWorld cache.DesiredStateOfWorld
	// actualStateOfWorld lists the volumes currently recorded as attached.
	actualStateOfWorld cache.ActualStateOfWorld
	// attacherDetacher executes the attach and detach operations.
	attacherDetacher attacherdetacher.AttacherDetacher
}
// Run executes the reconciliation loop every loopPeriod until stopCh is
// closed.
func (rc *reconciler) Run(stopCh <-chan struct{}) {
	wait.Until(rc.reconciliationLoopFunc(), rc.loopPeriod, stopCh)
}
// reconciliationLoopFunc returns the closure executed on each reconciler
// iteration: it attaches volumes that are in the desired state but not the
// actual state, and detaches volumes that are in the actual state but not the
// desired state.
func (rc *reconciler) reconciliationLoopFunc() func() {
	return func() {
		// Ensure volumes that should be attached are attached.
		for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
			if rc.actualStateOfWorld.VolumeNodeExists(
				volumeToAttach.VolumeName, volumeToAttach.NodeName) {
				// Volume/Node exists, touch it to reset "safe to detach"
				glog.V(12).Infof("Volume %q/Node %q is attached--touching.", volumeToAttach.VolumeName, volumeToAttach.NodeName)
				_, err := rc.actualStateOfWorld.AddVolumeNode(
					volumeToAttach.VolumeSpec, volumeToAttach.NodeName)
				if err != nil {
					glog.Errorf("Unexpected error on actualStateOfWorld.AddVolumeNode(): %v", err)
				}
			} else {
				// Volume/Node doesn't exist, spawn a goroutine to attach it.
				// NOTE(review): &volumeToAttach takes the address of the loop
				// variable; this is only safe if AttachVolume copies the value
				// before this loop advances — confirm against AttachVolume.
				glog.V(5).Infof("Triggering AttachVolume for volume %q to node %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
				rc.attacherDetacher.AttachVolume(&volumeToAttach, rc.actualStateOfWorld)
			}
		}
		// Ensure volumes that should be detached are detached.
		for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() {
			if !rc.desiredStateOfWorld.VolumeExists(
				attachedVolume.VolumeName, attachedVolume.NodeName) {
				// Volume exists in actual state of world but not desired.
				if attachedVolume.SafeToDetach {
					glog.V(5).Infof("Triggering DetachVolume for volume %q to node %q", attachedVolume.VolumeName, attachedVolume.NodeName)
					rc.attacherDetacher.DetachVolume(&attachedVolume, rc.actualStateOfWorld)
				} else {
					// If volume is not safe to detach, wait at most
					// maxSafeToDetachDuration before detaching anyway.
					timeElapsed, err := rc.actualStateOfWorld.MarkDesireToDetach(attachedVolume.VolumeName, attachedVolume.NodeName)
					if err != nil {
						glog.Errorf("Unexpected error actualStateOfWorld.MarkDesireToDetach(): %v", err)
					}
					if timeElapsed > rc.maxSafeToDetachDuration {
						glog.V(5).Infof("Triggering DetachVolume for volume %q to node %q. Volume is not safe to detach, but max wait time expired.", attachedVolume.VolumeName, attachedVolume.NodeName)
						rc.attacherDetacher.DetachVolume(&attachedVolume, rc.actualStateOfWorld)
					}
				}
			}
		}
	}
}

View File

@ -0,0 +1,365 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconciler
import (
"testing"
"time"
"k8s.io/kubernetes/pkg/controller/volume/attacherdetacher"
"k8s.io/kubernetes/pkg/controller/volume/cache"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing"
"k8s.io/kubernetes/pkg/util/wait"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
)
const (
	// reconcilerLoopPeriod is the wait between reconciler loop executions;
	// zero makes the test loop run back-to-back.
	reconcilerLoopPeriod time.Duration = 0 * time.Millisecond
	// maxSafeToDetachDuration is how long the reconciler under test waits
	// before detaching a volume that was never marked safe to detach.
	maxSafeToDetachDuration time.Duration = 50 * time.Millisecond
)
// Test_Run_Positive_DoNothing verifies that a reconciler with empty desired
// and actual states triggers no attach or detach operations.
func Test_Run_Positive_DoNothing(t *testing.T) {
	// Arrange: empty desired and actual states of the world.
	pluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := cache.NewDesiredStateOfWorld(pluginMgr)
	asw := cache.NewActualStateOfWorld(pluginMgr)
	ad := attacherdetacher.NewAttacherDetacher(pluginMgr)
	rc := NewReconciler(
		reconcilerLoopPeriod, maxSafeToDetachDuration, dsw, asw, ad)

	// Act
	go rc.Run(wait.NeverStop)

	// Assert: no attacher or detacher activity of any kind.
	waitForNewAttacherCallCount(t, 0 /* expectedCallCount */, fakePlugin)
	verifyNewAttacherCallCount(t, true /* expectZeroNewAttacherCallCount */, fakePlugin)
	verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
	waitForAttachCallCount(t, 0 /* expectedAttachCallCount */, fakePlugin)
	waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
}
// Test_Run_Positive_OneDesiredVolumeAttach verifies that a single desired
// volume causes exactly one attach and no detach.
func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
	// Arrange: one pod/volume on one node in the desired state.
	pluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr(t)
	dsw := cache.NewDesiredStateOfWorld(pluginMgr)
	asw := cache.NewActualStateOfWorld(pluginMgr)
	ad := attacherdetacher.NewAttacherDetacher(pluginMgr)
	rc := NewReconciler(
		reconcilerLoopPeriod, maxSafeToDetachDuration, dsw, asw, ad)
	pod := "pod-name"
	volume := "volume-name"
	spec := controllervolumetesting.GetTestVolumeSpec(volume, volume)
	node := "node-name"
	dsw.AddNode(node)
	if dsw.VolumeExists(volume, node) {
		t.Fatalf(
			"Volume %q/node %q should not exist, but it does.",
			volume,
			node)
	}
	if _, podErr := dsw.AddPod(pod, spec, node); podErr != nil {
		t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podErr)
	}

	// Act
	go rc.Run(wait.NeverStop)

	// Assert: one attacher created, one attach call, zero detachers.
	waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
	waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
	verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
}
// Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMarkVolume verifies
// that a desired volume is attached, and that after the pod is deleted and
// the volume is explicitly marked safe to detach, the reconciler detaches it.
// The ordering of the wait/verify calls below is timing-sensitive; do not
// reorder them.
func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMarkVolume(t *testing.T) {
	// Arrange
	volumePluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr((t))
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
	asw := cache.NewActualStateOfWorld(volumePluginMgr)
	ad := attacherdetacher.NewAttacherDetacher(volumePluginMgr)
	reconciler := NewReconciler(
		reconcilerLoopPeriod, maxSafeToDetachDuration, dsw, asw, ad)
	podName := "pod-name"
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	dsw.AddNode(nodeName)
	volumeExists := dsw.VolumeExists(volumeName, nodeName)
	if volumeExists {
		t.Fatalf(
			"Volume %q/node %q should not exist, but it does.",
			volumeName,
			nodeName)
	}
	generatedVolumeName, podAddErr := dsw.AddPod(podName, volumeSpec, nodeName)
	if podAddErr != nil {
		t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
	}
	// Act: start the reconciler; it should attach the desired volume.
	go reconciler.Run(wait.NeverStop)
	// Assert: exactly one attach, no detach activity yet.
	waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
	verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
	waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
	verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
	waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
	// Act: remove the pod from the desired state, then mark the volume safe
	// to detach so the reconciler can detach immediately.
	dsw.DeletePod(podName, generatedVolumeName, nodeName)
	volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName)
	if volumeExists {
		t.Fatalf(
			"Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
			podName,
			generatedVolumeName,
			nodeName)
	}
	asw.MarkVolumeNodeSafeToDetach(generatedVolumeName, nodeName)
	// Assert -- Marked SafeToDetach: exactly one detach occurs.
	waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
	verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
	waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
	verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
	waitForDetachCallCount(t, 1 /* expectedDetachCallCount */, fakePlugin)
}
// Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithoutMarkVolume
// verifies that after the pod is deleted, the reconciler detaches the volume
// once maxSafeToDetachDuration expires, even though the volume was never
// marked safe to detach. The ordering of the wait/verify calls below is
// timing-sensitive; do not reorder them.
func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithoutMarkVolume(t *testing.T) {
	// Arrange
	volumePluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr((t))
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
	asw := cache.NewActualStateOfWorld(volumePluginMgr)
	ad := attacherdetacher.NewAttacherDetacher(volumePluginMgr)
	reconciler := NewReconciler(
		reconcilerLoopPeriod, maxSafeToDetachDuration, dsw, asw, ad)
	podName := "pod-name"
	volumeName := "volume-name"
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
	nodeName := "node-name"
	dsw.AddNode(nodeName)
	volumeExists := dsw.VolumeExists(volumeName, nodeName)
	if volumeExists {
		t.Fatalf(
			"Volume %q/node %q should not exist, but it does.",
			volumeName,
			nodeName)
	}
	generatedVolumeName, podAddErr := dsw.AddPod(podName, volumeSpec, nodeName)
	if podAddErr != nil {
		t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
	}
	// Act: start the reconciler; it should attach the desired volume.
	go reconciler.Run(wait.NeverStop)
	// Assert: exactly one attach, no detach activity yet.
	waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
	verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
	waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
	verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
	waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
	// Act: remove the pod from the desired state; do NOT mark safe to detach.
	dsw.DeletePod(podName, generatedVolumeName, nodeName)
	volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName)
	if volumeExists {
		t.Fatalf(
			"Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
			podName,
			generatedVolumeName,
			nodeName)
	}
	// Assert -- Timer will trigger detach after maxSafeToDetachDuration.
	waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
	verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
	waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
	verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
	waitForDetachCallCount(t, 1 /* expectedDetachCallCount */, fakePlugin)
}
// waitForNewAttacherCallCount polls with exponential backoff until the fake
// plugin reports at least expectedCallCount calls to NewAttacher, failing the
// test if the backoff is exhausted first.
func waitForNewAttacherCallCount(
	t *testing.T,
	expectedCallCount int,
	fakePlugin *volumetesting.FakeVolumePlugin) {
	condition := func() (bool, error) {
		callCount := fakePlugin.GetNewAttacherCallCount()
		if callCount >= expectedCallCount {
			return true, nil
		}
		t.Logf(
			"Warning: Wrong NewAttacherCallCount. Expected: <%v> Actual: <%v>. Will retry.",
			expectedCallCount,
			callCount)
		return false, nil
	}
	if err := retryWithExponentialBackOff(5*time.Millisecond, condition); err != nil {
		t.Fatalf(
			"Timed out waiting for NewAttacherCallCount. Expected: <%v> Actual: <%v>",
			expectedCallCount,
			fakePlugin.GetNewAttacherCallCount())
	}
}
// waitForNewDetacherCallCount polls with exponential backoff until the fake
// plugin reports at least expectedCallCount calls to NewDetacher, failing the
// test if the backoff is exhausted first.
func waitForNewDetacherCallCount(
	t *testing.T,
	expectedCallCount int,
	fakePlugin *volumetesting.FakeVolumePlugin) {
	condition := func() (bool, error) {
		callCount := fakePlugin.GetNewDetacherCallCount()
		if callCount >= expectedCallCount {
			return true, nil
		}
		t.Logf(
			"Warning: Wrong NewDetacherCallCount. Expected: <%v> Actual: <%v>. Will retry.",
			expectedCallCount,
			callCount)
		return false, nil
	}
	if err := retryWithExponentialBackOff(5*time.Millisecond, condition); err != nil {
		t.Fatalf(
			"Timed out waiting for NewDetacherCallCount. Expected: <%v> Actual: <%v>",
			expectedCallCount,
			fakePlugin.GetNewDetacherCallCount())
	}
}
// waitForAttachCallCount polls with exponential backoff until some attacher
// created by the fake plugin reports exactly expectedAttachCallCount attach
// calls. A zero expectation with no attachers is trivially satisfied.
func waitForAttachCallCount(
	t *testing.T,
	expectedAttachCallCount int,
	fakePlugin *volumetesting.FakeVolumePlugin) {
	if expectedAttachCallCount == 0 && len(fakePlugin.Attachers) == 0 {
		return
	}
	condition := func() (bool, error) {
		for i, attacher := range fakePlugin.Attachers {
			callCount := attacher.GetAttachCallCount()
			if callCount == expectedAttachCallCount {
				return true, nil
			}
			t.Logf(
				"Warning: Wrong attacher[%v].GetAttachCallCount(). Expected: <%v> Actual: <%v>. Will try next attacher.",
				i,
				expectedAttachCallCount,
				callCount)
		}
		t.Logf(
			"Warning: No attachers have expected AttachCallCount. Expected: <%v>. Will retry.",
			expectedAttachCallCount)
		return false, nil
	}
	if err := retryWithExponentialBackOff(5*time.Millisecond, condition); err != nil {
		t.Fatalf(
			"No attachers have expected AttachCallCount. Expected: <%v>",
			expectedAttachCallCount)
	}
}
// waitForDetachCallCount blocks, retrying with exponential backoff, until
// some detacher created by fakePlugin reports exactly
// expectedDetachCallCount calls to Detach; it fails the test on timeout.
func waitForDetachCallCount(
	t *testing.T,
	expectedDetachCallCount int,
	fakePlugin *volumetesting.FakeVolumePlugin) {
	// Trivially satisfied: no detachers exist and none are expected.
	if len(fakePlugin.Detachers) == 0 && expectedDetachCallCount == 0 {
		return
	}
	err := retryWithExponentialBackOff(
		time.Duration(5*time.Millisecond),
		func() (bool, error) {
			for i, detacher := range fakePlugin.Detachers {
				actualCallCount := detacher.GetDetachCallCount()
				if actualCallCount == expectedDetachCallCount {
					return true, nil
				}
				// "Warning:" prefix added for consistency with the
				// equivalent attacher message in waitForAttachCallCount.
				t.Logf(
					"Warning: Wrong detacher[%v].GetDetachCallCount(). Expected: <%v> Actual: <%v>. Will try next detacher.",
					i,
					expectedDetachCallCount,
					actualCallCount)
			}
			t.Logf(
				"Warning: No detachers have expected DetachCallCount. Expected: <%v>. Will retry.",
				expectedDetachCallCount)
			return false, nil
		},
	)
	if err != nil {
		t.Fatalf(
			"No detachers have expected DetachCallCount. Expected: <%v>",
			expectedDetachCallCount)
	}
}
// verifyNewAttacherCallCount fails the test when a zero NewAttacher call
// count was expected but fakePlugin recorded one or more calls.
func verifyNewAttacherCallCount(
	t *testing.T,
	expectZeroNewAttacherCallCount bool,
	fakePlugin *volumetesting.FakeVolumePlugin) {
	if !expectZeroNewAttacherCallCount {
		return
	}
	if actual := fakePlugin.GetNewAttacherCallCount(); actual != 0 {
		t.Fatalf(
			"Wrong NewAttacherCallCount. Expected: <0> Actual: <%v>",
			actual)
	}
}
// verifyNewDetacherCallCount fails the test when a zero NewDetacher call
// count was expected but fakePlugin recorded one or more calls.
func verifyNewDetacherCallCount(
	t *testing.T,
	expectZeroNewDetacherCallCount bool,
	fakePlugin *volumetesting.FakeVolumePlugin) {
	if !expectZeroNewDetacherCallCount {
		return
	}
	if actual := fakePlugin.GetNewDetacherCallCount(); actual != 0 {
		t.Fatalf("Wrong NewDetacherCallCount. Expected: <0> Actual: <%v>",
			actual)
	}
}
// retryWithExponentialBackOff invokes fn up to 6 times, tripling the delay
// between attempts starting from initialDuration (no jitter).
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
	return wait.ExponentialBackoff(wait.Backoff{
		Duration: initialDuration,
		Factor:   3,
		Jitter:   0,
		Steps:    6,
	}, fn)
}
// t.Logf("asw: %v", asw.GetAttachedVolumes())
// t.Logf("dsw: %v", dsw.GetVolumesToAttach())

View File

@ -0,0 +1,102 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
)
// GetTestVolumePluginMgr creates, initializes, and returns a test volume
// plugin manager along with the FakeVolumePlugin it was seeded with.
func GetTestVolumePluginMgr(t *testing.T) (*volume.VolumePluginMgr, *volumetesting.FakeVolumePlugin) {
	// Only the fake testing plugin is probed; real cloud-provider plugins
	// are deliberately left out of the test manager.
	// plugins = append(plugins, aws_ebs.ProbeVolumePlugins()...)
	// plugins = append(plugins, gce_pd.ProbeVolumePlugins()...)
	// plugins = append(plugins, cinder.ProbeVolumePlugins()...)
	fakePlugins := volumetesting.ProbeVolumePlugins(volume.VolumeConfig{})
	allPlugins := append([]volume.VolumePlugin{}, fakePlugins...)

	mgr := testVolumePluginMgr{}
	if err := mgr.InitPlugins(allPlugins, &mgr); err != nil {
		t.Fatalf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err)
	}
	return &mgr.VolumePluginMgr, fakePlugins[0].(*volumetesting.FakeVolumePlugin)
}
// testVolumePluginMgr embeds a VolumePluginMgr and additionally implements
// the VolumeHost interface with stub methods so plugins can be initialized
// in tests without a real kubelet.
type testVolumePluginMgr struct {
	volume.VolumePluginMgr
}
// VolumeHost implementation
// This is an unfortunate requirement of the current factoring of volume plugin
// initializing code. It requires kubelet specific methods used by the mounting
// code to be implemented by all initializers even if the initializer does not
// do mounting (like this attach/detach controller).
// Issue kubernetes/kubernetes/issues/14217 to fix this.
// GetPluginDir is a stub; it returns the empty string.
func (vpm *testVolumePluginMgr) GetPluginDir(podUID string) string {
	return ""
}
// GetPodVolumeDir is a stub; it returns the empty string.
func (vpm *testVolumePluginMgr) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
	return ""
}
// GetPodPluginDir is a stub; it returns the empty string.
func (vpm *testVolumePluginMgr) GetPodPluginDir(podUID types.UID, pluginName string) string {
	return ""
}
// GetKubeClient is a stub; it returns nil (no API server in tests).
func (vpm *testVolumePluginMgr) GetKubeClient() internalclientset.Interface {
	return nil
}
// NewWrapperMounter always fails: mounting is not supported by this
// attach/detach-oriented VolumeHost.
func (vpm *testVolumePluginMgr) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	return nil, fmt.Errorf("NewWrapperMounter not supported by Attach/Detach controller's VolumeHost implementation")
}
// NewWrapperUnmounter always fails: unmounting is not supported by this
// attach/detach-oriented VolumeHost.
func (vpm *testVolumePluginMgr) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
	return nil, fmt.Errorf("NewWrapperUnmounter not supported by Attach/Detach controller's VolumeHost implementation")
}
// GetCloudProvider returns a fresh fake cloud provider.
func (vpm *testVolumePluginMgr) GetCloudProvider() cloudprovider.Interface {
	return &fake.FakeCloud{}
}
// GetMounter is a stub; it returns nil.
func (vpm *testVolumePluginMgr) GetMounter() mount.Interface {
	return nil
}
// GetWriter is a stub; it returns nil.
func (vpm *testVolumePluginMgr) GetWriter() io.Writer {
	return nil
}
// GetHostName is a stub; it returns the empty string.
func (vpm *testVolumePluginMgr) GetHostName() string {
	return ""
}

View File

@ -0,0 +1,38 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/volume"
)
// GetTestVolumeSpec returns a test volume spec describing a writable GCE
// persistent disk with the given volume and disk names and a "fake" fstype.
func GetTestVolumeSpec(volumeName, diskName string) *volume.Spec {
	gcePD := &api.GCEPersistentDiskVolumeSource{
		PDName:   diskName,
		FSType:   "fake",
		ReadOnly: false,
	}
	return &volume.Spec{
		Volume: &api.Volume{
			Name:         volumeName,
			VolumeSource: api.VolumeSource{GCEPersistentDisk: gcePD},
		},
	}
}

View File

@ -50,6 +50,24 @@ func (plugin *gcePersistentDiskPlugin) NewAttacher() (volume.Attacher, error) {
return &gcePersistentDiskAttacher{host: plugin.host}, nil
}
// GetUniqueVolumeName returns a name that uniquely identifies the GCE PD
// referenced by spec, in the form <pluginName>/<pdName>:<readOnly>, or an
// error if spec does not describe a GCE volume.
func (plugin *gcePersistentDiskPlugin) GetUniqueVolumeName(spec *volume.Spec) (string, error) {
	source, _ := getVolumeSource(spec)
	if source == nil {
		return "", fmt.Errorf("Spec does not reference a GCE volume type")
	}
	uniqueName := fmt.Sprintf(
		"%s/%s:%v",
		gcePersistentDiskPluginName,
		source.PDName,
		source.ReadOnly)
	return uniqueName, nil
}
// GetDeviceName returns the PD name of the GCE volume referenced by spec,
// or an error if spec does not describe a GCE volume.
func (plugin *gcePersistentDiskPlugin) GetDeviceName(spec *volume.Spec) (string, error) {
	source, _ := getVolumeSource(spec)
	if source == nil {
		return "", fmt.Errorf("Spec does not reference a GCE volume type")
	}
	return source.PDName, nil
}
func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, hostName string) error {
volumeSource, readOnly := getVolumeSource(spec)
pdName := volumeSource.PDName

View File

@ -133,6 +133,18 @@ type AttachableVolumePlugin interface {
VolumePlugin
NewAttacher() (Attacher, error)
NewDetacher() (Detacher, error)
// GetUniqueVolumeName returns a unique name representing the volume
// defined in spec. e.g. pluginname-deviceName-readwrite
// This helps ensures that the same operation (attach/detach) is never
// started on the same volume.
// If the plugin does not support the given spec, this returns an error.
GetUniqueVolumeName(spec *Spec) (string, error)
// GetDeviceName returns the name or ID of the device referenced in the
// specified volume spec. This is passed by callers to the Deatch method.
// If the plugin does not support the given spec, this returns an error.
GetDeviceName(spec *Spec) (string, error)
}
// VolumeHost is an interface that plugins can use to access the kubelet.

View File

@ -23,6 +23,7 @@ import (
"os/exec"
"path"
"strings"
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
@ -132,6 +133,7 @@ func ProbeVolumePlugins(config VolumeConfig) []VolumePlugin {
// Use as:
// volume.RegisterPlugin(&FakePlugin{"fake-name"})
type FakeVolumePlugin struct {
sync.RWMutex
PluginName string
Host VolumeHost
Config VolumeConfig
@ -158,11 +160,15 @@ func (plugin *FakeVolumePlugin) getFakeVolume(list *[]*FakeVolume) *FakeVolume {
}
// Init stores the VolumeHost for later use. Takes the write lock because
// the fake plugin is exercised from multiple goroutines in tests.
func (plugin *FakeVolumePlugin) Init(host VolumeHost) error {
	plugin.Lock()
	defer plugin.Unlock()
	plugin.Host = host
	return nil
}
// Name returns the plugin's configured name under the read lock.
func (plugin *FakeVolumePlugin) Name() string {
	plugin.RLock()
	defer plugin.RUnlock()
	return plugin.PluginName
}
@ -172,6 +178,8 @@ func (plugin *FakeVolumePlugin) CanSupport(spec *Spec) bool {
}
func (plugin *FakeVolumePlugin) NewMounter(spec *Spec, pod *api.Pod, opts VolumeOptions) (Mounter, error) {
plugin.Lock()
defer plugin.Unlock()
volume := plugin.getFakeVolume(&plugin.Mounters)
volume.PodUID = pod.UID
volume.VolName = spec.Name()
@ -181,6 +189,8 @@ func (plugin *FakeVolumePlugin) NewMounter(spec *Spec, pod *api.Pod, opts Volume
}
func (plugin *FakeVolumePlugin) NewUnmounter(volName string, podUID types.UID) (Unmounter, error) {
plugin.Lock()
defer plugin.Unlock()
volume := plugin.getFakeVolume(&plugin.Unmounters)
volume.PodUID = podUID
volume.VolName = volName
@ -190,15 +200,41 @@ func (plugin *FakeVolumePlugin) NewUnmounter(volName string, podUID types.UID) (
}
// NewAttacher records the call and returns a fake attacher volume.
func (plugin *FakeVolumePlugin) NewAttacher() (Attacher, error) {
	plugin.Lock()
	defer plugin.Unlock()
	plugin.NewAttacherCallCount++
	return plugin.getFakeVolume(&plugin.Attachers), nil
}
// GetNewAttacherCallCount returns how many times NewAttacher was called.
func (plugin *FakeVolumePlugin) GetNewAttacherCallCount() int {
	plugin.RLock()
	defer plugin.RUnlock()
	return plugin.NewAttacherCallCount
}
// NewDetacher records the call and returns a fake detacher volume.
func (plugin *FakeVolumePlugin) NewDetacher() (Detacher, error) {
	plugin.Lock()
	defer plugin.Unlock()
	plugin.NewDetacherCallCount++
	return plugin.getFakeVolume(&plugin.Detachers), nil
}
// GetNewDetacherCallCount returns how many times NewDetacher was called.
func (plugin *FakeVolumePlugin) GetNewDetacherCallCount() int {
	plugin.RLock()
	defer plugin.RUnlock()
	return plugin.NewDetacherCallCount
}
// GetUniqueVolumeName returns "<pluginName>/<specName>" for the given spec.
// PluginName is read directly instead of calling Name() so the read lock is
// not acquired recursively: per sync.RWMutex's documentation, a second
// RLock on the same goroutine can deadlock if a writer queues between the
// two acquisitions.
func (plugin *FakeVolumePlugin) GetUniqueVolumeName(spec *Spec) (string, error) {
	plugin.RLock()
	defer plugin.RUnlock()
	return plugin.PluginName + "/" + spec.Name(), nil
}
// GetDeviceName returns the spec's name as the device name. No lock is
// taken because no FakeVolumePlugin state is read.
func (plugin *FakeVolumePlugin) GetDeviceName(spec *Spec) (string, error) {
	return spec.Name(), nil
}
// NewRecycler returns a canned fake recycler with a fixed path.
func (plugin *FakeVolumePlugin) NewRecycler(pvName string, spec *Spec) (Recycler, error) {
	return &fakeRecycler{"/attributesTransferredFromSpec", MetricsNil{}}, nil
}
@ -208,6 +244,8 @@ func (plugin *FakeVolumePlugin) NewDeleter(spec *Spec) (Deleter, error) {
}
// NewProvisioner records the options (so tests can inspect them via
// LastProvisionerOptions) and returns a fake provisioner.
func (plugin *FakeVolumePlugin) NewProvisioner(options VolumeOptions) (Provisioner, error) {
	plugin.Lock()
	defer plugin.Unlock()
	plugin.LastProvisionerOptions = options
	return &FakeProvisioner{options, plugin.Host}, nil
}
@ -217,6 +255,7 @@ func (plugin *FakeVolumePlugin) GetAccessModes() []api.PersistentVolumeAccessMod
}
type FakeVolume struct {
sync.RWMutex
PodUID types.UID
VolName string
Plugin *FakeVolumePlugin
@ -242,8 +281,10 @@ func (_ *FakeVolume) GetAttributes() Attributes {
}
// SetUp records the call and delegates to SetUpAt with the volume's path.
// The lock-free getPath is used because the write lock is already held here;
// calling the exported GetPath would re-acquire the lock. A stale duplicate
// "return fv.SetUpAt(fv.GetPath(), fsGroup)" line (merge residue) was
// removed; it made the intended return unreachable.
func (fv *FakeVolume) SetUp(fsGroup *int64) error {
	fv.Lock()
	defer fv.Unlock()
	fv.SetUpCallCount++
	return fv.SetUpAt(fv.getPath(), fsGroup)
}
func (fv *FakeVolume) SetUpAt(dir string, fsGroup *int64) error {
@ -251,12 +292,20 @@ func (fv *FakeVolume) SetUpAt(dir string, fsGroup *int64) error {
}
// GetPath returns the volume's path under the read lock; the actual path
// construction lives in the lock-free getPath helper.
func (fv *FakeVolume) GetPath() string {
	fv.RLock()
	defer fv.RUnlock()
	return fv.getPath()
}
// getPath builds the pod volume directory path from the host, pod UID,
// escaped plugin name, and volume name. Callers must hold fv's lock.
func (fv *FakeVolume) getPath() string {
	return path.Join(fv.Plugin.Host.GetPodVolumeDir(fv.PodUID, utilstrings.EscapeQualifiedNameForDisk(fv.Plugin.PluginName), fv.VolName))
}
// TearDown records the call and delegates to TearDownAt with the volume's
// path. The lock-free getPath is used because the write lock is already held
// here. A stale duplicate "return fv.TearDownAt(fv.GetPath())" line (merge
// residue) was removed; it made the intended return unreachable.
func (fv *FakeVolume) TearDown() error {
	fv.Lock()
	defer fv.Unlock()
	fv.TearDownCallCount++
	return fv.TearDownAt(fv.getPath())
}
func (fv *FakeVolume) TearDownAt(dir string) error {
@ -264,36 +313,62 @@ func (fv *FakeVolume) TearDownAt(dir string) error {
}
// Attach only records the call; the fake performs no real attachment.
func (fv *FakeVolume) Attach(spec *Spec, hostName string) error {
	fv.Lock()
	defer fv.Unlock()
	fv.AttachCallCount++
	return nil
}
// GetAttachCallCount returns how many times Attach was called.
func (fv *FakeVolume) GetAttachCallCount() int {
	fv.RLock()
	defer fv.RUnlock()
	return fv.AttachCallCount
}
// WaitForAttach only records the call and returns immediately.
func (fv *FakeVolume) WaitForAttach(spec *Spec, spectimeout time.Duration) (string, error) {
	fv.Lock()
	defer fv.Unlock()
	fv.WaitForAttachCallCount++
	return "", nil
}
// GetDeviceMountPath records the call and returns an empty path. Takes the
// write lock (not the read lock) because it mutates the call counter.
func (fv *FakeVolume) GetDeviceMountPath(spec *Spec) string {
	fv.Lock()
	defer fv.Unlock()
	fv.GetDeviceMountPathCallCount++
	return ""
}
// MountDevice only records the call; the fake performs no real mount.
func (fv *FakeVolume) MountDevice(spec *Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error {
	fv.Lock()
	defer fv.Unlock()
	fv.MountDeviceCallCount++
	return nil
}
// Detach only records the call; the fake performs no real detachment.
func (fv *FakeVolume) Detach(deviceMountPath string, hostName string) error {
	fv.Lock()
	defer fv.Unlock()
	fv.DetachCallCount++
	return nil
}
// GetDetachCallCount returns how many times Detach was called.
func (fv *FakeVolume) GetDetachCallCount() int {
	fv.RLock()
	defer fv.RUnlock()
	return fv.DetachCallCount
}
// WaitForDetach only records the call and returns immediately.
func (fv *FakeVolume) WaitForDetach(devicePath string, timeout time.Duration) error {
	fv.Lock()
	defer fv.Unlock()
	fv.WaitForDetachCallCount++
	return nil
}
// UnmountDevice only records the call; the fake performs no real unmount.
func (fv *FakeVolume) UnmountDevice(globalMountPath string, mounter mount.Interface) error {
	fv.Lock()
	defer fv.Unlock()
	fv.UnmountDeviceCallCount++
	return nil
}

View File

@ -291,7 +291,18 @@ func createClients(s *httptest.Server) (*clientset.Clientset, *persistentvolumec
// creates many claims and default values were too low.
testClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000, Burst: 100000})
host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
plugins := []volume.VolumePlugin{&volumetest.FakeVolumePlugin{"plugin-name", host, volume.VolumeConfig{}, volume.VolumeOptions{}, 0, 0, nil, nil, nil, nil}}
plugins := []volume.VolumePlugin{&volumetest.FakeVolumePlugin{
PluginName: "plugin-name",
Host: host,
Config: volume.VolumeConfig{},
LastProvisionerOptions: volume.VolumeOptions{},
NewAttacherCallCount: 0,
NewDetacherCallCount: 0,
Mounters: nil,
Unmounters: nil,
Attachers: nil,
Detachers: nil,
}}
cloud := &fake_cloud.FakeCloud{}
ctrl := persistentvolumecontroller.NewPersistentVolumeController(testClient, 10*time.Second, nil, plugins, cloud, "", nil, nil, nil)
return testClient, ctrl