Move from glog to klog

- Move from the old github.com/golang/glog to k8s.io/klog
- klog has an explicit InitFlags(), so we add calls to it as necessary
- Update the other vendored repositories that made the same glog-to-klog change:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by calling InitFlags explicitly in their init() methods (see the sketch below)
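
A minimal sketch of the explicit flag wiring klog expects (illustrative, not
part of this commit; the binary layout and verbosity level are assumptions):

    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    func main() {
        // glog registered -v, -logtostderr, etc. on flag.CommandLine from its
        // own init(); klog makes that registration an explicit opt-in.
        klog.InitFlags(nil) // nil registers klog's flags on flag.CommandLine
        flag.Parse()
        defer klog.Flush()

        klog.V(4).Infof("emitted only when run with -v=4 or higher")
    }

Tests that relied on glog's implicit flag registration can do the same from an
init() function in the test file, e.g. func init() { klog.InitFlags(nil) }.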

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
Davanum Srinivas
2018-11-09 13:49:10 -05:00
parent 97baad34a7
commit 954996e231
1263 changed files with 10023 additions and 10076 deletions
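
The bulk of the diff below is mechanical: swap the import and the package
identifier at each call site. A condensed sketch of the pattern (hypothetical
package and function names, not a file from this commit):

    package volumelog

    import (
        "k8s.io/klog" // was: "github.com/golang/glog"
    )

    // logCreated shows that call sites change only in the package name:
    // klog preserves glog's API (V levels, Infof, Warningf, Errorf).
    func logCreated(name string) {
        klog.V(4).Infof("created volume %s", name) // was: glog.V(4).Infof(...)
    }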


@@ -58,8 +58,8 @@ go_library(
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@@ -106,7 +106,7 @@ go_test(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/tools/reference:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)


@@ -27,7 +27,7 @@ import (
"testing"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
@@ -159,7 +159,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
r.lock.Lock()
defer r.lock.Unlock()
glog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource())
klog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource())
// Inject error when requested
err = r.injectReactError(action)
@@ -183,7 +183,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
r.volumes[volume.Name] = volume
r.changedObjects = append(r.changedObjects, volume)
r.changedSinceLastSync++
glog.V(4).Infof("created volume %s", volume.Name)
klog.V(4).Infof("created volume %s", volume.Name)
return true, volume, nil
case action.Matches("update", "persistentvolumes"):
@@ -209,7 +209,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
r.volumes[volume.Name] = volume
r.changedObjects = append(r.changedObjects, volume)
r.changedSinceLastSync++
glog.V(4).Infof("saved updated volume %s", volume.Name)
klog.V(4).Infof("saved updated volume %s", volume.Name)
return true, volume, nil
case action.Matches("update", "persistentvolumeclaims"):
@@ -235,23 +235,23 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
r.claims[claim.Name] = claim
r.changedObjects = append(r.changedObjects, claim)
r.changedSinceLastSync++
glog.V(4).Infof("saved updated claim %s", claim.Name)
klog.V(4).Infof("saved updated claim %s", claim.Name)
return true, claim, nil
case action.Matches("get", "persistentvolumes"):
name := action.(core.GetAction).GetName()
volume, found := r.volumes[name]
if found {
glog.V(4).Infof("GetVolume: found %s", volume.Name)
klog.V(4).Infof("GetVolume: found %s", volume.Name)
return true, volume, nil
} else {
glog.V(4).Infof("GetVolume: volume %s not found", name)
klog.V(4).Infof("GetVolume: volume %s not found", name)
return true, nil, fmt.Errorf("Cannot find volume %s", name)
}
case action.Matches("delete", "persistentvolumes"):
name := action.(core.DeleteAction).GetName()
glog.V(4).Infof("deleted volume %s", name)
klog.V(4).Infof("deleted volume %s", name)
_, found := r.volumes[name]
if found {
delete(r.volumes, name)
@@ -263,7 +263,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
case action.Matches("delete", "persistentvolumeclaims"):
name := action.(core.DeleteAction).GetName()
glog.V(4).Infof("deleted claim %s", name)
klog.V(4).Infof("deleted claim %s", name)
_, found := r.volumes[name]
if found {
delete(r.claims, name)
@@ -286,11 +286,11 @@ func (r *volumeReactor) injectReactError(action core.Action) error {
}
for i, expected := range r.errors {
glog.V(4).Infof("trying to match %q %q with %q %q", expected.verb, expected.resource, action.GetVerb(), action.GetResource())
klog.V(4).Infof("trying to match %q %q with %q %q", expected.verb, expected.resource, action.GetVerb(), action.GetResource())
if action.Matches(expected.verb, expected.resource) {
// That's the action we're waiting for, remove it from injectedErrors
r.errors = append(r.errors[:i], r.errors[i+1:]...)
glog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.verb, expected.resource, expected.error)
klog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.verb, expected.resource, expected.error)
return expected.error
}
}
@@ -379,14 +379,14 @@ func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeCo
select {
case event, ok := <-fakeRecorder.Events:
if ok {
glog.V(5).Infof("event recorder got event %s", event)
klog.V(5).Infof("event recorder got event %s", event)
gotEvents = append(gotEvents, event)
} else {
glog.V(5).Infof("event recorder finished")
klog.V(5).Infof("event recorder finished")
finished = true
}
case _, _ = <-timer.C:
glog.V(5).Infof("event recorder timeout")
klog.V(5).Infof("event recorder timeout")
finished = true
}
}
@@ -426,10 +426,10 @@ func (r *volumeReactor) popChange() interface{} {
switch obj.(type) {
case *v1.PersistentVolume:
vol, _ := obj.(*v1.PersistentVolume)
glog.V(4).Infof("reactor queue: %s", vol.Name)
klog.V(4).Infof("reactor queue: %s", vol.Name)
case *v1.PersistentVolumeClaim:
claim, _ := obj.(*v1.PersistentVolumeClaim)
glog.V(4).Infof("reactor queue: %s", claim.Name)
klog.V(4).Infof("reactor queue: %s", claim.Name)
}
}
@@ -898,7 +898,7 @@ func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(c
// Inject a hook before async operation starts
ctrl.preOperationHook = func(operationName string) {
// Inside the hook, run the function to inject
glog.V(4).Infof("reactor: scheduleOperation reached, injecting call")
klog.V(4).Infof("reactor: scheduleOperation reached, injecting call")
injectBeforeOperation(ctrl, reactor)
}
@@ -945,7 +945,7 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReacto
// 3. Compare resulting volumes and claims with expected volumes and claims.
func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {
for _, test := range tests {
glog.V(4).Infof("starting test %q", test.name)
klog.V(4).Infof("starting test %q", test.name)
// Initialize the controller
client := &fake.Clientset{}
@@ -1008,7 +1008,7 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag
// Some limit of calls is enforced to prevent endless loops.
func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) {
for _, test := range tests {
glog.V(4).Infof("starting multisync test %q", test.name)
klog.V(4).Infof("starting multisync test %q", test.name)
// Initialize the controller
client := &fake.Clientset{}
@@ -1046,7 +1046,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
counter := 0
for {
counter++
glog.V(4).Infof("test %q: iteration %d", test.name, counter)
klog.V(4).Infof("test %q: iteration %d", test.name, counter)
if counter > 100 {
t.Errorf("Test %q failed: too many iterations", test.name)
@@ -1064,7 +1064,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
// Simulate "periodic sync" of everything (until it produces
// no changes).
firstSync = false
glog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name)
klog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name)
reactor.syncAll()
} else {
// Last sync did not produce any updates, the test reached
@@ -1085,7 +1085,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
if err != nil {
if err == versionConflictError {
// Ignore version errors
glog.V(4).Infof("test intentionaly ignores version error.")
klog.V(4).Infof("test intentionaly ignores version error.")
} else {
t.Errorf("Error calling syncClaim: %v", err)
// Finish the loop on the first error
@@ -1102,7 +1102,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
if err != nil {
if err == versionConflictError {
// Ignore version errors
glog.V(4).Infof("test intentionaly ignores version error.")
klog.V(4).Infof("test intentionaly ignores version error.")
} else {
t.Errorf("Error calling syncVolume: %v", err)
// Finish the loop on the first error
@@ -1114,7 +1114,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
}
}
evaluateTestResults(ctrl, reactor, test, t)
glog.V(4).Infof("test %q finished after %d iterations", test.name, counter)
klog.V(4).Infof("test %q finished after %d iterations", test.name, counter)
}
}
@@ -1185,7 +1185,7 @@ func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (vol
func (plugin *mockVolumePlugin) NewProvisioner(options vol.VolumeOptions) (vol.Provisioner, error) {
if len(plugin.provisionCalls) > 0 {
// mockVolumePlugin directly implements Provisioner interface
glog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner")
klog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner")
plugin.provisionOptions = options
return plugin, nil
} else {
@@ -1201,7 +1201,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
var pv *v1.PersistentVolume
call := plugin.provisionCalls[plugin.provisionCallCounter]
if !reflect.DeepEqual(call.expectedParameters, plugin.provisionOptions.Parameters) {
glog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters)
klog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters)
return nil, fmt.Errorf("Mock plugin error: invalid provisioner call")
}
if call.ret == nil {
@@ -1229,7 +1229,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
}
plugin.provisionCallCounter++
glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret)
klog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret)
return pv, call.ret
}
@@ -1238,7 +1238,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
func (plugin *mockVolumePlugin) NewDeleter(spec *vol.Spec) (vol.Deleter, error) {
if len(plugin.deleteCalls) > 0 {
// mockVolumePlugin directly implements Deleter interface
glog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter")
klog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter")
return plugin, nil
} else {
return nil, fmt.Errorf("Mock plugin error: no deleteCalls configured")
@@ -1251,7 +1251,7 @@ func (plugin *mockVolumePlugin) Delete() error {
}
ret := plugin.deleteCalls[plugin.deleteCallCounter]
plugin.deleteCallCounter++
glog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret)
klog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret)
return ret
}
@@ -1277,6 +1277,6 @@ func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventReco
}
ret := plugin.recycleCalls[plugin.recycleCallCounter]
plugin.recycleCallCounter++
glog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret)
klog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret)
return ret
}


@@ -7,8 +7,8 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)


@@ -21,8 +21,8 @@ import (
"k8s.io/api/core/v1"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/klog"
)
const (
@@ -139,7 +139,7 @@ func (collector *pvAndPVCCountCollector) pvCollect(ch chan<- prometheus.Metric)
float64(number),
storageClassName)
if err != nil {
glog.Warningf("Create bound pv number metric failed: %v", err)
klog.Warningf("Create bound pv number metric failed: %v", err)
continue
}
ch <- metric
@@ -151,7 +151,7 @@ func (collector *pvAndPVCCountCollector) pvCollect(ch chan<- prometheus.Metric)
float64(number),
storageClassName)
if err != nil {
glog.Warningf("Create unbound pv number metric failed: %v", err)
klog.Warningf("Create unbound pv number metric failed: %v", err)
continue
}
ch <- metric
@@ -179,7 +179,7 @@ func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric)
float64(number),
namespace)
if err != nil {
glog.Warningf("Create bound pvc number metric failed: %v", err)
klog.Warningf("Create bound pvc number metric failed: %v", err)
continue
}
ch <- metric
@@ -191,7 +191,7 @@ func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric)
float64(number),
namespace)
if err != nil {
glog.Warningf("Create unbound pvc number metric failed: %v", err)
klog.Warningf("Create unbound pvc number metric failed: %v", err)
continue
}
ch <- metric


@@ -48,7 +48,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
"github.com/golang/glog"
"k8s.io/klog"
)
// ==================================================================
@@ -238,7 +238,7 @@ type PersistentVolumeController struct {
// For easier readability, it was split into syncUnboundClaim and syncBoundClaim
// methods.
func (ctrl *PersistentVolumeController) syncClaim(claim *v1.PersistentVolumeClaim) error {
glog.V(4).Infof("synchronizing PersistentVolumeClaim[%s]: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim))
klog.V(4).Infof("synchronizing PersistentVolumeClaim[%s]: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim))
if !metav1.HasAnnotation(claim.ObjectMeta, annBindCompleted) {
return ctrl.syncUnboundClaim(claim)
@@ -330,11 +330,11 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
// [Unit test set 1]
volume, err := ctrl.volumes.findBestMatchForClaim(claim, delayBinding)
if err != nil {
glog.V(2).Infof("synchronizing unbound PersistentVolumeClaim[%s]: Error finding PV for claim: %v", claimToClaimKey(claim), err)
klog.V(2).Infof("synchronizing unbound PersistentVolumeClaim[%s]: Error finding PV for claim: %v", claimToClaimKey(claim), err)
return fmt.Errorf("Error finding PV for claim %q: %v", claimToClaimKey(claim), err)
}
if volume == nil {
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim))
klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim))
// No PV could be found
// OBSERVATION: pvc is "Pending", will retry
switch {
@@ -358,7 +358,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
} else /* pv != nil */ {
// Found a PV for this claim
// OBSERVATION: pvc is "Pending", pv is "Available"
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), volume.Name, getVolumeStatusForLogging(volume))
klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), volume.Name, getVolumeStatusForLogging(volume))
if err = ctrl.bind(volume, claim); err != nil {
// On any error saving the volume or the claim, subsequent
// syncClaim will finish the binding.
@@ -370,7 +370,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
} else /* pvc.Spec.VolumeName != nil */ {
// [Unit test set 2]
// User asked for a specific PV.
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested", claimToClaimKey(claim), claim.Spec.VolumeName)
klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested", claimToClaimKey(claim), claim.Spec.VolumeName)
obj, found, err := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName)
if err != nil {
return err
@@ -379,7 +379,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
// User asked for a PV that does not exist.
// OBSERVATION: pvc is "Pending"
// Retry later.
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and not found, will try again next time", claimToClaimKey(claim), claim.Spec.VolumeName)
klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and not found, will try again next time", claimToClaimKey(claim), claim.Spec.VolumeName)
if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil {
return err
}
@@ -389,13 +389,13 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
if !ok {
return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj)
}
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume))
klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume))
if volume.Spec.ClaimRef == nil {
// User asked for a PV that is not claimed
// OBSERVATION: pvc is "Pending", pv is "Available"
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume is unbound, binding", claimToClaimKey(claim))
klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume is unbound, binding", claimToClaimKey(claim))
if err = checkVolumeSatisfyClaim(volume, claim); err != nil {
glog.V(4).Infof("Can't bind the claim to volume %q: %v", volume.Name, err)
klog.V(4).Infof("Can't bind the claim to volume %q: %v", volume.Name, err)
//send an event
msg := fmt.Sprintf("Cannot bind to requested volume %q: %s", volume.Name, err)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.VolumeMismatch, msg)
@@ -413,7 +413,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
} else if isVolumeBoundToClaim(volume, claim) {
// User asked for a PV that is claimed by this PVC
// OBSERVATION: pvc is "Pending", pv is "Bound"
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound, finishing the binding", claimToClaimKey(claim))
klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound, finishing the binding", claimToClaimKey(claim))
// Finish the volume binding by adding claim UID.
if err = ctrl.bind(volume, claim); err != nil {
@@ -425,7 +425,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
// User asked for a PV that is claimed by someone else
// OBSERVATION: pvc is "Pending", pv is "Bound"
if !metav1.HasAnnotation(claim.ObjectMeta, annBoundByController) {
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim by user, will retry later", claimToClaimKey(claim))
klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim by user, will retry later", claimToClaimKey(claim))
// User asked for a specific PV, retry later
if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil {
return err
@@ -434,7 +434,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
} else {
// This should never happen because someone had to remove
// annBindCompleted annotation on the claim.
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim %q by controller, THIS SHOULD NEVER HAPPEN", claimToClaimKey(claim), claimrefToClaimKey(volume.Spec.ClaimRef))
klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim %q by controller, THIS SHOULD NEVER HAPPEN", claimToClaimKey(claim), claimrefToClaimKey(volume.Spec.ClaimRef))
return fmt.Errorf("Invalid binding of claim %q to volume %q: volume already claimed by %q", claimToClaimKey(claim), claim.Spec.VolumeName, claimrefToClaimKey(volume.Spec.ClaimRef))
}
}
@@ -472,13 +472,13 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum
return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj)
}
glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume))
klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume))
if volume.Spec.ClaimRef == nil {
// Claim is bound but volume has come unbound.
// Or, a claim was bound and the controller has not received updated
// volume yet. We can't distinguish these cases.
// Bind the volume again and set all states to Bound.
glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume is unbound, fixing", claimToClaimKey(claim))
klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume is unbound, fixing", claimToClaimKey(claim))
if err = ctrl.bind(volume, claim); err != nil {
// Objects not saved, next syncPV or syncClaim will try again
return err
@@ -489,7 +489,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum
// NOTE: syncPV can handle this so it can be left out.
// NOTE: bind() call here will do nothing in most cases as
// everything should be already set.
glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: claim is already correctly bound", claimToClaimKey(claim))
klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: claim is already correctly bound", claimToClaimKey(claim))
if err = ctrl.bind(volume, claim); err != nil {
// Objects not saved, next syncPV or syncClaim will try again
return err
@@ -512,12 +512,12 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum
// created, updated or periodically synced. We do not differentiate between
// these events.
func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) error {
glog.V(4).Infof("synchronizing PersistentVolume[%s]: %s", volume.Name, getVolumeStatusForLogging(volume))
klog.V(4).Infof("synchronizing PersistentVolume[%s]: %s", volume.Name, getVolumeStatusForLogging(volume))
// [Unit test set 4]
if volume.Spec.ClaimRef == nil {
// Volume is unused
glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is unused", volume.Name)
klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is unused", volume.Name)
if _, err := ctrl.updateVolumePhase(volume, v1.VolumeAvailable, ""); err != nil {
// Nothing was saved; we will fall back into the same
// condition in the next call to this method
@@ -529,7 +529,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
if volume.Spec.ClaimRef.UID == "" {
// The PV is reserved for a PVC; that PVC has not yet been
// bound to this PV; the PVC sync will handle it.
glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is pre-bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is pre-bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
if _, err := ctrl.updateVolumePhase(volume, v1.VolumeAvailable, ""); err != nil {
// Nothing was saved; we will fall back into the same
// condition in the next call to this method
@@ -537,7 +537,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
}
return nil
}
glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
// Get the PVC by _name_
var claim *v1.PersistentVolumeClaim
claimName := claimrefToClaimKey(volume.Spec.ClaimRef)
@@ -570,7 +570,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
}
}
if !found {
glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
// Fall through with claim = nil
} else {
var ok bool
@@ -578,12 +578,12 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
if !ok {
return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj)
}
glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s found: %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), getClaimStatusForLogging(claim))
klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s found: %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), getClaimStatusForLogging(claim))
}
if claim != nil && claim.UID != volume.Spec.ClaimRef.UID {
// The claim that the PV was pointing to was deleted, and another
// with the same name created.
glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s has different UID, the old one must have been deleted", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s has different UID, the old one must have been deleted", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
// Treat the volume as bound to a missing claim.
claim = nil
}
@@ -598,7 +598,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
// volume.
if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed {
// Also, log this only once:
glog.V(2).Infof("volume %q is released and reclaim policy %q will be executed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy)
klog.V(2).Infof("volume %q is released and reclaim policy %q will be executed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy)
if volume, err = ctrl.updateVolumePhase(volume, v1.VolumeReleased, ""); err != nil {
// Nothing was saved; we will fall back into the same condition
// in the next call to this method
@@ -626,10 +626,10 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
if metav1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
// The binding is not completed; let PVC sync handle it
glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name)
klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name)
} else {
// Dangling PV; try to re-establish the link in the PVC sync
glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume was bound and got unbound (by user?), waiting for syncClaim to fix it", volume.Name)
klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume was bound and got unbound (by user?), waiting for syncClaim to fix it", volume.Name)
}
// In both cases, the volume is Bound and the claim is Pending.
// Next syncClaim will fix it. To speed it up, we enqueue the claim
@@ -642,7 +642,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
return nil
} else if claim.Spec.VolumeName == volume.Name {
// Volume is bound to a claim properly, update status if necessary
glog.V(4).Infof("synchronizing PersistentVolume[%s]: all is bound", volume.Name)
klog.V(4).Infof("synchronizing PersistentVolume[%s]: all is bound", volume.Name)
if _, err = ctrl.updateVolumePhase(volume, v1.VolumeBound, ""); err != nil {
// Nothing was saved; we will fall back into the same
// condition in the next call to this method
@@ -659,7 +659,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
// the user know. Don't overwrite existing Failed status!
if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed {
// Also, log this only once:
glog.V(2).Infof("dynamically volume %q is released and it will be deleted", volume.Name)
klog.V(2).Infof("dynamically volume %q is released and it will be deleted", volume.Name)
if volume, err = ctrl.updateVolumePhase(volume, v1.VolumeReleased, ""); err != nil {
// Nothing was saved; we will fall back into the same condition
// in the next call to this method
@@ -679,14 +679,14 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
// This is part of the normal operation of the controller; the
// controller tried to use this volume for a claim but the claim
// was fulfilled by another volume. We did this; fix it.
glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by controller to a claim that is bound to another volume, unbinding", volume.Name)
klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by controller to a claim that is bound to another volume, unbinding", volume.Name)
if err = ctrl.unbindVolume(volume); err != nil {
return err
}
return nil
} else {
// The PV must have been created with this ptr; leave it alone.
glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", volume.Name)
klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", volume.Name)
// This just updates the volume phase and clears
// volume.Spec.ClaimRef.UID. It leaves the volume pre-bound
// to the claim.
@@ -706,7 +706,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
// phase - phase to set
// volume - volume which Capacity is set into claim.Status.Capacity
func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) {
glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s", claimToClaimKey(claim), phase)
klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s", claimToClaimKey(claim), phase)
dirty := false
@@ -751,21 +751,21 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo
if !dirty {
// Nothing to do.
glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: phase %s already set", claimToClaimKey(claim), phase)
klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: phase %s already set", claimToClaimKey(claim), phase)
return claim, nil
}
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone)
if err != nil {
glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err)
klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err)
return newClaim, err
}
_, err = ctrl.storeClaimUpdate(newClaim)
if err != nil {
glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: cannot update internal cache: %v", claimToClaimKey(claim), err)
klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: cannot update internal cache: %v", claimToClaimKey(claim), err)
return newClaim, err
}
glog.V(2).Infof("claim %q entered phase %q", claimToClaimKey(claim), phase)
klog.V(2).Infof("claim %q entered phase %q", claimToClaimKey(claim), phase)
return newClaim, nil
}
@@ -778,10 +778,10 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo
// volume - volume which Capacity is set into claim.Status.Capacity
// eventtype, reason, message - event to send, see EventRecorder.Event()
func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume, eventtype, reason, message string) (*v1.PersistentVolumeClaim, error) {
glog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase)
klog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase)
if claim.Status.Phase == phase {
// Nothing to do.
glog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: phase %s already set", claimToClaimKey(claim), phase)
klog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: phase %s already set", claimToClaimKey(claim), phase)
return claim, nil
}
@@ -792,7 +792,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.Per
// Emit the event only when the status change happens, not every time
// syncClaim is called.
glog.V(3).Infof("claim %q changed status to %q: %s", claimToClaimKey(claim), phase, message)
klog.V(3).Infof("claim %q changed status to %q: %s", claimToClaimKey(claim), phase, message)
ctrl.eventRecorder.Event(newClaim, eventtype, reason, message)
return newClaim, nil
@@ -800,10 +800,10 @@ func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.Per
// updateVolumePhase saves new volume phase to API server.
func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentVolume, phase v1.PersistentVolumePhase, message string) (*v1.PersistentVolume, error) {
glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s", volume.Name, phase)
klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s", volume.Name, phase)
if volume.Status.Phase == phase {
// Nothing to do.
glog.V(4).Infof("updating PersistentVolume[%s]: phase %s already set", volume.Name, phase)
klog.V(4).Infof("updating PersistentVolume[%s]: phase %s already set", volume.Name, phase)
return volume, nil
}
@@ -813,15 +813,15 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(volumeClone)
if err != nil {
glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err)
klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err)
return newVol, err
}
_, err = ctrl.storeVolumeUpdate(newVol)
if err != nil {
glog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err)
klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err)
return newVol, err
}
glog.V(2).Infof("volume %q entered phase %q", volume.Name, phase)
klog.V(2).Infof("volume %q entered phase %q", volume.Name, phase)
return newVol, err
}
@@ -829,10 +829,10 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV
// given event on the volume. It saves the phase and emits the event only when
// the phase has actually changed from the version saved in API server.
func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.PersistentVolume, phase v1.PersistentVolumePhase, eventtype, reason, message string) (*v1.PersistentVolume, error) {
glog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: set phase %s", volume.Name, phase)
klog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: set phase %s", volume.Name, phase)
if volume.Status.Phase == phase {
// Nothing to do.
glog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: phase %s already set", volume.Name, phase)
klog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: phase %s already set", volume.Name, phase)
return volume, nil
}
@@ -843,7 +843,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.Pe
// Emit the event only when the status change happens, not every time
// syncClaim is called.
glog.V(3).Infof("volume %q changed status to %q: %s", volume.Name, phase, message)
klog.V(3).Infof("volume %q changed status to %q: %s", volume.Name, phase, message)
ctrl.eventRecorder.Event(newVol, eventtype, reason, message)
return newVol, nil
@@ -852,7 +852,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.Pe
// bindVolumeToClaim modifies given volume to be bound to a claim and saves it to
// API server. The claim is not modified in this method!
func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim))
klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim))
volumeClone, dirty, err := ctrl.getBindVolumeToClaim(volume, claim)
if err != nil {
@@ -864,27 +864,27 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV
return ctrl.updateBindVolumeToClaim(volumeClone, claim, true)
}
glog.V(4).Infof("updating PersistentVolume[%s]: already bound to %q", volume.Name, claimToClaimKey(claim))
klog.V(4).Infof("updating PersistentVolume[%s]: already bound to %q", volume.Name, claimToClaimKey(claim))
return volume, nil
}
// bindVolumeToClaim modifies given volume to be bound to a claim and saves it to
// API server. The claim is not modified in this method!
func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, claim *v1.PersistentVolumeClaim, updateCache bool) (*v1.PersistentVolume, error) {
glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volumeClone.Name)
klog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volumeClone.Name)
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone)
if err != nil {
glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimToClaimKey(claim), err)
klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimToClaimKey(claim), err)
return newVol, err
}
if updateCache {
_, err = ctrl.storeVolumeUpdate(newVol)
if err != nil {
glog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volumeClone.Name, err)
klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volumeClone.Name, err)
return newVol, err
}
}
glog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", newVol.Name, claimToClaimKey(claim))
klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", newVol.Name, claimToClaimKey(claim))
return newVol, nil
}
@@ -928,7 +928,7 @@ func (ctrl *PersistentVolumeController) getBindVolumeToClaim(volume *v1.Persiste
// bindClaimToVolume modifies the given claim to be bound to a volume and
// saves it to API server. The volume is not modified in this method!
func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) {
glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q", claimToClaimKey(claim), volume.Name)
klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q", claimToClaimKey(claim), volume.Name)
dirty := false
@@ -960,22 +960,22 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo
}
if dirty {
glog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim))
klog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim))
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
if err != nil {
glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err)
klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err)
return newClaim, err
}
_, err = ctrl.storeClaimUpdate(newClaim)
if err != nil {
glog.V(4).Infof("updating PersistentVolumeClaim[%s]: cannot update internal cache: %v", claimToClaimKey(claim), err)
klog.V(4).Infof("updating PersistentVolumeClaim[%s]: cannot update internal cache: %v", claimToClaimKey(claim), err)
return newClaim, err
}
glog.V(4).Infof("updating PersistentVolumeClaim[%s]: bound to %q", claimToClaimKey(claim), volume.Name)
klog.V(4).Infof("updating PersistentVolumeClaim[%s]: bound to %q", claimToClaimKey(claim), volume.Name)
return newClaim, nil
}
glog.V(4).Infof("updating PersistentVolumeClaim[%s]: already bound to %q", claimToClaimKey(claim), volume.Name)
klog.V(4).Infof("updating PersistentVolumeClaim[%s]: already bound to %q", claimToClaimKey(claim), volume.Name)
return claim, nil
}
@@ -990,35 +990,35 @@ func (ctrl *PersistentVolumeController) bind(volume *v1.PersistentVolume, claim
var updatedClaim *v1.PersistentVolumeClaim
var updatedVolume *v1.PersistentVolume
glog.V(4).Infof("binding volume %q to claim %q", volume.Name, claimToClaimKey(claim))
klog.V(4).Infof("binding volume %q to claim %q", volume.Name, claimToClaimKey(claim))
if updatedVolume, err = ctrl.bindVolumeToClaim(volume, claim); err != nil {
glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume: %v", volume.Name, claimToClaimKey(claim), err)
klog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume: %v", volume.Name, claimToClaimKey(claim), err)
return err
}
volume = updatedVolume
if updatedVolume, err = ctrl.updateVolumePhase(volume, v1.VolumeBound, ""); err != nil {
glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume status: %v", volume.Name, claimToClaimKey(claim), err)
klog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume status: %v", volume.Name, claimToClaimKey(claim), err)
return err
}
volume = updatedVolume
if updatedClaim, err = ctrl.bindClaimToVolume(claim, volume); err != nil {
glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim: %v", volume.Name, claimToClaimKey(claim), err)
klog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim: %v", volume.Name, claimToClaimKey(claim), err)
return err
}
claim = updatedClaim
if updatedClaim, err = ctrl.updateClaimStatus(claim, v1.ClaimBound, volume); err != nil {
glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim status: %v", volume.Name, claimToClaimKey(claim), err)
klog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim status: %v", volume.Name, claimToClaimKey(claim), err)
return err
}
claim = updatedClaim
glog.V(4).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim))
glog.V(4).Infof("volume %q status after binding: %s", volume.Name, getVolumeStatusForLogging(volume))
glog.V(4).Infof("claim %q status after binding: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim))
klog.V(4).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim))
klog.V(4).Infof("volume %q status after binding: %s", volume.Name, getVolumeStatusForLogging(volume))
klog.V(4).Infof("claim %q status after binding: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim))
return nil
}
@@ -1029,7 +1029,7 @@ func (ctrl *PersistentVolumeController) bind(volume *v1.PersistentVolume, claim
// It returns on first error, it's up to the caller to implement some retry
// mechanism.
func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume) error {
glog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
klog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
// Save the PV only when any modification is necessary.
volumeClone := volume.DeepCopy()
@@ -1050,15 +1050,15 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone)
if err != nil {
glog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err)
klog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err)
return err
}
_, err = ctrl.storeVolumeUpdate(newVol)
if err != nil {
glog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err)
klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err)
return err
}
glog.V(4).Infof("updating PersistentVolume[%s]: rolled back", newVol.Name)
klog.V(4).Infof("updating PersistentVolume[%s]: rolled back", newVol.Name)
// Update the status
_, err = ctrl.updateVolumePhase(newVol, v1.VolumeAvailable, "")
@@ -1070,10 +1070,10 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume
func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolume) error {
switch volume.Spec.PersistentVolumeReclaimPolicy {
case v1.PersistentVolumeReclaimRetain:
glog.V(4).Infof("reclaimVolume[%s]: policy is Retain, nothing to do", volume.Name)
klog.V(4).Infof("reclaimVolume[%s]: policy is Retain, nothing to do", volume.Name)
case v1.PersistentVolumeReclaimRecycle:
glog.V(4).Infof("reclaimVolume[%s]: policy is Recycle", volume.Name)
klog.V(4).Infof("reclaimVolume[%s]: policy is Recycle", volume.Name)
opName := fmt.Sprintf("recycle-%s[%s]", volume.Name, string(volume.UID))
ctrl.scheduleOperation(opName, func() error {
ctrl.recycleVolumeOperation(volume)
@@ -1081,7 +1081,7 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolum
})
case v1.PersistentVolumeReclaimDelete:
glog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name)
klog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name)
opName := fmt.Sprintf("delete-%s[%s]", volume.Name, string(volume.UID))
startTime := time.Now()
ctrl.scheduleOperation(opName, func() error {
@@ -1103,33 +1103,33 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolum
// recycleVolumeOperation recycles a volume. This method is
// running in standalone goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.PersistentVolume) {
glog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name)
klog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name)
// This method may have been waiting for a volume lock for some time.
// Previous recycleVolumeOperation might just have saved an updated version,
// so read current volume state now.
newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
if err != nil {
glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
return
}
needsReclaim, err := ctrl.isVolumeReleased(newVolume)
if err != nil {
glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err)
klog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err)
return
}
if !needsReclaim {
glog.V(3).Infof("volume %q no longer needs recycling, skipping", volume.Name)
klog.V(3).Infof("volume %q no longer needs recycling, skipping", volume.Name)
return
}
pods, used, err := ctrl.isVolumeUsed(newVolume)
if err != nil {
glog.V(3).Infof("can't recycle volume %q: %v", volume.Name, err)
klog.V(3).Infof("can't recycle volume %q: %v", volume.Name, err)
return
}
if used {
msg := fmt.Sprintf("Volume is used by pods: %s", strings.Join(pods, ","))
glog.V(3).Infof("can't recycle volume %q: %s", volume.Name, msg)
klog.V(3).Infof("can't recycle volume %q: %s", volume.Name, msg)
ctrl.eventRecorder.Event(volume, v1.EventTypeNormal, events.VolumeFailedRecycle, msg)
return
}
@@ -1144,7 +1144,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis
if err != nil {
// No recycler found. Emit an event and mark the volume Failed.
if _, err = ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedRecycle, "No recycler plugin found for the volume!"); err != nil {
glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
klog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
// Save failed, retry on the next deletion attempt
return
}
@@ -1160,7 +1160,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis
// Recycler failed
strerr := fmt.Sprintf("Recycle failed: %s", err)
if _, err = ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedRecycle, strerr); err != nil {
glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
klog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
// Save failed, retry on the next deletion attempt
return
}
@@ -1169,7 +1169,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis
return
}
glog.V(2).Infof("volume %q recycled", volume.Name)
klog.V(2).Infof("volume %q recycled", volume.Name)
// Send an event
ctrl.eventRecorder.Event(volume, v1.EventTypeNormal, events.VolumeRecycled, "Volume recycled")
// Make the volume available again
@@ -1178,7 +1178,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis
// recycle the volume again on next update. We _could_ maintain a cache
// of "recently recycled volumes" and avoid unnecessary recycling, this
// is left out as future optimization.
glog.V(3).Infof("recycleVolumeOperation [%s]: failed to make recycled volume 'Available' (%v), we will recycle the volume again", volume.Name, err)
klog.V(3).Infof("recycleVolumeOperation [%s]: failed to make recycled volume 'Available' (%v), we will recycle the volume again", volume.Name, err)
return
}
return
@@ -1187,30 +1187,30 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis
// deleteVolumeOperation deletes a volume. This method is running in standalone
// goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.PersistentVolume) (string, error) {
glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name)
klog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name)
// This method may have been waiting for a volume lock for some time.
// Previous deleteVolumeOperation might just have saved an updated version, so
// read current volume state now.
newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
if err != nil {
glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
return "", nil
}
needsReclaim, err := ctrl.isVolumeReleased(newVolume)
if err != nil {
glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err)
klog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err)
return "", nil
}
if !needsReclaim {
glog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name)
klog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name)
return "", nil
}
pluginName, deleted, err := ctrl.doDeleteVolume(volume)
if err != nil {
// Delete failed, update the volume and emit an event.
glog.V(3).Infof("deletion of volume %q failed: %v", volume.Name, err)
klog.V(3).Infof("deletion of volume %q failed: %v", volume.Name, err)
if vol.IsDeletedVolumeInUse(err) {
// The plugin needs more time, don't mark the volume as Failed
// and send Normal event only
@@ -1219,7 +1219,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist
// The plugin failed, mark the volume as Failed and send Warning
// event
if _, err := ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedDelete, err.Error()); err != nil {
glog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
klog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
// Save failed, retry on the next deletion attempt
return pluginName, err
}
@@ -1234,14 +1234,14 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist
return pluginName, nil
}
glog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name)
klog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name)
// Delete the volume
if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(volume.Name, nil); err != nil {
// Oops, could not delete the volume and therefore the controller will
// try to delete the volume again on next update. We _could_ maintain a
// cache of "recently deleted volumes" and avoid unnecessary deletion,
// this is left out as future optimization.
glog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err)
klog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err)
return pluginName, nil
}
return pluginName, nil
@@ -1254,13 +1254,13 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *v1.PersistentVo
// A volume needs reclaim if it has ClaimRef and appropriate claim does not
// exist.
if volume.Spec.ClaimRef == nil {
glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is nil", volume.Name)
klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is nil", volume.Name)
return false, nil
}
if volume.Spec.ClaimRef.UID == "" {
// This is a volume bound by user and the controller has not finished
// binding to the real claim yet.
glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is not bound", volume.Name)
klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is not bound", volume.Name)
return false, nil
}
@@ -1287,11 +1287,11 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *v1.PersistentVo
return true, nil
}
glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is still valid, volume is not released", volume.Name)
klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is still valid, volume is not released", volume.Name)
return false, nil
}
glog.V(2).Infof("isVolumeReleased[%s]: volume is released", volume.Name)
klog.V(2).Infof("isVolumeReleased[%s]: volume is released", volume.Name)
return true, nil
}
@@ -1326,7 +1326,7 @@ func (ctrl *PersistentVolumeController) isVolumeUsed(pv *v1.PersistentVolume) ([
// 'false' when the volume cannot be deleted because the deleter is external. No
// error should be reported in this case.
func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolume) (string, bool, error) {
glog.V(4).Infof("doDeleteVolume [%s]", volume.Name)
klog.V(4).Infof("doDeleteVolume [%s]", volume.Name)
var err error
plugin, err := ctrl.findDeletablePlugin(volume)
@@ -1335,13 +1335,13 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu
}
if plugin == nil {
// External deleter is requested, do nothing
glog.V(3).Infof("external deleter for volume %q requested, ignoring", volume.Name)
klog.V(3).Infof("external deleter for volume %q requested, ignoring", volume.Name)
return "", false, nil
}
// Plugin found
pluginName := plugin.GetPluginName()
glog.V(5).Infof("found a deleter plugin %q for volume %q", pluginName, volume.Name)
klog.V(5).Infof("found a deleter plugin %q for volume %q", pluginName, volume.Name)
spec := vol.NewSpecFromPersistentVolume(volume, false)
deleter, err := plugin.NewDeleter(spec)
if err != nil {
@@ -1357,7 +1357,7 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu
return pluginName, false, err
}
glog.V(2).Infof("volume %q deleted", volume.Name)
klog.V(2).Infof("volume %q deleted", volume.Name)
return pluginName, true, nil
}
@@ -1367,7 +1367,7 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolum
if !ctrl.enableDynamicProvisioning {
return nil
}
glog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim))
klog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim))
opName := fmt.Sprintf("provision-%s[%s]", claimToClaimKey(claim), string(claim.UID))
startTime := time.Now()
ctrl.scheduleOperation(opName, func() error {
@@ -1383,12 +1383,12 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolum
// standalone goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) (string, error) {
claimClass := v1helper.GetPersistentVolumeClaimClass(claim)
glog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass)
klog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass)
plugin, storageClass, err := ctrl.findProvisionablePlugin(claim)
if err != nil {
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, err.Error())
glog.V(2).Infof("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err)
klog.V(2).Infof("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err)
// The controller will retry provisioning the volume in every
// syncVolume() call.
return "", err
@@ -1403,7 +1403,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
newClaim, err := ctrl.setClaimProvisioner(claim, storageClass)
if err != nil {
// Save failed, the controller will retry in the next sync
glog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err)
klog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err)
return pluginName, err
}
claim = newClaim
@@ -1414,7 +1414,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
// and wait for the external provisioner
msg := fmt.Sprintf("waiting for a volume to be created, either by external provisioner %q or manually created by system administrator", storageClass.Provisioner)
ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ExternalProvisioning, msg)
glog.V(3).Infof("provisioning claim %q: %s", claimToClaimKey(claim), msg)
klog.V(3).Infof("provisioning claim %q: %s", claimToClaimKey(claim), msg)
return pluginName, nil
}
@@ -1428,7 +1428,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err == nil && volume != nil {
// Volume has been already provisioned, nothing to do.
glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim))
klog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim))
return pluginName, err
}
@@ -1436,7 +1436,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
// provisioned)
claimRef, err := ref.GetReference(scheme.Scheme, claim)
if err != nil {
glog.V(3).Infof("unexpected error getting claim reference: %v", err)
klog.V(3).Infof("unexpected error getting claim reference: %v", err)
return pluginName, err
}
@@ -1460,7 +1460,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
// of PV would be rejected by validation anyway
if !plugin.SupportsMountOption() && len(options.MountOptions) > 0 {
strerr := fmt.Sprintf("Mount options are not supported by the provisioner but StorageClass %q has mount options %v", storageClass.Name, options.MountOptions)
glog.V(2).Infof("Mount options are not supported by the provisioner but claim %q's StorageClass %q has mount options %v", claimToClaimKey(claim), storageClass.Name, options.MountOptions)
klog.V(2).Infof("Mount options are not supported by the provisioner but claim %q's StorageClass %q has mount options %v", claimToClaimKey(claim), storageClass.Name, options.MountOptions)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
return pluginName, fmt.Errorf("provisioner %q doesn't support mount options", plugin.GetPluginName())
}
@@ -1469,7 +1469,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
provisioner, err := plugin.NewProvisioner(options)
if err != nil {
strerr := fmt.Sprintf("Failed to create provisioner: %v", err)
glog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
klog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
return pluginName, err
}
@@ -1479,7 +1479,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
selectedNode, err = ctrl.NodeLister.Get(nodeName)
if err != nil {
strerr := fmt.Sprintf("Failed to get target node: %v", err)
glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
klog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
return pluginName, err
}
@@ -1496,12 +1496,12 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
ctrl.rescheduleProvisioning(claim)
strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err)
glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
klog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
return pluginName, err
}
glog.V(3).Infof("volume %q for claim %q created", volume.Name, claimToClaimKey(claim))
klog.V(3).Infof("volume %q for claim %q created", volume.Name, claimToClaimKey(claim))
// Create Kubernetes PV object for the volume.
if volume.Name == "" {
@@ -1518,26 +1518,26 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
// Try to create the PV object several times
for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
glog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name)
klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name)
var newVol *v1.PersistentVolume
if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) {
// Save succeeded.
if err != nil {
glog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim))
klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim))
err = nil
} else {
glog.V(3).Infof("volume %q for claim %q saved", volume.Name, claimToClaimKey(claim))
klog.V(3).Infof("volume %q for claim %q saved", volume.Name, claimToClaimKey(claim))
_, updateErr := ctrl.storeVolumeUpdate(newVol)
if updateErr != nil {
// We will get a "volume added" event soon; this is not a big error
glog.V(4).Infof("provisionClaimOperation [%s]: cannot update internal cache: %v", volume.Name, updateErr)
klog.V(4).Infof("provisionClaimOperation [%s]: cannot update internal cache: %v", volume.Name, updateErr)
}
}
break
}
// Save failed, try again after a while.
glog.V(3).Infof("failed to save volume %q for claim %q: %v", volume.Name, claimToClaimKey(claim), err)
klog.V(3).Infof("failed to save volume %q for claim %q: %v", volume.Name, claimToClaimKey(claim), err)
time.Sleep(ctrl.createProvisionedPVInterval)
}
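
The save loop above is a create-with-retry pattern: an AlreadyExists response counts as success, anything else sleeps and retries a bounded number of times. A generic sketch of that shape; createPV, retryCount, and retryInterval are stand-ins for the controller's fields, not real API names:

import (
	"time"

	apierrs "k8s.io/apimachinery/pkg/api/errors"
)

func createWithRetry(createPV func() error, retryCount int, retryInterval time.Duration) error {
	var err error
	for i := 0; i < retryCount; i++ {
		err = createPV()
		if err == nil || apierrs.IsAlreadyExists(err) {
			return nil // saved, or another writer beat us to it
		}
		// Save failed, try again after a while.
		time.Sleep(retryInterval)
	}
	return err
}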
@@ -1547,7 +1547,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
// Emit some event here and try to delete the storage asset several
// times.
strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v. Deleting the volume.", claimToClaimKey(claim), err)
glog.V(3).Info(strerr)
klog.V(3).Info(strerr)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
var deleteErr error
@@ -1556,18 +1556,18 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
_, deleted, deleteErr = ctrl.doDeleteVolume(volume)
if deleteErr == nil && deleted {
// Delete succeeded
glog.V(4).Infof("provisionClaimOperation [%s]: cleaning volume %s succeeded", claimToClaimKey(claim), volume.Name)
klog.V(4).Infof("provisionClaimOperation [%s]: cleaning volume %s succeeded", claimToClaimKey(claim), volume.Name)
break
}
if !deleted {
// This is unreachable code: the volume was provisioned by an
// internal plugin, so there MUST be an internal
// plugin that deletes it.
glog.Errorf("Error finding internal deleter for volume plugin %q", plugin.GetPluginName())
klog.Errorf("Error finding internal deleter for volume plugin %q", plugin.GetPluginName())
break
}
// Delete failed, try again after a while.
glog.V(3).Infof("failed to delete volume %q: %v", volume.Name, deleteErr)
klog.V(3).Infof("failed to delete volume %q: %v", volume.Name, deleteErr)
time.Sleep(ctrl.createProvisionedPVInterval)
}
@@ -1575,11 +1575,11 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
// Delete failed several times. There is an orphaned volume and there
// is nothing we can do about it.
strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. Please delete manually.", claimToClaimKey(claim), deleteErr)
glog.V(2).Info(strerr)
klog.V(2).Info(strerr)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningCleanupFailed, strerr)
}
} else {
glog.V(2).Infof("volume %q provisioned for claim %q", volume.Name, claimToClaimKey(claim))
klog.V(2).Infof("volume %q provisioned for claim %q", volume.Name, claimToClaimKey(claim))
msg := fmt.Sprintf("Successfully provisioned volume %s using %s", volume.Name, plugin.GetPluginName())
ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ProvisioningSucceeded, msg)
}
@@ -1600,12 +1600,12 @@ func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.Persist
delete(newClaim.Annotations, annSelectedNode)
// Try to update the PVC object
if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(newClaim); err != nil {
glog.V(4).Infof("Failed to delete annotation 'annSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err)
klog.V(4).Infof("Failed to delete annotation 'annSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err)
return
}
if _, err := ctrl.storeClaimUpdate(newClaim); err != nil {
// We will get a "claim updated" event soon; this is not a big error
glog.V(4).Infof("Updating PersistentVolumeClaim %q: cannot update internal cache: %v", claimToClaimKey(newClaim), err)
klog.V(4).Infof("Updating PersistentVolumeClaim %q: cannot update internal cache: %v", claimToClaimKey(newClaim), err)
}
}
@@ -1618,7 +1618,7 @@ func (ctrl *PersistentVolumeController) getProvisionedVolumeNameForClaim(claim *
// scheduleOperation starts the given asynchronous operation on the given volume. It
// makes sure the operation is not already running.
func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, operation func() error) {
glog.V(4).Infof("scheduleOperation[%s]", operationName)
klog.V(4).Infof("scheduleOperation[%s]", operationName)
// Poke test code that an operation is just about to get started.
if ctrl.preOperationHook != nil {
@@ -1629,11 +1629,11 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string,
if err != nil {
switch {
case goroutinemap.IsAlreadyExists(err):
glog.V(4).Infof("operation %q is already running, skipping", operationName)
klog.V(4).Infof("operation %q is already running, skipping", operationName)
case exponentialbackoff.IsExponentialBackoff(err):
glog.V(4).Infof("operation %q postponed due to exponential backoff", operationName)
klog.V(4).Infof("operation %q postponed due to exponential backoff", operationName)
default:
glog.Errorf("error scheduling operation %q: %v", operationName, err)
klog.Errorf("error scheduling operation %q: %v", operationName, err)
}
}
}
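
scheduleOperation delegates deduplication to goroutinemap: Run refuses to start an operation whose name is already running or still in backoff, which is what the switch above distinguishes. A rough usage sketch, assuming the constructor and Run signatures from k8s.io/kubernetes/pkg/util/goroutinemap:

// true enables exponential backoff for operations that keep failing.
runningOperations := goroutinemap.NewGoRoutineMap(true)

err := runningOperations.Run("delete-pv-example", func() error {
	// long-running work for one volume goes here
	return nil
})
if goroutinemap.IsAlreadyExists(err) {
	// an operation with this name is already in flight; skip this round
}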

View File

@@ -44,7 +44,7 @@ import (
"k8s.io/kubernetes/pkg/util/goroutinemap"
vol "k8s.io/kubernetes/pkg/volume"
"github.com/golang/glog"
"k8s.io/klog"
)
// This file contains the controller base functionality, i.e. framework to
@@ -73,7 +73,7 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
eventRecorder := p.EventRecorder
if eventRecorder == nil {
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartLogging(klog.Infof)
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: p.KubeClient.CoreV1().Events("")})
eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})
}
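
This hunk also shows why the migration is mostly mechanical: klog.Infof has the same printf-style signature as glog.Infof, so higher-order call sites such as broadcaster.StartLogging compile unchanged once the import is swapped. A minimal illustration; logVia is a hypothetical helper, not part of either library:

// Both glog.Infof and klog.Infof satisfy this signature, so either can be
// passed wherever a printf-style logger is expected.
func logVia(logf func(format string, args ...interface{})) {
	logf("persistentvolume-controller: %s", "started")
}

// usage: logVia(klog.Infof)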
@@ -134,27 +134,27 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) {
volumeList, err := volumeLister.List(labels.Everything())
if err != nil {
glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
return
}
for _, volume := range volumeList {
volumeClone := volume.DeepCopy()
if _, err = ctrl.storeVolumeUpdate(volumeClone); err != nil {
glog.Errorf("error updating volume cache: %v", err)
klog.Errorf("error updating volume cache: %v", err)
}
}
claimList, err := claimLister.List(labels.Everything())
if err != nil {
glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
return
}
for _, claim := range claimList {
if _, err = ctrl.storeClaimUpdate(claim.DeepCopy()); err != nil {
glog.Errorf("error updating claim cache: %v", err)
klog.Errorf("error updating claim cache: %v", err)
}
}
glog.V(4).Infof("controller initialized")
klog.V(4).Infof("controller initialized")
}
// enqueueWork adds a volume or claim to the given work queue.
@@ -165,10 +165,10 @@ func (ctrl *PersistentVolumeController) enqueueWork(queue workqueue.Interface, o
}
objName, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("failed to get key from object: %v", err)
klog.Errorf("failed to get key from object: %v", err)
return
}
glog.V(5).Infof("enqueued %q for sync", objName)
klog.V(5).Infof("enqueued %q for sync", objName)
queue.Add(objName)
}
@@ -187,7 +187,7 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume
// is an old version.
new, err := ctrl.storeVolumeUpdate(volume)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
}
if !new {
return
@@ -198,9 +198,9 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume
if errors.IsConflict(err) {
// Version conflict errors happen quite often and the controller
// recovers from them easily.
glog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err)
klog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err)
} else {
glog.Errorf("could not sync volume %q: %+v", volume.Name, err)
klog.Errorf("could not sync volume %q: %+v", volume.Name, err)
}
}
}
@@ -208,7 +208,7 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume
// deleteVolume runs in worker thread and handles "volume deleted" event.
func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume) {
_ = ctrl.volumes.store.Delete(volume)
glog.V(4).Infof("volume %q deleted", volume.Name)
klog.V(4).Infof("volume %q deleted", volume.Name)
if volume.Spec.ClaimRef == nil {
return
@@ -217,7 +217,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume
// claim here in response to volume deletion prevents the claim from
// waiting until the next sync period for its Lost status.
claimKey := claimrefToClaimKey(volume.Spec.ClaimRef)
glog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey)
klog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey)
ctrl.claimQueue.Add(claimKey)
}
@@ -228,7 +228,7 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl
// an old version.
new, err := ctrl.storeClaimUpdate(claim)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
}
if !new {
return
@@ -238,9 +238,9 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl
if errors.IsConflict(err) {
// Version conflict errors happen quite often and the controller
// recovers from them easily.
glog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err)
klog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err)
} else {
glog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err)
klog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err)
}
}
}
@@ -248,17 +248,17 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl
// deleteClaim runs in worker thread and handles "claim deleted" event.
func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeClaim) {
_ = ctrl.claims.Delete(claim)
glog.V(4).Infof("claim %q deleted", claimToClaimKey(claim))
klog.V(4).Infof("claim %q deleted", claimToClaimKey(claim))
volumeName := claim.Spec.VolumeName
if volumeName == "" {
glog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim))
klog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim))
return
}
// sync the volume when its claim is deleted. Explicitly syncing the
// volume here in response to claim deletion prevents the volume from
// waiting until the next sync period for its Release.
glog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName)
klog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName)
ctrl.volumeQueue.Add(volumeName)
}
@@ -268,8 +268,8 @@ func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
defer ctrl.claimQueue.ShutDown()
defer ctrl.volumeQueue.ShutDown()
glog.Infof("Starting persistent volume controller")
defer glog.Infof("Shutting down persistent volume controller")
klog.Infof("Starting persistent volume controller")
defer klog.Infof("Shutting down persistent volume controller")
if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
return
@@ -296,11 +296,11 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
}
defer ctrl.volumeQueue.Done(keyObj)
key := keyObj.(string)
glog.V(5).Infof("volumeWorker[%s]", key)
klog.V(5).Infof("volumeWorker[%s]", key)
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err)
klog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err)
return false
}
volume, err := ctrl.volumeLister.Get(name)
@@ -311,7 +311,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
return false
}
if !errors.IsNotFound(err) {
glog.V(2).Infof("error getting volume %q from informer: %v", key, err)
klog.V(2).Infof("error getting volume %q from informer: %v", key, err)
return false
}
@@ -319,18 +319,18 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
// "delete"
volumeObj, found, err := ctrl.volumes.store.GetByKey(key)
if err != nil {
glog.V(2).Infof("error getting volume %q from cache: %v", key, err)
klog.V(2).Infof("error getting volume %q from cache: %v", key, err)
return false
}
if !found {
// The controller has already processed the delete event and
// deleted the volume from its cache
glog.V(2).Infof("deletion of volume %q was already processed", key)
klog.V(2).Infof("deletion of volume %q was already processed", key)
return false
}
volume, ok := volumeObj.(*v1.PersistentVolume)
if !ok {
glog.Errorf("expected volume, got %+v", volumeObj)
klog.Errorf("expected volume, got %+v", volumeObj)
return false
}
ctrl.deleteVolume(volume)
@@ -338,7 +338,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
}
for {
if quit := workFunc(); quit {
glog.Infof("volume worker queue shutting down")
klog.Infof("volume worker queue shutting down")
return
}
}
@@ -354,11 +354,11 @@ func (ctrl *PersistentVolumeController) claimWorker() {
}
defer ctrl.claimQueue.Done(keyObj)
key := keyObj.(string)
glog.V(5).Infof("claimWorker[%s]", key)
klog.V(5).Infof("claimWorker[%s]", key)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err)
klog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err)
return false
}
claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name)
@@ -369,25 +369,25 @@ func (ctrl *PersistentVolumeController) claimWorker() {
return false
}
if !errors.IsNotFound(err) {
glog.V(2).Infof("error getting claim %q from informer: %v", key, err)
klog.V(2).Infof("error getting claim %q from informer: %v", key, err)
return false
}
// The claim is not in the informer cache, so the event must have been "delete"
claimObj, found, err := ctrl.claims.GetByKey(key)
if err != nil {
glog.V(2).Infof("error getting claim %q from cache: %v", key, err)
klog.V(2).Infof("error getting claim %q from cache: %v", key, err)
return false
}
if !found {
// The controller has already processed the delete event and
// deleted the claim from its cache
glog.V(2).Infof("deletion of claim %q was already processed", key)
klog.V(2).Infof("deletion of claim %q was already processed", key)
return false
}
claim, ok := claimObj.(*v1.PersistentVolumeClaim)
if !ok {
glog.Errorf("expected claim, got %+v", claimObj)
klog.Errorf("expected claim, got %+v", claimObj)
return false
}
ctrl.deleteClaim(claim)
@@ -395,7 +395,7 @@ func (ctrl *PersistentVolumeController) claimWorker() {
}
for {
if quit := workFunc(); quit {
glog.Infof("claim worker queue shutting down")
klog.Infof("claim worker queue shutting down")
return
}
}
@@ -405,11 +405,11 @@ func (ctrl *PersistentVolumeController) claimWorker() {
// all consumers of PV/PVC shared informer to have a short resync period,
// therefore we do our own.
func (ctrl *PersistentVolumeController) resync() {
glog.V(4).Infof("resyncing PV controller")
klog.V(4).Infof("resyncing PV controller")
pvcs, err := ctrl.claimLister.List(labels.NewSelector())
if err != nil {
glog.Warningf("cannot list claims: %s", err)
klog.Warningf("cannot list claims: %s", err)
return
}
for _, pvc := range pvcs {
@@ -418,7 +418,7 @@ func (ctrl *PersistentVolumeController) resync() {
pvs, err := ctrl.volumeLister.List(labels.NewSelector())
if err != nil {
glog.Warningf("cannot list persistent volumes: %s", err)
klog.Warningf("cannot list persistent volumes: %s", err)
return
}
for _, pv := range pvs {
@@ -504,7 +504,7 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo
if !found {
// This is a new object
glog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion())
klog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion())
if err = store.Add(obj); err != nil {
return false, fmt.Errorf("Error adding %s %q to controller cache: %v", className, objName, err)
}
@@ -528,11 +528,11 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo
// Throw away only an older version; let the same version pass - we do want to
// get periodic sync events.
if oldObjResourceVersion > objResourceVersion {
glog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion())
klog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion())
return false, nil
}
glog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion())
klog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion())
if err = store.Update(obj); err != nil {
return false, fmt.Errorf("Error updating %s %q in controller cache: %v", className, objName, err)
}
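
The version comparison above works because this controller treats resource versions as unsigned integers, even though the API defines them as opaque strings. A minimal sketch of the parse step, assuming metav1.Object from k8s.io/apimachinery/pkg/apis/meta/v1; parseRV is a hypothetical helper:

import (
	"strconv"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// parseRV turns an object's resource version into an orderable number.
func parseRV(accessor metav1.Object) (uint64, error) {
	return strconv.ParseUint(accessor.GetResourceVersion(), 10, 64)
}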

View File

@@ -20,7 +20,7 @@ import (
"testing"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
@@ -96,7 +96,7 @@ func TestControllerSync(t *testing.T) {
}
for _, test := range tests {
glog.V(4).Infof("starting test %q", test.name)
klog.V(4).Infof("starting test %q", test.name)
// Initialize the controller
client := &fake.Clientset{}
@@ -140,7 +140,7 @@ func TestControllerSync(t *testing.T) {
time.Sleep(10 * time.Millisecond)
}
glog.V(4).Infof("controller synced, starting test")
klog.V(4).Infof("controller synced, starting test")
// Call the tested function
err = test.test(ctrl, reactor, test)
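
The V(4) lines above only produce output if klog's flags are registered; unlike glog, klog does not register them in its own init(), so test packages typically call klog.InitFlags explicitly. A minimal sketch; passing nil registers the flags on flag.CommandLine, and the verbosity chosen here is arbitrary:

import (
	"flag"

	"k8s.io/klog"
)

func init() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc.
	flag.Set("logtostderr", "true")
	flag.Set("v", "4")
}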

View File

@@ -21,7 +21,7 @@ import (
"strconv"
"sync"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
@@ -152,7 +152,7 @@ func (c *assumeCache) add(obj interface{}) {
name, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
glog.Errorf("add failed: %v", &errObjectName{err})
klog.Errorf("add failed: %v", &errObjectName{err})
return
}
@@ -162,27 +162,27 @@ func (c *assumeCache) add(obj interface{}) {
if objInfo, _ := c.getObjInfo(name); objInfo != nil {
newVersion, err := c.getObjVersion(name, obj)
if err != nil {
glog.Errorf("add: couldn't get object version: %v", err)
klog.Errorf("add: couldn't get object version: %v", err)
return
}
storedVersion, err := c.getObjVersion(name, objInfo.latestObj)
if err != nil {
glog.Errorf("add: couldn't get stored object version: %v", err)
klog.Errorf("add: couldn't get stored object version: %v", err)
return
}
// Only update the object if its version is newer.
// This is so we don't override assumed objects due to informer resync.
if newVersion <= storedVersion {
glog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion)
klog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion)
return
}
}
objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj}
c.store.Update(objInfo)
glog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj)
klog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj)
}
func (c *assumeCache) update(oldObj interface{}, newObj interface{}) {
@@ -196,7 +196,7 @@ func (c *assumeCache) delete(obj interface{}) {
name, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
glog.Errorf("delete failed: %v", &errObjectName{err})
klog.Errorf("delete failed: %v", &errObjectName{err})
return
}
@@ -206,7 +206,7 @@ func (c *assumeCache) delete(obj interface{}) {
objInfo := &objInfo{name: name}
err = c.store.Delete(objInfo)
if err != nil {
glog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err)
klog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err)
}
}
@@ -257,14 +257,14 @@ func (c *assumeCache) List(indexObj interface{}) []interface{} {
allObjs := []interface{}{}
objs, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj})
if err != nil {
glog.Errorf("list index error: %v", err)
klog.Errorf("list index error: %v", err)
return nil
}
for _, obj := range objs {
objInfo, ok := obj.(*objInfo)
if !ok {
glog.Errorf("list error: %v", &errWrongType{"objInfo", obj})
klog.Errorf("list error: %v", &errWrongType{"objInfo", obj})
continue
}
allObjs = append(allObjs, objInfo.latestObj)
@@ -302,7 +302,7 @@ func (c *assumeCache) Assume(obj interface{}) error {
// Only update the cached object
objInfo.latestObj = obj
glog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion)
klog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion)
return nil
}
@@ -313,10 +313,10 @@ func (c *assumeCache) Restore(objName string) {
objInfo, err := c.getObjInfo(objName)
if err != nil {
// This could be expected if the object got deleted
glog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err)
klog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err)
} else {
objInfo.latestObj = objInfo.apiObj
glog.V(4).Infof("Restored %v %q", c.description, objName)
klog.V(4).Infof("Restored %v %q", c.description, objName)
}
}
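
Assume and Restore implement an optimistic cache update: the caller assumes the post-write state before issuing the API call, then restores the informer's copy if the write fails. A rough usage sketch under that reading; pvCache and client are hypothetical values:

newPV := pv.DeepCopy()
// ...mutate newPV into the state the API write should produce...
if err := pvCache.Assume(newPV); err != nil {
	return err
}
if _, err := client.CoreV1().PersistentVolumes().Update(newPV); err != nil {
	pvCache.Restore(newPV.Name) // roll the cache back to the informer's copy
	return err
}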
@@ -366,7 +366,7 @@ func (c *pvAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume
for _, obj := range objs {
pv, ok := obj.(*v1.PersistentVolume)
if !ok {
glog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj})
klog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj})
}
pvs = append(pvs, pv)
}

View File

@@ -21,7 +21,7 @@ import (
"sort"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -144,7 +144,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
podName := getPodName(pod)
// Warning: the log line below needs high verbosity as it can be printed several times (#60933).
glog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
// Initialize to true for pods that don't have volumes
unboundVolumesSatisfied = true
@@ -204,7 +204,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error) {
podName := getPodName(assumedPod)
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)
klog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)
start := time.Now()
defer func() {
VolumeSchedulingStageLatency.WithLabelValues("assume").Observe(time.Since(start).Seconds())
@@ -214,7 +214,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
}()
if allBound := b.arePodVolumesBound(assumedPod); allBound {
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
klog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
return true, nil
}
@@ -227,7 +227,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
newBindings := []*bindingInfo{}
for _, binding := range claimsToBind {
newPV, dirty, err := b.ctrl.getBindVolumeToClaim(binding.pv, binding.pvc)
glog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
klog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
podName,
binding.pv.Name,
binding.pvc.Name,
@@ -280,7 +280,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
// by the PV controller.
func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) (err error) {
podName := getPodName(assumedPod)
glog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName)
klog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName)
start := time.Now()
defer func() {
@@ -346,7 +346,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
// Do the actual prebinding. Let the PV controller take care of the rest
// There is no API rollback if the actual binding fails
for _, binding = range bindings {
glog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name)
klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name)
// TODO: does it hurt if we make an api call and nothing needs to be updated?
if _, err := b.ctrl.updateBindVolumeToClaim(binding.pv, binding.pvc, false); err != nil {
return err
@@ -357,7 +357,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
// Update claim objects to trigger volume provisioning. Let the PV controller take care of the rest;
// the PV controller is expected to signal back by removing related annotations if actual provisioning fails
for _, claim = range claimsToProvision {
glog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
return err
}
@@ -426,7 +426,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claim
}
// All pvs and pvcs that we operated on are bound
glog.V(4).Infof("All PVCs for pod %q are bound", podName)
klog.V(4).Infof("All PVCs for pod %q are bound", podName)
return true, nil
}
@@ -455,15 +455,15 @@ func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.Persiste
pvName := pvc.Spec.VolumeName
if pvName != "" {
if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) {
glog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvName)
klog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvName)
return true, pvc, nil
} else {
glog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvName)
klog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvName)
return false, pvc, nil
}
}
glog.V(5).Infof("PVC %q is not bound", pvcKey)
klog.V(5).Infof("PVC %q is not bound", pvcKey)
return false, pvc, nil
}
@@ -523,13 +523,13 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node
err = volumeutil.CheckNodeAffinity(pv, node.Labels)
if err != nil {
glog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err)
klog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err)
return false, nil
}
glog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName)
klog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName)
}
glog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name)
klog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name)
return true, nil
}
@@ -561,7 +561,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
return false, nil, err
}
if bindingInfo.pv == nil {
glog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name)
klog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name)
unboundClaims = append(unboundClaims, bindingInfo.pvc)
foundMatches = false
continue
@@ -570,7 +570,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
// matching PV needs to be excluded so we don't select it again
chosenPVs[bindingInfo.pv.Name] = bindingInfo.pv
matchedClaims = append(matchedClaims, bindingInfo)
glog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, pvcName, node.Name, podName)
klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, pvcName, node.Name, podName)
}
// Mark cache with all the matches for each PVC for this node
@@ -579,7 +579,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
}
if foundMatches {
glog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name)
klog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name)
}
return
@@ -605,13 +605,13 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
}
provisioner := class.Provisioner
if provisioner == "" || provisioner == notSupportedProvisioner {
glog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName)
klog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName)
return false, nil
}
// Check if the node can satisfy the topology requirement in the class
if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
glog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName)
klog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName)
return false, nil
}
@@ -621,7 +621,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
provisionedClaims = append(provisionedClaims, claim)
}
glog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name)
klog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name)
// Mark cache with all the PVCs that need provisioning for this node
b.podBindingCache.UpdateProvisionedPVCs(pod, node.Name, provisionedClaims)
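
The three exported methods in this file form the scheduler's volume binding pipeline: FindPodVolumes during node filtering, AssumePodVolumes once a node is chosen, and BindPodVolumes when the pod is actually bound. A condensed sketch of the calling order, with error handling elided; binder stands for the volumeBinder shown above:

// Filter phase: can this node satisfy the pod's bound and unbound volumes?
unboundOK, boundOK, err := binder.FindPodVolumes(pod, node)
if err != nil || !unboundOK || !boundOK {
	// reject this node for this pod
}

// After node selection: update the assume caches optimistically.
allBound, err := binder.AssumePodVolumes(pod, node.Name)

// Finally, issue the API updates and wait for the PV controller.
if err == nil && !allBound {
	err = binder.BindPodVolumes(pod)
}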

View File

@@ -22,7 +22,7 @@ import (
"testing"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
@@ -742,7 +742,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
}
for name, scenario := range scenarios {
glog.V(5).Infof("Running test case %q", name)
klog.V(5).Infof("Running test case %q", name)
// Setup
testEnv := newTestBinder(t)
@@ -964,7 +964,7 @@ func TestAssumePodVolumes(t *testing.T) {
}
for name, scenario := range scenarios {
glog.V(5).Infof("Running test case %q", name)
klog.V(5).Infof("Running test case %q", name)
// Setup
testEnv := newTestBinder(t)
@@ -1094,7 +1094,7 @@ func TestBindAPIUpdate(t *testing.T) {
},
}
for name, scenario := range scenarios {
glog.V(4).Infof("Running test case %q", name)
klog.V(4).Infof("Running test case %q", name)
// Setup
testEnv := newTestBinder(t)
@@ -1253,7 +1253,7 @@ func TestCheckBindings(t *testing.T) {
}
for name, scenario := range scenarios {
glog.V(4).Infof("Running test case %q", name)
klog.V(4).Infof("Running test case %q", name)
// Setup
pod := makePod(nil)
@@ -1386,7 +1386,7 @@ func TestBindPodVolumes(t *testing.T) {
}
for name, scenario := range scenarios {
glog.V(4).Infof("Running test case %q", name)
klog.V(4).Infof("Running test case %q", name)
// Setup
pod := makePod(nil)
@@ -1407,7 +1407,7 @@ func TestBindPodVolumes(t *testing.T) {
if scenario.delayFunc != nil {
go func() {
time.Sleep(5 * time.Second)
glog.V(5).Infof("Running delay function")
klog.V(5).Infof("Running delay function")
scenario.delayFunc(t, testEnv, pod, scenario.binding.pv, scenario.binding.pvc)
}()
}

View File

@@ -20,7 +20,6 @@ import (
"fmt"
"net"
"github.com/golang/glog"
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@@ -28,6 +27,7 @@ import (
"k8s.io/client-go/tools/record"
cloudprovider "k8s.io/cloud-provider"
csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
vol "k8s.io/kubernetes/pkg/volume"
)
@@ -112,7 +112,7 @@ func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ s
func (ctrl *PersistentVolumeController) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) {
glog.Errorf("DeleteServiceAccountToken unsupported in PersistentVolumeController")
klog.Errorf("DeleteServiceAccountToken unsupported in PersistentVolumeController")
}
}