Move APIs and core code to use metav1.ObjectMeta

Clayton Coleman
2017-01-11 15:28:46 -05:00
parent 54d8ed001d
commit 36acd90aba
104 changed files with 486 additions and 865 deletions

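The change repeated across these files is mechanical: typed objects and helpers stop using v1.ObjectMeta from k8s.io/kubernetes/pkg/api/v1 and take metav1.ObjectMeta from k8s.io/apimachinery/pkg/apis/meta/v1 instead. A minimal sketch of the post-migration shape, assuming the pre-1.6 import paths shown in the hunks below (the examplePod helper is illustrative, not part of this commit):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// examplePod builds a Pod whose metadata field is typed as
// metav1.ObjectMeta, the convention adopted throughout this commit.
func examplePod(namespace, name string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    map[string]string{"name": name},
		},
		Spec: v1.PodSpec{
			DNSPolicy: v1.DNSDefault,
		},
	}
}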
View File

@@ -24,6 +24,7 @@ import (
"github.com/golang/glog"
"github.com/robfig/cron"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"

View File

@@ -127,7 +127,7 @@ func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
DNSPolicy: v1.DNSDefault,
},
}
- v1.GenerateName(v1.SimpleNameGenerator, &pod.ObjectMeta)
+ metav1.GenerateName(v1.SimpleNameGenerator, &pod.ObjectMeta)
return pod
}

View File

@@ -212,7 +212,7 @@ func referencesDiffs(old []metav1.OwnerReference, new []metav1.OwnerReference) (
return added, removed
}
- func shouldOrphanDependents(e *event, accessor meta.Object) bool {
+ func shouldOrphanDependents(e *event, accessor metav1.Object) bool {
// The delta_fifo may combine the creation and update of the object into one
// event, so we need to check AddEvent as well.
if e.oldObj == nil {

View File

@@ -18,7 +18,6 @@ package metaonly
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
)
// MetadataOnlyObject allows decoding only the apiVersion, kind, and metadata fields of
@@ -27,7 +26,7 @@ import (
type MetadataOnlyObject struct {
metav1.TypeMeta `json:",inline"`
// +optional
- v1.ObjectMeta `json:"metadata,omitempty"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
}
// MetadataOnlyObjectList allows decoding from JSON data only the typemeta and metadata of

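Because the struct now embeds metav1.ObjectMeta under the "metadata" JSON key, decoding arbitrary object JSON into it keeps only the type and object metadata. A rough illustration using plain encoding/json, assumed to live in the metaonly package next to the type above; the real code paths typically go through the API machinery codecs rather than plain encoding/json, and decodeMetadataOnly is an illustrative name:

import (
	"encoding/json"
)

// decodeMetadataOnly keeps only apiVersion, kind, and metadata from the
// supplied JSON; any other fields are dropped by the struct definition.
func decodeMetadataOnly(data []byte) (*MetadataOnlyObject, error) {
	obj := &MetadataOnlyObject{}
	if err := json.Unmarshal(data, obj); err != nil {
		return nil, err
	}
	return obj, nil
}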
View File

@@ -21,12 +21,12 @@ import (
"sync"
"github.com/golang/groupcache/lru"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
hashutil "k8s.io/kubernetes/pkg/util/hash"
)
type objectWithMeta interface {
- meta.Object
+ metav1.Object
}
// keyFunc returns the key of an object, which is used to look up its matching object in the cache.

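The objectWithMeta interface now composes metav1.Object, which exposes the same metadata accessors (GetName, GetNamespace, GetUID, GetLabels, ...) that meta.Object did, so callers of the cache are unchanged. A small sketch written purely against that interface (uidKey is illustrative; the package's real keyFunc hashes more fields via hashutil):

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// uidKey builds a lookup key from accessors guaranteed by metav1.Object;
// any type that embeds metav1.ObjectMeta can be passed here.
func uidKey(obj metav1.Object) string {
	return fmt.Sprintf("%s/%s/%s", obj.GetNamespace(), obj.GetName(), obj.GetUID())
}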
View File

@@ -1,373 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"encoding/json"
"fmt"
"io"
"testing"
"time"
"k8s.io/kubernetes/pkg/api/resource"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
_ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
heapster "k8s.io/heapster/metrics/api/v1/types"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"github.com/stretchr/testify/assert"
)
var fixedTimestamp = time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC)
func (w fakeResponseWrapper) DoRaw() ([]byte, error) {
return w.raw, nil
}
func (w fakeResponseWrapper) Stream() (io.ReadCloser, error) {
return nil, nil
}
func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
return fakeResponseWrapper{raw: raw}
}
type fakeResponseWrapper struct {
raw []byte
}
// timestamp is used for establishing order on metricPoints
type metricPoint struct {
level uint64
timestamp int
}
type testCase struct {
desiredResourceValues PodResourceInfo
desiredMetricValues PodMetricsInfo
desiredError error
replicas int
targetTimestamp int
reportedMetricsPoints [][]metricPoint
reportedPodMetrics [][]int64
namespace string
selector labels.Selector
resourceName v1.ResourceName
metricName string
}
func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
namespace := "test-namespace"
tc.namespace = namespace
podNamePrefix := "test-pod"
podLabels := map[string]string{"name": podNamePrefix}
tc.selector = labels.SelectorFromSet(podLabels)
// it's a resource test if we have a resource name
isResource := len(tc.resourceName) > 0
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &v1.PodList{}
for i := 0; i < tc.replicas; i++ {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := buildPod(namespace, podName, podLabels, v1.PodRunning, "1024")
obj.Items = append(obj.Items, pod)
}
return true, obj, nil
})
if isResource {
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
metrics := metricsapi.PodMetricsList{}
for i, containers := range tc.reportedPodMetrics {
metric := metricsapi.PodMetrics{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
Namespace: namespace,
},
Timestamp: metav1.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
Containers: []metricsapi.ContainerMetrics{},
}
for j, cpu := range containers {
cm := metricsapi.ContainerMetrics{
Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
cpu,
resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(
int64(1024*1024),
resource.BinarySI),
},
}
metric.Containers = append(metric.Containers, cm)
}
metrics.Items = append(metrics.Items, metric)
}
heapsterRawMemResponse, _ := json.Marshal(&metrics)
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
})
} else {
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
metrics := heapster.MetricResultList{}
var latestTimestamp time.Time
for _, reportedMetricPoints := range tc.reportedMetricsPoints {
var heapsterMetricPoints []heapster.MetricPoint
for _, reportedMetricPoint := range reportedMetricPoints {
timestamp := fixedTimestamp.Add(time.Duration(reportedMetricPoint.timestamp) * time.Minute)
if latestTimestamp.Before(timestamp) {
latestTimestamp = timestamp
}
heapsterMetricPoint := heapster.MetricPoint{Timestamp: timestamp, Value: reportedMetricPoint.level, FloatValue: nil}
heapsterMetricPoints = append(heapsterMetricPoints, heapsterMetricPoint)
}
metric := heapster.MetricResult{
Metrics: heapsterMetricPoints,
LatestTimestamp: latestTimestamp,
}
metrics.Items = append(metrics.Items, metric)
}
heapsterRawMemResponse, _ := json.Marshal(&metrics)
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
})
}
return fakeClient
}
func buildPod(namespace, podName string, podLabels map[string]string, phase v1.PodPhase, request string) v1.Pod {
return v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: podLabels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(request),
},
},
},
},
},
Status: v1.PodStatus{
Phase: phase,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
},
}
}
func (tc *testCase) verifyResults(t *testing.T, metrics interface{}, timestamp time.Time, err error) {
if tc.desiredError != nil {
assert.Error(t, err, "there should be an error retrieving the metrics")
assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError), "the error message should be as expected")
return
}
assert.NoError(t, err, "there should be no error retrieving the metrics")
assert.NotNil(t, metrics, "there should be metrics returned")
if metricsInfo, wasRaw := metrics.(PodMetricsInfo); wasRaw {
assert.Equal(t, tc.desiredMetricValues, metricsInfo, "the raw metrics values should be as expected")
} else if resourceInfo, wasResource := metrics.(PodResourceInfo); wasResource {
assert.Equal(t, tc.desiredResourceValues, resourceInfo, "the resource metrics values should be as expected")
} else {
assert.False(t, true, "should return either resource metrics info or raw metrics info")
}
targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)
assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}
func (tc *testCase) runTest(t *testing.T) {
testClient := tc.prepareTestClient(t)
metricsClient := NewHeapsterMetricsClient(testClient, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)
isResource := len(tc.resourceName) > 0
if isResource {
info, timestamp, err := metricsClient.GetResourceMetric(tc.resourceName, tc.namespace, tc.selector)
tc.verifyResults(t, info, timestamp, err)
} else {
info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector)
tc.verifyResults(t, info, timestamp, err)
}
}
func TestCPU(t *testing.T) {
tc := testCase{
replicas: 3,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
},
resourceName: v1.ResourceCPU,
targetTimestamp: 1,
reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
}
tc.runTest(t)
}
func TestQPS(t *testing.T) {
tc := testCase{
replicas: 3,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 10, "test-pod-1": 20, "test-pod-2": 10,
},
metricName: "qps",
targetTimestamp: 1,
reportedMetricsPoints: [][]metricPoint{{{10, 1}}, {{20, 1}}, {{10, 1}}},
}
tc.runTest(t)
}
func TestQpsSumEqualZero(t *testing.T) {
tc := testCase{
replicas: 3,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 0, "test-pod-1": 0, "test-pod-2": 0,
},
metricName: "qps",
targetTimestamp: 0,
reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}},
}
tc.runTest(t)
}
func TestCPUMoreMetrics(t *testing.T) {
tc := testCase{
replicas: 5,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
"test-pod-3": 5000, "test-pod-4": 5000,
},
resourceName: v1.ResourceCPU,
targetTimestamp: 10,
reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}},
}
tc.runTest(t)
}
func TestCPUMissingMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 4000,
},
resourceName: v1.ResourceCPU,
reportedPodMetrics: [][]int64{{4000}},
}
tc.runTest(t)
}
func TestQpsMissingMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
desiredError: fmt.Errorf("requested metrics for 3 pods, got metrics for 1"),
metricName: "qps",
targetTimestamp: 1,
reportedMetricsPoints: [][]metricPoint{{{4000, 4}}},
}
tc.runTest(t)
}
func TestQpsSuperfluousMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
desiredError: fmt.Errorf("requested metrics for 3 pods, got metrics for 6"),
metricName: "qps",
reportedMetricsPoints: [][]metricPoint{{{1000, 1}}, {{2000, 4}}, {{2000, 1}}, {{4000, 5}}, {{2000, 1}}, {{4000, 4}}},
}
tc.runTest(t)
}
func TestCPUEmptyMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
resourceName: v1.ResourceCPU,
desiredError: fmt.Errorf("no metrics returned from heapster"),
reportedMetricsPoints: [][]metricPoint{},
reportedPodMetrics: [][]int64{},
}
tc.runTest(t)
}
func TestQpsEmptyEntries(t *testing.T) {
tc := testCase{
replicas: 3,
metricName: "qps",
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 4000, "test-pod-2": 2000,
},
targetTimestamp: 4,
reportedMetricsPoints: [][]metricPoint{{{4000, 4}}, {}, {{2000, 4}}},
}
tc.runTest(t)
}
func TestCPUZeroReplicas(t *testing.T) {
tc := testCase{
replicas: 0,
resourceName: v1.ResourceCPU,
desiredError: fmt.Errorf("no metrics returned from heapster"),
reportedPodMetrics: [][]int64{},
}
tc.runTest(t)
}
func TestCPUEmptyMetricsForOnePod(t *testing.T) {
tc := testCase{
replicas: 3,
resourceName: v1.ResourceCPU,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 100, "test-pod-1": 700,
},
reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
}
tc.runTest(t)
}
func testCollapseTimeSamples(t *testing.T) {
now := time.Now()
metrics := heapster.MetricResult{
Metrics: []heapster.MetricPoint{
{Timestamp: now, Value: 50, FloatValue: nil},
{Timestamp: now.Add(-15 * time.Second), Value: 100, FloatValue: nil},
{Timestamp: now.Add(-60 * time.Second), Value: 100000, FloatValue: nil}},
LatestTimestamp: now,
}
val, timestamp, hadMetrics := collapseTimeSamples(metrics, time.Minute)
assert.True(t, hadMetrics, "should report that it received a populated list of metrics")
assert.InEpsilon(t, float64(75), val, 0.1, "collapsed sample value should be as expected")
assert.True(t, timestamp.Equal(now), "timestamp should be the current time (the newest)")
}

View File

@@ -134,7 +134,7 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVol
// with existing PVs, findByClaim must find only PVs that are
// pre-bound to the claim (by dynamic provisioning). TODO: remove in
// 1.5
- if v1.HasAnnotation(claim.ObjectMeta, storageutil.AlphaStorageClassAnnotation) {
+ if metav1.HasAnnotation(claim.ObjectMeta, storageutil.AlphaStorageClassAnnotation) {
continue
}

View File

@@ -218,7 +218,7 @@ type PersistentVolumeController struct {
func (ctrl *PersistentVolumeController) syncClaim(claim *v1.PersistentVolumeClaim) error {
glog.V(4).Infof("synchronizing PersistentVolumeClaim[%s]: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim))
- if !v1.HasAnnotation(claim.ObjectMeta, annBindCompleted) {
+ if !metav1.HasAnnotation(claim.ObjectMeta, annBindCompleted) {
return ctrl.syncUnboundClaim(claim)
} else {
return ctrl.syncBoundClaim(claim)
@@ -243,7 +243,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim))
// No PV could be found
// OBSERVATION: pvc is "Pending", will retry
- if storageutil.GetClaimStorageClass(claim) != "" || v1.HasAnnotation(claim.ObjectMeta, storageutil.AlphaStorageClassAnnotation) {
+ if storageutil.GetClaimStorageClass(claim) != "" || metav1.HasAnnotation(claim.ObjectMeta, storageutil.AlphaStorageClassAnnotation) {
if err = ctrl.provisionClaim(claim); err != nil {
return err
}
@@ -315,7 +315,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
} else {
// User asked for a PV that is claimed by someone else
// OBSERVATION: pvc is "Pending", pv is "Bound"
- if !v1.HasAnnotation(claim.ObjectMeta, annBoundByController) {
+ if !metav1.HasAnnotation(claim.ObjectMeta, annBoundByController) {
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim by user, will retry later", claimToClaimKey(claim))
// User asked for a specific PV, retry later
if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil {
@@ -480,7 +480,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
}
return nil
} else if claim.Spec.VolumeName == "" {
- if v1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
+ if metav1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
// The binding is not completed; let PVC sync handle it
glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name)
} else {
@@ -507,7 +507,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
return nil
} else {
// Volume is bound to a claim, but the claim is bound elsewhere
- if v1.HasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) && volume.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
+ if metav1.HasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) && volume.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
// This volume was dynamically provisioned for this claim. The
// claim got bound elsewhere, and thus this volume is not
// needed. Delete it.
@@ -531,7 +531,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
} else {
// Volume is bound to a claim, but the claim is bound elsewhere
// and it's not dynamically provisioned.
- if v1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
+ if metav1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
// This is part of the normal operation of the controller; the
// controller tried to use this volume for a claim but the claim
// was fulfilled by another volume. We did this; fix it.
@@ -755,8 +755,8 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV
}
// Set annBoundByController if it is not set yet
- if shouldSetBoundByController && !v1.HasAnnotation(volumeClone.ObjectMeta, annBoundByController) {
- v1.SetMetaDataAnnotation(&volumeClone.ObjectMeta, annBoundByController, "yes")
+ if shouldSetBoundByController && !metav1.HasAnnotation(volumeClone.ObjectMeta, annBoundByController) {
+ metav1.SetMetaDataAnnotation(&volumeClone.ObjectMeta, annBoundByController, "yes")
dirty = true
}
@@ -812,14 +812,14 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo
}
// Set annBoundByController if it is not set yet
- if shouldSetBoundByController && !v1.HasAnnotation(claimClone.ObjectMeta, annBoundByController) {
- v1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annBoundByController, "yes")
+ if shouldSetBoundByController && !metav1.HasAnnotation(claimClone.ObjectMeta, annBoundByController) {
+ metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annBoundByController, "yes")
dirty = true
}
// Set annBindCompleted if it is not set yet
- if !v1.HasAnnotation(claimClone.ObjectMeta, annBindCompleted) {
- v1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annBindCompleted, "yes")
+ if !metav1.HasAnnotation(claimClone.ObjectMeta, annBindCompleted) {
+ metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annBindCompleted, "yes")
dirty = true
}
@@ -905,7 +905,7 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume
return fmt.Errorf("Unexpected volume cast error : %v", volumeClone)
}
- if v1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
+ if metav1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
// The volume was bound by the controller.
volumeClone.Spec.ClaimRef = nil
delete(volumeClone.Annotations, annBoundByController)
@@ -1335,13 +1335,13 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
volume.Status.Phase = v1.VolumeBound
// Add annBoundByController (used in deleting the volume)
- v1.SetMetaDataAnnotation(&volume.ObjectMeta, annBoundByController, "yes")
- v1.SetMetaDataAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, plugin.GetPluginName())
+ metav1.SetMetaDataAnnotation(&volume.ObjectMeta, annBoundByController, "yes")
+ metav1.SetMetaDataAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, plugin.GetPluginName())
// For Alpha provisioning behavior, do not add storage.BetaStorageClassAnnotations for volumes created
// by storage.AlphaStorageClassAnnotation
// TODO: remove this check in 1.5, storage.StorageClassAnnotation will be always non-empty there.
if claimClass != "" {
- v1.SetMetaDataAnnotation(&volume.ObjectMeta, storageutil.StorageClassAnnotation, claimClass)
+ metav1.SetMetaDataAnnotation(&volume.ObjectMeta, storageutil.StorageClassAnnotation, claimClass)
}
// Try to create the PV object several times
@@ -1445,8 +1445,8 @@ func (ctrl *PersistentVolumeController) newRecyclerEventRecorder(volume *v1.Pers
// provisioner is requested.
func (ctrl *PersistentVolumeController) findProvisionablePlugin(claim *v1.PersistentVolumeClaim) (vol.ProvisionableVolumePlugin, *storage.StorageClass, error) {
// TODO: remove this alpha behavior in 1.5
- alpha := v1.HasAnnotation(claim.ObjectMeta, storageutil.AlphaStorageClassAnnotation)
- beta := v1.HasAnnotation(claim.ObjectMeta, storageutil.BetaStorageClassAnnotation)
+ alpha := metav1.HasAnnotation(claim.ObjectMeta, storageutil.AlphaStorageClassAnnotation)
+ beta := metav1.HasAnnotation(claim.ObjectMeta, storageutil.BetaStorageClassAnnotation)
if alpha && beta {
// Both Alpha and Beta annotations are set. Do beta.
alpha = false
@@ -1512,7 +1512,7 @@ func (ctrl *PersistentVolumeController) findAlphaProvisionablePlugin() (vol.Prov
func (ctrl *PersistentVolumeController) findDeletablePlugin(volume *v1.PersistentVolume) (vol.DeletableVolumePlugin, error) {
// Find a plugin. Try to find the same plugin that provisioned the volume
var plugin vol.DeletableVolumePlugin
- if v1.HasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) {
+ if metav1.HasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) {
provisionPluginName := volume.Annotations[annDynamicallyProvisioned]
if provisionPluginName != "" {
plugin, err := ctrl.volumePluginMgr.FindDeletablePluginByName(provisionPluginName)

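The persistent-volume controller hunks above and below swap the v1 annotation helpers for their metav1 counterparts; the call shapes stay the same. A minimal sketch of the pair as used after this commit (annExample and markAnnotated are illustrative names, not the controller's own annotations or helpers):

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

const annExample = "example.io/bound-by-controller" // illustrative annotation key

// markAnnotated sets the annotation on the claim's metadata unless it is
// already present, mirroring how the controller sets annBoundByController.
func markAnnotated(claim *v1.PersistentVolumeClaim) {
	if !metav1.HasAnnotation(claim.ObjectMeta, annExample) {
		metav1.SetMetaDataAnnotation(&claim.ObjectMeta, annExample, "yes")
	}
}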
View File

@@ -510,7 +510,7 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent
if !ok {
return nil, fmt.Errorf("Unexpected claim cast error : %v", claimClone)
}
- v1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annStorageProvisioner, class.Provisioner)
+ metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annStorageProvisioner, class.Provisioner)
newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
if err != nil {
return newClaim, err
@@ -525,14 +525,14 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent
// Stateless functions
func getClaimStatusForLogging(claim *v1.PersistentVolumeClaim) string {
- bound := v1.HasAnnotation(claim.ObjectMeta, annBindCompleted)
- boundByController := v1.HasAnnotation(claim.ObjectMeta, annBoundByController)
+ bound := metav1.HasAnnotation(claim.ObjectMeta, annBindCompleted)
+ boundByController := metav1.HasAnnotation(claim.ObjectMeta, annBoundByController)
return fmt.Sprintf("phase: %s, bound to: %q, bindCompleted: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, bound, boundByController)
}
func getVolumeStatusForLogging(volume *v1.PersistentVolume) string {
- boundByController := v1.HasAnnotation(volume.ObjectMeta, annBoundByController)
+ boundByController := metav1.HasAnnotation(volume.ObjectMeta, annBoundByController)
claimName := ""
if volume.Spec.ClaimRef != nil {
claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID)