Merge pull request #88447 from taesunny/pkg/controller
Fix pkg/controller typos in some error messages, comments etc
This commit is contained in: commit 55f2d91c8c
@@ -53,7 +53,7 @@ type SignerOptions struct {
 // TokenSecretNamespace string is the namespace for token Secrets.
 TokenSecretNamespace string
 
-// ConfigMapResynce is the time.Duration at which to fully re-list configmaps.
+// ConfigMapResync is the time.Duration at which to fully re-list configmaps.
 // If zero, re-list will be delayed as long as possible
 ConfigMapResync time.Duration
 
@@ -83,7 +83,7 @@ func TestConfigMapCreation(t *testing.T) {
 UpdatedConfigMap *v1.ConfigMap
 ExpectActions []action
 }{
-"create new namesapce": {
+"create new namespace": {
 AddedNamespace: newNs,
 ExpectActions: []action{{verb: "create", name: RootCACertConfigMapName}},
 },
@@ -158,12 +158,12 @@ func cleanupFinishedJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobContr
 }
 
 failedJobs := []batchv1.Job{}
-succesfulJobs := []batchv1.Job{}
+successfulJobs := []batchv1.Job{}
 
 for _, job := range js {
 isFinished, finishedStatus := getFinishedStatus(&job)
 if isFinished && finishedStatus == batchv1.JobComplete {
-succesfulJobs = append(succesfulJobs, job)
+successfulJobs = append(successfulJobs, job)
 } else if isFinished && finishedStatus == batchv1.JobFailed {
 failedJobs = append(failedJobs, job)
 }
@@ -171,7 +171,7 @@ func cleanupFinishedJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobContr
 
 if sj.Spec.SuccessfulJobsHistoryLimit != nil {
 removeOldestJobs(sj,
-succesfulJobs,
+successfulJobs,
 jc,
 *sj.Spec.SuccessfulJobsHistoryLimit,
 recorder)
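The two hunks above rename succesfulJobs to successfulJobs inside cleanupFinishedJobs, which splits a CronJob's finished children into successful and failed sets and then trims each set to its history limit via removeOldestJobs. As a rough standalone sketch of that trimming idea (the helper name, the sort key, and the deleteJob callback are assumptions for illustration, not the controller's actual removeOldestJobs signature):

package cronjobsketch

import (
	"sort"

	batchv1 "k8s.io/api/batch/v1"
)

// trimToHistoryLimit is a hypothetical helper, not the controller's
// removeOldestJobs: it deletes the oldest finished jobs until at most
// `limit` of them remain, using the caller-supplied deleteJob callback.
func trimToHistoryLimit(jobs []batchv1.Job, limit int32, deleteJob func(batchv1.Job)) {
	if int32(len(jobs)) <= limit {
		return // already within the history limit
	}
	// Sort oldest first so the surplus sits at the front of the slice
	// (metav1.Time.Before is nil-safe, so jobs without a StartTime drift to the back).
	sort.Slice(jobs, func(i, j int) bool {
		return jobs[i].Status.StartTime.Before(jobs[j].Status.StartTime)
	})
	for _, job := range jobs[:len(jobs)-int(limit)] {
		deleteJob(job)
	}
}

Sorting oldest-first keeps the deletion loop simple: everything before the cut-off index is surplus.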
@@ -268,7 +268,7 @@ func TestSyncOne_RunOrNot(t *testing.T) {
 sj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()}
 job, err = getJobFromTemplate(&sj, sj.Status.LastScheduleTime.Time)
 if err != nil {
-t.Fatalf("%s: nexpected error creating a job from template: %v", name, err)
+t.Fatalf("%s: unexpected error creating a job from template: %v", name, err)
 }
 job.UID = "1234"
 job.Namespace = ""
@@ -22,7 +22,7 @@ import (
 daemonconfig "k8s.io/kubernetes/pkg/controller/daemon/config"
 )
 
-// Important! The public back-and-forth conversion functions for the types in this epackage
+// Important! The public back-and-forth conversion functions for the types in this package
 // with DaemonSetControllerConfiguration types need to be manually exposed like this in order for
 // other packages that reference this package to be able to call these conversion functions
 // in an autogenerated manner.
@@ -537,7 +537,7 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
 }
 
 if len(nodeMap) != 0 {
-t.Fatalf("did not foud pods on nodes %+v", nodeMap)
+t.Fatalf("did not find pods on nodes %+v", nodeMap)
 }
 }
 
@@ -587,7 +587,7 @@ func TestDaemonSetPodCreateExpectationsError(t *testing.T) {
 }
 
 if !manager.expectations.SatisfiedExpectations(dsKey) {
-t.Errorf("Unsatisfied pod creation expectatitons. Expected %d", creationExpectations)
+t.Errorf("Unsatisfied pod creation expectations. Expected %d", creationExpectations)
 }
 }
 }
@@ -1146,7 +1146,7 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
 
 manager, podControl, _, err := newTestController(daemon)
 if err != nil {
-t.Fatalf("rrror creating DaemonSetsController: %v", err)
+t.Fatalf("error creating DaemonSetsController: %v", err)
 }
 addNodes(manager.nodeStore, 0, 4, nil)
 addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
@@ -1786,7 +1786,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 shouldContinueRunning: true,
 },
 {
-predicateName: "ShouldRunDaemonPodOnUnscheduableNode",
+predicateName: "ShouldRunDaemonPodOnUnschedulableNode",
 ds: &apps.DaemonSet{
 Spec: apps.DaemonSetSpec{
 Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
@@ -1308,7 +1308,7 @@ func TestPodChanged(t *testing.T) {
 
 /* dual stack tests */
 // primary changes, because changing IPs is done by changing sandbox
-// case 1: add new secondrary IP
+// case 1: add new secondary IP
 newPod.Status.PodIP = "1.1.3.1"
 newPod.Status.PodIPs = []v1.PodIP{
 {
@@ -1147,7 +1147,7 @@ func TestSyncJobExpectations(t *testing.T) {
 
 manager.expectations = FakeJobExpectations{
 controller.NewControllerExpectations(), true, func() {
-// If we check active pods before checking expectataions, the job
+// If we check active pods before checking expectations, the job
 // will create a new replica because it doesn't see this pod, but
 // has fulfilled its expectations.
 podIndexer.Add(&pods[1])
@@ -561,7 +561,7 @@ func (d *namespacedResourcesDeleter) deleteAllContent(ns *v1.Namespace) (int64,
 return estimate, utilerrors.NewAggregate(errs)
 }
 
-// estimateGrracefulTermination will estimate the graceful termination required for the specific entity in the namespace
+// estimateGracefulTermination will estimate the graceful termination required for the specific entity in the namespace
 func (d *namespacedResourcesDeleter) estimateGracefulTermination(gvr schema.GroupVersionResource, ns string, namespaceDeletedAt metav1.Time) (int64, error) {
 groupResource := gvr.GroupResource()
 klog.V(5).Infof("namespace controller - estimateGracefulTermination - group %s, resource: %s", groupResource.Group, groupResource.Resource)
@@ -273,7 +273,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
 allocated.allocatedCIDRs[idx] = podCIDR
 }
 
-//queue the assignement
+//queue the assignment
 klog.V(4).Infof("Putting node %s with CIDR %v into the work queue", node.Name, allocated.allocatedCIDRs)
 r.nodeCIDRUpdateChannel <- allocated
 return nil
@@ -32,7 +32,7 @@ type NodeLifecycleControllerConfiguration struct {
 // nodeStartupGracePeriod is the amount of time which we allow starting a node to
 // be unresponsive before marking it unhealthy.
 NodeStartupGracePeriod metav1.Duration
-// nodeMontiorGracePeriod is the amount of time which we allow a running node to be
+// NodeMonitorGracePeriod is the amount of time which we allow a running node to be
 // unresponsive before marking it unhealthy. Must be N times more than kubelet's
 // nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
 // to post node status.
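The last hunk above fixes the NodeMonitorGracePeriod comment, which ties the grace period to the kubelet's status-update frequency and its retry count. A small illustrative calculation, with made-up numbers rather than the shipped defaults:

package nodeconfigsketch

import "time"

// Hypothetical values for illustration only, not Kubernetes defaults:
// if the kubelet posts node status every 10s and is allowed 4 retries,
// the node lifecycle controller should wait at least 4 * 10s before
// treating the node as unhealthy.
const (
	nodeStatusUpdateFrequency = 10 * time.Second
	allowedStatusRetries      = 4
	minNodeMonitorGracePeriod = allowedStatusRetries * nodeStatusUpdateFrequency // 40s
)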
@@ -129,7 +129,7 @@ const (
 retrySleepTime = 20 * time.Millisecond
 nodeNameKeyIndex = "spec.nodeName"
 // podUpdateWorkerSizes assumes that in most cases pod will be handled by monitorNodeHealth pass.
-// Pod update workes will only handle lagging cache pods. 4 workes should be enough.
+// Pod update workers will only handle lagging cache pods. 4 workers should be enough.
 podUpdateWorkerSize = 4
 )
 
@@ -734,7 +734,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
 return true, 0
 })
 } else {
-t.Fatalf("Zone %v was unitialized!", zone)
+t.Fatalf("Zone %v was uninitialized!", zone)
 }
 }
 
@@ -137,7 +137,7 @@ func (q *TimedWorkerQueue) CancelWork(key string) bool {
 }
 
 // GetWorkerUnsafe returns a TimedWorker corresponding to the given key.
-// Unsafe method - workers have attached goroutines which can fire afater this function is called.
+// Unsafe method - workers have attached goroutines which can fire after this function is called.
 func (q *TimedWorkerQueue) GetWorkerUnsafe(key string) *TimedWorker {
 q.Lock()
 defer q.Unlock()
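The hunk above corrects the GetWorkerUnsafe comment. A minimal sketch of why such a lookup is "unsafe", using simplified stand-in types rather than the real TimedWorkerQueue API:

package workersketch

import (
	"sync"
	"time"
)

// timedWorker pairs a scheduled function with its timer; it is a simplified
// stand-in for the controller's TimedWorker, not its actual type.
type timedWorker struct {
	timer *time.Timer
}

// timedWorkerQueue is a minimal keyed queue of delayed workers.
type timedWorkerQueue struct {
	sync.Mutex
	workers map[string]*timedWorker
}

// addWork schedules fn to run after delay and remembers the worker under key.
func (q *timedWorkerQueue) addWork(key string, delay time.Duration, fn func()) {
	q.Lock()
	defer q.Unlock()
	q.workers[key] = &timedWorker{timer: time.AfterFunc(delay, fn)}
}

// getWorkerUnsafe looks the worker up under the lock, but the returned
// worker's timer keeps running: its goroutine can still fire after this call
// returns, which is what the "Unsafe" comment in the hunk above warns about.
func (q *timedWorkerQueue) getWorkerUnsafe(key string) *timedWorker {
	q.Lock()
	defer q.Unlock()
	return q.workers[key]
}

// cancelWork stops the timer and drops the worker, reporting whether a
// pending worker was actually cancelled.
func (q *timedWorkerQueue) cancelWork(key string) bool {
	q.Lock()
	defer q.Unlock()
	w, ok := q.workers[key]
	if !ok {
		return false
	}
	w.timer.Stop()
	delete(q.workers, key)
	return true
}

Only cancelWork actually stops the attached timer; a plain lookup leaves it ticking.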
@@ -47,7 +47,7 @@ func TestExecute(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 5 {
-t.Errorf("Espected testVal = 5, got %v", lastVal)
+t.Errorf("Expected testVal = 5, got %v", lastVal)
 }
 }
 
@@ -75,7 +75,7 @@ func TestExecuteDelayed(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 5 {
-t.Errorf("Espected testVal = 5, got %v", lastVal)
+t.Errorf("Expected testVal = 5, got %v", lastVal)
 }
 }
 
@@ -105,7 +105,7 @@ func TestCancel(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 3 {
-t.Errorf("Espected testVal = 3, got %v", lastVal)
+t.Errorf("Expected testVal = 3, got %v", lastVal)
 }
 }
 
@@ -136,6 +136,6 @@ func TestCancelAndReadd(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 4 {
-t.Errorf("Espected testVal = 4, got %v", lastVal)
+t.Errorf("Expected testVal = 4, got %v", lastVal)
 }
 }
@@ -162,7 +162,7 @@ func (c *customMetricsClient) GetObjectMetric(metricName string, namespace strin
 return metricValue.Value.MilliValue(), metricValue.Timestamp.Time, nil
 }
 
-// externalMetricsClient implenets the external metrics related parts of MetricsClient,
+// externalMetricsClient implements the external metrics related parts of MetricsClient,
 // using data from the external metrics API.
 type externalMetricsClient struct {
 client externalclient.ExternalMetricsClient
@@ -52,7 +52,7 @@ func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int
 }
 
 // GetMetricUtilizationRatio takes in a set of metrics and a target utilization value,
-// and calcuates the ratio of desired to actual utilization
+// and calculates the ratio of desired to actual utilization
 // (returning that and the actual utilization)
 func GetMetricUtilizationRatio(metrics PodMetricsInfo, targetUtilization int64) (utilizationRatio float64, currentUtilization int64) {
 metricsTotal := int64(0)
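The hunk above fixes the comment on GetMetricUtilizationRatio. A minimal standalone sketch of the calculation that comment describes, assuming a simplified metrics map instead of the real PodMetricsInfo type:

package hpasketch

// podMetrics maps pod name to the raw metric value observed for that pod;
// it stands in for the controller's richer PodMetricsInfo type.
type podMetrics map[string]int64

// metricUtilizationRatio averages the per-pod values and divides the average
// by the target, returning both the ratio and the average, as the comment in
// the hunk above describes. Callers are assumed to pass a non-empty map.
func metricUtilizationRatio(metrics podMetrics, targetUtilization int64) (ratio float64, current int64) {
	total := int64(0)
	for _, value := range metrics {
		total += value
	}
	current = total / int64(len(metrics)) // average across pods
	return float64(current) / float64(targetUtilization), current
}

For example, pod values of 80, 100 and 120 against a target of 100 give a current utilization of 100 and a ratio of 1.0.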
@@ -1196,7 +1196,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 t.Errorf("No expectations found for ReplicaSet %q", oldRSKey)
 }
 if rsExp.Fulfilled() {
-t.Errorf("There should be unfulfiled expectation for creating new pods for ReplicaSet %q", oldRSKey)
+t.Errorf("There should be unfulfilled expectations for creating new pods for ReplicaSet %q", oldRSKey)
 }
 
 if manager.queue.Len() != 0 {
@@ -1275,7 +1275,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 t.Errorf("No expectations found for ReplicaSet %q", oldRSKey)
 }
 if rsExp.Fulfilled() {
-t.Errorf("There should be unfulfiled expectation for creating new pods for ReplicaSet %q", oldRSKey)
+t.Errorf("There should be unfulfilled expectations for creating new pods for ReplicaSet %q", oldRSKey)
 }
 
 err = validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
@@ -952,7 +952,7 @@ func TestNeedsUpdate(t *testing.T) {
 expectedNeedsUpdate: true,
 },
 {
-testName: "If externel ip counts are different",
+testName: "If external ip counts are different",
 updateFn: func() {
 oldSvc = defaultExternalService()
 newSvc = defaultExternalService()
@@ -962,7 +962,7 @@ func TestNeedsUpdate(t *testing.T) {
 expectedNeedsUpdate: true,
 },
 {
-testName: "If externel ips are different",
+testName: "If external ips are different",
 updateFn: func() {
 oldSvc = defaultExternalService()
 newSvc = defaultExternalService()
@@ -251,7 +251,7 @@ func TestStatefulSetControllerDeletionTimestampRace(t *testing.T) {
 // It should not adopt pods.
 for _, pod := range pods {
 if len(pod.OwnerReferences) > 0 {
-t.Errorf("unexpect pod owner references: %v", pod.OwnerReferences)
+t.Errorf("unexpected pod owner references: %v", pod.OwnerReferences)
 }
 }
 
@@ -265,7 +265,7 @@ func TestStatefulSetControllerDeletionTimestampRace(t *testing.T) {
 }
 for _, revision := range revisions {
 if len(revision.OwnerReferences) > 0 {
-t.Errorf("unexpect revision owner references: %v", revision.OwnerReferences)
+t.Errorf("unexpected revision owner references: %v", revision.OwnerReferences)
 }
 }
 }
@@ -199,7 +199,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 t.Fatalf("Error waiting for the informer caches to sync")
 }
 
-// Make sure the nodes and pods are in the inforer cache
+// Make sure the nodes and pods are in the informer cache
 i = 0
 nodeList, err := informerFactory.Core().V1().Nodes().Lister().List(labels.Everything())
 for len(nodeList) < nodesNum {
@@ -120,7 +120,7 @@ type ActualStateOfWorld interface {
 GetAttachedVolumesPerNode() map[types.NodeName][]operationexecutor.AttachedVolume
 
 // GetNodesForAttachedVolume returns the nodes on which the volume is attached.
-// This function is used by reconciler for mutli-attach check.
+// This function is used by reconciler for multi-attach check.
 GetNodesForAttachedVolume(volumeName v1.UniqueVolumeName) []types.NodeName
 
 // GetVolumesToReportAttached returns a map containing the set of nodes for
@@ -990,7 +990,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou
 
 // Populates data struct with one volume/node entry.
 // Calls RemoveVolumeFromReportAsAttached
-// Verifyies there is no valume as reported as attached
+// Verifies there is no volume as reported as attached
 func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
 // Arrange
 volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
@@ -1023,7 +1023,7 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
 // Populates data struct with one volume/node entry.
 // Calls RemoveVolumeFromReportAsAttached
 // Calls AddVolumeToReportAsAttached to add volume back as attached
-// Verifyies there is one volume as reported as attached
+// Verifies there is one volume as reported as attached
 func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(t *testing.T) {
 // Arrange
 volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
@@ -1066,7 +1066,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
 // Calls RemoveVolumeFromReportAsAttached
 // Calls DeleteVolumeNode
 // Calls AddVolumeNode
-// Verifyies there is no volume as reported as attached
+// Verifies there is no volume as reported as attached
 func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
 // Arrange
 volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
@@ -263,7 +263,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
 nodeName)
 }
 
-// Assert -- Timer will triger detach
+// Assert -- Timer will trigger detach
 waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
 verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
 waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
@@ -415,7 +415,7 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing.
 nodeName1)
 }
 
-// Assert -- Timer will triger detach
+// Assert -- Timer will trigger detach
 waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
 verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
 waitForTotalAttachCallCount(t, 2 /* expectedAttachCallCount */, fakePlugin)
@@ -433,7 +433,7 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing.
 nodeName2)
 }
 
-// Assert -- Timer will triger detach
+// Assert -- Timer will trigger detach
 waitForNewDetacherCallCount(t, 2 /* expectedCallCount */, fakePlugin)
 verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
 waitForTotalAttachCallCount(t, 2 /* expectedAttachCallCount */, fakePlugin)
@@ -692,7 +692,7 @@ func Test_ReportMultiAttachError(t *testing.T) {
 []string{"Warning FailedAttachVolume Multi-Attach error for volume \"volume-name\" Volume is already used by pod(s) pod2"},
 },
 {
-"pods in anotother namespace use the volume",
+"pods in another namespace use the volume",
 []nodeWithPods{
 {"node1", []string{"ns1/pod1"}},
 {"node2", []string{"ns2/pod2"}},
@@ -700,7 +700,7 @@ func Test_ReportMultiAttachError(t *testing.T) {
 []string{"Warning FailedAttachVolume Multi-Attach error for volume \"volume-name\" Volume is already used by 1 pod(s) in different namespaces"},
 },
 {
-"pods both in the same and anotother namespace use the volume",
+"pods both in the same and another namespace use the volume",
 []nodeWithPods{
 {"node1", []string{"ns1/pod1"}},
 {"node2", []string{"ns2/pod2"}},
@@ -769,7 +769,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 if err != nil {
 if err == pvtesting.ErrVersionConflict {
 // Ignore version errors
-klog.V(4).Infof("test intentionaly ignores version error.")
+klog.V(4).Infof("test intentionally ignores version error.")
 } else {
 t.Errorf("Error calling syncClaim: %v", err)
 // Finish the loop on the first error
@@ -786,7 +786,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 if err != nil {
 if err == pvtesting.ErrVersionConflict {
 // Ignore version errors
-klog.V(4).Infof("test intentionaly ignores version error.")
+klog.V(4).Infof("test intentionally ignores version error.")
 } else {
 t.Errorf("Error calling syncVolume: %v", err)
 // Finish the loop on the first error
@@ -107,7 +107,7 @@ func TestPVProtectionController(t *testing.T) {
 // Optional client reactors.
 reactors []reaction
 // PV event to simulate. This PV will be automatically added to
-// initalObjects.
+// initialObjects.
 updatedPV *v1.PersistentVolume
 // List of expected kubeclient actions that should happen during the
 // test.
@@ -220,7 +220,7 @@ func TestPVProtectionController(t *testing.T) {
 case *v1.PersistentVolume:
 pvInformer.Informer().GetStore().Add(obj)
 default:
-t.Fatalf("Unknown initalObject type: %+v", obj)
+t.Fatalf("Unknown initialObject type: %+v", obj)
 }
 }
 
@@ -127,7 +127,7 @@ func (c *assumeCache) objInfoIndexFunc(obj interface{}) ([]string, error) {
 return c.indexFunc(objInfo.latestObj)
 }
 
-// NewAssumeCache creates an assume cache for genernal objects.
+// NewAssumeCache creates an assume cache for general objects.
 func NewAssumeCache(informer cache.SharedIndexInformer, description, indexName string, indexFunc cache.IndexFunc) AssumeCache {
 c := &assumeCache{
 description: description,
@@ -163,7 +163,7 @@ func TestRestorePV(t *testing.T) {
 // Restore PV
 cache.Restore(oldPV.Name)
 if err := verifyPV(cache, oldPV.Name, oldPV); err != nil {
-t.Fatalf("Failed to GetPV() after iniital restore: %v", err)
+t.Fatalf("Failed to GetPV() after initial restore: %v", err)
 }
 
 // Assume newPV
@@ -420,7 +420,7 @@ func TestRestorePVC(t *testing.T) {
 // Restore PVC
 cache.Restore(getPVCName(oldPVC))
 if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil {
-t.Fatalf("Failed to GetPVC() after iniital restore: %v", err)
+t.Fatalf("Failed to GetPVC() after initial restore: %v", err)
 }
 
 // Assume newPVC
@@ -912,10 +912,10 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 t.Error("returned success but expected error")
 }
 if boundSatisfied != scenario.expectedBound {
-t.Errorf("expected boundSatsified %v, got %v", scenario.expectedBound, boundSatisfied)
+t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
 }
 if unboundSatisfied != scenario.expectedUnbound {
-t.Errorf("expected unboundSatsified %v, got %v", scenario.expectedUnbound, unboundSatisfied)
+t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
 }
 testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, nil)
 }
@@ -1037,10 +1037,10 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
 t.Error("returned success but expected error")
 }
 if boundSatisfied != scenario.expectedBound {
-t.Errorf("expected boundSatsified %v, got %v", scenario.expectedBound, boundSatisfied)
+t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
 }
 if unboundSatisfied != scenario.expectedUnbound {
-t.Errorf("expected unboundSatsified %v, got %v", scenario.expectedUnbound, unboundSatisfied)
+t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
 }
 testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, scenario.expectedProvisions)
 }
@@ -1150,10 +1150,10 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
 t.Error("returned success but expected error")
 }
 if boundSatisfied != scenario.expectedBound {
-t.Errorf("expected boundSatsified %v, got %v", scenario.expectedBound, boundSatisfied)
+t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
 }
 if unboundSatisfied != scenario.expectedUnbound {
-t.Errorf("expected unboundSatsified %v, got %v", scenario.expectedUnbound, unboundSatisfied)
+t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
 }
 }
 