Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-04 09:49:50 +00:00
Merge pull request #63744 from krmayankk/changelog
Automatic merge from submit-queue (batch tested with PRs 63580, 63744, 64541, 64502, 64100). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

remove redundant getKey functions from controller tests

```release-note
None
```
This commit is contained in: commit 65819a8f92
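The diff below removes the copy-pasted `getKey` helpers from the DaemonSet, Deployment, Job, and ReplicaSet controller tests and routes all call sites through a single shared `testutil.GetKey` helper. As a rough sketch of what that means for a caller (the test name, package name, and object fields here are illustrative and not part of the change):

```go
package example // hypothetical package, for illustration only

import (
	"testing"

	batch "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/pkg/controller/testutil"
)

func TestGetKeySketch(t *testing.T) {
	job := &batch.Job{
		// Kind is set explicitly because testutil.GetKey reads the Kind field via
		// reflection for its error messages; several fixtures in this PR add it too.
		TypeMeta:   metav1.TypeMeta{Kind: "Job"},
		ObjectMeta: metav1.ObjectMeta{Name: "foobar", Namespace: metav1.NamespaceDefault},
	}

	// Before this change each controller package declared its own getKey(job, t);
	// afterwards every test calls the shared helper instead.
	key := testutil.GetKey(job, t)
	if key != "default/foobar" {
		t.Fatalf("unexpected key %q", key)
	}
}
```

The helper returns the usual `<namespace>/<name>` work-queue key, which is why the call sites in the hunks below can pass its result straight to `syncJob`, `syncReplicaSet`, or the deployment fixture's `run`.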
```diff
@@ -84,15 +84,6 @@ var (
     }}
 )
 
-func getKey(ds *apps.DaemonSet, t *testing.T) string {
-    key, err := controller.KeyFunc(ds)
-
-    if err != nil {
-        t.Errorf("Unexpected error getting key for ds %v: %v", ds.Name, err)
-    }
-    return key
-}
-
 func newDaemonSet(name string) *apps.DaemonSet {
     two := int32(2)
     return &apps.DaemonSet{
```
```diff
@@ -71,6 +71,7 @@ go_test(
         "//pkg/apis/storage/install:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
+        "//pkg/controller/testutil:go_default_library",
         "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
```
```diff
@@ -45,6 +45,7 @@ import (
     _ "k8s.io/kubernetes/pkg/apis/storage/install"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/deployment/util"
+    "k8s.io/kubernetes/pkg/controller/testutil"
 )
 
 var (
@@ -77,7 +78,7 @@ func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map
 
 func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *apps.Deployment {
     d := apps.Deployment{
-        TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1"},
+        TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
         ObjectMeta: metav1.ObjectMeta{
             UID:  uuid.NewUUID(),
             Name: name,
@@ -120,6 +121,7 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
 
 func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaSet {
     return &apps.ReplicaSet{
+        TypeMeta: metav1.TypeMeta{Kind: "ReplicaSet"},
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
             UID:  uuid.NewUUID(),
@@ -135,15 +137,6 @@ func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaS
     }
 }
 
-func getKey(d *apps.Deployment, t *testing.T) string {
-    if key, err := controller.KeyFunc(d); err != nil {
-        t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err)
-        return ""
-    } else {
-        return key
-    }
-}
-
 type fixture struct {
     t *testing.T
 
@@ -285,7 +278,7 @@ func TestSyncDeploymentCreatesReplicaSet(t *testing.T) {
     f.expectUpdateDeploymentStatusAction(d)
     f.expectUpdateDeploymentStatusAction(d)
 
-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }
 
 func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
@@ -298,7 +291,7 @@ func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
     f.objects = append(f.objects, d)
 
     f.expectUpdateDeploymentStatusAction(d)
-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }
 
 func TestSyncDeploymentDeletionRace(t *testing.T) {
@@ -323,7 +316,7 @@ func TestSyncDeploymentDeletionRace(t *testing.T) {
     f.expectGetDeploymentAction(d)
     // Sync should fail and requeue to let cache catch up.
     // Don't start informers, since we don't want cache to catch up for this test.
-    f.runExpectError(getKey(d, t), false)
+    f.runExpectError(testutil.GetKey(d, t), false)
 }
 
 // issue: https://github.com/kubernetes/kubernetes/issues/23218
@@ -337,7 +330,7 @@ func TestDontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
 
     // Normally there should be a status update to sync observedGeneration but the fake
     // deployment has no generation set so there is no action happpening here.
-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }
 
 func TestReentrantRollback(t *testing.T) {
@@ -364,7 +357,7 @@ func TestReentrantRollback(t *testing.T) {
     // Rollback is done here
     f.expectUpdateDeploymentAction(d)
     // Expect no update on replica sets though
-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }
 
 // TestPodDeletionEnqueuesRecreateDeployment ensures that the deletion of a pod
```
```diff
@@ -49,6 +49,7 @@ go_test(
     deps = [
         "//pkg/apis/core/install:go_default_library",
         "//pkg/controller:go_default_library",
+        "//pkg/controller/testutil:go_default_library",
         "//vendor/k8s.io/api/batch/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
```
```diff
@@ -40,12 +40,14 @@ import (
     "k8s.io/client-go/util/workqueue"
     _ "k8s.io/kubernetes/pkg/apis/core/install"
     "k8s.io/kubernetes/pkg/controller"
+    "k8s.io/kubernetes/pkg/controller/testutil"
 )
 
 var alwaysReady = func() bool { return true }
 
 func newJob(parallelism, completions, backoffLimit int32) *batch.Job {
     j := &batch.Job{
+        TypeMeta: metav1.TypeMeta{Kind: "Job"},
         ObjectMeta: metav1.ObjectMeta{
             Name: "foobar",
             UID:  uuid.NewUUID(),
@@ -86,15 +88,6 @@ func newJob(parallelism, completions, backoffLimit int32) *batch.Job {
     return j
 }
 
-func getKey(job *batch.Job, t *testing.T) string {
-    if key, err := controller.KeyFunc(job); err != nil {
-        t.Errorf("Unexpected error getting key for job %v: %v", job.Name, err)
-        return ""
-    } else {
-        return key
-    }
-}
-
 func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*JobController, informers.SharedInformerFactory) {
     sharedInformers := informers.NewSharedInformerFactory(kubeClient, resyncPeriod())
     jm := NewJobController(sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), kubeClient)
@@ -301,7 +294,7 @@ func TestControllerSyncJob(t *testing.T) {
         setPodsStatuses(podIndexer, job, tc.pendingPods, tc.activePods, tc.succeededPods, tc.failedPods)
 
         // run
-        forget, err := manager.syncJob(getKey(job, t))
+        forget, err := manager.syncJob(testutil.GetKey(job, t))
 
         // We need requeue syncJob task if podController error
         if tc.podControllerError != nil {
@@ -388,7 +381,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
         failedPods    int32
 
         // expectations
-        expectedForgetKey bool
+        expectedForGetKey bool
         expectedDeletions int32
         expectedActive    int32
         expectedSucceeded int32
@@ -441,12 +434,12 @@ func TestSyncJobPastDeadline(t *testing.T) {
         setPodsStatuses(podIndexer, job, 0, tc.activePods, tc.succeededPods, tc.failedPods)
 
         // run
-        forget, err := manager.syncJob(getKey(job, t))
+        forget, err := manager.syncJob(testutil.GetKey(job, t))
         if err != nil {
             t.Errorf("%s: unexpected error when syncing jobs %v", name, err)
         }
-        if forget != tc.expectedForgetKey {
-            t.Errorf("%s: unexpected forget value. Expected %v, saw %v\n", name, tc.expectedForgetKey, forget)
+        if forget != tc.expectedForGetKey {
+            t.Errorf("%s: unexpected forget value. Expected %v, saw %v\n", name, tc.expectedForGetKey, forget)
         }
         // validate created/deleted pods
         if int32(len(fakePodControl.Templates)) != 0 {
@@ -504,7 +497,7 @@ func TestSyncPastDeadlineJobFinished(t *testing.T) {
     job.Status.StartTime = &start
     job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
     sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-    forget, err := manager.syncJob(getKey(job, t))
+    forget, err := manager.syncJob(testutil.GetKey(job, t))
     if err != nil {
         t.Errorf("Unexpected error when syncing jobs %v", err)
     }
@@ -533,7 +526,7 @@ func TestSyncJobComplete(t *testing.T) {
     job := newJob(1, 1, 6)
     job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, "", ""))
     sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-    forget, err := manager.syncJob(getKey(job, t))
+    forget, err := manager.syncJob(testutil.GetKey(job, t))
     if err != nil {
         t.Fatalf("Unexpected error when syncing jobs %v", err)
     }
@@ -559,7 +552,7 @@ func TestSyncJobDeleted(t *testing.T) {
     manager.jobStoreSynced = alwaysReady
     manager.updateHandler = func(job *batch.Job) error { return nil }
     job := newJob(2, 2, 6)
-    forget, err := manager.syncJob(getKey(job, t))
+    forget, err := manager.syncJob(testutil.GetKey(job, t))
     if err != nil {
         t.Errorf("Unexpected error when syncing jobs %v", err)
     }
@@ -584,12 +577,12 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
     manager.jobStoreSynced = alwaysReady
     updateError := fmt.Errorf("Update error")
     manager.updateHandler = func(job *batch.Job) error {
-        manager.queue.AddRateLimited(getKey(job, t))
+        manager.queue.AddRateLimited(testutil.GetKey(job, t))
         return updateError
     }
     job := newJob(2, 2, 6)
     sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-    forget, err := manager.syncJob(getKey(job, t))
+    forget, err := manager.syncJob(testutil.GetKey(job, t))
     if err == nil || err != updateError {
         t.Errorf("Expected error %v when syncing jobs, got %v", updateError, err)
     }
@@ -598,7 +591,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
     }
     t.Log("Waiting for a job in the queue")
     key, _ := manager.queue.Get()
-    expectedKey := getKey(job, t)
+    expectedKey := testutil.GetKey(job, t)
     if key != expectedKey {
         t.Errorf("Expected requeue of job with key %s got %s", expectedKey, key)
     }
@@ -1160,7 +1153,7 @@ func TestSyncJobExpectations(t *testing.T) {
             podIndexer.Add(&pods[1])
         },
     }
-    manager.syncJob(getKey(job, t))
+    manager.syncJob(testutil.GetKey(job, t))
     if len(fakePodControl.Templates) != 0 {
         t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
     }
@@ -1314,7 +1307,7 @@ func TestJobBackoffReset(t *testing.T) {
 
         // job & pods setup
         job := newJob(tc.parallelism, tc.completions, tc.backoffLimit)
-        key := getKey(job, t)
+        key := testutil.GetKey(job, t)
         sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
         podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
 
@@ -1472,7 +1465,7 @@ func TestJobBackoffForOnFailure(t *testing.T) {
         }
 
         // run
-        forget, err := manager.syncJob(getKey(job, t))
+        forget, err := manager.syncJob(testutil.GetKey(job, t))
 
         if err != nil {
             t.Errorf("unexpected error syncing job. Got %#v", err)
```
```diff
@@ -51,6 +51,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/controller:go_default_library",
+        "//pkg/controller/testutil:go_default_library",
         "//pkg/securitycontext:go_default_library",
         "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
```
```diff
@@ -47,6 +47,7 @@ import (
     utiltesting "k8s.io/client-go/util/testing"
     "k8s.io/client-go/util/workqueue"
     "k8s.io/kubernetes/pkg/controller"
+    . "k8s.io/kubernetes/pkg/controller/testutil"
     "k8s.io/kubernetes/pkg/securitycontext"
 )
 
@@ -78,18 +79,9 @@ func skipListerFunc(verb string, url url.URL) bool {
 
 var alwaysReady = func() bool { return true }
 
-func getKey(rs *apps.ReplicaSet, t *testing.T) string {
-    if key, err := controller.KeyFunc(rs); err != nil {
-        t.Errorf("Unexpected error getting key for ReplicaSet %v: %v", rs.Name, err)
-        return ""
-    } else {
-        return key
-    }
-}
-
 func newReplicaSet(replicas int, selectorMap map[string]string) *apps.ReplicaSet {
     rs := &apps.ReplicaSet{
-        TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
+        TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ReplicaSet"},
         ObjectMeta: metav1.ObjectMeta{
             UID:  uuid.NewUUID(),
             Name: "foobar",
@@ -216,7 +208,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
     newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod")
 
     manager.podControl = &fakePodControl
-    manager.syncReplicaSet(getKey(rsSpec, t))
+    manager.syncReplicaSet(GetKey(rsSpec, t))
     validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
 }
 
@@ -244,7 +236,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 
     go manager.worker()
 
-    expected := getKey(rsSpec, t)
+    expected := GetKey(rsSpec, t)
     select {
     case key := <-received:
         if key != expected {
@@ -271,7 +263,7 @@ func TestSyncReplicaSetCreateFailures(t *testing.T) {
     informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
 
     manager.podControl = &fakePodControl
-    manager.syncReplicaSet(getKey(rs, t))
+    manager.syncReplicaSet(GetKey(rs, t))
     validateSyncReplicaSet(t, &fakePodControl, fakePodControl.CreateLimit, 0, 0)
     expectedLimit := 0
     for pass := uint8(0); expectedLimit <= fakePodControl.CreateLimit; pass++ {
@@ -310,7 +302,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
     rsSpec.Status.Replicas = 1
     rsSpec.Status.ReadyReplicas = 1
     rsSpec.Status.AvailableReplicas = 1
-    manager.syncReplicaSet(getKey(rsSpec, t))
+    manager.syncReplicaSet(GetKey(rsSpec, t))
     validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
 
     // Expectations prevents replicas but not an update on status
@@ -318,7 +310,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
     rsSpec.Status.ReadyReplicas = 0
     rsSpec.Status.AvailableReplicas = 0
     fakePodControl.Clear()
-    manager.syncReplicaSet(getKey(rsSpec, t))
+    manager.syncReplicaSet(GetKey(rsSpec, t))
     validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
 
     // Get the key for the controller
@@ -336,13 +328,13 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
     fakePodControl.Clear()
     fakePodControl.Err = fmt.Errorf("Fake Error")
 
-    manager.syncReplicaSet(getKey(rsSpec, t))
+    manager.syncReplicaSet(GetKey(rsSpec, t))
     validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
 
     // This replica should not need a Lowering of expectations, since the previous create failed
     fakePodControl.Clear()
     fakePodControl.Err = nil
-    manager.syncReplicaSet(getKey(rsSpec, t))
+    manager.syncReplicaSet(GetKey(rsSpec, t))
     validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
 
     // 2 PUT for the ReplicaSet status during dormancy window.
@@ -746,7 +738,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
     informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
 
     for i := 0; i < numReplicas; i += burstReplicas {
-        manager.syncReplicaSet(getKey(rsSpec, t))
+        manager.syncReplicaSet(GetKey(rsSpec, t))
 
         // The store accrues active pods. It's also used by the ReplicaSet to determine how many
         // replicas to create.
@@ -785,7 +777,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 
             // To accurately simulate a watch we must delete the exact pods
             // the rs is waiting for.
-            expectedDels := manager.expectations.GetUIDs(getKey(rsSpec, t))
+            expectedDels := manager.expectations.GetUIDs(GetKey(rsSpec, t))
             podsToDelete := []*v1.Pod{}
             isController := true
             for _, key := range expectedDels.List() {
@@ -819,7 +811,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 
         // Check that the ReplicaSet didn't take any action for all the above pods
         fakePodControl.Clear()
-        manager.syncReplicaSet(getKey(rsSpec, t))
+        manager.syncReplicaSet(GetKey(rsSpec, t))
         validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
 
         // Create/Delete the last pod
@@ -829,7 +821,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
             informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[expectedPods-1])
             manager.addPod(&pods.Items[expectedPods-1])
         } else {
-            expectedDel := manager.expectations.GetUIDs(getKey(rsSpec, t))
+            expectedDel := manager.expectations.GetUIDs(GetKey(rsSpec, t))
             if expectedDel.Len() != 1 {
                 t.Fatalf("Waiting on unexpected number of deletes.")
             }
@@ -903,7 +895,7 @@ func TestRSSyncExpectations(t *testing.T) {
             informers.Core().V1().Pods().Informer().GetIndexer().Add(&postExpectationsPod)
         },
     })
-    manager.syncReplicaSet(getKey(rsSpec, t))
+    manager.syncReplicaSet(GetKey(rsSpec, t))
     validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
 }
 
@@ -920,7 +912,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
     manager.podControl = &fakePodControl
 
     // This should set expectations for the ReplicaSet
-    manager.syncReplicaSet(getKey(rs, t))
+    manager.syncReplicaSet(GetKey(rs, t))
     validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
     fakePodControl.Clear()
 
@@ -937,7 +929,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
         t.Errorf("No expectations found for ReplicaSet")
     }
     informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Delete(rs)
-    manager.syncReplicaSet(getKey(rs, t))
+    manager.syncReplicaSet(GetKey(rs, t))
 
     if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
         t.Errorf("Found expectaions, expected none since the ReplicaSet has been deleted.")
@@ -946,7 +938,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
     // This should have no effect, since we've deleted the ReplicaSet.
     podExp.Add(-1, 0)
     informers.Core().V1().Pods().Informer().GetIndexer().Replace(make([]interface{}, 0), "0")
-    manager.syncReplicaSet(getKey(rs, t))
+    manager.syncReplicaSet(GetKey(rs, t))
     validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
 }
 
@@ -995,7 +987,7 @@ func TestOverlappingRSs(t *testing.T) {
         pod.OwnerReferences = []metav1.OwnerReference{
             {UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name, Controller: &isController},
         }
-        rsKey := getKey(rs, t)
+        rsKey := GetKey(rs, t)
 
         manager.addPod(pod)
         queueRS, _ := manager.queue.Get()
@@ -1123,7 +1115,7 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
     pod := newPod("pod", rs, v1.PodRunning, nil, true)
     pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
    informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
-    err := manager.syncReplicaSet(getKey(rs, t))
+    err := manager.syncReplicaSet(GetKey(rs, t))
     if err != nil {
         t.Fatal(err)
     }
@@ -1145,7 +1137,7 @@ func TestPatchPodFails(t *testing.T) {
     // let both patches fail. The rs controller will assume it fails to take
     // control of the pods and requeue to try again.
     fakePodControl.Err = fmt.Errorf("Fake Error")
-    rsKey := getKey(rs, t)
+    rsKey := GetKey(rs, t)
     err := processSync(manager, rsKey)
     if err == nil || !strings.Contains(err.Error(), "Fake Error") {
         t.Errorf("expected Fake Error, got %+v", err)
@@ -1174,7 +1166,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
     informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
 
     // no patch, no create
-    err := manager.syncReplicaSet(getKey(rs, t))
+    err := manager.syncReplicaSet(GetKey(rs, t))
     if err != nil {
         t.Fatal(err)
     }
@@ -1200,7 +1192,7 @@ func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {
     informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
 
     // sync should abort.
-    err := manager.syncReplicaSet(getKey(rs, t))
+    err := manager.syncReplicaSet(GetKey(rs, t))
     if err == nil {
         t.Error("syncReplicaSet() err = nil, expected non-nil")
     }
```
```diff
@@ -27,6 +27,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
         "//vendor/k8s.io/client-go/tools/reference:go_default_library",
     ],
 )
```
```diff
@@ -20,7 +20,9 @@ import (
     "encoding/json"
     "errors"
     "fmt"
+    "reflect"
     "sync"
+    "testing"
     "time"
 
     apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -38,6 +40,7 @@ import (
     "k8s.io/api/core/v1"
     "k8s.io/client-go/kubernetes/fake"
     v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+    "k8s.io/client-go/tools/cache"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
     api "k8s.io/kubernetes/pkg/apis/core"
     utilnode "k8s.io/kubernetes/pkg/util/node"
@@ -46,6 +49,10 @@ import (
     "github.com/golang/glog"
 )
 
+var (
+    keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
+)
+
 // FakeNodeHandler is a fake implementation of NodesInterface and NodeInterface. It
 // allows test cases to have fine-grained control over mock behaviors. We also need
 // PodsInterface and PodInterface to test list & delet pods, which is implemented in
@@ -485,3 +492,27 @@ func GetZones(nodeHandler *FakeNodeHandler) []string {
 func CreateZoneID(region, zone string) string {
     return region + ":\x00:" + zone
 }
+
+// GetKey is a helper function used by controllers unit tests to get the
+// key for a given kubernetes resource.
+func GetKey(obj interface{}, t *testing.T) string {
+    tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+    if ok {
+        // if tombstone , try getting the value from tombstone.Obj
+        obj = tombstone.Obj
+    }
+    val := reflect.ValueOf(obj).Elem()
+    name := val.FieldByName("Name").String()
+    kind := val.FieldByName("Kind").String()
+    // Note kind is not always set in the tests, so ignoring that for now
+    if len(name) == 0 || len(kind) == 0 {
+        t.Errorf("Unexpected object %v", obj)
+    }
+
+    key, err := keyFunc(obj)
+    if err != nil {
+        t.Errorf("Unexpected error getting key for %v %v: %v", kind, name, err)
+        return ""
+    }
+    return key
+}
```
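The reflection-based helper added above also accepts a `cache.DeletedFinalStateUnknown` tombstone and unwraps it before computing the key. A minimal sketch of that behaviour, with illustrative object values that are not taken from the tests:

```go
package example // hypothetical package, for illustration only

import (
	"testing"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"

	"k8s.io/kubernetes/pkg/controller/testutil"
)

func TestGetKeyTombstoneSketch(t *testing.T) {
	rs := &apps.ReplicaSet{
		TypeMeta:   metav1.TypeMeta{Kind: "ReplicaSet"},
		ObjectMeta: metav1.ObjectMeta{Name: "foobar", Namespace: "default"},
	}

	// Plain object: GetKey returns the usual <namespace>/<name> work-queue key.
	if got := testutil.GetKey(rs, t); got != "default/foobar" {
		t.Fatalf("unexpected key %q", got)
	}

	// Tombstone: GetKey unwraps tombstone.Obj first, so tests that simulate
	// DeletedFinalStateUnknown delete events end up with the same key.
	tombstone := cache.DeletedFinalStateUnknown{Key: "default/foobar", Obj: rs}
	if got := testutil.GetKey(tombstone, t); got != "default/foobar" {
		t.Fatalf("unexpected key %q", got)
	}
}
```

Like the per-package helpers it replaces, the shared helper reports problems through `t.Errorf` and returns an empty string rather than panicking.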