Merge pull request #59883 from kow3ns/ds-cntrl-v1

Automatic merge from submit-queue (batch tested with PRs 59286, 59743, 59883, 60190, 60165). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

DaemonSet Controller and tests to apps/v1

**What this PR does / why we need it**:
Updates the DaemonSet controller, its integration tests, and its e2e tests to use the apps/v1 API.

**Release note**:
```release-note
The DaemonSet controller, its integration tests, and its e2e tests have been updated to use the apps/v1 API.
```
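
For reviewers skimming the diff: the substantive change is that the controller is now wired from apps/v1 informers, listers, and typed clients instead of their extensions/v1beta1 counterparts. Below is a minimal sketch of what that wiring looks like for a caller of `NewDaemonSetsController`, mirroring the first controller-manager hunk; the helper name, clientset construction, and resync period are illustrative assumptions, not part of this PR:

```go
package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/controller/daemon"
)

// newDaemonSetsController is a hypothetical helper showing the apps/v1
// wiring this PR switches the controller-manager to: DaemonSets and
// ControllerRevisions are now both consumed from the apps/v1 group.
func newDaemonSetsController(config *rest.Config, stop <-chan struct{}) (*daemon.DaemonSetsController, error) {
	client := kubernetes.NewForConfigOrDie(config)
	// The resync period is an arbitrary choice for this sketch.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)

	dsc, err := daemon.NewDaemonSetsController(
		factory.Apps().V1().DaemonSets(),          // was Extensions().V1beta1().DaemonSets()
		factory.Apps().V1().ControllerRevisions(), // was Apps().V1beta1().ControllerRevisions()
		factory.Core().V1().Pods(),
		factory.Core().V1().Nodes(),
		client,
	)
	if err != nil {
		return nil, err
	}
	factory.Start(stop)
	return dsc, nil
}
```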
Merged by Kubernetes Submit Queue on 2018-02-23 20:09:35 -08:00, committed by GitHub.
commit e833d6880e
19 changed files with 345 additions and 456 deletions


@@ -21,10 +21,31 @@ limitations under the License.
 package app

 import (
+	"fmt"
+
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/kubernetes/pkg/controller/daemon"
 	"k8s.io/kubernetes/pkg/controller/statefulset"
 )

+func startDaemonSetController(ctx ControllerContext) (bool, error) {
+	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}] {
+		return false, nil
+	}
+	dsc, err := daemon.NewDaemonSetsController(
+		ctx.InformerFactory.Apps().V1().DaemonSets(),
+		ctx.InformerFactory.Apps().V1().ControllerRevisions(),
+		ctx.InformerFactory.Core().V1().Pods(),
+		ctx.InformerFactory.Core().V1().Nodes(),
+		ctx.ClientBuilder.ClientOrDie("daemon-set-controller"),
+	)
+	if err != nil {
+		return true, fmt.Errorf("error creating DaemonSets controller: %v", err)
+	}
+	go dsc.Run(int(ctx.ComponentConfig.ConcurrentDaemonSetSyncs), ctx.Stop)
+	return true, nil
+}
+
 func startStatefulSetController(ctx ControllerContext) (bool, error) {
 	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "statefulsets"}] {
 		return false, nil


@@ -24,29 +24,10 @@ import (
 	"fmt"

 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/kubernetes/pkg/controller/daemon"
 	"k8s.io/kubernetes/pkg/controller/deployment"
 	"k8s.io/kubernetes/pkg/controller/replicaset"
 )

-func startDaemonSetController(ctx ControllerContext) (bool, error) {
-	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"}] {
-		return false, nil
-	}
-	dsc, err := daemon.NewDaemonSetsController(
-		ctx.InformerFactory.Extensions().V1beta1().DaemonSets(),
-		ctx.InformerFactory.Apps().V1beta1().ControllerRevisions(),
-		ctx.InformerFactory.Core().V1().Pods(),
-		ctx.InformerFactory.Core().V1().Nodes(),
-		ctx.ClientBuilder.ClientOrDie("daemon-set-controller"),
-	)
-	if err != nil {
-		return true, fmt.Errorf("error creating DaemonSets controller: %v", err)
-	}
-	go dsc.Run(int(ctx.ComponentConfig.ConcurrentDaemonSetSyncs), ctx.Stop)
-	return true, nil
-}
-
 func startDeploymentController(ctx ControllerContext) (bool, error) {
 	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}] {
 		return false, nil


@@ -61,7 +61,7 @@ go_library(
         "//pkg/util/taints:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/golang/groupcache/lru:go_default_library",
-        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
+        "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/authentication/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",


@@ -21,7 +21,7 @@ import (
 	"sync"

 	"github.com/golang/glog"
-	appsv1beta1 "k8s.io/api/apps/v1beta1"
+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -436,18 +436,18 @@ func NewControllerRevisionControllerRefManager(
 // If the error is nil, either the reconciliation succeeded, or no
 // reconciliation was necessary. The list of ControllerRevisions that you now own is
 // returned.
-func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*appsv1beta1.ControllerRevision) ([]*appsv1beta1.ControllerRevision, error) {
-	var claimed []*appsv1beta1.ControllerRevision
+func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
+	var claimed []*apps.ControllerRevision
 	var errlist []error

 	match := func(obj metav1.Object) bool {
 		return m.Selector.Matches(labels.Set(obj.GetLabels()))
 	}
 	adopt := func(obj metav1.Object) error {
-		return m.AdoptControllerRevision(obj.(*appsv1beta1.ControllerRevision))
+		return m.AdoptControllerRevision(obj.(*apps.ControllerRevision))
 	}
 	release := func(obj metav1.Object) error {
-		return m.ReleaseControllerRevision(obj.(*appsv1beta1.ControllerRevision))
+		return m.ReleaseControllerRevision(obj.(*apps.ControllerRevision))
 	}

 	for _, h := range histories {
@@ -465,7 +465,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor

 // AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
 // the patching fails.
-func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *appsv1beta1.ControllerRevision) error {
+func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *apps.ControllerRevision) error {
 	if err := m.CanAdopt(); err != nil {
 		return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
 	}
@@ -480,7 +480,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history

 // ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
 // It returns the error if the patching fails. 404 and 422 errors are ignored.
-func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *appsv1beta1.ControllerRevision) error {
+func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error {
 	glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
 		history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
 	deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID)


@@ -27,9 +27,8 @@ go_library(
         "//pkg/util/labels:go_default_library",
         "//pkg/util/metrics:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
+        "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -42,16 +41,14 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
+        "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
         "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
+        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
+        "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
         "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache:go_default_library",
         "//vendor/k8s.io/client-go/tools/record:go_default_library",
         "//vendor/k8s.io/client-go/util/integer:go_default_library",
@@ -68,7 +65,6 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/api/legacyscheme:go_default_library",
-        "//pkg/api/testapi:go_default_library",
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/apis/core:go_default_library",
         "//pkg/controller:go_default_library",
@@ -76,8 +72,8 @@ go_test(
         "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/securitycontext:go_default_library",
         "//pkg/util/labels:go_default_library",
+        "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",


@@ -23,9 +23,8 @@ import (
 	"sync"
 	"time"

-	apps "k8s.io/api/apps/v1beta1"
+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -34,16 +33,14 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	appsinformers "k8s.io/client-go/informers/apps/v1beta1"
+	appsinformers "k8s.io/client-go/informers/apps/v1"
 	coreinformers "k8s.io/client-go/informers/core/v1"
-	extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
+	unversionedapps "k8s.io/client-go/kubernetes/typed/apps/v1"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
-	unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
-	appslisters "k8s.io/client-go/listers/apps/v1beta1"
+	appslisters "k8s.io/client-go/listers/apps/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
-	extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/integer"
@@ -82,7 +79,7 @@
 )

 // controllerKind contains the schema.GroupVersionKind for this controller type.
-var controllerKind = extensions.SchemeGroupVersion.WithKind("DaemonSet")
+var controllerKind = apps.SchemeGroupVersion.WithKind("DaemonSet")

 // DaemonSetsController is responsible for synchronizing DaemonSet objects stored
 // in the system with actual running pods.
@@ -99,12 +96,12 @@ type DaemonSetsController struct {
 	// To allow injection of syncDaemonSet for testing.
 	syncHandler func(dsKey string) error
 	// used for unit testing
-	enqueueDaemonSet            func(ds *extensions.DaemonSet)
-	enqueueDaemonSetRateLimited func(ds *extensions.DaemonSet)
+	enqueueDaemonSet            func(ds *apps.DaemonSet)
+	enqueueDaemonSetRateLimited func(ds *apps.DaemonSet)
 	// A TTLCache of pod creates/deletes each ds expects to see
 	expectations controller.ControllerExpectationsInterface
 	// dsLister can list/get daemonsets from the shared informer's store
-	dsLister extensionslisters.DaemonSetLister
+	dsLister appslisters.DaemonSetLister
 	// dsStoreSynced returns true if the daemonset store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
 	dsStoreSynced cache.InformerSynced
@@ -134,7 +131,7 @@
 }

 // NewDaemonSetsController creates a new DaemonSetsController
-func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) {
+func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	// TODO: remove the wrapper when every clients have moved to use the clientset.
@@ -163,13 +160,13 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo
 	daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			ds := obj.(*extensions.DaemonSet)
+			ds := obj.(*apps.DaemonSet)
 			glog.V(4).Infof("Adding daemon set %s", ds.Name)
 			dsc.enqueueDaemonSet(ds)
 		},
 		UpdateFunc: func(old, cur interface{}) {
-			oldDS := old.(*extensions.DaemonSet)
-			curDS := cur.(*extensions.DaemonSet)
+			oldDS := old.(*apps.DaemonSet)
+			curDS := cur.(*apps.DaemonSet)
 			glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
 			dsc.enqueueDaemonSet(curDS)
 		},
 		DeleteFunc: dsc.deleteDaemonset,
@@ -211,14 +208,14 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo
 }

 func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
-	ds, ok := obj.(*extensions.DaemonSet)
+	ds, ok := obj.(*apps.DaemonSet)
 	if !ok {
 		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 		if !ok {
 			utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
 			return
 		}
-		ds, ok = tombstone.Obj.(*extensions.DaemonSet)
+		ds, ok = tombstone.Obj.(*apps.DaemonSet)
 		if !ok {
 			utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a DaemonSet %#v", obj))
 			return
@@ -272,7 +269,7 @@ func (dsc *DaemonSetsController) processNextWorkItem() bool {
 	return true
 }

-func (dsc *DaemonSetsController) enqueue(ds *extensions.DaemonSet) {
+func (dsc *DaemonSetsController) enqueue(ds *apps.DaemonSet) {
 	key, err := controller.KeyFunc(ds)
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", ds, err))
@@ -283,7 +280,7 @@ func (dsc *DaemonSetsController) enqueue(ds *extensions.DaemonSet) {
 	dsc.queue.Add(key)
 }

-func (dsc *DaemonSetsController) enqueueRateLimited(ds *extensions.DaemonSet) {
+func (dsc *DaemonSetsController) enqueueRateLimited(ds *apps.DaemonSet) {
 	key, err := controller.KeyFunc(ds)
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", ds, err))
@@ -305,7 +302,7 @@ func (dsc *DaemonSetsController) enqueueDaemonSetAfter(obj interface{}, after ti
 }

 // getDaemonSetsForPod returns a list of DaemonSets that potentially match the pod.
-func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*extensions.DaemonSet {
+func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*apps.DaemonSet {
 	sets, err := dsc.dsLister.GetPodDaemonSets(pod)
 	if err != nil {
 		return nil
@@ -320,7 +317,7 @@ func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*extensions.

 // getDaemonSetsForHistory returns a list of DaemonSets that potentially
 // match a ControllerRevision.
-func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.ControllerRevision) []*extensions.DaemonSet {
+func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.ControllerRevision) []*apps.DaemonSet {
 	daemonSets, err := dsc.dsLister.GetHistoryDaemonSets(history)
 	if err != nil || len(daemonSets) == 0 {
 		return nil
@@ -736,7 +733,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 // This also reconciles ControllerRef by adopting/orphaning.
 // Note that returned Pods are pointers to objects in the cache.
 // If you want to modify one, you need to deep-copy it first.
-func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1.Pod, error) {
+func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, error) {
 	selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
 	if err != nil {
 		return nil, err
@@ -751,7 +748,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1.
 	// If any adoptions are attempted, we should first recheck for deletion with
 	// an uncached quorum read sometime after listing Pods (see #42639).
 	dsNotDeleted := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
+		fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -770,7 +767,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1.
 // This also reconciles ControllerRef by adopting/orphaning.
 // Note that returned Pods are pointers to objects in the cache.
 // If you want to modify one, you need to deep-copy it first.
-func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*v1.Pod, error) {
+func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[string][]*v1.Pod, error) {
 	claimedPods, err := dsc.getDaemonPods(ds)
 	if err != nil {
 		return nil, err
@@ -787,7 +784,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet)
 // resolveControllerRef returns the controller referenced by a ControllerRef,
 // or nil if the ControllerRef could not be resolved to a matching controller
 // of the correct Kind.
-func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.DaemonSet {
+func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.DaemonSet {
 	// We can't look up by UID, so look up by Name and then verify UID.
 	// Don't even try to look up by Name if it's the wrong Kind.
 	if controllerRef.Kind != controllerKind.Kind {
@@ -809,7 +806,7 @@ func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controll
 // After figuring out which nodes should run a Pod of ds but not yet running one and
 // which nodes should not run a Pod of ds but currently running one, it calls function
 // syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds.
-func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, hash string) error {
+func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
 	// Find out which nodes are running the daemon pods controlled by ds.
 	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
 	if err != nil {
@@ -891,7 +888,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, hash string) e

 // syncNodes deletes given pods and creates new daemon set pods on the given nodes
 // returns slice with erros if any
-func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
+func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
 	// We need to set expectations before creating/deleting pods to avoid race conditions.
 	dsKey, err := controller.KeyFunc(ds)
 	if err != nil {
@@ -915,7 +912,13 @@ func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelet

 	glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
 	createWait := sync.WaitGroup{}
-	template := util.CreatePodTemplate(ds.Spec.Template, ds.Spec.TemplateGeneration, hash)
+	// If the returned error is not nil we have a parse error.
+	// The controller handles this via the hash.
+	generation, err := util.GetTemplateGeneration(ds)
+	if err != nil {
+		generation = nil
+	}
+	template := util.CreatePodTemplate(ds.Spec.Template, generation, hash)
 	// Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
 	// and double with each successful iteration in a kind of "slow start".
 	// This handles attempts to start large numbers of pods that would
@@ -989,7 +992,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelet
 	return utilerrors.NewAggregate(errors)
 }

-func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int) error {
+func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int) error {
 	if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled &&
 		int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled &&
 		int(ds.Status.NumberMisscheduled) == numberMisscheduled &&
@@ -1028,7 +1031,7 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds
 	return updateErr
 }

-func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet, hash string) error {
+func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash string) error {
 	glog.V(4).Infof("Updating daemon set status")
 	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
 	if err != nil {
@@ -1063,7 +1066,13 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet,
 				numberAvailable++
 			}
 		}
-		if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod, hash) {
+		// If the returned error is not nil we have a parse error.
+		// The controller handles this via the hash.
+		generation, err := util.GetTemplateGeneration(ds)
+		if err != nil {
+			generation = nil
+		}
+		if util.IsPodUpdated(pod, hash, generation) {
 			updatedNumberScheduled++
 		}
 	}
@@ -1075,7 +1084,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet,
 	}

 	numberUnavailable := desiredNumberScheduled - numberAvailable
-	err = storeDaemonSetStatus(dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
+	err = storeDaemonSetStatus(dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
 	if err != nil {
 		return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
 	}
@@ -1122,7 +1131,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 	if err != nil {
 		return fmt.Errorf("failed to construct revisions of DaemonSet: %v", err)
 	}
-	hash := cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
+	hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]

 	if ds.DeletionTimestamp != nil || !dsc.expectations.SatisfiedExpectations(dsKey) {
 		// Only update status.
@@ -1137,8 +1146,8 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 	// Process rolling updates if we're ready.
 	if dsc.expectations.SatisfiedExpectations(dsKey) {
 		switch ds.Spec.UpdateStrategy.Type {
-		case extensions.OnDeleteDaemonSetStrategyType:
-		case extensions.RollingUpdateDaemonSetStrategyType:
+		case apps.OnDeleteDaemonSetStrategyType:
+		case apps.RollingUpdateDaemonSetStrategyType:
 			err = dsc.rollingUpdate(ds, hash)
 		}
 		if err != nil {
@@ -1154,7 +1163,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 	return dsc.updateDaemonSetStatus(ds, hash)
 }

-func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *extensions.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) {
+func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) {
 	// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
 	// Add infinite toleration for taint notReady:NoExecute here
 	// to survive taint-based eviction enforced by NodeController
@@ -1240,7 +1249,7 @@ func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *ext
 // * shouldContinueRunning:
 //     Returns true when a daemonset should continue running on a node if a daemonset pod is already
 //     running on that node.
-func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet) (wantToRun, shouldSchedule, shouldContinueRunning bool, err error) {
+func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.DaemonSet) (wantToRun, shouldSchedule, shouldContinueRunning bool, err error) {
 	newPod := NewPod(ds, node.Name)

 	// Because these bools require an && of all their required conditions, we start
@@ -1325,7 +1334,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 }

 // NewPod creates a new pod
-func NewPod(ds *extensions.DaemonSet, nodeName string) *v1.Pod {
+func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod {
 	newPod := &v1.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
 	newPod.Namespace = ds.Namespace
 	newPod.Spec.NodeName = nodeName
@@ -1363,7 +1372,7 @@ func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorit
 }

 // byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
-type byCreationTimestamp []*extensions.DaemonSet
+type byCreationTimestamp []*apps.DaemonSet

 func (o byCreationTimestamp) Len() int      { return len(o) }
 func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }


@@ -24,8 +24,8 @@ import (
 	"sync"
 	"testing"

+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -40,7 +40,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
-	"k8s.io/kubernetes/pkg/api/testapi"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/controller"
@@ -83,7 +82,7 @@ var (
 	}}
 )

-func getKey(ds *extensions.DaemonSet, t *testing.T) string {
+func getKey(ds *apps.DaemonSet, t *testing.T) string {
 	key, err := controller.KeyFunc(ds)

 	if err != nil {
@@ -92,19 +91,18 @@ func getKey(ds *extensions.DaemonSet, t *testing.T) string {
 	return key
 }

-func newDaemonSet(name string) *extensions.DaemonSet {
+func newDaemonSet(name string) *apps.DaemonSet {
 	two := int32(2)
-	return &extensions.DaemonSet{
-		TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
+	return &apps.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
 			UID:       uuid.NewUUID(),
 			Name:      name,
 			Namespace: metav1.NamespaceDefault,
 		},
-		Spec: extensions.DaemonSetSpec{
+		Spec: apps.DaemonSetSpec{
 			RevisionHistoryLimit: &two,
-			UpdateStrategy: extensions.DaemonSetUpdateStrategy{
-				Type: extensions.OnDeleteDaemonSetStrategyType,
+			UpdateStrategy: apps.DaemonSetUpdateStrategy{
+				Type: apps.OnDeleteDaemonSetStrategyType,
 			},
 			Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 			Template: v1.PodTemplateSpec{
@@ -127,22 +125,22 @@ func newDaemonSet(name string) *extensions.DaemonSet {
 	}
 }

-func newRollbackStrategy() *extensions.DaemonSetUpdateStrategy {
+func newRollbackStrategy() *apps.DaemonSetUpdateStrategy {
 	one := intstr.FromInt(1)
-	return &extensions.DaemonSetUpdateStrategy{
-		Type:          extensions.RollingUpdateDaemonSetStrategyType,
-		RollingUpdate: &extensions.RollingUpdateDaemonSet{MaxUnavailable: &one},
+	return &apps.DaemonSetUpdateStrategy{
+		Type:          apps.RollingUpdateDaemonSetStrategyType,
+		RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one},
 	}
 }

-func newOnDeleteStrategy() *extensions.DaemonSetUpdateStrategy {
-	return &extensions.DaemonSetUpdateStrategy{
-		Type: extensions.OnDeleteDaemonSetStrategyType,
+func newOnDeleteStrategy() *apps.DaemonSetUpdateStrategy {
+	return &apps.DaemonSetUpdateStrategy{
+		Type: apps.OnDeleteDaemonSetStrategyType,
 	}
 }

-func updateStrategies() []*extensions.DaemonSetUpdateStrategy {
-	return []*extensions.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
+func updateStrategies() []*apps.DaemonSetUpdateStrategy {
+	return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
 }

 func newNode(name string, label map[string]string) *v1.Node {
@@ -170,14 +168,14 @@ func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]
 	}
 }

-func newPod(podName string, nodeName string, label map[string]string, ds *extensions.DaemonSet) *v1.Pod {
+func newPod(podName string, nodeName string, label map[string]string, ds *apps.DaemonSet) *v1.Pod {
 	// Add hash unique label to the pod
 	newLabels := label
 	var podSpec v1.PodSpec
 	// Copy pod spec from DaemonSet template, or use a default one if DaemonSet is nil
 	if ds != nil {
 		hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
-		newLabels = labelsutil.CloneAndAddLabel(label, extensions.DefaultDaemonSetUniqueLabelKey, hash)
+		newLabels = labelsutil.CloneAndAddLabel(label, apps.DefaultDaemonSetUniqueLabelKey, hash)
 		podSpec = ds.Spec.Template.Spec
 	} else {
 		podSpec = v1.PodSpec{
@@ -212,14 +210,14 @@ func newPod(podName string, nodeName string, label map[string]string, ds *extens
 	return pod
 }

-func addPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) {
+func addPods(podStore cache.Store, nodeName string, label map[string]string, ds *apps.DaemonSet, number int) {
 	for i := 0; i < number; i++ {
 		pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds)
 		podStore.Add(pod)
 	}
 }

-func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) {
+func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, ds *apps.DaemonSet, number int) {
 	for i := 0; i < number; i++ {
 		pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds)
 		pod.Status = v1.PodStatus{Phase: v1.PodFailed}
@@ -299,8 +297,8 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
 	informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())

 	dsc, err := NewDaemonSetsController(
-		informerFactory.Extensions().V1beta1().DaemonSets(),
-		informerFactory.Apps().V1beta1().ControllerRevisions(),
+		informerFactory.Apps().V1().DaemonSets(),
+		informerFactory.Apps().V1().ControllerRevisions(),
 		informerFactory.Core().V1().Pods(),
 		informerFactory.Core().V1().Nodes(),
 		clientset,
@@ -322,8 +320,8 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,

 	return &daemonSetsController{
 		dsc,
-		informerFactory.Extensions().V1beta1().DaemonSets().Informer().GetStore(),
-		informerFactory.Apps().V1beta1().ControllerRevisions().Informer().GetStore(),
+		informerFactory.Apps().V1().DaemonSets().Informer().GetStore(),
+		informerFactory.Apps().V1().ControllerRevisions().Informer().GetStore(),
 		informerFactory.Core().V1().Pods().Informer().GetStore(),
 		informerFactory.Core().V1().Nodes().Informer().GetStore(),
 		fakeRecorder,
@@ -346,7 +344,7 @@ func validateSyncDaemonSets(t *testing.T, manager *daemonSetsController, fakePod
 	}
 	// Make sure the ControllerRefs are correct.
 	for _, controllerRef := range fakePodControl.ControllerRefs {
-		if got, want := controllerRef.APIVersion, "extensions/v1beta1"; got != want {
+		if got, want := controllerRef.APIVersion, "apps/v1"; got != want {
 			t.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
 		}
 		if got, want := controllerRef.Kind, "DaemonSet"; got != want {
@@ -358,7 +356,7 @@ func validateSyncDaemonSets(t *testing.T, manager *daemonSetsController, fakePod
 	}
 }

-func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) {
+func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) {
 	key, err := controller.KeyFunc(ds)
 	if err != nil {
 		t.Errorf("Could not get key for daemon.")
@@ -368,7 +366,7 @@ func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *
 }

 // clearExpectations copies the FakePodControl to PodStore and clears the create and delete expectations.
-func clearExpectations(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, fakePodControl *fakePodControl) {
+func clearExpectations(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, fakePodControl *fakePodControl) {
 	fakePodControl.Clear()

 	key, err := controller.KeyFunc(ds)
@@ -459,13 +457,13 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}

-		var updated *extensions.DaemonSet
+		var updated *apps.DaemonSet
 		clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 			if action.GetSubresource() != "status" {
 				return false, nil, nil
 			}
 			if u, ok := action.(core.UpdateAction); ok {
-				updated = u.GetObject().(*extensions.DaemonSet)
+				updated = u.GetObject().(*apps.DaemonSet)
 			}
 			return false, nil, nil
 		})
@@ -585,9 +583,9 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
 		})
 		manager.dsStore.Add(ds)
 		switch strategy.Type {
-		case extensions.OnDeleteDaemonSetStrategyType:
+		case apps.OnDeleteDaemonSetStrategyType:
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2)
-		case extensions.RollingUpdateDaemonSetStrategyType:
+		case apps.RollingUpdateDaemonSetStrategyType:
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3)
 		default:
 			t.Fatalf("unexpected UpdateStrategy %+v", strategy)
@@ -615,9 +613,9 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
 		})
 		manager.dsStore.Add(ds)
 		switch strategy.Type {
-		case extensions.OnDeleteDaemonSetStrategyType:
+		case apps.OnDeleteDaemonSetStrategyType:
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2)
-		case extensions.RollingUpdateDaemonSetStrategyType:
+		case apps.RollingUpdateDaemonSetStrategyType:
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3)
 		default:
 			t.Fatalf("unexpected UpdateStrategy %+v", strategy)
@@ -1123,13 +1121,13 @@ func TestNumberReadyStatus(t *testing.T) {
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
-		var updated *extensions.DaemonSet
+		var updated *apps.DaemonSet
 		clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 			if action.GetSubresource() != "status" {
 				return false, nil, nil
 			}
 			if u, ok := action.(core.UpdateAction); ok {
-				updated = u.GetObject().(*extensions.DaemonSet)
+				updated = u.GetObject().(*apps.DaemonSet)
 			}
 			return false, nil, nil
 		})
@@ -1166,13 +1164,13 @@ func TestObservedGeneration(t *testing.T) {
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
-		var updated *extensions.DaemonSet
+		var updated *apps.DaemonSet
 		clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 			if action.GetSubresource() != "status" {
 				return false, nil, nil
 			}
 			if u, ok := action.(core.UpdateAction); ok {
-				updated = u.GetObject().(*extensions.DaemonSet)
+				updated = u.GetObject().(*apps.DaemonSet)
 			}
 			return false, nil, nil
 		})
@@ -1385,7 +1383,7 @@ func setNodeTaint(node *v1.Node, taints []v1.Taint) {
 	node.Spec.Taints = taints
 }

-func setDaemonSetToleration(ds *extensions.DaemonSet, tolerations []v1.Toleration) {
+func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) {
 	ds.Spec.Template.Spec.Tolerations = tolerations
 }

@@ -1482,9 +1480,9 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
 		utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False")
 		manager.dsStore.Add(ds)
 		switch strategy.Type {
-		case extensions.OnDeleteDaemonSetStrategyType:
+		case apps.OnDeleteDaemonSetStrategyType:
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2)
-		case extensions.RollingUpdateDaemonSetStrategyType:
+		case apps.RollingUpdateDaemonSetStrategyType:
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3)
 		default:
 			t.Fatalf("unexpected UpdateStrategy %+v", strategy)
@@ -1493,9 +1491,9 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
 		// Enabling critical pod annotation feature gate should create critical pod
 		utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
 		switch strategy.Type {
-		case extensions.OnDeleteDaemonSetStrategyType:
+		case apps.OnDeleteDaemonSetStrategyType:
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 2)
-		case extensions.RollingUpdateDaemonSetStrategyType:
+		case apps.RollingUpdateDaemonSetStrategyType:
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 3)
 		default:
 			t.Fatalf("unexpected UpdateStrategy %+v", strategy)
@@ -1534,7 +1532,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
 	}
 }

-func setDaemonSetCritical(ds *extensions.DaemonSet) {
+func setDaemonSetCritical(ds *apps.DaemonSet) {
 	ds.Namespace = api.NamespaceSystem
 	if ds.Spec.Template.ObjectMeta.Annotations == nil {
 		ds.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
@@ -1547,14 +1545,14 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		predicateName string
 		podsOnNode    []*v1.Pod
 		nodeCondition []v1.NodeCondition
-		ds            *extensions.DaemonSet
+		ds            *apps.DaemonSet
 		wantToRun, shouldSchedule, shouldContinueRunning bool
 		err error
 	}{
 		{
 			predicateName: "ShouldRunDaemonPod",
-			ds: &extensions.DaemonSet{
-				Spec: extensions.DaemonSetSpec{
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{
@@ -1570,8 +1568,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		},
 		{
 			predicateName: "InsufficientResourceError",
-			ds: &extensions.DaemonSet{
-				Spec: extensions.DaemonSetSpec{
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{
@@ -1587,8 +1585,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		},
 		{
 			predicateName: "ErrPodNotMatchHostName",
-			ds: &extensions.DaemonSet{
-				Spec: extensions.DaemonSetSpec{
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{
@@ -1615,8 +1613,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			ds: &extensions.DaemonSet{
-				Spec: extensions.DaemonSetSpec{
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{
@@ -1650,8 +1648,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			ds: &extensions.DaemonSet{
-				Spec: extensions.DaemonSetSpec{
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{
@@ -1679,8 +1677,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 					},
 				},
 			},
-			ds: &extensions.DaemonSet{
-				Spec: extensions.DaemonSetSpec{
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{
@@ -1696,8 +1694,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		},
 		{
 			predicateName: "ErrNodeSelectorNotMatch",
-			ds: &extensions.DaemonSet{
-				Spec: extensions.DaemonSetSpec{
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{
@@ -1715,8 +1713,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		},
 		{
 			predicateName: "ShouldRunDaemonPod",
-			ds: &extensions.DaemonSet{
-				Spec: extensions.DaemonSetSpec{
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{
@@ -1734,8 +1732,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		},
 		{
 			predicateName: "ErrPodAffinityNotMatch",
-			ds: &extensions.DaemonSet{
-				Spec: extensions.DaemonSetSpec{
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{
@@ -1769,8 +1767,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		},
 		{
 			predicateName: "ShouldRunDaemonPod",
-			ds: &extensions.DaemonSet{
-				Spec: extensions.DaemonSetSpec{
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{
@@ -1845,14 +1843,14 @@ func TestUpdateNode(t *testing.T) {
 		test          string
 		newNode       *v1.Node
 		oldNode       *v1.Node
-		ds            *extensions.DaemonSet
+		ds            *apps.DaemonSet
 		shouldEnqueue bool
 	}{
 		{
 			test:    "Nothing changed, should not enqueue",
 			oldNode: newNode("node1", nil),
 			newNode: newNode("node1", nil),
-			ds: func() *extensions.DaemonSet {
+			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("ds")
 				ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 				return ds
@@ -1863,7 +1861,7 @@ func TestUpdateNode(t *testing.T) {
 			test:    "Node labels changed",
 			oldNode: newNode("node1", nil),
 			newNode: newNode("node1", simpleNodeLabel),
-			ds: func() *extensions.DaemonSet {
+			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("ds")
 				ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 				return ds
@@ -1893,7 +1891,7 @@ func TestUpdateNode(t *testing.T) {
 		manager.dsStore.Add(c.ds)
 		syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 0)

-		manager.enqueueDaemonSet = func(ds *extensions.DaemonSet) {
+		manager.enqueueDaemonSet = func(ds *apps.DaemonSet) {
 			if ds.Name == "ds" {
 				enqueued = true
 			}
@@ -1917,7 +1915,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
 		node          *v1.Node
 		existPods     []*v1.Pod
 		deletedPod    *v1.Pod
-		ds            *extensions.DaemonSet
+		ds            *apps.DaemonSet
 		shouldEnqueue bool
 	}{
 		{
@@ -1952,7 +1950,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
 					Spec: podSpec,
 				}
 			}(),
-			ds: func() *extensions.DaemonSet {
+			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("ds")
 				ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
 				return ds
@@ -1997,7 +1995,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
 					Spec: podSpec,
 				}
 			}(),
-			ds: func() *extensions.DaemonSet {
+			ds: func() *apps.DaemonSet {
ds := newDaemonSet("ds") ds := newDaemonSet("ds")
ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m") ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
return ds return ds
@ -2039,7 +2037,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
Spec: podSpec, Spec: podSpec,
} }
}(), }(),
ds: func() *extensions.DaemonSet { ds: func() *apps.DaemonSet {
ds := newDaemonSet("ds") ds := newDaemonSet("ds")
ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m") ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
return ds return ds
@ -2061,15 +2059,15 @@ func TestDeleteNoDaemonPod(t *testing.T) {
manager.podStore.Add(pod) manager.podStore.Add(pod)
} }
switch strategy.Type { switch strategy.Type {
case extensions.OnDeleteDaemonSetStrategyType: case apps.OnDeleteDaemonSetStrategyType:
syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 2) syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 2)
case extensions.RollingUpdateDaemonSetStrategyType: case apps.RollingUpdateDaemonSetStrategyType:
syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 3) syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 3)
default: default:
t.Fatalf("unexpected UpdateStrategy %+v", strategy) t.Fatalf("unexpected UpdateStrategy %+v", strategy)
} }
manager.enqueueDaemonSetRateLimited = func(ds *extensions.DaemonSet) { manager.enqueueDaemonSetRateLimited = func(ds *apps.DaemonSet) {
if ds.Name == "ds" { if ds.Name == "ds" {
enqueued = true enqueued = true
} }
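The fixtures above now exercise the apps/v1 types end to end. For readers following the migration, below is a minimal sketch of an apps/v1 DaemonSet literal of the shape these test cases construct; the name, labels, and image are illustrative, not taken from this PR.

```go
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	label := map[string]string{"name": "example-daemon"} // illustrative label set
	ds := &apps.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: apps.DaemonSetSpec{
			// Selector and template labels must agree, as in the test fixtures.
			Selector: &metav1.LabelSelector{MatchLabels: label},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: label},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "app", Image: "foo/bar"}},
				},
			},
			UpdateStrategy: apps.DaemonSetUpdateStrategy{
				Type: apps.RollingUpdateDaemonSetStrategyType,
			},
		},
	}
	fmt.Println(ds.Name)
}
```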

View File

@@ -23,9 +23,8 @@ import (
 	"github.com/golang/glog"
-	apps "k8s.io/api/apps/v1beta1"
+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -41,7 +40,7 @@ import (
 // rollingUpdate deletes old daemon set pods making sure that no more than
 // ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable
-func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet, hash string) error {
+func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, hash string) error {
 	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
 	if err != nil {
 		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
@@ -82,7 +81,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet, hash st
 // constructHistory finds all histories controlled by the given DaemonSet, and
 // update current history revision number, or create current history if need to.
 // It also deduplicates current history, and adds missing unique labels to existing histories.
-func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
+func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
 	var histories []*apps.ControllerRevision
 	var currentHistories []*apps.ControllerRevision
 	histories, err = dsc.controlledHistories(ds)
@@ -92,10 +91,10 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur
 	for _, history := range histories {
 		// Add the unique label if it's not already added to the history
 		// We use history name instead of computing hash, so that we don't need to worry about hash collision
-		if _, ok := history.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; !ok {
+		if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok {
 			toUpdate := history.DeepCopy()
-			toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
-			history, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate)
+			toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
+			history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
 			if err != nil {
 				return nil, nil, err
 			}
@@ -130,7 +129,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur
 		if cur.Revision < currRevision {
 			toUpdate := cur.DeepCopy()
 			toUpdate.Revision = currRevision
-			_, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate)
+			_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
 			if err != nil {
 				return nil, nil, err
 			}
@@ -139,7 +138,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur
 	return cur, old, err
 }
-func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old []*apps.ControllerRevision) error {
+func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.ControllerRevision) error {
 	nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
 	if err != nil {
 		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
@@ -155,7 +154,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old []
 	liveHashes := make(map[string]bool)
 	for _, pods := range nodesToDaemonPods {
 		for _, pod := range pods {
-			if hash := pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 {
+			if hash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 {
 				liveHashes[hash] = true
 			}
 		}
@@ -164,7 +163,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old []
 	// Find all live history with the above hashes
 	liveHistory := make(map[string]bool)
 	for _, history := range old {
-		if hash := history.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] {
+		if hash := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] {
 			liveHistory[history.Name] = true
 		}
 	}
@@ -199,7 +198,7 @@ func maxRevision(histories []*apps.ControllerRevision) int64 {
 	return max
 }
-func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
+func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
 	if len(curHistories) == 1 {
 		return curHistories[0], nil
 	}
@@ -222,12 +221,12 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur
 		return nil, err
 	}
 	for _, pod := range pods {
-		if pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] {
+		if pod.Labels[apps.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey] {
 			toUpdate := pod.DeepCopy()
 			if toUpdate.Labels == nil {
 				toUpdate.Labels = make(map[string]string)
 			}
-			toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
+			toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
 			_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate)
 			if err != nil {
 				return nil, err
@@ -247,7 +246,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur
 // This also reconciles ControllerRef by adopting/orphaning.
 // Note that returned histories are pointers to objects in the cache.
 // If you want to modify one, you need to deep-copy it first.
-func (dsc *DaemonSetsController) controlledHistories(ds *extensions.DaemonSet) ([]*apps.ControllerRevision, error) {
+func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*apps.ControllerRevision, error) {
 	selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
 	if err != nil {
 		return nil, err
@@ -277,7 +276,7 @@ func (dsc *DaemonSetsController) controlledHistories(ds *extensions.DaemonSet) (
 }
 // Match check if the given DaemonSet's template matches the template stored in the given history.
-func Match(ds *extensions.DaemonSet, history *apps.ControllerRevision) (bool, error) {
+func Match(ds *apps.DaemonSet, history *apps.ControllerRevision) (bool, error) {
 	patch, err := getPatch(ds)
 	if err != nil {
 		return false, err
@@ -289,7 +288,7 @@ func Match(ds *extensions.DaemonSet, history *apps.ControllerRevision) (bool, er
 // previous version. If the returned error is nil the patch is valid. The current state that we save is just the
 // PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously
 // recorded patches.
-func getPatch(ds *extensions.DaemonSet) ([]byte, error) {
+func getPatch(ds *apps.DaemonSet) ([]byte, error) {
 	dsBytes, err := json.Marshal(ds)
 	if err != nil {
 		return nil, err
@@ -312,7 +311,7 @@ func getPatch(ds *extensions.DaemonSet) ([]byte, error) {
 	return patch, err
 }
-func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
+func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
 	patch, err := getPatch(ds)
 	if err != nil {
 		return nil, err
@@ -323,7 +322,7 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            name,
 			Namespace:       ds.Namespace,
-			Labels:          labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, extensions.DefaultDaemonSetUniqueLabelKey, hash),
+			Labels:          labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, hash),
 			Annotations:     ds.Annotations,
 			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, controllerKind)},
 		},
@@ -331,10 +330,10 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int
 		Revision: revision,
 	}
-	history, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Create(history)
+	history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(history)
 	if errors.IsAlreadyExists(err) {
 		// TODO: Is it okay to get from historyLister?
-		existedHistory, getErr := dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
+		existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
 		if getErr != nil {
 			return nil, getErr
 		}
@@ -367,13 +366,19 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int
 	return history, err
 }
-func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) {
+func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) {
 	var newPods []*v1.Pod
 	var oldPods []*v1.Pod
 	for _, pods := range nodeToDaemonPods {
 		for _, pod := range pods {
-			if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod, hash) {
+			// If the returned error is not nil we have a parse error.
+			// The controller handles this via the hash.
+			generation, err := util.GetTemplateGeneration(ds)
+			if err != nil {
+				generation = nil
+			}
+			if util.IsPodUpdated(pod, hash, generation) {
 				newPods = append(newPods, pod)
 			} else {
 				oldPods = append(oldPods, pod)
@@ -383,7 +388,7 @@ func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet, n
 	return newPods, oldPods
 }
-func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
+func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
 	glog.V(4).Infof("Getting unavailable numbers")
 	// TODO: get nodeList once in syncDaemonSet and pass it to other functions
 	nodeList, err := dsc.nodeLister.List(labels.Everything())
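A note on the MaxUnavailable field that getUnavailableNumbers works with: it may be an absolute count or a percentage, and it gets resolved against the number of nodes that should run the daemon pod. Below is a standalone sketch of that resolution using the apimachinery helper; the node count and the round-up choice here are illustrative, not a statement of exactly what the controller passes.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// MaxUnavailable as a percentage, as in the "50%" test cases below.
	maxUnavailable := intstr.FromString("50%")
	desiredNumberScheduled := 5 // illustrative node count

	// Resolve the IntOrString against the total; the final argument
	// selects round-up behavior for percentages.
	resolved, err := intstr.GetValueFromIntOrPercent(&maxUnavailable, desiredNumberScheduled, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved) // 3, i.e. ceil(0.5 * 5)
}
```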

View File

@@ -19,8 +19,8 @@ package daemon
 import (
 	"testing"
+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
@@ -38,10 +38,9 @@ func TestDaemonSetUpdatesPods(t *testing.T) {
 	markPodsReady(podControl.podStore)
 	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
-	ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
+	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
-	ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
-	ds.Spec.TemplateGeneration++
+	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
 	manager.dsStore.Update(ds)
 	clearExpectations(t, manager, ds, podControl)
@@ -80,10 +79,9 @@ func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
 	markPodsReady(podControl.podStore)
 	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
-	ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
+	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
-	ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
-	ds.Spec.TemplateGeneration++
+	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
 	manager.dsStore.Update(ds)
 	// new pods are not ready numUnavailable == maxUnavailable
@@ -109,10 +107,9 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
 	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
-	ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
+	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
-	ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
-	ds.Spec.TemplateGeneration++
+	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
 	manager.dsStore.Update(ds)
 	// all old pods are unavailable so should be removed
@@ -137,9 +134,9 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
-	ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
+	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
-	ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
 	manager.dsStore.Update(ds)
 	// template is not changed no pod should be removed
@@ -152,7 +149,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
 	cases := []struct {
 		name           string
 		Manager        *daemonSetsController
-		ds             *extensions.DaemonSet
+		ds             *apps.DaemonSet
 		nodeToPods     map[string][]*v1.Pod
 		maxUnavailable int
 		numUnavailable int
@@ -167,10 +164,10 @@
 				}
 				return manager
 			}(),
-			ds: func() *extensions.DaemonSet {
+			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("x")
 				intStr := intstr.FromInt(0)
-				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+				ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
 				return ds
 			}(),
 			nodeToPods: make(map[string][]*v1.Pod),
@@ -187,10 +184,10 @@
 				addNodes(manager.nodeStore, 0, 2, nil)
 				return manager
 			}(),
-			ds: func() *extensions.DaemonSet {
+			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("x")
 				intStr := intstr.FromInt(1)
-				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+				ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
 				return ds
 			}(),
 			nodeToPods: func() map[string][]*v1.Pod {
@@ -216,10 +213,10 @@
 				addNodes(manager.nodeStore, 0, 2, nil)
 				return manager
 			}(),
-			ds: func() *extensions.DaemonSet {
+			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("x")
 				intStr := intstr.FromInt(0)
-				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+				ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
 				return ds
 			}(),
 			nodeToPods: func() map[string][]*v1.Pod {
@@ -242,10 +239,10 @@
 				addNodes(manager.nodeStore, 0, 2, nil)
 				return manager
 			}(),
-			ds: func() *extensions.DaemonSet {
+			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("x")
 				intStr := intstr.FromString("50%")
-				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+				ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
 				return ds
 			}(),
 			nodeToPods: func() map[string][]*v1.Pod {
@@ -271,10 +268,10 @@
 				addNodes(manager.nodeStore, 0, 2, nil)
 				return manager
 			}(),
-			ds: func() *extensions.DaemonSet {
+			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("x")
 				intStr := intstr.FromString("50%")
-				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+				ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
 				return ds
 			}(),
 			nodeToPods: func() map[string][]*v1.Pod {

View File

@@ -16,7 +16,7 @@ go_library(
         "//pkg/features:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//pkg/scheduler/algorithm:go_default_library",
-        "//pkg/util/labels:go_default_library",
+        "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -18,7 +18,9 @@ package util
 import (
 	"fmt"
+	"strconv"
+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -28,13 +30,28 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
-	labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )
+// GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the
+// deprecated annotation. If no annotation is found nil is returned. If the annotation is found and fails to parse
+// nil is returned with an error. If the generation can be parsed from the annotation, a pointer to the parsed int64
+// value is returned.
+func GetTemplateGeneration(ds *apps.DaemonSet) (*int64, error) {
+	annotation, found := ds.Annotations[apps.DeprecatedTemplateGeneration]
+	if !found {
+		return nil, nil
+	}
+	generation, err := strconv.ParseInt(annotation, 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	return &generation, nil
+}
 // CreatePodTemplate returns copy of provided template with additional
 // label which contains templateGeneration (for backward compatibility),
 // hash of provided template and sets default daemon tolerations.
-func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash string) v1.PodTemplateSpec {
+func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec {
 	newTemplate := *template.DeepCopy()
 	// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
 	// Add infinite toleration for taint notReady:NoExecute here
@@ -81,12 +98,12 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash strin
 		})
 	}
-	templateGenerationStr := fmt.Sprint(generation)
-	newTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
-		template.ObjectMeta.Labels,
-		extensions.DaemonSetTemplateGenerationKey,
-		templateGenerationStr,
-	)
+	if newTemplate.ObjectMeta.Labels == nil {
+		newTemplate.ObjectMeta.Labels = make(map[string]string)
+	}
+	if generation != nil {
+		newTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey] = fmt.Sprint(*generation)
+	}
 	// TODO: do we need to validate if the DaemonSet is RollingUpdate or not?
 	if len(hash) > 0 {
 		newTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = hash
@@ -95,9 +112,10 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash strin
 }
 // IsPodUpdated checks if pod contains label value that either matches templateGeneration or hash
-func IsPodUpdated(dsTemplateGeneration int64, pod *v1.Pod, hash string) bool {
+func IsPodUpdated(pod *v1.Pod, hash string, dsTemplateGeneration *int64) bool {
 	// Compare with hash to see if the pod is updated, need to maintain backward compatibility of templateGeneration
-	templateMatches := pod.Labels[extensions.DaemonSetTemplateGenerationKey] == fmt.Sprint(dsTemplateGeneration)
+	templateMatches := dsTemplateGeneration != nil &&
+		pod.Labels[extensions.DaemonSetTemplateGenerationKey] == fmt.Sprint(dsTemplateGeneration)
 	hashMatches := len(hash) > 0 && pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] == hash
 	return hashMatches || templateMatches
 }
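Taken together, GetTemplateGeneration and IsPodUpdated say a pod is current when its hash label matches the current ControllerRevision hash, or, for backward compatibility, when its deprecated template-generation label matches a non-nil generation. Here is a self-contained sketch of that matching rule on plain label maps; the label key strings are the values I would expect behind the exported constants (an assumption, not confirmed by this diff), and the generation is dereferenced explicitly.

```go
package main

import (
	"fmt"
	"strconv"
)

// Assumed values of extensions.DaemonSetTemplateGenerationKey and
// apps.DefaultDaemonSetUniqueLabelKey; illustrative only.
const (
	templateGenerationKey = "pod-template-generation"
	revisionHashKey       = "controller-revision-hash"
)

// isUpdated mirrors the IsPodUpdated rule above: a hash match wins,
// and a non-nil generation provides the legacy fallback.
func isUpdated(podLabels map[string]string, hash string, generation *int64) bool {
	templateMatches := generation != nil &&
		podLabels[templateGenerationKey] == strconv.FormatInt(*generation, 10)
	hashMatches := len(hash) > 0 && podLabels[revisionHashKey] == hash
	return hashMatches || templateMatches
}

func main() {
	gen := int64(3)
	fmt.Println(isUpdated(map[string]string{revisionHashKey: "abc"}, "abc", nil))      // true: hash matches
	fmt.Println(isUpdated(map[string]string{templateGenerationKey: "3"}, "zzz", &gen)) // true: legacy generation matches
	fmt.Println(isUpdated(map[string]string{templateGenerationKey: "2"}, "zzz", &gen)) // false: neither matches
}
```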

View File

@@ -47,13 +47,14 @@ func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
 }
 func TestIsPodUpdated(t *testing.T) {
-	templateGeneration := int64(12345)
+	templateGeneration := int64Ptr(12345)
+	badGeneration := int64Ptr(12345)
 	hash := "55555"
 	labels := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(templateGeneration), extensions.DefaultDaemonSetUniqueLabelKey: hash}
 	labelsNoHash := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(templateGeneration)}
 	tests := []struct {
 		test               string
-		templateGeneration int64
+		templateGeneration *int64
 		pod                *v1.Pod
 		hash               string
 		isUpdated          bool
@@ -95,14 +96,14 @@
 		},
 		{
 			"templateGeneration doesn't match, hash does",
-			templateGeneration + 1,
+			badGeneration,
 			newPod("pod1", "node1", labels),
 			hash,
 			true,
 		},
 		{
 			"templateGeneration and hash don't match",
-			templateGeneration + 1,
+			badGeneration,
 			newPod("pod1", "node1", labels),
 			hash + "123",
 			false,
@@ -130,7 +131,7 @@
 		},
 	}
 	for _, test := range tests {
-		updated := IsPodUpdated(test.templateGeneration, test.pod, test.hash)
+		updated := IsPodUpdated(test.pod, test.hash, test.templateGeneration)
 		if updated != test.isUpdated {
 			t.Errorf("%s: IsPodUpdated returned wrong value. Expected %t, got %t", test.test, test.isUpdated, updated)
 		}
@@ -139,19 +140,19 @@
 func TestCreatePodTemplate(t *testing.T) {
 	tests := []struct {
-		templateGeneration int64
+		templateGeneration *int64
 		hash               string
 		expectUniqueLabel  bool
 	}{
-		{int64(1), "", false},
-		{int64(2), "3242341807", true},
+		{int64Ptr(1), "", false},
+		{int64Ptr(2), "3242341807", true},
 	}
 	for _, test := range tests {
 		podTemplateSpec := v1.PodTemplateSpec{}
 		newPodTemplate := CreatePodTemplate(podTemplateSpec, test.templateGeneration, test.hash)
 		val, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
-		if !exists || val != fmt.Sprint(test.templateGeneration) {
-			t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", test.templateGeneration, val)
+		if !exists || val != fmt.Sprint(*test.templateGeneration) {
+			t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", *test.templateGeneration, val)
 		}
 		val, exists = newPodTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
 		if test.expectUniqueLabel && (!exists || val != test.hash) {
@@ -162,3 +163,8 @@
 		}
 	}
 }
+func int64Ptr(i int) *int64 {
+	li := int64(i)
+	return &li
+}
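The new int64Ptr helper exists because Go does not allow taking the address of a conversion result or literal directly, so pointer-typed test fixtures need a small constructor. A minimal illustration:

```go
package main

import "fmt"

func int64Ptr(i int) *int64 {
	li := int64(i)
	return &li
}

func main() {
	// p := &int64(1) would not compile; the helper supplies an
	// addressable value to point at.
	p := int64Ptr(1)
	fmt.Println(*p) // 1
}
```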

View File

@@ -23,9 +23,8 @@ import (
 	"text/tabwriter"
 	appsv1 "k8s.io/api/apps/v1"
-	appsv1beta1 "k8s.io/api/apps/v1beta1"
 	"k8s.io/api/core/v1"
-	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -35,8 +34,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/client-go/kubernetes"
 	clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
-	clientappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1"
-	clientextv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	apiv1 "k8s.io/kubernetes/pkg/apis/core/v1"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
@@ -187,11 +184,11 @@ type DaemonSetHistoryViewer struct {
 // ViewHistory returns a revision-to-history map as the revision history of a deployment
 // TODO: this should be a describer
 func (h *DaemonSetHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) {
-	ds, history, err := daemonSetHistory(h.c.ExtensionsV1beta1(), h.c.AppsV1beta1(), namespace, name)
+	ds, history, err := daemonSetHistory(h.c.AppsV1(), namespace, name)
 	if err != nil {
 		return "", err
 	}
-	historyInfo := make(map[int64]*appsv1beta1.ControllerRevision)
+	historyInfo := make(map[int64]*appsv1.ControllerRevision)
 	for _, history := range history {
 		// TODO: for now we assume revisions don't overlap, we may need to handle it
 		historyInfo[history.Revision] = history
@@ -290,11 +287,11 @@ func controlledHistoryV1(
 // controlledHistories returns all ControllerRevisions in namespace that selected by selector and owned by accessor
 func controlledHistory(
-	apps clientappsv1beta1.AppsV1beta1Interface,
+	apps clientappsv1.AppsV1Interface,
 	namespace string,
 	selector labels.Selector,
-	accessor metav1.Object) ([]*appsv1beta1.ControllerRevision, error) {
-	var result []*appsv1beta1.ControllerRevision
+	accessor metav1.Object) ([]*appsv1.ControllerRevision, error) {
+	var result []*appsv1.ControllerRevision
 	historyList, err := apps.ControllerRevisions(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
 	if err != nil {
 		return nil, err
@@ -311,10 +308,9 @@ func controlledHistory(
 // daemonSetHistory returns the DaemonSet named name in namespace and all ControllerRevisions in its history.
 func daemonSetHistory(
-	ext clientextv1beta1.ExtensionsV1beta1Interface,
-	apps clientappsv1beta1.AppsV1beta1Interface,
-	namespace, name string) (*extensionsv1beta1.DaemonSet, []*appsv1beta1.ControllerRevision, error) {
-	ds, err := ext.DaemonSets(namespace).Get(name, metav1.GetOptions{})
+	apps clientappsv1.AppsV1Interface,
+	namespace, name string) (*appsv1.DaemonSet, []*appsv1.ControllerRevision, error) {
+	ds, err := apps.DaemonSets(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to retrieve DaemonSet %s: %v", name, err)
 	}
@@ -357,7 +353,7 @@ func statefulSetHistory(
 }
 // applyDaemonSetHistory returns a specific revision of DaemonSet by applying the given history to a copy of the given DaemonSet
-func applyDaemonSetHistory(ds *extensionsv1beta1.DaemonSet, history *appsv1beta1.ControllerRevision) (*extensionsv1beta1.DaemonSet, error) {
+func applyDaemonSetHistory(ds *appsv1.DaemonSet, history *appsv1.ControllerRevision) (*appsv1.DaemonSet, error) {
 	clone := ds.DeepCopy()
 	cloneBytes, err := json.Marshal(clone)
 	if err != nil {
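With the v1beta1 plumbing gone, the DaemonSet and its revision history are both fetched through the single AppsV1 typed client. Below is a rough sketch of the lookup pattern, assuming the era-appropriate client-go used in this tree (methods without a context argument, as in the calls above) and eliding the ownership filtering that controlledHistory performs; the function name and placement are hypothetical.

```go
package kubectl // hypothetical placement

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func dumpDaemonSetHistory(c kubernetes.Interface, namespace, name string) error {
	ds, err := c.AppsV1().DaemonSets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to retrieve DaemonSet %s: %v", name, err)
	}
	selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
	if err != nil {
		return err
	}
	// List ControllerRevisions matching the DaemonSet's selector.
	revisions, err := c.AppsV1().ControllerRevisions(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		return err
	}
	fmt.Printf("%s has %d controller revisions\n", ds.Name, len(revisions.Items))
	return nil
}
```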

View File

@@ -25,7 +25,6 @@ import (
 	"syscall"
 	appsv1 "k8s.io/api/apps/v1"
-	appsv1beta1 "k8s.io/api/apps/v1beta1"
 	"k8s.io/api/core/v1"
 	extv1beta1 "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -257,7 +256,7 @@ func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations ma
 	if err != nil {
 		return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error())
 	}
-	ds, history, err := daemonSetHistory(r.c.ExtensionsV1beta1(), r.c.AppsV1beta1(), accessor.GetNamespace(), accessor.GetName())
+	ds, history, err := daemonSetHistory(r.c.AppsV1(), accessor.GetNamespace(), accessor.GetName())
 	if err != nil {
 		return "", err
 	}
@@ -316,7 +315,7 @@ func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations
 		return "", fmt.Errorf("no last revision to roll back to")
 	}
-	toHistory := findHistoryV1(toRevision, history)
+	toHistory := findHistory(toRevision, history)
 	if toHistory == nil {
 		return "", revisionNotFoundErr(toRevision)
 	}
@@ -346,44 +345,16 @@
 	return rollbackSuccess, nil
 }
-// TODO: When all the controllers have been updated to use v1, rename this function findHistoryV1()->findHistory() and
-// TODO: remove the original findHistory()
-// findHistoryV1 returns a controllerrevision of a specific revision from the given controllerrevisions.
+// findHistory returns a controllerrevision of a specific revision from the given controllerrevisions.
 // It returns nil if no such controllerrevision exists.
 // If toRevision is 0, the last previously used history is returned.
-func findHistoryV1(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision {
+func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision {
 	if toRevision == 0 && len(allHistory) <= 1 {
 		return nil
 	}
 	// Find the history to rollback to
 	var toHistory *appsv1.ControllerRevision
-	if toRevision == 0 {
-		// If toRevision == 0, find the latest revision (2nd max)
-		sort.Sort(historiesByRevisionV1(allHistory))
-		toHistory = allHistory[len(allHistory)-2]
-	} else {
-		for _, h := range allHistory {
-			if h.Revision == toRevision {
-				// If toRevision != 0, find the history with matching revision
-				return h
-			}
-		}
-	}
-	return toHistory
-}
-// findHistory returns a controllerrevision of a specific revision from the given controllerrevisions.
-// It returns nil if no such controllerrevision exists.
-// If toRevision is 0, the last previously used history is returned.
-func findHistory(toRevision int64, allHistory []*appsv1beta1.ControllerRevision) *appsv1beta1.ControllerRevision {
-	if toRevision == 0 && len(allHistory) <= 1 {
-		return nil
-	}
-	// Find the history to rollback to
-	var toHistory *appsv1beta1.ControllerRevision
 	if toRevision == 0 {
 		// If toRevision == 0, find the latest revision (2nd max)
 		sort.Sort(historiesByRevision(allHistory))
@@ -417,19 +388,10 @@ func revisionNotFoundErr(r int64) error {
 }
 // TODO: copied from daemon controller, should extract to a library
-type historiesByRevision []*appsv1beta1.ControllerRevision
+type historiesByRevision []*appsv1.ControllerRevision
 func (h historiesByRevision) Len() int      { return len(h) }
 func (h historiesByRevision) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
 func (h historiesByRevision) Less(i, j int) bool {
 	return h[i].Revision < h[j].Revision
 }
-// TODO: copied from daemon controller, should extract to a library
-type historiesByRevisionV1 []*appsv1.ControllerRevision
-func (h historiesByRevisionV1) Len() int      { return len(h) }
-func (h historiesByRevisionV1) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
-func (h historiesByRevisionV1) Less(i, j int) bool {
-	return h[i].Revision < h[j].Revision
-}
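The consolidated findHistory has two behaviors worth noting: toRevision == 0 selects the second-highest revision (the one to roll back to), and any other value must match exactly or nothing is returned. A toy model of the same selection logic:

```go
package main

import (
	"fmt"
	"sort"
)

type revision struct{ Revision int64 }

// pick mirrors findHistory on a toy type: revision 0 means "the previous
// revision" (second-highest); any other value must match exactly.
func pick(toRevision int64, all []*revision) *revision {
	if toRevision == 0 && len(all) <= 1 {
		return nil
	}
	if toRevision == 0 {
		sort.Slice(all, func(i, j int) bool { return all[i].Revision < all[j].Revision })
		return all[len(all)-2]
	}
	for _, h := range all {
		if h.Revision == toRevision {
			return h
		}
	}
	return nil
}

func main() {
	all := []*revision{{1}, {3}, {2}}
	fmt.Println(pick(0, all).Revision) // 2: the previous revision
	fmt.Println(pick(3, all).Revision) // 3: exact match
	fmt.Println(pick(9, all))          // <nil>: no such revision
}
```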

View File

@@ -48,13 +48,11 @@ go_library(
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/k8s.io/api/apps/v1:go_default_library",
-        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
         "//vendor/k8s.io/api/batch/v1:go_default_library",
         "//vendor/k8s.io/api/batch/v1beta1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@ -22,10 +22,8 @@ import (
"strings" "strings"
"time" "time"
apps "k8s.io/api/apps/v1beta1" apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrs "k8s.io/apimachinery/pkg/api/errors" apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
@ -67,7 +65,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
AfterEach(func() { AfterEach(func() {
// Clean up // Clean up
daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}) daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets") Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets")
if daemonsets != nil && len(daemonsets.Items) > 0 { if daemonsets != nil && len(daemonsets.Items) > 0 {
for _, ds := range daemonsets.Items { for _, ds := range daemonsets.Items {
@ -80,7 +78,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped") Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
} }
} }
if daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), daemonsets)) framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), daemonsets))
} else { } else {
framework.Logf("unable to dump daemonsets: %v", err) framework.Logf("unable to dump daemonsets: %v", err)
@ -114,7 +112,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
label := map[string]string{daemonsetNameLabel: dsName} label := map[string]string{daemonsetNameLabel: dsName}
By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Check that daemon pods launch on every node of the cluster.") By("Check that daemon pods launch on every node of the cluster.")
@ -138,7 +136,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Creating daemon %q with a node selector", dsName) framework.Logf("Creating daemon %q with a node selector", dsName)
ds := newDaemonSet(dsName, image, complexLabel) ds := newDaemonSet(dsName, image, complexLabel)
ds.Spec.Template.Spec.NodeSelector = nodeSelector ds.Spec.Template.Spec.NodeSelector = nodeSelector
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds) ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Initially, daemon pods should not be running on any nodes.") By("Initially, daemon pods should not be running on any nodes.")
@ -167,7 +165,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate") By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`, patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel]) daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred(), "error patching daemon set") Expect(err).NotTo(HaveOccurred(), "error patching daemon set")
daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels) daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
Expect(len(daemonSetLabels)).To(Equal(1)) Expect(len(daemonSetLabels)).To(Equal(1))
@ -199,7 +197,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
}, },
}, },
} }
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds) ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Initially, daemon pods should not be running on any nodes.") By("Initially, daemon pods should not be running on any nodes.")
@ -229,7 +227,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
label := map[string]string{daemonsetNameLabel: dsName} label := map[string]string{daemonsetNameLabel: dsName}
By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName)) By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Check that daemon pods launch on every node of the cluster.") By("Check that daemon pods launch on every node of the cluster.")
@ -253,54 +251,43 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
label := map[string]string{daemonsetNameLabel: dsName} label := map[string]string{daemonsetNameLabel: dsName}
framework.Logf("Creating simple daemon set %s", dsName) framework.Logf("Creating simple daemon set %s", dsName)
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(ds.Spec.TemplateGeneration).To(Equal(int64(1)))
By("Check that daemon pods launch on every node of the cluster.") By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
By("Make sure all daemon pods have correct template generation 1")
templateGeneration := "1"
err = checkDaemonPodsTemplateGeneration(c, ns, label, "1")
Expect(err).NotTo(HaveOccurred())
// Check history and labels // Check history and labels
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
waitForHistoryCreated(c, ns, label, 1) waitForHistoryCreated(c, ns, label, 1)
first := curHistory(listDaemonHistories(c, ns, label), ds) first := curHistory(listDaemonHistories(c, ns, label), ds)
firstHash := first.Labels[extensions.DefaultDaemonSetUniqueLabelKey] firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey]
Expect(first.Revision).To(Equal(int64(1))) Expect(first.Revision).To(Equal(int64(1)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration) checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
By("Update daemon pods image.") By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage) patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(ds.Spec.TemplateGeneration).To(Equal(int64(2)))
By("Check that daemon pods images aren't updated.") By("Check that daemon pods images aren't updated.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Make sure all daemon pods have correct template generation 1")
err = checkDaemonPodsTemplateGeneration(c, ns, label, templateGeneration)
Expect(err).NotTo(HaveOccurred())
By("Check that daemon pods are still running on every node of the cluster.") By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
// Check history and labels // Check history and labels
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
waitForHistoryCreated(c, ns, label, 2) waitForHistoryCreated(c, ns, label, 2)
cur := curHistory(listDaemonHistories(c, ns, label), ds) cur := curHistory(listDaemonHistories(c, ns, label), ds)
Expect(cur.Revision).To(Equal(int64(2))) Expect(cur.Revision).To(Equal(int64(2)))
Expect(cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash)) Expect(cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration) checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
}) })
It("Should update pod when spec was updated and update strategy is RollingUpdate", func() { It("Should update pod when spec was updated and update strategy is RollingUpdate", func() {
@ -309,11 +296,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
templateGeneration := int64(999) templateGeneration := int64(999)
framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration) framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration)
ds := newDaemonSet(dsName, image, label) ds := newDaemonSet(dsName, image, label)
ds.Spec.TemplateGeneration = templateGeneration ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType} ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
By("Check that daemon pods launch on every node of the cluster.") By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
@ -324,20 +309,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Check history and labels // Check history and labels
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
waitForHistoryCreated(c, ns, label, 1) waitForHistoryCreated(c, ns, label, 1)
cur := curHistory(listDaemonHistories(c, ns, label), ds) cur := curHistory(listDaemonHistories(c, ns, label), ds)
hash := cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
Expect(cur.Revision).To(Equal(int64(1))) Expect(cur.Revision).To(Equal(int64(1)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration)) checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
By("Update daemon pods image.") By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage) patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
templateGeneration++ templateGeneration++
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
By("Check that daemon pods images are updated.") By("Check that daemon pods images are updated.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
@ -352,90 +336,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
// Check history and labels // Check history and labels
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
waitForHistoryCreated(c, ns, label, 2) waitForHistoryCreated(c, ns, label, 2)
cur = curHistory(listDaemonHistories(c, ns, label), ds) cur = curHistory(listDaemonHistories(c, ns, label), ds)
hash = cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
Expect(cur.Revision).To(Equal(int64(2))) Expect(cur.Revision).To(Equal(int64(2)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration)) checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
})
It("Should adopt existing pods when creating a RollingUpdate DaemonSet regardless of templateGeneration", func() {
label := map[string]string{daemonsetNameLabel: dsName}
// 1. Create a RollingUpdate DaemonSet
templateGeneration := int64(999)
framework.Logf("Creating simple RollingUpdate DaemonSet %s with templateGeneration %d", dsName, templateGeneration)
ds := newDaemonSet(dsName, image, label)
ds.Spec.TemplateGeneration = templateGeneration
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
Expect(err).NotTo(HaveOccurred())
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
framework.Logf("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
framework.Logf("Make sure all daemon pods have correct template generation %d", templateGeneration)
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
Expect(err).NotTo(HaveOccurred())
// 2. Orphan DaemonSet pods
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", dsName)
deleteDaemonSetAndOrphan(c, ds)
// 3. Adopt DaemonSet pods (no restart)
newDSName := "adopt"
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newDSName)
newDS := newDaemonSet(newDSName, image, label)
newDS.Spec.TemplateGeneration = templateGeneration
newDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
newDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newDS)
Expect(err).NotTo(HaveOccurred())
Expect(newDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
Expect(apiequality.Semantic.DeepEqual(newDS.Spec.Template, ds.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")
framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newDS.Name)
waitDaemonSetAdoption(c, newDS, ds.Name, templateGeneration)
// 4. Orphan DaemonSet pods again
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newDSName)
deleteDaemonSetAndOrphan(c, newDS)
// 5. Adopt DaemonSet pods (no restart) as long as template matches, even when templateGeneration doesn't match
newAdoptDSName := "adopt-template-matches"
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
newAdoptDS := newDaemonSet(newAdoptDSName, image, label)
newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
Expect(err).NotTo(HaveOccurred())
Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(int64(1)))
Expect(newAdoptDS.Spec.TemplateGeneration).NotTo(Equal(templateGeneration))
Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")
framework.Logf(fmt.Sprintf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name))
waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
// 6. Orphan DaemonSet pods again
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newAdoptDSName)
deleteDaemonSetAndOrphan(c, newAdoptDS)
// 7. Adopt DaemonSet pods (no restart) as long as templateGeneration matches, even when template doesn't match
newAdoptDSName = "adopt-template-generation-matches"
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
newAdoptDS = newDaemonSet(newAdoptDSName, image, label)
newAdoptDS.Spec.Template.Spec.Containers[0].Name = "not-match"
newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
newAdoptDS.Spec.TemplateGeneration = templateGeneration
newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
Expect(err).NotTo(HaveOccurred())
Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).NotTo(BeTrue(), "DaemonSet template should not match")
framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name)
waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
}) })
It("Should rollback without unnecessary restarts", func() { It("Should rollback without unnecessary restarts", func() {
@ -445,8 +352,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Create a RollingUpdate DaemonSet") framework.Logf("Create a RollingUpdate DaemonSet")
label := map[string]string{daemonsetNameLabel: dsName} label := map[string]string{daemonsetNameLabel: dsName}
ds := newDaemonSet(dsName, image, label) ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType} ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds) ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
framework.Logf("Check that daemon pods launch on every node of the cluster") framework.Logf("Check that daemon pods launch on every node of the cluster")
@ -456,7 +363,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Update the DaemonSet to trigger a rollout") framework.Logf("Update the DaemonSet to trigger a rollout")
// We use a nonexistent image here, so that we make sure it won't finish // We use a nonexistent image here, so that we make sure it won't finish
newImage := "foo:non-existent" newImage := "foo:non-existent"
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) { newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage update.Spec.Template.Spec.Containers[0].Image = newImage
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -483,7 +390,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(len(newPods)).NotTo(Equal(0)) Expect(len(newPods)).NotTo(Equal(0))
framework.Logf("Roll back the DaemonSet before rollout is complete") framework.Logf("Roll back the DaemonSet before rollout is complete")
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) { rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = image update.Spec.Template.Spec.Containers[0].Image = image
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -511,11 +418,11 @@ func getDaemonSetImagePatch(containerName, containerImage string) string {
// deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents. // deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents.
// It also checks that all dependents are orphaned, and the DaemonSet is deleted. // It also checks that all dependents are orphaned, and the DaemonSet is deleted.
func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) { func deleteDaemonSetAndOrphan(c clientset.Interface, ds *apps.DaemonSet) {
trueVar := true trueVar := true
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar} deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID)) deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
err := c.ExtensionsV1beta1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions) err := c.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
@ -526,12 +433,12 @@ func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) {
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted") Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
} }
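`deleteDaemonSetAndOrphan` removes only the owning object while leaving the pods and ControllerRevisions alive for later adoption. A minimal sketch of the same delete expressed through the newer `PropagationPolicy` field rather than the legacy `OrphanDependents` flag; `orphanDelete` is a hypothetical helper, not part of this change:

```go
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// orphanDelete (hypothetical) deletes a DaemonSet while leaving its pods and
// ControllerRevisions behind; the UID precondition ensures a recreated object
// with the same name is never deleted by accident.
func orphanDelete(c clientset.Interface, ns, name, uid string) error {
	policy := metav1.DeletePropagationOrphan
	return c.AppsV1().DaemonSets(ns).Delete(name, &metav1.DeleteOptions{
		PropagationPolicy: &policy,
		Preconditions:     metav1.NewUIDPreconditions(uid),
	})
}
```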
-func newDaemonSet(dsName, image string, label map[string]string) *extensions.DaemonSet {
-    return &extensions.DaemonSet{
+func newDaemonSet(dsName, image string, label map[string]string) *apps.DaemonSet {
+    return &apps.DaemonSet{
         ObjectMeta: metav1.ObjectMeta{
             Name: dsName,
         },
-        Spec: extensions.DaemonSetSpec{
+        Spec: apps.DaemonSetSpec{
             Template: v1.PodTemplateSpec{
                 ObjectMeta: metav1.ObjectMeta{
                     Labels: label,
@@ -623,7 +530,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
     return newNode, nil
 }
-func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nodeNames []string) func() (bool, error) {
+func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames []string) func() (bool, error) {
     return func() (bool, error) {
         podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
         if err != nil {
@@ -662,14 +569,14 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nod
     }
 }
-func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
+func checkRunningOnAllNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) {
     return func() (bool, error) {
         nodeNames := schedulableNodes(f.ClientSet, ds)
         return checkDaemonPodOnNodes(f, ds, nodeNames)()
     }
 }
-func schedulableNodes(c clientset.Interface, ds *extensions.DaemonSet) []string {
+func schedulableNodes(c clientset.Interface, ds *apps.DaemonSet) []string {
     nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
     framework.ExpectNoError(err)
     nodeNames := make([]string, 0)
@@ -696,7 +603,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
 }
 // canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
-func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool {
+func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool {
     newPod := daemon.NewPod(ds, node.Name)
     nodeInfo := schedulercache.NewNodeInfo()
     nodeInfo.SetNode(&node)
@@ -708,12 +615,12 @@ func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool {
     return fit
 }
-func checkRunningOnNoNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
+func checkRunningOnNoNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) {
     return checkDaemonPodOnNodes(f, ds, make([]string, 0))
 }
 func checkDaemonStatus(f *framework.Framework, dsName string) error {
-    ds, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
+    ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
     if err != nil {
         return fmt.Errorf("Could not get daemon set from v1.")
     }
@@ -724,7 +631,7 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error {
     return nil
 }
-func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
+func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
     return func() (bool, error) {
         podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{})
         if err != nil {
@@ -770,7 +677,7 @@ func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label m
         if !controller.IsPodActive(&pod) {
             continue
         }
-        podTemplateGeneration := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
+        podTemplateGeneration := pod.Labels[apps.DeprecatedTemplateGeneration]
         if podTemplateGeneration != templateGeneration {
             return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
         }
@@ -780,7 +687,7 @@ func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label m
 func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) {
     return func() (bool, error) {
-        _, err := c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{})
+        _, err := c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
         if !apierrs.IsNotFound(err) {
             return false, err
         }
@@ -840,7 +747,7 @@ func checkDaemonSetHistoryAdopted(c clientset.Interface, ns string, dsUID types.
     }
 }
-func waitDaemonSetAdoption(c clientset.Interface, ds *extensions.DaemonSet, podPrefix string, podTemplateGeneration int64) {
+func waitDaemonSetAdoption(c clientset.Interface, ds *apps.DaemonSet, podPrefix string, podTemplateGeneration int64) {
     ns := ds.Namespace
     label := ds.Spec.Template.Labels
@@ -868,16 +775,13 @@ func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[
     return nil
 }
-func checkDaemonSetPodsLabels(podList *v1.PodList, hash, templateGeneration string) {
+func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
     for _, pod := range podList.Items {
-        podHash := pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
-        podTemplate := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
+        podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]
         Expect(len(podHash)).To(BeNumerically(">", 0))
         if len(hash) > 0 {
             Expect(podHash).To(Equal(hash))
         }
-        Expect(len(podTemplate)).To(BeNumerically(">", 0))
-        Expect(podTemplate).To(Equal(templateGeneration))
     }
 }
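With `templateGeneration` dropped from the signature, the pod-label assertion keys solely on the revision hash. For orientation, a hedged sketch of the two label keys involved; `describePodRevision` is hypothetical, and the stated key values reflect the apps/v1 constants as I understand them (`DefaultDaemonSetUniqueLabelKey` resolving to `controller-revision-hash`, with the old generation key surviving only as `DeprecatedTemplateGeneration`):

```go
import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
)

// describePodRevision (hypothetical) reads the two labels a DaemonSet pod may
// carry: the controller-revision hash stamped by the apps/v1 controller, and
// the deprecated template-generation label left over from extensions/v1beta1.
func describePodRevision(pod *v1.Pod) string {
	return fmt.Sprintf("pod %s: revision hash %q, deprecated generation %q",
		pod.Name,
		pod.Labels[apps.DefaultDaemonSetUniqueLabelKey],
		pod.Labels[apps.DeprecatedTemplateGeneration])
}
```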
@@ -902,19 +806,19 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
 func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList {
     selector := labels.Set(label).AsSelector()
     options := metav1.ListOptions{LabelSelector: selector.String()}
-    historyList, err := c.AppsV1beta1().ControllerRevisions(ns).List(options)
+    historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
     Expect(err).NotTo(HaveOccurred())
     Expect(len(historyList.Items)).To(BeNumerically(">", 0))
     return historyList
 }
-func curHistory(historyList *apps.ControllerRevisionList, ds *extensions.DaemonSet) *apps.ControllerRevision {
+func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *apps.ControllerRevision {
     var curHistory *apps.ControllerRevision
     foundCurHistories := 0
     for i := range historyList.Items {
         history := &historyList.Items[i]
         // Every history should have the hash label
-        Expect(len(history.Labels[extensions.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0))
+        Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0))
         match, err := daemon.Match(ds, history)
         Expect(err).NotTo(HaveOccurred())
         if match {


@@ -50,6 +50,7 @@ import (
     . "github.com/onsi/gomega"
     gomegatypes "github.com/onsi/gomega/types"
+    apps "k8s.io/api/apps/v1"
     batch "k8s.io/api/batch/v1"
     "k8s.io/api/core/v1"
     extensions "k8s.io/api/extensions/v1beta1"
@@ -3176,10 +3177,10 @@ func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Obj
     })
 }
-type updateDSFunc func(*extensions.DaemonSet)
+type updateDSFunc func(*apps.DaemonSet)
-func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *extensions.DaemonSet, err error) {
-    daemonsets := c.ExtensionsV1beta1().DaemonSets(namespace)
+func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *apps.DaemonSet, err error) {
+    daemonsets := c.AppsV1().DaemonSets(namespace)
     var updateErr error
     pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
         if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
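A usage sketch for the retyped helper: the mutation closure is re-applied to a freshly fetched object on each conflict retry, so it should be idempotent. `bumpImage` is a hypothetical wrapper, not part of this diff, and the image value is illustrative only:

```go
import (
	apps "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// bumpImage (hypothetical) retries the update until it lands, mutating only
// the first container's image on each freshly fetched DaemonSet.
func bumpImage(c clientset.Interface, ns, name, image string) (*apps.DaemonSet, error) {
	return framework.UpdateDaemonSetWithRetries(c, ns, name, func(update *apps.DaemonSet) {
		update.Spec.Template.Spec.Containers[0].Image = image
	})
}
```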


@@ -18,16 +18,16 @@ go_test(
         "//pkg/controller/daemon:go_default_library",
         "//pkg/util/metrics:go_default_library",
         "//test/integration/framework:go_default_library",
+        "//vendor/k8s.io/api/apps/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/client-go/informers:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
+        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache:go_default_library",
     ],


@@ -22,16 +22,16 @@ import (
     "testing"
     "time"
+    apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
-    "k8s.io/api/extensions/v1beta1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
+    appstyped "k8s.io/client-go/kubernetes/typed/apps/v1"
     corev1typed "k8s.io/client-go/kubernetes/typed/core/v1"
-    extensionsv1beta1typed "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/cache"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -53,8 +53,8 @@ func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *daemon.DaemonS
     informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-informers")), resyncPeriod)
     metrics.UnregisterMetricAndUntrackRateLimiterUsage("daemon_controller")
     dc, err := daemon.NewDaemonSetsController(
-        informers.Extensions().V1beta1().DaemonSets(),
-        informers.Apps().V1beta1().ControllerRevisions(),
+        informers.Apps().V1().DaemonSets(),
+        informers.Apps().V1().ControllerRevisions(),
         informers.Core().V1().Pods(),
         informers.Core().V1().Nodes(),
         clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-controller")),
@@ -70,22 +70,22 @@ func testLabels() map[string]string {
     return map[string]string{"name": "test"}
 }
-func newDaemonSet(name, namespace string) *v1beta1.DaemonSet {
+func newDaemonSet(name, namespace string) *apps.DaemonSet {
     two := int32(2)
-    return &v1beta1.DaemonSet{
+    return &apps.DaemonSet{
         TypeMeta: metav1.TypeMeta{
             Kind:       "DaemonSet",
-            APIVersion: "extensions/v1beta1",
+            APIVersion: "apps/v1",
         },
         ObjectMeta: metav1.ObjectMeta{
             Namespace: namespace,
             Name:      name,
         },
-        Spec: v1beta1.DaemonSetSpec{
+        Spec: apps.DaemonSetSpec{
             RevisionHistoryLimit: &two,
             Selector:             &metav1.LabelSelector{MatchLabels: testLabels()},
-            UpdateStrategy: v1beta1.DaemonSetUpdateStrategy{
-                Type: v1beta1.OnDeleteDaemonSetStrategyType,
+            UpdateStrategy: apps.DaemonSetUpdateStrategy{
+                Type: apps.OnDeleteDaemonSetStrategyType,
             },
             Template: v1.PodTemplateSpec{
                 ObjectMeta: metav1.ObjectMeta{
@@ -99,22 +99,22 @@ func newDaemonSet(name, namespace string) *v1beta1.DaemonSet {
     }
 }
-func newRollbackStrategy() *v1beta1.DaemonSetUpdateStrategy {
+func newRollbackStrategy() *apps.DaemonSetUpdateStrategy {
     one := intstr.FromInt(1)
-    return &v1beta1.DaemonSetUpdateStrategy{
-        Type:          v1beta1.RollingUpdateDaemonSetStrategyType,
-        RollingUpdate: &v1beta1.RollingUpdateDaemonSet{MaxUnavailable: &one},
+    return &apps.DaemonSetUpdateStrategy{
+        Type:          apps.RollingUpdateDaemonSetStrategyType,
+        RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one},
     }
 }
-func newOnDeleteStrategy() *v1beta1.DaemonSetUpdateStrategy {
-    return &v1beta1.DaemonSetUpdateStrategy{
-        Type: v1beta1.OnDeleteDaemonSetStrategyType,
+func newOnDeleteStrategy() *apps.DaemonSetUpdateStrategy {
+    return &apps.DaemonSetUpdateStrategy{
+        Type: apps.OnDeleteDaemonSetStrategyType,
     }
 }
-func updateStrategies() []*v1beta1.DaemonSetUpdateStrategy {
-    return []*v1beta1.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
+func updateStrategies() []*apps.DaemonSetUpdateStrategy {
+    return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
 }
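The two strategy constructors feed the integration tests' fan-out over update strategies. A sketch of how they are assumed to combine in a table-driven test; `runForEachStrategy` is hypothetical:

```go
import (
	"testing"

	apps "k8s.io/api/apps/v1"
)

// runForEachStrategy (hypothetical) runs a test body once per update
// strategy returned by updateStrategies above.
func runForEachStrategy(t *testing.T, run func(t *testing.T, s *apps.DaemonSetUpdateStrategy)) {
	for _, strategy := range updateStrategies() {
		t.Run(string(strategy.Type), func(t *testing.T) {
			run(t, strategy)
		})
	}
}
```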
 func allocatableResources(memory, cpu string) v1.ResourceList {
@@ -189,9 +189,6 @@ func validateDaemonSetPodsAndMarkReady(
             return false, fmt.Errorf("Pod %s has %d OwnerReferences, expected only 1", pod.Name, len(ownerReferences))
         }
         controllerRef := ownerReferences[0]
-        if got, want := controllerRef.APIVersion, "extensions/v1beta1"; got != want {
-            t.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
-        }
         if got, want := controllerRef.Kind, "DaemonSet"; got != want {
             t.Errorf("controllerRef.Kind = %q, want %q", got, want)
         }
@@ -219,7 +216,7 @@ func validateDaemonSetPodsAndMarkReady(
 }
 func validateDaemonSetStatus(
-    dsClient extensionsv1beta1typed.DaemonSetInterface,
+    dsClient appstyped.DaemonSetInterface,
     dsName string,
     dsNamespace string,
     expectedNumberReady int32,
@@ -267,7 +264,7 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
     ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t)
     defer framework.DeleteTestingNamespace(ns, server, t)
-    dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
+    dsClient := clientset.AppsV1().DaemonSets(ns.Name)
     podClient := clientset.CoreV1().Pods(ns.Name)
     nodeClient := clientset.CoreV1().Nodes()
     podInformer := informers.Core().V1().Pods().Informer()
@@ -300,7 +297,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
     ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
     defer framework.DeleteTestingNamespace(ns, server, t)
-    dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
+    dsClient := clientset.AppsV1().DaemonSets(ns.Name)
     podClient := clientset.CoreV1().Pods(ns.Name)
     nodeClient := clientset.CoreV1().Nodes()
     podInformer := informers.Core().V1().Pods().Informer()
@@ -330,7 +327,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
     ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t)
     defer framework.DeleteTestingNamespace(ns, server, t)
-    dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
+    dsClient := clientset.AppsV1().DaemonSets(ns.Name)
     podClient := clientset.CoreV1().Pods(ns.Name)
     nodeClient := clientset.CoreV1().Nodes()
     podInformer := informers.Core().V1().Pods().Informer()
@@ -367,7 +364,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
     ns := framework.CreateTestingNamespace("insufficient-capacity", server, t)
     defer framework.DeleteTestingNamespace(ns, server, t)
-    dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name)
+    dsClient := clientset.AppsV1().DaemonSets(ns.Name)
     nodeClient := clientset.CoreV1().Nodes()
     eventClient := corev1typed.New(clientset.CoreV1().RESTClient()).Events(ns.Namespace)
     stopCh := make(chan struct{})