cmd/kube-controller-manager
@@ -23,12 +23,13 @@ import (

     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/unversioned"
-    "k8s.io/kubernetes/pkg/apis/extensions"
-    "k8s.io/kubernetes/pkg/apis/policy"
+    "k8s.io/kubernetes/pkg/api/v1"
+    extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+    policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
     "k8s.io/kubernetes/pkg/client/cache"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-    unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
-    policyclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+    v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
+    policyclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/policy/v1beta1"
     "k8s.io/kubernetes/pkg/client/record"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/runtime"
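Note (not part of the diff): the import swap above is the whole theme of the change: internal types and the internalclientset give way to the v1/v1beta1 types and the generated release_1_5 clientset. A minimal, hypothetical sketch of obtaining such a clientset, assuming a rest config is already available; buildClient is illustrative only:

```go
package sketch

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// buildClient wraps the generated constructor so the result can be handed to
// NewDisruptionController as a clientset.Interface. Illustrative only.
func buildClient(cfg *restclient.Config) (clientset.Interface, error) {
	return clientset.NewForConfig(cfg)
}
```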
@@ -58,7 +59,7 @@ const DeletionTimeout = 2 * 60 * time.Second
 type updater func(*policy.PodDisruptionBudget) error

 type DisruptionController struct {
-    kubeClient internalclientset.Interface
+    kubeClient clientset.Interface

     pdbStore cache.Store
     pdbController *cache.Controller
@@ -98,9 +99,9 @@ type controllerAndScale struct {

 // podControllerFinder is a function type that maps a pod to a list of
 // controllers and their scale.
-type podControllerFinder func(*api.Pod) ([]controllerAndScale, error)
+type podControllerFinder func(*v1.Pod) ([]controllerAndScale, error)

-func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient internalclientset.Interface) *DisruptionController {
+func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface) *DisruptionController {
     dc := &DisruptionController{
         kubeClient: kubeClient,
         podController: podInformer.GetController(),
@@ -108,7 +109,7 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i
         recheckQueue: workqueue.NewNamedDelayingQueue("disruption-recheck"),
         broadcaster: record.NewBroadcaster(),
     }
-    dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
+    dc.recorder = dc.broadcaster.NewRecorder(v1.EventSource{Component: "controllermanager"})

     dc.getUpdater = func() updater { return dc.writePdbStatus }

@@ -122,11 +123,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i

     dc.pdbStore, dc.pdbController = cache.NewInformer(
         &cache.ListWatch{
-            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-                return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).List(options)
+            ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+                return dc.kubeClient.Policy().PodDisruptionBudgets(v1.NamespaceAll).List(options)
             },
-            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-                return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).Watch(options)
+            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+                return dc.kubeClient.Policy().PodDisruptionBudgets(v1.NamespaceAll).Watch(options)
             },
         },
         &policy.PodDisruptionBudget{},
@@ -141,14 +142,14 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i

     dc.rcIndexer, dc.rcController = cache.NewIndexerInformer(
         &cache.ListWatch{
-            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-                return dc.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
+            ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+                return dc.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
             },
-            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-                return dc.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
+            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+                return dc.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
             },
         },
-        &api.ReplicationController{},
+        &v1.ReplicationController{},
         30*time.Second,
         cache.ResourceEventHandlerFuncs{},
         cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
@@ -158,11 +159,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i

     dc.rsLister.Indexer, dc.rsController = cache.NewIndexerInformer(
         &cache.ListWatch{
-            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-                return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
+            ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+                return dc.kubeClient.Extensions().ReplicaSets(v1.NamespaceAll).List(options)
             },
-            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-                return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).Watch(options)
+            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+                return dc.kubeClient.Extensions().ReplicaSets(v1.NamespaceAll).Watch(options)
             },
         },
         &extensions.ReplicaSet{},
@@ -174,11 +175,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i

     dc.dIndexer, dc.dController = cache.NewIndexerInformer(
         &cache.ListWatch{
-            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-                return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).List(options)
+            ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+                return dc.kubeClient.Extensions().Deployments(v1.NamespaceAll).List(options)
             },
-            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-                return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).Watch(options)
+            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+                return dc.kubeClient.Extensions().Deployments(v1.NamespaceAll).Watch(options)
             },
         },
         &extensions.Deployment{},
@@ -204,7 +205,7 @@ func (dc *DisruptionController) finders() []podControllerFinder {
 }

 // getPodReplicaSets finds replicasets which have no matching deployments.
-func (dc *DisruptionController) getPodReplicaSets(pod *api.Pod) ([]controllerAndScale, error) {
+func (dc *DisruptionController) getPodReplicaSets(pod *v1.Pod) ([]controllerAndScale, error) {
     cas := []controllerAndScale{}
     rss, err := dc.rsLister.GetPodReplicaSets(pod)
     // GetPodReplicaSets returns an error only if no ReplicaSets are found. We
@@ -220,7 +221,7 @@ func (dc *DisruptionController) getPodReplicaSets(pod *api.Pod) ([]controllerAnd
         if err == nil { // A deployment was found, so this finder will not count this RS.
             continue
         }
-        controllerScale[rs.UID] = rs.Spec.Replicas
+        controllerScale[rs.UID] = *(rs.Spec.Replicas)
     }

     for uid, scale := range controllerScale {
@@ -231,7 +232,7 @@ func (dc *DisruptionController) getPodReplicaSets(pod *api.Pod) ([]controllerAnd
 }

 // getPodDeployments finds deployments for any replicasets which are being managed by deployments.
-func (dc *DisruptionController) getPodDeployments(pod *api.Pod) ([]controllerAndScale, error) {
+func (dc *DisruptionController) getPodDeployments(pod *v1.Pod) ([]controllerAndScale, error) {
     cas := []controllerAndScale{}
     rss, err := dc.rsLister.GetPodReplicaSets(pod)
     // GetPodReplicaSets returns an error only if no ReplicaSets are found. We
@@ -248,7 +249,7 @@ func (dc *DisruptionController) getPodDeployments(pod *api.Pod) ([]controllerAnd
             continue
         }
         for _, d := range ds {
-            controllerScale[d.UID] = d.Spec.Replicas
+            controllerScale[d.UID] = *(d.Spec.Replicas)
         }
     }

@@ -259,12 +260,12 @@ func (dc *DisruptionController) getPodDeployments(pod *api.Pod) ([]controllerAnd
     return cas, nil
 }

-func (dc *DisruptionController) getPodReplicationControllers(pod *api.Pod) ([]controllerAndScale, error) {
+func (dc *DisruptionController) getPodReplicationControllers(pod *v1.Pod) ([]controllerAndScale, error) {
     cas := []controllerAndScale{}
     rcs, err := dc.rcLister.GetPodControllers(pod)
     if err == nil {
         for _, rc := range rcs {
-            cas = append(cas, controllerAndScale{UID: rc.UID, scale: rc.Spec.Replicas})
+            cas = append(cas, controllerAndScale{UID: rc.UID, scale: *(rc.Spec.Replicas)})
         }
     }
     return cas, nil
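Note (not part of the diff): all three finder changes above follow from the versioned specs carrying Replicas as *int32, so the scale has to be dereferenced. A minimal sketch of the read side; the nil guards are a defensive illustration only, not something the controller adds:

```go
package sketch

import (
	"k8s.io/kubernetes/pkg/api/v1"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

// scaleOf mirrors the *(Spec.Replicas) dereferences used by the finders.
// The nil guards are illustrative; defaulting normally populates the field.
func scaleOf(rc *v1.ReplicationController, rs *extensions.ReplicaSet) (rcScale, rsScale int32) {
	if rc.Spec.Replicas != nil {
		rcScale = *(rc.Spec.Replicas)
	}
	if rs.Spec.Replicas != nil {
		rsScale = *(rs.Spec.Replicas)
	}
	return rcScale, rsScale
}
```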
@@ -274,7 +275,7 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
     glog.V(0).Infof("Starting disruption controller")
     if dc.kubeClient != nil {
         glog.V(0).Infof("Sending events to api server.")
-        dc.broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: dc.kubeClient.Core().Events("")})
+        dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.kubeClient.Core().Events("")})
     } else {
         glog.V(0).Infof("No api server defined - no events will be sent to API server.")
     }
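Note (not part of the diff): the Run() change swaps the event sink from the internal core client to the versioned one. A sketch of the same wiring in isolation, using only the calls that appear in this diff:

```go
package sketch

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
	"k8s.io/kubernetes/pkg/client/record"
)

// newRecorder is illustrative: events recorded through the returned recorder
// are forwarded to the API server via the versioned core Events client.
func newRecorder(kubeClient clientset.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	return broadcaster.NewRecorder(v1.EventSource{Component: "controllermanager"})
}
```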
@@ -310,7 +311,7 @@ func (dc *DisruptionController) removeDb(obj interface{}) {
 }

 func (dc *DisruptionController) addPod(obj interface{}) {
-    pod := obj.(*api.Pod)
+    pod := obj.(*v1.Pod)
     glog.V(4).Infof("addPod called on pod %q", pod.Name)
     pdb := dc.getPdbForPod(pod)
     if pdb == nil {
@@ -322,7 +323,7 @@ func (dc *DisruptionController) addPod(obj interface{}) {
 }

 func (dc *DisruptionController) updatePod(old, cur interface{}) {
-    pod := cur.(*api.Pod)
+    pod := cur.(*v1.Pod)
     glog.V(4).Infof("updatePod called on pod %q", pod.Name)
     pdb := dc.getPdbForPod(pod)
     if pdb == nil {
@@ -334,7 +335,7 @@ func (dc *DisruptionController) updatePod(old, cur interface{}) {
 }

 func (dc *DisruptionController) deletePod(obj interface{}) {
-    pod, ok := obj.(*api.Pod)
+    pod, ok := obj.(*v1.Pod)
     // When a delete is dropped, the relist will notice a pod in the store not
     // in the list, leading to the insertion of a tombstone object which contains
     // the deleted key/value. Note that this value might be stale. If the pod
@@ -346,7 +347,7 @@ func (dc *DisruptionController) deletePod(obj interface{}) {
         glog.Errorf("Couldn't get object from tombstone %+v", obj)
         return
     }
-    pod, ok = tombstone.Obj.(*api.Pod)
+    pod, ok = tombstone.Obj.(*v1.Pod)
     if !ok {
         glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
         return
@@ -380,7 +381,7 @@ func (dc *DisruptionController) enqueuePdbForRecheck(pdb *policy.PodDisruptionBu
     dc.recheckQueue.AddAfter(key, delay)
 }

-func (dc *DisruptionController) getPdbForPod(pod *api.Pod) *policy.PodDisruptionBudget {
+func (dc *DisruptionController) getPdbForPod(pod *v1.Pod) *policy.PodDisruptionBudget {
     // GetPodPodDisruptionBudgets returns an error only if no
     // PodDisruptionBudgets are found. We don't return that as an error to the
     // caller.
@@ -393,25 +394,25 @@ func (dc *DisruptionController) getPdbForPod(pod *api.Pod) *policy.PodDisruption
     if len(pdbs) > 1 {
         msg := fmt.Sprintf("Pod %q/%q matches multiple PodDisruptionBudgets. Chose %q arbitrarily.", pod.Namespace, pod.Name, pdbs[0].Name)
         glog.Warning(msg)
-        dc.recorder.Event(pod, api.EventTypeWarning, "MultiplePodDisruptionBudgets", msg)
+        dc.recorder.Event(pod, v1.EventTypeWarning, "MultiplePodDisruptionBudgets", msg)
     }
     return &pdbs[0]
 }

-func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*api.Pod, error) {
+func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*v1.Pod, error) {
     sel, err := unversioned.LabelSelectorAsSelector(pdb.Spec.Selector)
     if sel.Empty() {
-        return []*api.Pod{}, nil
+        return []*v1.Pod{}, nil
     }
     if err != nil {
-        return []*api.Pod{}, err
+        return []*v1.Pod{}, err
     }
     pods, err := dc.podLister.Pods(pdb.Namespace).List(sel)
     if err != nil {
-        return []*api.Pod{}, err
+        return []*v1.Pod{}, err
     }
     // TODO: Do we need to copy here?
-    result := make([]*api.Pod, 0, len(pods))
+    result := make([]*v1.Pod, 0, len(pods))
     for i := range pods {
         result = append(result, &(*pods[i]))
     }
@@ -485,16 +486,16 @@ func (dc *DisruptionController) sync(key string) error {
 func (dc *DisruptionController) trySync(pdb *policy.PodDisruptionBudget) error {
     pods, err := dc.getPodsForPdb(pdb)
     if err != nil {
-        dc.recorder.Eventf(pdb, api.EventTypeWarning, "NoPods", "Failed to get pods: %v", err)
+        dc.recorder.Eventf(pdb, v1.EventTypeWarning, "NoPods", "Failed to get pods: %v", err)
         return err
     }
     if len(pods) == 0 {
-        dc.recorder.Eventf(pdb, api.EventTypeNormal, "NoPods", "No matching pods found")
+        dc.recorder.Eventf(pdb, v1.EventTypeNormal, "NoPods", "No matching pods found")
     }

     expectedCount, desiredHealthy, err := dc.getExpectedPodCount(pdb, pods)
     if err != nil {
-        dc.recorder.Eventf(pdb, api.EventTypeNormal, "ExpectedPods", "Failed to calculate the number of expected pods: %v", err)
+        dc.recorder.Eventf(pdb, v1.EventTypeNormal, "ExpectedPods", "Failed to calculate the number of expected pods: %v", err)
         return err
     }

@@ -512,7 +513,7 @@ func (dc *DisruptionController) trySync(pdb *policy.PodDisruptionBudget) error {
     return err
 }

-func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBudget, pods []*api.Pod) (expectedCount, desiredHealthy int32, err error) {
+func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBudget, pods []*v1.Pod) (expectedCount, desiredHealthy int32, err error) {
     err = nil
     // TODO(davidopp): consider making the way expectedCount and rules about
     // permitted controller configurations (specifically, considering it an error
@@ -554,11 +555,11 @@ func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBud
         }
         if controllerCount == 0 {
             err = fmt.Errorf("asked for percentage, but found no controllers for pod %q", pod.Name)
-            dc.recorder.Event(pdb, api.EventTypeWarning, "NoControllers", err.Error())
+            dc.recorder.Event(pdb, v1.EventTypeWarning, "NoControllers", err.Error())
             return
         } else if controllerCount > 1 {
             err = fmt.Errorf("pod %q has %v>1 controllers", pod.Name, controllerCount)
-            dc.recorder.Event(pdb, api.EventTypeWarning, "TooManyControllers", err.Error())
+            dc.recorder.Event(pdb, v1.EventTypeWarning, "TooManyControllers", err.Error())
             return
         }
     }
@@ -581,7 +582,7 @@ func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBud
     return
 }

-func countHealthyPods(pods []*api.Pod, disruptedPods map[string]unversioned.Time, currentTime time.Time) (currentHealthy int32) {
+func countHealthyPods(pods []*v1.Pod, disruptedPods map[string]unversioned.Time, currentTime time.Time) (currentHealthy int32) {
 Pod:
     for _, pod := range pods {
         // Pod is beeing deleted.
@@ -592,7 +593,7 @@ Pod:
         if disruptionTime, found := disruptedPods[pod.Name]; found && disruptionTime.Time.Add(DeletionTimeout).After(currentTime) {
             continue
         }
-        if api.IsPodReady(pod) {
+        if v1.IsPodReady(pod) {
             currentHealthy++
             continue Pod
         }
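Note (not part of the diff): countHealthyPods now asks v1.IsPodReady, which looks at the PodReady condition of the versioned pod. A tiny illustrative check, shaped like the pods the test file below constructs:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}},
		},
	}
	// Prints "true": the PodReady condition is ConditionTrue.
	fmt.Println(v1.IsPodReady(pod))
}
```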
@@ -603,7 +604,7 @@ Pod:

 // Builds new PodDisruption map, possibly removing items that refer to non-existing, already deleted
 // or not-deleted at all items. Also returns an information when this check should be repeated.
-func (dc *DisruptionController) buildDisruptedPodMap(pods []*api.Pod, pdb *policy.PodDisruptionBudget, currentTime time.Time) (map[string]unversioned.Time, *time.Time) {
+func (dc *DisruptionController) buildDisruptedPodMap(pods []*v1.Pod, pdb *policy.PodDisruptionBudget, currentTime time.Time) (map[string]unversioned.Time, *time.Time) {
     disruptedPods := pdb.Status.DisruptedPods
     result := make(map[string]unversioned.Time)
     var recheckTime *time.Time
@@ -625,7 +626,7 @@ func (dc *DisruptionController) buildDisruptedPodMap(pods []*api.Pod, pdb *polic
         if expectedDeletion.Before(currentTime) {
             glog.V(1).Infof("Pod %s/%s was expected to be deleted at %s but it wasn't, updating pdb %s/%s",
                 pod.Namespace, pod.Name, disruptionTime.String(), pdb.Namespace, pdb.Name)
-            dc.recorder.Eventf(pod, api.EventTypeWarning, "NotDeleted", "Pod was expected by PDB %s/%s to be deleted but it wasn't",
+            dc.recorder.Eventf(pod, v1.EventTypeWarning, "NotDeleted", "Pod was expected by PDB %s/%s to be deleted but it wasn't",
                 pdb.Namespace, pdb.Namespace)
         } else {
             if recheckTime == nil || expectedDeletion.Before(*recheckTime) {
@@ -25,9 +25,10 @@ import (

     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/unversioned"
+    "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/apimachinery/registered"
-    "k8s.io/kubernetes/pkg/apis/extensions"
-    "k8s.io/kubernetes/pkg/apis/policy"
+    extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+    policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
     "k8s.io/kubernetes/pkg/client/cache"
     "k8s.io/kubernetes/pkg/client/record"
     "k8s.io/kubernetes/pkg/controller"
@@ -95,7 +96,7 @@ func newFakeDisruptionController() (*DisruptionController, *pdbStates) {
         broadcaster: record.NewBroadcaster(),
     }

-    dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "disruption_test"})
+    dc.recorder = dc.broadcaster.NewRecorder(v1.EventSource{Component: "disruption_test"})

     return dc, ps
 }
@@ -115,11 +116,11 @@ func newSelFooBar() *unversioned.LabelSelector {
 func newPodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {

     pdb := &policy.PodDisruptionBudget{
-        TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
-        ObjectMeta: api.ObjectMeta{
+        TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
+        ObjectMeta: v1.ObjectMeta{
             UID: uuid.NewUUID(),
             Name: "foobar",
-            Namespace: api.NamespaceDefault,
+            Namespace: v1.NamespaceDefault,
             ResourceVersion: "18",
         },
         Spec: policy.PodDisruptionBudgetSpec{
@@ -136,21 +137,21 @@ func newPodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*pol
     return pdb, pdbName
 }

-func newPod(t *testing.T, name string) (*api.Pod, string) {
-    pod := &api.Pod{
-        TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
-        ObjectMeta: api.ObjectMeta{
+func newPod(t *testing.T, name string) (*v1.Pod, string) {
+    pod := &v1.Pod{
+        TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
+        ObjectMeta: v1.ObjectMeta{
             UID: uuid.NewUUID(),
             Annotations: make(map[string]string),
             Name: name,
-            Namespace: api.NamespaceDefault,
+            Namespace: v1.NamespaceDefault,
             ResourceVersion: "18",
             Labels: fooBar(),
         },
-        Spec: api.PodSpec{},
-        Status: api.PodStatus{
-            Conditions: []api.PodCondition{
-                {Type: api.PodReady, Status: api.ConditionTrue},
+        Spec: v1.PodSpec{},
+        Status: v1.PodStatus{
+            Conditions: []v1.PodCondition{
+                {Type: v1.PodReady, Status: v1.ConditionTrue},
             },
         },
     }
@@ -163,18 +164,18 @@ func newPod(t *testing.T, name string) (*api.Pod, string) {
     return pod, podName
 }

-func newReplicationController(t *testing.T, size int32) (*api.ReplicationController, string) {
-    rc := &api.ReplicationController{
-        TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
-        ObjectMeta: api.ObjectMeta{
+func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {
+    rc := &v1.ReplicationController{
+        TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
+        ObjectMeta: v1.ObjectMeta{
             UID: uuid.NewUUID(),
             Name: "foobar",
-            Namespace: api.NamespaceDefault,
+            Namespace: v1.NamespaceDefault,
             ResourceVersion: "18",
             Labels: fooBar(),
         },
-        Spec: api.ReplicationControllerSpec{
-            Replicas: size,
+        Spec: v1.ReplicationControllerSpec{
+            Replicas: &size,
             Selector: fooBar(),
         },
     }
@@ -189,16 +190,16 @@ func newReplicationController(t *testing.T, size int32) (*api.ReplicationControl

 func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {
     d := &extensions.Deployment{
-        TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
-        ObjectMeta: api.ObjectMeta{
+        TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
+        ObjectMeta: v1.ObjectMeta{
             UID: uuid.NewUUID(),
             Name: "foobar",
-            Namespace: api.NamespaceDefault,
+            Namespace: v1.NamespaceDefault,
             ResourceVersion: "18",
             Labels: fooBar(),
         },
         Spec: extensions.DeploymentSpec{
-            Replicas: size,
+            Replicas: &size,
             Selector: newSelFooBar(),
         },
     }
@@ -213,16 +214,16 @@ func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {

 func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) {
     rs := &extensions.ReplicaSet{
-        TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
-        ObjectMeta: api.ObjectMeta{
+        TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
+        ObjectMeta: v1.ObjectMeta{
             UID: uuid.NewUUID(),
             Name: "foobar",
-            Namespace: api.NamespaceDefault,
+            Namespace: v1.NamespaceDefault,
             ResourceVersion: "18",
             Labels: fooBar(),
         },
         Spec: extensions.ReplicaSetSpec{
-            Replicas: size,
+            Replicas: &size,
             Selector: newSelFooBar(),
         },
     }
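Note (not part of the diff): the fixture helpers above now pass &size because the versioned specs take *int32 for Replicas; this is the construction-side counterpart of the dereferences in the controller. A minimal sketch; the selector value is illustrative only:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	size := int32(3)
	rc := &v1.ReplicationController{
		Spec: v1.ReplicationControllerSpec{
			Replicas: &size, // versioned specs take a pointer, hence &size in the fixtures
			Selector: map[string]string{"foo": "bar"},
		},
	}
	fmt.Println(*rc.Spec.Replicas) // 3
}
```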
@@ -274,7 +275,7 @@ func TestUnavailable(t *testing.T) {
     dc.sync(pdbName)

     // Add three pods, verifying that the counts go up at each step.
-    pods := []*api.Pod{}
+    pods := []*v1.Pod{}
     for i := int32(0); i < 4; i++ {
         ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i, map[string]unversioned.Time{})
         pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
@@ -285,7 +286,7 @@ func TestUnavailable(t *testing.T) {
     ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]unversioned.Time{})

     // Now set one pod as unavailable
-    pods[0].Status.Conditions = []api.PodCondition{}
+    pods[0].Status.Conditions = []v1.PodCondition{}
     update(t, dc.podLister.Indexer, pods[0])
     dc.sync(pdbName)

@@ -387,7 +388,7 @@ func TestReplicationController(t *testing.T) {
     // about the RC. This is a known bug. TODO(mml): file issue
     ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]unversioned.Time{})

-    pods := []*api.Pod{}
+    pods := []*v1.Pod{}

     for i := int32(0); i < 3; i++ {
         pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
@@ -439,7 +440,7 @@ func TestTwoControllers(t *testing.T) {

     ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]unversioned.Time{})

-    pods := []*api.Pod{}
+    pods := []*v1.Pod{}

     unavailablePods := collectionSize - minimumOne - 1
     for i := int32(1); i <= collectionSize; i++ {
@@ -447,7 +448,7 @@ func TestTwoControllers(t *testing.T) {
         pods = append(pods, pod)
         pod.Labels = rcLabels
         if i <= unavailablePods {
-            pod.Status.Conditions = []api.PodCondition{}
+            pod.Status.Conditions = []v1.PodCondition{}
         }
         add(t, dc.podLister.Indexer, pod)
         dc.sync(pdbName)
@@ -480,7 +481,7 @@ func TestTwoControllers(t *testing.T) {
         pods = append(pods, pod)
         pod.Labels = dLabels
         if i <= unavailablePods {
-            pod.Status.Conditions = []api.PodCondition{}
+            pod.Status.Conditions = []v1.PodCondition{}
         }
         add(t, dc.podLister.Indexer, pod)
         dc.sync(pdbName)
@@ -498,17 +499,17 @@ func TestTwoControllers(t *testing.T) {
     // but if we bring down two, it's not. Then we make the pod ready again and
     // verify that a disruption is permitted again.
     ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})
-    pods[collectionSize-1].Status.Conditions = []api.PodCondition{}
+    pods[collectionSize-1].Status.Conditions = []v1.PodCondition{}
     update(t, dc.podLister.Indexer, pods[collectionSize-1])
     dc.sync(pdbName)
     ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})

-    pods[collectionSize-2].Status.Conditions = []api.PodCondition{}
+    pods[collectionSize-2].Status.Conditions = []v1.PodCondition{}
     update(t, dc.podLister.Indexer, pods[collectionSize-2])
     dc.sync(pdbName)
     ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})

-    pods[collectionSize-1].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
+    pods[collectionSize-1].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
     update(t, dc.podLister.Indexer, pods[collectionSize-1])
     dc.sync(pdbName)
     ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})