Merge pull request #99290 from mortent/PromotePDBsToGA

Promote PodDisruptionBudget to policy/v1
Authored by Kubernetes Prow Robot on 2021-03-09 11:05:05 -08:00, committed by GitHub
79 changed files with 6397 additions and 245 deletions
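
Note (not part of the original commit message): the hunks below switch the disruption controller from the policy/v1beta1 client, informers, and listers to their policy/v1 counterparts. For callers outside the controller the migration is the same one-line change on the typed client; what follows is a minimal, hypothetical sketch assuming a standard kubeconfig and client-go >= v0.21.0, not code from this PR.

// Hypothetical example: reading a PodDisruptionBudget through the GA
// policy/v1 typed client in client-go.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig at the default location (~/.kube/config).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Before this change: client.PolicyV1beta1().PodDisruptionBudgets(...)
	// After this change:  client.PolicyV1().PodDisruptionBudgets(...)
	pdb, err := client.PolicyV1().PodDisruptionBudgets("default").Get(
		context.TODO(), "my-pdb", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s: minAvailable=%v\n", pdb.Name, pdb.Spec.MinAvailable)
}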


@@ -25,7 +25,7 @@ import (
apps "k8s.io/api/apps/v1beta1"
v1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
policy "k8s.io/api/policy/v1beta1"
policy "k8s.io/api/policy/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
@@ -38,13 +38,13 @@ import (
"k8s.io/client-go/discovery"
appsv1informers "k8s.io/client-go/informers/apps/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
- policyinformers "k8s.io/client-go/informers/policy/v1beta1"
+ policyinformers "k8s.io/client-go/informers/policy/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
appsv1listers "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
- policylisters "k8s.io/client-go/listers/policy/v1beta1"
+ policylisters "k8s.io/client-go/listers/policy/v1"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
@@ -506,9 +506,6 @@ func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) (
if err != nil {
return []*v1.Pod{}, err
}
- if sel.Empty() {
- return []*v1.Pod{}, nil
- }
pods, err := dc.podLister.Pods(pdb.Namespace).List(sel)
if err != nil {
return []*v1.Pod{}, err
@@ -835,6 +832,6 @@ func (dc *DisruptionController) updatePdbStatus(pdb *policy.PodDisruptionBudget,
func (dc *DisruptionController) writePdbStatus(pdb *policy.PodDisruptionBudget) error {
// If this update fails, don't retry it. Allow the failure to get handled &
// retried in `processNextWorkItem()`.
- _, err := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(context.TODO(), pdb, metav1.UpdateOptions{})
+ _, err := dc.kubeClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(context.TODO(), pdb, metav1.UpdateOptions{})
return err
}
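
The removed sel.Empty() early return above is the behavioral piece of the promotion: in policy/v1beta1 a PodDisruptionBudget with an empty selector matched no pods, while in policy/v1 an empty selector matches all pods in the namespace (the test comment change further down reflects this). The following is an illustrative sketch, not from this PR, of why the early return goes away: an empty but non-nil LabelSelector converts to labels.Everything(), which matches every pod.

// Illustrative sketch: label-selector conversion semantics behind the change.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// An empty (non-nil) selector converts to labels.Everything().
	sel, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{})
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Empty())                           // true
	fmt.Println(sel.Matches(labels.Set{"app": "web"})) // true: matches any pod

	// A nil selector, by contrast, converts to labels.Nothing() and matches no pods.
	none, _ := metav1.LabelSelectorAsSelector(nil)
	fmt.Println(none.Matches(labels.Set{"app": "web"})) // false
}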


@@ -29,7 +29,7 @@ import (
apps "k8s.io/api/apps/v1"
autoscalingapi "k8s.io/api/autoscaling/v1"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
policy "k8s.io/api/policy/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
@@ -151,7 +151,7 @@ func newFakeDisruptionController() (*disruptionController, *pdbStates) {
dc := NewDisruptionController(
informerFactory.Core().V1().Pods(),
- informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
+ informerFactory.Policy().V1().PodDisruptionBudgets(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Apps().V1().ReplicaSets(),
informerFactory.Apps().V1().Deployments(),
@@ -175,7 +175,7 @@ func newFakeDisruptionController() (*disruptionController, *pdbStates) {
return &disruptionController{
dc,
informerFactory.Core().V1().Pods().Informer().GetStore(),
- informerFactory.Policy().V1beta1().PodDisruptionBudgets().Informer().GetStore(),
+ informerFactory.Policy().V1().PodDisruptionBudgets().Informer().GetStore(),
informerFactory.Core().V1().ReplicationControllers().Informer().GetStore(),
informerFactory.Apps().V1().ReplicaSets().Informer().GetStore(),
informerFactory.Apps().V1().Deployments().Informer().GetStore(),
@@ -402,7 +402,7 @@ func add(t *testing.T, store cache.Store, obj interface{}) {
}
}
- // Create one with no selector. Verify it matches 0 pods.
+ // Create one with no selector. Verify it matches all pods
func TestNoSelector(t *testing.T) {
dc, ps := newFakeDisruptionController()
@@ -416,7 +416,7 @@ func TestNoSelector(t *testing.T) {
add(t, dc.podStore, pod)
dc.sync(pdbName)
- ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]metav1.Time{})
+ ps.VerifyPdbStatus(t, pdbName, 0, 1, 3, 1, map[string]metav1.Time{})
}
// Verify that available/expected counts go up as we add pods, then verify that
@@ -1151,7 +1151,7 @@ func TestUpdatePDBStatusRetries(t *testing.T) {
// Create a PDB and 3 pods that match it.
pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
- pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb, metav1.CreateOptions{})
+ pdb, err := dc.coreClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create PDB: %v", err)
}
@@ -1186,7 +1186,7 @@ func TestUpdatePDBStatusRetries(t *testing.T) {
// These GVRs are copied from the generated fake code because they are not exported.
var (
podsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
- poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"}
+ poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", Version: "v1", Resource: "poddisruptionbudgets"}
)
// Bypass the coreClient.Fake and write directly to the ObjectTracker, because
@@ -1248,7 +1248,7 @@ func TestUpdatePDBStatusRetries(t *testing.T) {
// (C) Whether or not sync() returned an error, the PDB status should reflect
// the evictions that took place.
- finalPDB, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets("default").Get(context.TODO(), pdb.Name, metav1.GetOptions{})
+ finalPDB, err := dc.coreClient.PolicyV1().PodDisruptionBudgets("default").Get(context.TODO(), pdb.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get PDB: %v", err)
}
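
The test-side changes above swap the fake client's typed accessors and the GroupVersionResource used for direct ObjectTracker writes over to policy/v1, and adjust the expected status counts in TestNoSelector to the new matches-all-pods semantics. Below is a standalone, hypothetical test sketch, not from this PR, exercising the GA resource against a fake clientset, assuming client-go >= v0.21.0.

// Hypothetical standalone test: create a policy/v1 PodDisruptionBudget on a
// fake clientset and read it back through the tracker with the GA GVR.
package example

import (
	"context"
	"testing"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes/fake"
)

func TestCreatePDBV1(t *testing.T) {
	client := fake.NewSimpleClientset()

	minAvailable := intstr.FromInt(1)
	pdb := &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
		Spec:       policyv1.PodDisruptionBudgetSpec{MinAvailable: &minAvailable},
	}
	if _, err := client.PolicyV1().PodDisruptionBudgets("default").Create(
		context.TODO(), pdb, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Failed to create PDB: %v", err)
	}

	// The same GVR the controller test uses for direct ObjectTracker access.
	gvr := schema.GroupVersionResource{Group: "policy", Version: "v1", Resource: "poddisruptionbudgets"}
	obj, err := client.Tracker().Get(gvr, "default", "foo")
	if err != nil {
		t.Fatalf("Failed to get PDB from tracker: %v", err)
	}
	if _, ok := obj.(*policyv1.PodDisruptionBudget); !ok {
		t.Fatalf("unexpected object type %T", obj)
	}
}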