Add policy api version v1beta1 and disable v1alpha1

commit 26acced6d8
parent db68b906e1
@@ -426,7 +426,7 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 		}
 	}

-	groupVersion = "policy/v1alpha1"
+	groupVersion = "policy/v1beta1"
 	resources, found = resourceMap[groupVersion]
 	glog.Infof("Attempting to start disruption controller, full resource map %+v", resourceMap)
 	if containsVersion(versions, groupVersion) && found {
@@ -66,7 +66,7 @@ func New() *Generator {
 		`+k8s.io/kubernetes/pkg/watch/versioned`,
 		`k8s.io/kubernetes/pkg/api/unversioned`,
 		`k8s.io/kubernetes/pkg/api/v1`,
-		`k8s.io/kubernetes/pkg/apis/policy/v1alpha1`,
+		`k8s.io/kubernetes/pkg/apis/policy/v1beta1`,
 		`k8s.io/kubernetes/pkg/apis/extensions/v1beta1`,
 		`k8s.io/kubernetes/pkg/apis/autoscaling/v1`,
 		`k8s.io/kubernetes/pkg/apis/authorization/v1beta1`,
@@ -58,7 +58,7 @@ batch/v2alpha1 \
 certificates.k8s.io/v1alpha1 \
 extensions/v1beta1 \
 imagepolicy.k8s.io/v1alpha1 \
-policy/v1alpha1 \
+policy/v1beta1 \
 rbac.authorization.k8s.io/v1alpha1 \
 storage.k8s.io/v1beta1\
 }"
@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/autoscaling"
 	"k8s.io/kubernetes/pkg/apis/batch"
 	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/apis/rbac"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
@@ -552,6 +553,10 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source)
 			obj.APIPort = 20
 			obj.DiscoveryPort = 20
 		},
+		func(s *policy.PodDisruptionBudgetStatus, c fuzz.Continue) {
+			c.FuzzNoCustom(s) // fuzz self without calling this function again
+			s.PodDisruptionsAllowed = int32(c.Rand.Intn(2))
+		},
 	)
 	return f
 }
@@ -152,7 +152,7 @@ func TestKindForGroupVersionKinds(t *testing.T) {
 	gvks := GroupVersions{
 		GroupVersion{Group: "batch", Version: "v1"},
 		GroupVersion{Group: "batch", Version: "v2alpha1"},
-		GroupVersion{Group: "policy", Version: "v1alpha1"},
+		GroupVersion{Group: "policy", Version: "v1beta1"},
 	}
 	cases := []struct {
 		input  []GroupVersionKind
@@ -170,8 +170,8 @@ func TestKindForGroupVersionKinds(t *testing.T) {
 			ok:     true,
 		},
 		{
-			input:  []GroupVersionKind{{Group: "policy", Version: "v1alpha1", Kind: "PodDisruptionBudget"}},
-			target: GroupVersionKind{Group: "policy", Version: "v1alpha1", Kind: "PodDisruptionBudget"},
+			input:  []GroupVersionKind{{Group: "policy", Version: "v1beta1", Kind: "PodDisruptionBudget"}},
+			target: GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodDisruptionBudget"},
 			ok:     true,
 		},
 		{
@@ -21,19 +21,19 @@ package install
 import (
 	"k8s.io/kubernetes/pkg/apimachinery/announced"
 	"k8s.io/kubernetes/pkg/apis/policy"
-	"k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
+	"k8s.io/kubernetes/pkg/apis/policy/v1beta1"
 )

 func init() {
 	if err := announced.NewGroupMetaFactory(
 		&announced.GroupMetaFactoryArgs{
 			GroupName:                  policy.GroupName,
-			VersionPreferenceOrder:     []string{v1alpha1.SchemeGroupVersion.Version},
+			VersionPreferenceOrder:     []string{v1beta1.SchemeGroupVersion.Version},
 			ImportPrefix:               "k8s.io/kubernetes/pkg/apis/policy",
 			AddInternalObjectsToScheme: policy.AddToScheme,
 		},
 		announced.VersionToSchemeFunc{
-			v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme,
+			v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme,
 		},
 	).Announce().RegisterAndEnable(); err != nil {
 		panic(err)
@@ -40,8 +40,8 @@ type PodDisruptionBudgetSpec struct {
 // PodDisruptionBudgetStatus represents information about the status of a
 // PodDisruptionBudget. Status may trail the actual state of a system.
 type PodDisruptionBudgetStatus struct {
-	// Whether or not a disruption is currently allowed.
-	PodDisruptionAllowed bool `json:"disruptionAllowed"`
+	// Number of pod disruptions that are currently allowed.
+	PodDisruptionsAllowed int32 `json:"disruptionsAllowed"`

 	// current number of healthy pods
 	CurrentHealthy int32 `json:"currentHealthy"`
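The status field changes from a boolean gate to an integer budget, so consumers now test for a positive count rather than a flag. A minimal sketch of that check, assuming the policy API types above (canEvict is a hypothetical helper, not part of this commit):

// canEvict is a hypothetical helper showing the new counter semantics:
// a voluntary disruption is acceptable only while the budget is positive.
func canEvict(status policy.PodDisruptionBudgetStatus) bool {
	return status.PodDisruptionsAllowed > 0
}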
pkg/apis/policy/v1beta1/register.go (new file, 50 lines)
@@ -0,0 +1,50 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/runtime"
+	versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "policy"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&PodDisruptionBudget{},
+		&PodDisruptionBudgetList{},
+		&Eviction{},
+		&v1.ListOptions{},
+		&v1.DeleteOptions{},
+		&v1.ExportOptions{},
+	)
+	// Add the watch version that applies
+	versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
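For context, this is roughly how the exported SchemeBuilder and AddToScheme above are meant to be consumed; a minimal sketch assuming a freshly constructed scheme (the install package in the earlier hunk performs the real registration):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/policy/v1beta1"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	// Register the policy/v1beta1 kinds into a fresh scheme via the
	// SchemeBuilder exported from register.go above.
	scheme := runtime.NewScheme()
	if err := v1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme should now know PodDisruptionBudget, PodDisruptionBudgetList,
	// Eviction and the shared list/delete/export options under policy/v1beta1.
	fmt.Println(scheme.KnownTypes(v1beta1.SchemeGroupVersion))
}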
@@ -20,6 +20,7 @@ import (
 	"reflect"

 	unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation"
+	apivalidation "k8s.io/kubernetes/pkg/api/validation"
 	extensionsvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation"
 	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/util/validation/field"
@@ -27,6 +28,7 @@ import (

 func ValidatePodDisruptionBudget(pdb *policy.PodDisruptionBudget) field.ErrorList {
 	allErrs := ValidatePodDisruptionBudgetSpec(pdb.Spec, field.NewPath("spec"))
+	allErrs = append(allErrs, ValidatePodDisruptionBudgetStatus(pdb.Status, field.NewPath("status"))...)
 	return allErrs
 }

@@ -39,6 +41,7 @@ func ValidatePodDisruptionBudgetUpdate(pdb, oldPdb *policy.PodDisruptionBudget)
 	if !reflect.DeepEqual(pdb, oldPdb) {
 		allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to poddisruptionbudget spec are forbidden."))
 	}
+	allErrs = append(allErrs, ValidatePodDisruptionBudgetStatus(pdb.Status, field.NewPath("status"))...)

 	pdb.Generation = restoreGeneration
 	return allErrs
@@ -53,3 +56,12 @@ func ValidatePodDisruptionBudgetSpec(spec policy.PodDisruptionBudgetSpec, fldPat

 	return allErrs
 }
+
+func ValidatePodDisruptionBudgetStatus(status policy.PodDisruptionBudgetStatus, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.PodDisruptionsAllowed), fldPath.Child("podDisruptionsAllowed"))...)
+	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentHealthy), fldPath.Child("currentHealthy"))...)
+	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredHealthy), fldPath.Child("desiredHealthy"))...)
+	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ExpectedPods), fldPath.Child("expectedPods"))...)
+	return allErrs
+}
@@ -60,3 +60,28 @@ func TestValidatePodDisruptionBudgetSpec(t *testing.T) {
 		}
 	}
 }
+
+func TestValidatePodDisruptionBudgetStatus(t *testing.T) {
+	successCases := []policy.PodDisruptionBudgetStatus{
+		{PodDisruptionsAllowed: 10},
+		{CurrentHealthy: 5},
+		{DesiredHealthy: 3},
+		{ExpectedPods: 2}}
+	for _, c := range successCases {
+		errors := ValidatePodDisruptionBudgetStatus(c, field.NewPath("status"))
+		if len(errors) > 0 {
+			t.Errorf("unexpected failure %v for %v", errors, c)
+		}
+	}
+	failureCases := []policy.PodDisruptionBudgetStatus{
+		{PodDisruptionsAllowed: -10},
+		{CurrentHealthy: -5},
+		{DesiredHealthy: -3},
+		{ExpectedPods: -2}}
+	for _, c := range failureCases {
+		errors := ValidatePodDisruptionBudgetStatus(c, field.NewPath("status"))
+		if len(errors) == 0 {
+			t.Errorf("unexpected success for %v", c)
+		}
+	}
+}
@@ -541,8 +541,8 @@ Pod:
 		return
 	}

-// failSafe is an attempt to at least update the PodDisruptionAllowed field to
-// false if everything something else has failed. This is one place we
+// failSafe is an attempt to at least update the PodDisruptionsAllowed field to
+// 0 if everything else has failed. This is one place we
 // implement the "fail open" part of the design since if we manage to update
 // this field correctly, we will prevent the /evict handler from approving an
 // eviction when it may be unsafe to do so.
@@ -552,7 +552,7 @@ func (dc *DisruptionController) failSafe(pdb *policy.PodDisruptionBudget) error
 		return err
 	}
 	newPdb := obj.(policy.PodDisruptionBudget)
-	newPdb.Status.PodDisruptionAllowed = false
+	newPdb.Status.PodDisruptionsAllowed = 0

 	return dc.getUpdater()(&newPdb)
 }
@@ -562,9 +562,12 @@ func (dc *DisruptionController) updatePdbSpec(pdb *policy.PodDisruptionBudget, c
 	// pods are in a safe state when their first pods appear but this controller
 	// has not updated their status yet. This isn't the only race, but it's a
 	// common one that's easy to detect.
-	disruptionAllowed := currentHealthy-1 >= desiredHealthy && expectedCount > 0
+	disruptionsAllowed := currentHealthy - desiredHealthy
+	if expectedCount <= 0 || disruptionsAllowed <= 0 {
+		disruptionsAllowed = 0
+	}

-	if pdb.Status.CurrentHealthy == currentHealthy && pdb.Status.DesiredHealthy == desiredHealthy && pdb.Status.ExpectedPods == expectedCount && pdb.Status.PodDisruptionAllowed == disruptionAllowed {
+	if pdb.Status.CurrentHealthy == currentHealthy && pdb.Status.DesiredHealthy == desiredHealthy && pdb.Status.ExpectedPods == expectedCount && pdb.Status.PodDisruptionsAllowed == disruptionsAllowed {
 		return nil
 	}

@@ -575,10 +578,10 @@ func (dc *DisruptionController) updatePdbSpec(pdb *policy.PodDisruptionBudget, c
 	newPdb := obj.(policy.PodDisruptionBudget)

 	newPdb.Status = policy.PodDisruptionBudgetStatus{
-		CurrentHealthy:       currentHealthy,
-		DesiredHealthy:       desiredHealthy,
-		ExpectedPods:         expectedCount,
-		PodDisruptionAllowed: disruptionAllowed,
+		CurrentHealthy:        currentHealthy,
+		DesiredHealthy:        desiredHealthy,
+		ExpectedPods:          expectedCount,
+		PodDisruptionsAllowed: disruptionsAllowed,
 	}

 	return dc.getUpdater()(&newPdb)
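The controller now publishes an integer budget rather than a boolean: the surplus of healthy pods over the required minimum, clamped to zero when nothing is expected or the surplus is negative. A small self-contained sketch of that arithmetic with a few worked values (illustrative only; it uses plain int32 arguments instead of the controller's internal state):

package main

import "fmt"

// computeDisruptionsAllowed mirrors the clamped subtraction introduced above:
// the budget is the surplus of healthy pods over the required minimum, and it
// is forced to zero when nothing is expected or the surplus is negative.
func computeDisruptionsAllowed(currentHealthy, desiredHealthy, expectedCount int32) int32 {
	disruptionsAllowed := currentHealthy - desiredHealthy
	if expectedCount <= 0 || disruptionsAllowed <= 0 {
		disruptionsAllowed = 0
	}
	return disruptionsAllowed
}

func main() {
	fmt.Println(computeDisruptionsAllowed(4, 3, 4)) // 1: one voluntary disruption allowed
	fmt.Println(computeDisruptionsAllowed(3, 3, 4)) // 0: at the minimum, none allowed
	fmt.Println(computeDisruptionsAllowed(0, 3, 0)) // 0: no pods observed yet
}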
@@ -19,6 +19,7 @@ package disruption
 import (
 	"fmt"
 	"reflect"
+	"runtime/debug"
 	"testing"

 	"k8s.io/kubernetes/pkg/api"
@@ -53,23 +54,25 @@ func (ps *pdbStates) Get(key string) policy.PodDisruptionBudget {
 	return (*ps)[key]
 }

-func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionAllowed bool, currentHealthy, desiredHealthy, expectedPods int32) {
+func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionsAllowed, currentHealthy, desiredHealthy, expectedPods int32) {
 	expectedStatus := policy.PodDisruptionBudgetStatus{
-		PodDisruptionAllowed: disruptionAllowed,
-		CurrentHealthy:       currentHealthy,
-		DesiredHealthy:       desiredHealthy,
-		ExpectedPods:         expectedPods,
+		PodDisruptionsAllowed: disruptionsAllowed,
+		CurrentHealthy:        currentHealthy,
+		DesiredHealthy:        desiredHealthy,
+		ExpectedPods:          expectedPods,
 	}
 	actualStatus := ps.Get(key).Status
 	if !reflect.DeepEqual(actualStatus, expectedStatus) {
+		debug.PrintStack()
 		t.Fatalf("PDB %q status mismatch. Expected %+v but got %+v.", key, expectedStatus, actualStatus)
 	}
 }

-func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionAllowed bool) {
+func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionsAllowed int32) {
 	pdb := ps.Get(key)
-	if pdb.Status.PodDisruptionAllowed != disruptionAllowed {
-		t.Fatalf("PodDisruptionAllowed mismatch for PDB %q. Expected %v but got %v.", key, disruptionAllowed, pdb.Status.PodDisruptionAllowed)
+	if pdb.Status.PodDisruptionsAllowed != disruptionsAllowed {
+		debug.PrintStack()
+		t.Fatalf("PodDisruptionAllowed mismatch for PDB %q. Expected %v but got %v.", key, disruptionsAllowed, pdb.Status.PodDisruptionsAllowed)
 	}
 }

@@ -248,11 +251,11 @@ func TestNoSelector(t *testing.T) {

 	add(t, dc.pdbLister.Store, pdb)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, false, 0, 3, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0)

 	add(t, dc.podLister.Indexer, pod)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, false, 0, 3, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0)
 }

 // Verify that available/expected counts go up as we add pods, then verify that
@@ -267,13 +270,13 @@ func TestUnavailable(t *testing.T) {
 	// Add three pods, verifying that the counts go up at each step.
 	pods := []*api.Pod{}
 	for i := int32(0); i < 4; i++ {
-		ps.VerifyPdbStatus(t, pdbName, false, i, 3, i)
+		ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i)
 		pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
 		pods = append(pods, pod)
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 	}
-	ps.VerifyPdbStatus(t, pdbName, true, 4, 3, 4)
+	ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4)

 	// Now set one pod as unavailable
 	pods[0].Status.Conditions = []api.PodCondition{}
@@ -281,7 +284,7 @@
 	dc.sync(pdbName)

 	// Verify expected update
-	ps.VerifyPdbStatus(t, pdbName, false, 3, 3, 4)
+	ps.VerifyPdbStatus(t, pdbName, 0, 3, 3, 4)
 }

 // Create a pod with no controller, and verify that a PDB with a percentage
@@ -293,13 +296,13 @@ func TestNakedPod(t *testing.T) {
 	add(t, dc.pdbLister.Store, pdb)
 	dc.sync(pdbName)
 	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
-	ps.VerifyDisruptionAllowed(t, pdbName, false)
+	ps.VerifyDisruptionAllowed(t, pdbName, 0)

 	pod, _ := newPod(t, "naked")
 	add(t, dc.podLister.Indexer, pod)
 	dc.sync(pdbName)

-	ps.VerifyDisruptionAllowed(t, pdbName, false)
+	ps.VerifyDisruptionAllowed(t, pdbName, 0)
 }

 // Verify that we count the scale of a ReplicaSet even when it has no Deployment.
@@ -315,7 +318,7 @@ func TestReplicaSet(t *testing.T) {
 	pod, _ := newPod(t, "pod")
 	add(t, dc.podLister.Indexer, pod)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, false, 1, 2, 10)
+	ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10)
 }

 // Verify that multiple controllers doesn't allow the PDB to be set true.
@@ -335,7 +338,7 @@ func TestMultipleControllers(t *testing.T) {
 	dc.sync(pdbName)

 	// No controllers yet => no disruption allowed
-	ps.VerifyDisruptionAllowed(t, pdbName, false)
+	ps.VerifyDisruptionAllowed(t, pdbName, 0)

 	rc, _ := newReplicationController(t, 1)
 	rc.Name = "rc 1"
@@ -343,7 +346,7 @@ func TestMultipleControllers(t *testing.T) {
 	dc.sync(pdbName)

 	// One RC and 200%>1% healthy => disruption allowed
-	ps.VerifyDisruptionAllowed(t, pdbName, true)
+	ps.VerifyDisruptionAllowed(t, pdbName, 1)

 	rc, _ = newReplicationController(t, 1)
 	rc.Name = "rc 2"
@@ -351,7 +354,7 @@ func TestMultipleControllers(t *testing.T) {
 	dc.sync(pdbName)

 	// 100%>1% healthy BUT two RCs => no disruption allowed
-	ps.VerifyDisruptionAllowed(t, pdbName, false)
+	ps.VerifyDisruptionAllowed(t, pdbName, 0)
 }

 func TestReplicationController(t *testing.T) {
@@ -375,7 +378,7 @@ func TestReplicationController(t *testing.T) {
 	dc.sync(pdbName)
 	// It starts out at 0 expected because, with no pods, the PDB doesn't know
 	// about the RC. This is a known bug. TODO(mml): file issue
-	ps.VerifyPdbStatus(t, pdbName, false, 0, 0, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0)

 	pods := []*api.Pod{}

@@ -386,16 +389,16 @@ func TestReplicationController(t *testing.T) {
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 		if i < 2 {
-			ps.VerifyPdbStatus(t, pdbName, false, i+1, 2, 3)
+			ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, 3, 2, 3)
+			ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3)
 		}
 	}

 	rogue, _ := newPod(t, "rogue")
 	add(t, dc.podLister.Indexer, rogue)
 	dc.sync(pdbName)
-	ps.VerifyDisruptionAllowed(t, pdbName, false)
+	ps.VerifyDisruptionAllowed(t, pdbName, 0)
 }

 func TestTwoControllers(t *testing.T) {
@@ -427,7 +430,7 @@ func TestTwoControllers(t *testing.T) {
 	add(t, dc.rcLister.Indexer, rc)
 	dc.sync(pdbName)

-	ps.VerifyPdbStatus(t, pdbName, false, 0, 0, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0)

 	pods := []*api.Pod{}

@@ -442,11 +445,11 @@ func TestTwoControllers(t *testing.T) {
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 		if i <= unavailablePods {
-			ps.VerifyPdbStatus(t, pdbName, false, 0, minimumOne, collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, 0, minimumOne, collectionSize)
 		} else if i-unavailablePods <= minimumOne {
-			ps.VerifyPdbStatus(t, pdbName, false, i-unavailablePods, minimumOne, collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, i-unavailablePods, minimumOne, collectionSize)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, i-unavailablePods, minimumOne, collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 1, i-unavailablePods, minimumOne, collectionSize)
 		}
 	}

@@ -454,14 +457,14 @@ func TestTwoControllers(t *testing.T) {
 	d.Spec.Selector = newSel(dLabels)
 	add(t, dc.dLister.Indexer, d)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, minimumOne+1, minimumOne, collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize)

 	rs, _ := newReplicaSet(t, collectionSize)
 	rs.Spec.Selector = newSel(dLabels)
 	rs.Labels = dLabels
 	add(t, dc.rsLister.Indexer, rs)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, minimumOne+1, minimumOne, collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize)

 	// By the end of this loop, the number of ready pods should be N+2 (hence minimumTwo+2).
 	unavailablePods = 2*collectionSize - (minimumTwo + 2) - unavailablePods
@@ -475,32 +478,33 @@ func TestTwoControllers(t *testing.T) {
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 		if i <= unavailablePods {
-			ps.VerifyPdbStatus(t, pdbName, false, minimumOne+1, minimumTwo, 2*collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, minimumOne+1, minimumTwo, 2*collectionSize)
 		} else if i-unavailablePods <= minimumTwo-(minimumOne+1) {
-			ps.VerifyPdbStatus(t, pdbName, false, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, i-unavailablePods-(minimumTwo-(minimumOne+1)),
+				(minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
 		}
 	}

 	// Now we verify we can bring down 1 pod and a disruption is still permitted,
 	// but if we bring down two, it's not. Then we make the pod ready again and
 	// verify that a disruption is permitted again.
-	ps.VerifyPdbStatus(t, pdbName, true, 2+minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize)
 	pods[collectionSize-1].Status.Conditions = []api.PodCondition{}
 	update(t, dc.podLister.Indexer, pods[collectionSize-1])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 1+minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize)

 	pods[collectionSize-2].Status.Conditions = []api.PodCondition{}
 	update(t, dc.podLister.Indexer, pods[collectionSize-2])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, false, minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize)

 	pods[collectionSize-1].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
 	update(t, dc.podLister.Indexer, pods[collectionSize-1])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 1+minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize)
 }

 // Test pdb doesn't exist
@@ -32,7 +32,7 @@ import (
 	batchapiv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
 	certificatesapiv1alpha1 "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
 	extensionsapiv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-	policyapiv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
+	policyapiv1beta1 "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
 	rbacapi "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
 	storageapiv1beta1 "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
@@ -347,7 +347,7 @@ func DefaultAPIResourceConfigSource() *genericapiserver.ResourceConfig {
 		authenticationv1beta1.SchemeGroupVersion,
 		autoscalingapiv1.SchemeGroupVersion,
 		appsapi.SchemeGroupVersion,
-		policyapiv1alpha1.SchemeGroupVersion,
+		policyapiv1beta1.SchemeGroupVersion,
 		rbacapi.SchemeGroupVersion,
 		storageapiv1beta1.SchemeGroupVersion,
 		certificatesapiv1alpha1.SchemeGroupVersion,
@@ -17,6 +17,8 @@ limitations under the License.
 package etcd

 import (
+	"fmt"
+
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/rest"
 	"k8s.io/kubernetes/pkg/api/unversioned"
@@ -103,11 +105,13 @@ func (r *EvictionREST) Create(ctx api.Context, obj runtime.Object) (runtime.Obje
 }

 func (r *EvictionREST) checkAndDecrement(namespace string, pdb policy.PodDisruptionBudget) (ok bool, err error) {
-	if !pdb.Status.PodDisruptionAllowed {
+	if pdb.Status.PodDisruptionsAllowed < 0 {
+		return false, fmt.Errorf("pdb disruptions allowed is negative")
+	}
+	if pdb.Status.PodDisruptionsAllowed == 0 {
 		return false, nil
 	}
-
-	pdb.Status.PodDisruptionAllowed = false
+	pdb.Status.PodDisruptionsAllowed--
 	if _, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(namespace).UpdateStatus(&pdb); err != nil {
 		return false, err
 	}
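The /eviction subresource now treats the budget as a counter: a negative value is reported as an error, zero blocks the eviction, and a positive value is decremented before the status is written back. A minimal standalone sketch of that decision logic (illustrative only; it models the budget as a plain int32 instead of going through the PodDisruptionBudget client):

package main

import (
	"errors"
	"fmt"
)

// tryConsumeBudget mirrors the checkAndDecrement flow above: a negative budget
// is reported as an error, zero refuses the eviction, and a positive budget is
// decremented by one for the disruption being approved.
func tryConsumeBudget(disruptionsAllowed int32) (int32, bool, error) {
	if disruptionsAllowed < 0 {
		return disruptionsAllowed, false, errors.New("pdb disruptions allowed is negative")
	}
	if disruptionsAllowed == 0 {
		return disruptionsAllowed, false, nil
	}
	return disruptionsAllowed - 1, true, nil
}

func main() {
	budget, ok, err := tryConsumeBudget(1)
	fmt.Println(budget, ok, err) // 0 true <nil>: the single allowed disruption is consumed
	budget, ok, err = tryConsumeBudget(budget)
	fmt.Println(budget, ok, err) // 0 false <nil>: budget exhausted, eviction refused
}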
@@ -108,7 +108,7 @@ func (c LegacyRESTStorageProvider) NewLegacyRESTStorage(restOptionsGetter generi
 	}

 	var podDisruptionClient policyclient.PodDisruptionBudgetsGetter
-	if policyGroupVersion := (unversioned.GroupVersion{Group: "policy", Version: "v1alpha1"}); registered.IsEnabledVersion(policyGroupVersion) {
+	if policyGroupVersion := (unversioned.GroupVersion{Group: "policy", Version: "v1beta1"}); registered.IsEnabledVersion(policyGroupVersion) {
 		apiGroupInfo.SubresourceGroupVersionKind["pods/eviction"] = policyGroupVersion.WithKind("Eviction")

 		var err error
@@ -234,7 +234,7 @@ func (c LegacyRESTStorageProvider) NewLegacyRESTStorage(restOptionsGetter generi
 	if registered.IsEnabledVersion(unversioned.GroupVersion{Group: "autoscaling", Version: "v1"}) {
 		restStorageMap["replicationControllers/scale"] = controllerStorage.Scale
 	}
-	if registered.IsEnabledVersion(unversioned.GroupVersion{Group: "policy", Version: "v1alpha1"}) {
+	if registered.IsEnabledVersion(unversioned.GroupVersion{Group: "policy", Version: "v1beta1"}) {
 		restStorageMap["pods/eviction"] = podStorage.Eviction
 	}
 	apiGroupInfo.VersionedResourcesStorageMap["v1"] = restStorageMap
@@ -53,10 +53,10 @@ func TestPodDisruptionBudgetStrategy(t *testing.T) {
 		ObjectMeta: api.ObjectMeta{Name: pdb.Name, Namespace: pdb.Namespace},
 		Spec:       pdb.Spec,
 		Status: policy.PodDisruptionBudgetStatus{
-			PodDisruptionAllowed: true,
-			CurrentHealthy:       3,
-			DesiredHealthy:       3,
-			ExpectedPods:         3,
+			PodDisruptionsAllowed: 1,
+			CurrentHealthy:        3,
+			DesiredHealthy:        3,
+			ExpectedPods:          3,
 		},
 	}

@@ -101,10 +101,10 @@ func TestPodDisruptionBudgetStatusStrategy(t *testing.T) {
 			MinAvailable: intstr.FromInt(3),
 		},
 		Status: policy.PodDisruptionBudgetStatus{
-			PodDisruptionAllowed: true,
-			CurrentHealthy:       3,
-			DesiredHealthy:       3,
-			ExpectedPods:         3,
+			PodDisruptionsAllowed: 1,
+			CurrentHealthy:        3,
+			DesiredHealthy:        3,
+			ExpectedPods:          3,
 		},
 	}
 	newPdb := &policy.PodDisruptionBudget{
@@ -114,10 +114,10 @@ func TestPodDisruptionBudgetStatusStrategy(t *testing.T) {
 			MinAvailable: intstr.FromInt(2),
 		},
 		Status: policy.PodDisruptionBudgetStatus{
-			PodDisruptionAllowed: false,
-			CurrentHealthy:       2,
-			DesiredHealthy:       3,
-			ExpectedPods:         3,
+			PodDisruptionsAllowed: 0,
+			CurrentHealthy:        2,
+			DesiredHealthy:        3,
+			ExpectedPods:          3,
 		},
 	}
 	StatusStrategy.PrepareForUpdate(ctx, newPdb, oldPdb)
@@ -19,7 +19,7 @@ package rest
 import (
 	"k8s.io/kubernetes/pkg/api/rest"
 	"k8s.io/kubernetes/pkg/apis/policy"
-	policyapiv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
+	policyapiv1beta1 "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
 	"k8s.io/kubernetes/pkg/genericapiserver"
 	poddisruptionbudgetetcd "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/etcd"
 )
@@ -31,17 +31,15 @@ var _ genericapiserver.RESTStorageProvider = &RESTStorageProvider{}
 func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource genericapiserver.APIResourceConfigSource, restOptionsGetter genericapiserver.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) {
 	apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(policy.GroupName)

-	if apiResourceConfigSource.AnyResourcesForVersionEnabled(policyapiv1alpha1.SchemeGroupVersion) {
-		apiGroupInfo.VersionedResourcesStorageMap[policyapiv1alpha1.SchemeGroupVersion.Version] = p.v1alpha1Storage(apiResourceConfigSource, restOptionsGetter)
-		apiGroupInfo.GroupMeta.GroupVersion = policyapiv1alpha1.SchemeGroupVersion
+	if apiResourceConfigSource.AnyResourcesForVersionEnabled(policyapiv1beta1.SchemeGroupVersion) {
+		apiGroupInfo.VersionedResourcesStorageMap[policyapiv1beta1.SchemeGroupVersion.Version] = p.v1beta1Storage(apiResourceConfigSource, restOptionsGetter)
+		apiGroupInfo.GroupMeta.GroupVersion = policyapiv1beta1.SchemeGroupVersion
 	}
-
 	return apiGroupInfo, true
 }

-func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource genericapiserver.APIResourceConfigSource, restOptionsGetter genericapiserver.RESTOptionsGetter) map[string]rest.Storage {
-	version := policyapiv1alpha1.SchemeGroupVersion
-
+func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource genericapiserver.APIResourceConfigSource, restOptionsGetter genericapiserver.RESTOptionsGetter) map[string]rest.Storage {
+	version := policyapiv1beta1.SchemeGroupVersion
 	storage := map[string]rest.Storage{}
 	if apiResourceConfigSource.ResourceEnabled(version.WithResource("poddisruptionbudgets")) {
 		poddisruptionbudgetStorage, poddisruptionbudgetStatusStorage := poddisruptionbudgetetcd.NewREST(restOptionsGetter(policy.Resource("poddisruptionbudgets")))
@@ -26,7 +26,7 @@ import (
 	"k8s.io/client-go/pkg/api/unversioned"
 	api "k8s.io/client-go/pkg/api/v1"
 	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
-	policy "k8s.io/client-go/pkg/apis/policy/v1alpha1"
+	policy "k8s.io/client-go/pkg/apis/policy/v1beta1"
 	"k8s.io/client-go/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"