Add policy api version v1beta1 and disable v1alpha1

This commit moves the policy API group from v1alpha1 to v1beta1 and, as part of
the bump, replaces the boolean status field PodDisruptionAllowed with an integer
counter, PodDisruptionsAllowed. The disruption controller, status validation,
eviction registry, REST storage wiring, fuzzer, and tests are updated to match.

parent db68b906e1
commit 26acced6d8
@@ -426,7 +426,7 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 		}
 	}

-	groupVersion = "policy/v1alpha1"
+	groupVersion = "policy/v1beta1"
 	resources, found = resourceMap[groupVersion]
 	glog.Infof("Attempting to start disruption controller, full resource map %+v", resourceMap)
 	if containsVersion(versions, groupVersion) && found {
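For context, the gate above only starts the disruption controller when the expected policy group version is both advertised by the server and present in the discovered resource map; this commit moves that expectation to policy/v1beta1. A minimal standalone sketch of the gating logic, with a hypothetical containsVersion helper mirroring the one named in the hunk:

package main

import "fmt"

// containsVersion reports whether the advertised version list includes
// groupVersion (a simplified stand-in for the controller manager's helper).
func containsVersion(versions []string, groupVersion string) bool {
	for _, v := range versions {
		if v == groupVersion {
			return true
		}
	}
	return false
}

func main() {
	versions := []string{"batch/v1", "policy/v1beta1"}     // from discovery
	resourceMap := map[string]bool{"policy/v1beta1": true} // resources found

	groupVersion := "policy/v1beta1"
	if containsVersion(versions, groupVersion) && resourceMap[groupVersion] {
		fmt.Println("starting disruption controller")
	}
}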
@@ -66,7 +66,7 @@ func New() *Generator {
 		`+k8s.io/kubernetes/pkg/watch/versioned`,
 		`k8s.io/kubernetes/pkg/api/unversioned`,
 		`k8s.io/kubernetes/pkg/api/v1`,
-		`k8s.io/kubernetes/pkg/apis/policy/v1alpha1`,
+		`k8s.io/kubernetes/pkg/apis/policy/v1beta1`,
 		`k8s.io/kubernetes/pkg/apis/extensions/v1beta1`,
 		`k8s.io/kubernetes/pkg/apis/autoscaling/v1`,
 		`k8s.io/kubernetes/pkg/apis/authorization/v1beta1`,
@@ -58,7 +58,7 @@ batch/v2alpha1 \
 certificates.k8s.io/v1alpha1 \
 extensions/v1beta1 \
 imagepolicy.k8s.io/v1alpha1 \
-policy/v1alpha1 \
+policy/v1beta1 \
 rbac.authorization.k8s.io/v1alpha1 \
 storage.k8s.io/v1beta1\
 }"
@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/autoscaling"
 	"k8s.io/kubernetes/pkg/apis/batch"
 	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/apis/rbac"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
@@ -552,6 +553,10 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source)
 			obj.APIPort = 20
 			obj.DiscoveryPort = 20
 		},
+		func(s *policy.PodDisruptionBudgetStatus, c fuzz.Continue) {
+			c.FuzzNoCustom(s) // fuzz self without calling this function again
+			s.PodDisruptionsAllowed = int32(c.Rand.Intn(2))
+		},
 	)
 	return f
 }
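The added fuzz function pins PodDisruptionsAllowed to 0 or 1, presumably so that fuzzed statuses stay non-negative and survive the new status validation during serialization round-trip tests. A standalone sketch of that constraint (a hypothetical helper, not the fuzzer itself):

package main

import (
	"fmt"
	"math/rand"
)

// fuzzDisruptionsAllowed mimics int32(c.Rand.Intn(2)): the fuzzed field only
// ever takes the values 0 or 1, both of which pass non-negative validation.
func fuzzDisruptionsAllowed(r *rand.Rand) int32 {
	return int32(r.Intn(2))
}

func main() {
	r := rand.New(rand.NewSource(1))
	for i := 0; i < 5; i++ {
		fmt.Println(fuzzDisruptionsAllowed(r)) // always 0 or 1
	}
}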
@@ -152,7 +152,7 @@ func TestKindForGroupVersionKinds(t *testing.T) {
 	gvks := GroupVersions{
 		GroupVersion{Group: "batch", Version: "v1"},
 		GroupVersion{Group: "batch", Version: "v2alpha1"},
-		GroupVersion{Group: "policy", Version: "v1alpha1"},
+		GroupVersion{Group: "policy", Version: "v1beta1"},
 	}
 	cases := []struct {
 		input []GroupVersionKind
@@ -170,8 +170,8 @@ func TestKindForGroupVersionKinds(t *testing.T) {
 			ok: true,
 		},
 		{
-			input:  []GroupVersionKind{{Group: "policy", Version: "v1alpha1", Kind: "PodDisruptionBudget"}},
-			target: GroupVersionKind{Group: "policy", Version: "v1alpha1", Kind: "PodDisruptionBudget"},
+			input:  []GroupVersionKind{{Group: "policy", Version: "v1beta1", Kind: "PodDisruptionBudget"}},
+			target: GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodDisruptionBudget"},
 			ok: true,
 		},
 		{
@@ -21,19 +21,19 @@ package install
 import (
 	"k8s.io/kubernetes/pkg/apimachinery/announced"
 	"k8s.io/kubernetes/pkg/apis/policy"
-	"k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
+	"k8s.io/kubernetes/pkg/apis/policy/v1beta1"
 )

 func init() {
 	if err := announced.NewGroupMetaFactory(
 		&announced.GroupMetaFactoryArgs{
 			GroupName: policy.GroupName,
-			VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version},
+			VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version},
 			ImportPrefix: "k8s.io/kubernetes/pkg/apis/policy",
 			AddInternalObjectsToScheme: policy.AddToScheme,
 		},
 		announced.VersionToSchemeFunc{
-			v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme,
+			v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme,
 		},
 	).Announce().RegisterAndEnable(); err != nil {
 		panic(err)
@@ -40,8 +40,8 @@ type PodDisruptionBudgetSpec struct {
 // PodDisruptionBudgetStatus represents information about the status of a
 // PodDisruptionBudget. Status may trail the actual state of a system.
 type PodDisruptionBudgetStatus struct {
-	// Whether or not a disruption is currently allowed.
-	PodDisruptionAllowed bool `json:"disruptionAllowed"`
+	// Number of pod disruptions that are currently allowed.
+	PodDisruptionsAllowed int32 `json:"disruptionsAllowed"`

 	// current number of healthy pods
 	CurrentHealthy int32 `json:"currentHealthy"`
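The status field changes from a boolean gate to a counter: rather than recording whether any disruption is allowed, the budget now records how many voluntary disruptions may proceed, so several evictions can be approved against one status snapshot. A minimal standalone sketch of the new semantics (a hypothetical type, not the API struct):

package main

import "fmt"

// budget is a stand-in for the status: a count of evictions that may still
// be approved, rather than a one-shot boolean.
type budget struct {
	DisruptionsAllowed int32
}

func main() {
	b := budget{DisruptionsAllowed: 2}
	for i := 0; i < 3; i++ {
		if b.DisruptionsAllowed > 0 {
			b.DisruptionsAllowed-- // each approved eviction consumes one unit
			fmt.Println("eviction", i, "approved")
		} else {
			fmt.Println("eviction", i, "denied") // budget exhausted
		}
	}
}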
pkg/apis/policy/v1beta1/register.go (new file, 50 lines)
@@ -0,0 +1,50 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/runtime"
+	versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "policy"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&PodDisruptionBudget{},
+		&PodDisruptionBudgetList{},
+		&Eviction{},
+		&v1.ListOptions{},
+		&v1.DeleteOptions{},
+		&v1.ExportOptions{},
+	)
+	// Add the watch version that applies
+	versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
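As elsewhere in this tree, registration of the new version is triggered by the install package's init() (see the install diff above). A minimal sketch of how a caller would typically pull it in, assuming the import path shown in that diff:

package main

import (
	// Blank import for its side effect: init() announces the policy group
	// and enables it with v1beta1 as the preferred version.
	_ "k8s.io/kubernetes/pkg/apis/policy/install"
)

func main() {}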
@@ -20,6 +20,7 @@ import (
 	"reflect"

 	unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation"
+	apivalidation "k8s.io/kubernetes/pkg/api/validation"
 	extensionsvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation"
 	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/util/validation/field"
@@ -27,6 +28,7 @@ import (

 func ValidatePodDisruptionBudget(pdb *policy.PodDisruptionBudget) field.ErrorList {
 	allErrs := ValidatePodDisruptionBudgetSpec(pdb.Spec, field.NewPath("spec"))
+	allErrs = append(allErrs, ValidatePodDisruptionBudgetStatus(pdb.Status, field.NewPath("status"))...)
 	return allErrs
 }
@@ -39,6 +41,7 @@ func ValidatePodDisruptionBudgetUpdate(pdb, oldPdb *policy.PodDisruptionBudget)
 	if !reflect.DeepEqual(pdb, oldPdb) {
 		allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to poddisruptionbudget spec are forbidden."))
 	}
+	allErrs = append(allErrs, ValidatePodDisruptionBudgetStatus(pdb.Status, field.NewPath("status"))...)

 	pdb.Generation = restoreGeneration
 	return allErrs
@@ -53,3 +56,12 @@ func ValidatePodDisruptionBudgetSpec(spec policy.PodDisruptionBudgetSpec, fldPat

 	return allErrs
 }
+
+func ValidatePodDisruptionBudgetStatus(status policy.PodDisruptionBudgetStatus, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.PodDisruptionsAllowed), fldPath.Child("podDisruptionsAllowed"))...)
+	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentHealthy), fldPath.Child("currentHealthy"))...)
+	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredHealthy), fldPath.Child("desiredHealthy"))...)
+	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ExpectedPods), fldPath.Child("expectedPods"))...)
+	return allErrs
+}
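Each status field is funneled through the same non-negative check. A standalone sketch of that shape (a simplified stand-in, not the real apivalidation helper):

package main

import "fmt"

// validateNonnegative returns an error string when value is negative,
// mirroring the shape of apivalidation.ValidateNonnegativeField.
func validateNonnegative(value int64, path string) []string {
	if value < 0 {
		return []string{fmt.Sprintf("%s: must be greater than or equal to 0", path)}
	}
	return nil
}

func main() {
	var errs []string
	errs = append(errs, validateNonnegative(-10, "status.podDisruptionsAllowed")...)
	errs = append(errs, validateNonnegative(5, "status.currentHealthy")...)
	fmt.Println(errs) // only the negative field produces an error
}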
@@ -60,3 +60,28 @@ func TestValidatePodDisruptionBudgetSpec(t *testing.T) {
 		}
 	}
 }
+
+func TestValidatePodDisruptionBudgetStatus(t *testing.T) {
+	successCases := []policy.PodDisruptionBudgetStatus{
+		{PodDisruptionsAllowed: 10},
+		{CurrentHealthy: 5},
+		{DesiredHealthy: 3},
+		{ExpectedPods: 2}}
+	for _, c := range successCases {
+		errors := ValidatePodDisruptionBudgetStatus(c, field.NewPath("status"))
+		if len(errors) > 0 {
+			t.Errorf("unexpected failure %v for %v", errors, c)
+		}
+	}
+	failureCases := []policy.PodDisruptionBudgetStatus{
+		{PodDisruptionsAllowed: -10},
+		{CurrentHealthy: -5},
+		{DesiredHealthy: -3},
+		{ExpectedPods: -2}}
+	for _, c := range failureCases {
+		errors := ValidatePodDisruptionBudgetStatus(c, field.NewPath("status"))
+		if len(errors) == 0 {
+			t.Errorf("unexpected success for %v", c)
+		}
+	}
+}
@@ -541,8 +541,8 @@ Pod:
 		return
 	}

-// failSafe is an attempt to at least update the PodDisruptionAllowed field to
-// false if everything something else has failed. This is one place we
+// failSafe is an attempt to at least update the PodDisruptionsAllowed field to
+// 0 if everything else has failed. This is one place we
 // implement the "fail open" part of the design since if we manage to update
 // this field correctly, we will prevent the /evict handler from approving an
 // eviction when it may be unsafe to do so.
@@ -552,7 +552,7 @@ func (dc *DisruptionController) failSafe(pdb *policy.PodDisruptionBudget) error
 		return err
 	}
 	newPdb := obj.(policy.PodDisruptionBudget)
-	newPdb.Status.PodDisruptionAllowed = false
+	newPdb.Status.PodDisruptionsAllowed = 0

 	return dc.getUpdater()(&newPdb)
 }
@@ -562,9 +562,12 @@ func (dc *DisruptionController) updatePdbSpec(pdb *policy.PodDisruptionBudget, c
 	// pods are in a safe state when their first pods appear but this controller
 	// has not updated their status yet. This isn't the only race, but it's a
 	// common one that's easy to detect.
-	disruptionAllowed := currentHealthy-1 >= desiredHealthy && expectedCount > 0
+	disruptionsAllowed := currentHealthy - desiredHealthy
+	if expectedCount <= 0 || disruptionsAllowed <= 0 {
+		disruptionsAllowed = 0
+	}

-	if pdb.Status.CurrentHealthy == currentHealthy && pdb.Status.DesiredHealthy == desiredHealthy && pdb.Status.ExpectedPods == expectedCount && pdb.Status.PodDisruptionAllowed == disruptionAllowed {
+	if pdb.Status.CurrentHealthy == currentHealthy && pdb.Status.DesiredHealthy == desiredHealthy && pdb.Status.ExpectedPods == expectedCount && pdb.Status.PodDisruptionsAllowed == disruptionsAllowed {
 		return nil
 	}
@@ -575,10 +578,10 @@ func (dc *DisruptionController) updatePdbSpec(pdb *policy.PodDisruptionBudget, c
 	newPdb := obj.(policy.PodDisruptionBudget)

 	newPdb.Status = policy.PodDisruptionBudgetStatus{
 		CurrentHealthy: currentHealthy,
 		DesiredHealthy: desiredHealthy,
 		ExpectedPods:   expectedCount,
-		PodDisruptionAllowed: disruptionAllowed,
+		PodDisruptionsAllowed: disruptionsAllowed,
 	}

 	return dc.getUpdater()(&newPdb)
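The controller's arithmetic replaces the old boolean test (currentHealthy-1 >= desiredHealthy && expectedCount > 0) with a clamped surplus: the budget is the number of healthy pods above the required minimum, zero when there is no surplus or when the expected pod count is not positive. A standalone sketch with a few worked values:

package main

import "fmt"

// disruptionsAllowed mirrors the new computation in updatePdbSpec: the surplus
// of healthy pods over the desired minimum, clamped at zero, and zero whenever
// the expected pod count is not positive (fail closed).
func disruptionsAllowed(currentHealthy, desiredHealthy, expectedCount int32) int32 {
	allowed := currentHealthy - desiredHealthy
	if expectedCount <= 0 || allowed <= 0 {
		return 0
	}
	return allowed
}

func main() {
	fmt.Println(disruptionsAllowed(4, 3, 4)) // 1: one eviction may proceed
	fmt.Println(disruptionsAllowed(3, 3, 4)) // 0: at the minimum, none allowed
	fmt.Println(disruptionsAllowed(5, 3, 0)) // 0: no pods expected, fail closed
}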
@@ -19,6 +19,7 @@ package disruption
 import (
 	"fmt"
 	"reflect"
+	"runtime/debug"
 	"testing"

 	"k8s.io/kubernetes/pkg/api"
@@ -53,23 +54,25 @@ func (ps *pdbStates) Get(key string) policy.PodDisruptionBudget {
 	return (*ps)[key]
 }

-func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionAllowed bool, currentHealthy, desiredHealthy, expectedPods int32) {
+func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionsAllowed, currentHealthy, desiredHealthy, expectedPods int32) {
 	expectedStatus := policy.PodDisruptionBudgetStatus{
-		PodDisruptionAllowed: disruptionAllowed,
+		PodDisruptionsAllowed: disruptionsAllowed,
 		CurrentHealthy: currentHealthy,
 		DesiredHealthy: desiredHealthy,
 		ExpectedPods:   expectedPods,
 	}
 	actualStatus := ps.Get(key).Status
 	if !reflect.DeepEqual(actualStatus, expectedStatus) {
+		debug.PrintStack()
 		t.Fatalf("PDB %q status mismatch. Expected %+v but got %+v.", key, expectedStatus, actualStatus)
 	}
 }

-func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionAllowed bool) {
+func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionsAllowed int32) {
 	pdb := ps.Get(key)
-	if pdb.Status.PodDisruptionAllowed != disruptionAllowed {
-		t.Fatalf("PodDisruptionAllowed mismatch for PDB %q. Expected %v but got %v.", key, disruptionAllowed, pdb.Status.PodDisruptionAllowed)
+	if pdb.Status.PodDisruptionsAllowed != disruptionsAllowed {
+		debug.PrintStack()
+		t.Fatalf("PodDisruptionAllowed mismatch for PDB %q. Expected %v but got %v.", key, disruptionsAllowed, pdb.Status.PodDisruptionsAllowed)
 	}
 }
@@ -248,11 +251,11 @@ func TestNoSelector(t *testing.T) {

 	add(t, dc.pdbLister.Store, pdb)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, false, 0, 3, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0)

 	add(t, dc.podLister.Indexer, pod)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, false, 0, 3, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0)
 }

 // Verify that available/expected counts go up as we add pods, then verify that
@@ -267,13 +270,13 @@ func TestUnavailable(t *testing.T) {
 	// Add three pods, verifying that the counts go up at each step.
 	pods := []*api.Pod{}
 	for i := int32(0); i < 4; i++ {
-		ps.VerifyPdbStatus(t, pdbName, false, i, 3, i)
+		ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i)
 		pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
 		pods = append(pods, pod)
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 	}
-	ps.VerifyPdbStatus(t, pdbName, true, 4, 3, 4)
+	ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4)

 	// Now set one pod as unavailable
 	pods[0].Status.Conditions = []api.PodCondition{}
@@ -281,7 +284,7 @@ func TestUnavailable(t *testing.T) {
 	dc.sync(pdbName)

 	// Verify expected update
-	ps.VerifyPdbStatus(t, pdbName, false, 3, 3, 4)
+	ps.VerifyPdbStatus(t, pdbName, 0, 3, 3, 4)
 }

 // Create a pod with no controller, and verify that a PDB with a percentage
@@ -293,13 +296,13 @@ func TestNakedPod(t *testing.T) {
 	add(t, dc.pdbLister.Store, pdb)
 	dc.sync(pdbName)
 	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
-	ps.VerifyDisruptionAllowed(t, pdbName, false)
+	ps.VerifyDisruptionAllowed(t, pdbName, 0)

 	pod, _ := newPod(t, "naked")
 	add(t, dc.podLister.Indexer, pod)
 	dc.sync(pdbName)

-	ps.VerifyDisruptionAllowed(t, pdbName, false)
+	ps.VerifyDisruptionAllowed(t, pdbName, 0)
 }

 // Verify that we count the scale of a ReplicaSet even when it has no Deployment.
@@ -315,7 +318,7 @@ func TestReplicaSet(t *testing.T) {
 	pod, _ := newPod(t, "pod")
 	add(t, dc.podLister.Indexer, pod)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, false, 1, 2, 10)
+	ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10)
 }

 // Verify that multiple controllers doesn't allow the PDB to be set true.
@@ -335,7 +338,7 @@ func TestMultipleControllers(t *testing.T) {
 	dc.sync(pdbName)

 	// No controllers yet => no disruption allowed
-	ps.VerifyDisruptionAllowed(t, pdbName, false)
+	ps.VerifyDisruptionAllowed(t, pdbName, 0)

 	rc, _ := newReplicationController(t, 1)
 	rc.Name = "rc 1"
@@ -343,7 +346,7 @@ func TestMultipleControllers(t *testing.T) {
 	dc.sync(pdbName)

 	// One RC and 200%>1% healthy => disruption allowed
-	ps.VerifyDisruptionAllowed(t, pdbName, true)
+	ps.VerifyDisruptionAllowed(t, pdbName, 1)

 	rc, _ = newReplicationController(t, 1)
 	rc.Name = "rc 2"
@@ -351,7 +354,7 @@ func TestMultipleControllers(t *testing.T) {
 	dc.sync(pdbName)

 	// 100%>1% healthy BUT two RCs => no disruption allowed
-	ps.VerifyDisruptionAllowed(t, pdbName, false)
+	ps.VerifyDisruptionAllowed(t, pdbName, 0)
 }

 func TestReplicationController(t *testing.T) {
@@ -375,7 +378,7 @@ func TestReplicationController(t *testing.T) {
 	dc.sync(pdbName)
 	// It starts out at 0 expected because, with no pods, the PDB doesn't know
 	// about the RC. This is a known bug. TODO(mml): file issue
-	ps.VerifyPdbStatus(t, pdbName, false, 0, 0, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0)

 	pods := []*api.Pod{}

@@ -386,16 +389,16 @@ func TestReplicationController(t *testing.T) {
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 		if i < 2 {
-			ps.VerifyPdbStatus(t, pdbName, false, i+1, 2, 3)
+			ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, 3, 2, 3)
+			ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3)
 		}
 	}

 	rogue, _ := newPod(t, "rogue")
 	add(t, dc.podLister.Indexer, rogue)
 	dc.sync(pdbName)
-	ps.VerifyDisruptionAllowed(t, pdbName, false)
+	ps.VerifyDisruptionAllowed(t, pdbName, 0)
 }

 func TestTwoControllers(t *testing.T) {
@@ -427,7 +430,7 @@ func TestTwoControllers(t *testing.T) {
 	add(t, dc.rcLister.Indexer, rc)
 	dc.sync(pdbName)

-	ps.VerifyPdbStatus(t, pdbName, false, 0, 0, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0)

 	pods := []*api.Pod{}

@@ -442,11 +445,11 @@ func TestTwoControllers(t *testing.T) {
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 		if i <= unavailablePods {
-			ps.VerifyPdbStatus(t, pdbName, false, 0, minimumOne, collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, 0, minimumOne, collectionSize)
 		} else if i-unavailablePods <= minimumOne {
-			ps.VerifyPdbStatus(t, pdbName, false, i-unavailablePods, minimumOne, collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, i-unavailablePods, minimumOne, collectionSize)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, i-unavailablePods, minimumOne, collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 1, i-unavailablePods, minimumOne, collectionSize)
 		}
 	}
@@ -454,14 +457,14 @@ func TestTwoControllers(t *testing.T) {
 	d.Spec.Selector = newSel(dLabels)
 	add(t, dc.dLister.Indexer, d)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, minimumOne+1, minimumOne, collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize)

 	rs, _ := newReplicaSet(t, collectionSize)
 	rs.Spec.Selector = newSel(dLabels)
 	rs.Labels = dLabels
 	add(t, dc.rsLister.Indexer, rs)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, minimumOne+1, minimumOne, collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize)

 	// By the end of this loop, the number of ready pods should be N+2 (hence minimumTwo+2).
 	unavailablePods = 2*collectionSize - (minimumTwo + 2) - unavailablePods
@@ -475,32 +478,33 @@ func TestTwoControllers(t *testing.T) {
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 		if i <= unavailablePods {
-			ps.VerifyPdbStatus(t, pdbName, false, minimumOne+1, minimumTwo, 2*collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, minimumOne+1, minimumTwo, 2*collectionSize)
 		} else if i-unavailablePods <= minimumTwo-(minimumOne+1) {
-			ps.VerifyPdbStatus(t, pdbName, false, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, i-unavailablePods-(minimumTwo-(minimumOne+1)),
+				(minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
 		}
 	}

 	// Now we verify we can bring down 1 pod and a disruption is still permitted,
 	// but if we bring down two, it's not. Then we make the pod ready again and
 	// verify that a disruption is permitted again.
-	ps.VerifyPdbStatus(t, pdbName, true, 2+minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize)
 	pods[collectionSize-1].Status.Conditions = []api.PodCondition{}
 	update(t, dc.podLister.Indexer, pods[collectionSize-1])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 1+minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize)

 	pods[collectionSize-2].Status.Conditions = []api.PodCondition{}
 	update(t, dc.podLister.Indexer, pods[collectionSize-2])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, false, minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize)

 	pods[collectionSize-1].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
 	update(t, dc.podLister.Indexer, pods[collectionSize-1])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 1+minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize)
 }

 // Test pdb doesn't exist
@@ -32,7 +32,7 @@ import (
 	batchapiv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
 	certificatesapiv1alpha1 "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
 	extensionsapiv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-	policyapiv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
+	policyapiv1beta1 "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
 	rbacapi "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
 	storageapiv1beta1 "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
@@ -347,7 +347,7 @@ func DefaultAPIResourceConfigSource() *genericapiserver.ResourceConfig {
 	authenticationv1beta1.SchemeGroupVersion,
 	autoscalingapiv1.SchemeGroupVersion,
 	appsapi.SchemeGroupVersion,
-	policyapiv1alpha1.SchemeGroupVersion,
+	policyapiv1beta1.SchemeGroupVersion,
 	rbacapi.SchemeGroupVersion,
 	storageapiv1beta1.SchemeGroupVersion,
 	certificatesapiv1alpha1.SchemeGroupVersion,
@@ -17,6 +17,8 @@ limitations under the License.
 package etcd

 import (
+	"fmt"
+
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/rest"
 	"k8s.io/kubernetes/pkg/api/unversioned"
@@ -103,11 +105,13 @@ func (r *EvictionREST) Create(ctx api.Context, obj runtime.Object) (runtime.Obje
 }

 func (r *EvictionREST) checkAndDecrement(namespace string, pdb policy.PodDisruptionBudget) (ok bool, err error) {
-	if !pdb.Status.PodDisruptionAllowed {
+	if pdb.Status.PodDisruptionsAllowed < 0 {
+		return false, fmt.Errorf("pdb disruptions allowed is negative")
+	}
+	if pdb.Status.PodDisruptionsAllowed == 0 {
 		return false, nil
 	}
-	pdb.Status.PodDisruptionAllowed = false
+	pdb.Status.PodDisruptionsAllowed--

 	if _, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(namespace).UpdateStatus(&pdb); err != nil {
 		return false, err
 	}
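The eviction gate now consumes the counter instead of flipping a boolean: a negative budget is rejected as corrupt, zero denies the eviction, and a successful check decrements the budget before the status update is written back. A standalone sketch of that flow (pdbStatus is a hypothetical stand-in for the API status struct; the status write-back is omitted):

package main

import "fmt"

type pdbStatus struct {
	PodDisruptionsAllowed int32
}

// checkAndDecrement mirrors the new flow: reject a corrupt (negative) budget,
// deny when the budget is exhausted, otherwise consume one unit and approve.
func checkAndDecrement(s *pdbStatus) (bool, error) {
	if s.PodDisruptionsAllowed < 0 {
		return false, fmt.Errorf("pdb disruptions allowed is negative")
	}
	if s.PodDisruptionsAllowed == 0 {
		return false, nil
	}
	s.PodDisruptionsAllowed--
	return true, nil
}

func main() {
	s := pdbStatus{PodDisruptionsAllowed: 1}
	ok, _ := checkAndDecrement(&s)
	fmt.Println(ok, s.PodDisruptionsAllowed) // true 0: eviction approved
	ok, _ = checkAndDecrement(&s)
	fmt.Println(ok, s.PodDisruptionsAllowed) // false 0: budget exhausted
}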
@@ -108,7 +108,7 @@ func (c LegacyRESTStorageProvider) NewLegacyRESTStorage(restOptionsGetter generi
 	}

 	var podDisruptionClient policyclient.PodDisruptionBudgetsGetter
-	if policyGroupVersion := (unversioned.GroupVersion{Group: "policy", Version: "v1alpha1"}); registered.IsEnabledVersion(policyGroupVersion) {
+	if policyGroupVersion := (unversioned.GroupVersion{Group: "policy", Version: "v1beta1"}); registered.IsEnabledVersion(policyGroupVersion) {
 		apiGroupInfo.SubresourceGroupVersionKind["pods/eviction"] = policyGroupVersion.WithKind("Eviction")

 		var err error
@@ -234,7 +234,7 @@ func (c LegacyRESTStorageProvider) NewLegacyRESTStorage(restOptionsGetter generi
 	if registered.IsEnabledVersion(unversioned.GroupVersion{Group: "autoscaling", Version: "v1"}) {
 		restStorageMap["replicationControllers/scale"] = controllerStorage.Scale
 	}
-	if registered.IsEnabledVersion(unversioned.GroupVersion{Group: "policy", Version: "v1alpha1"}) {
+	if registered.IsEnabledVersion(unversioned.GroupVersion{Group: "policy", Version: "v1beta1"}) {
 		restStorageMap["pods/eviction"] = podStorage.Eviction
 	}
 	apiGroupInfo.VersionedResourcesStorageMap["v1"] = restStorageMap
@@ -53,10 +53,10 @@ func TestPodDisruptionBudgetStrategy(t *testing.T) {
 		ObjectMeta: api.ObjectMeta{Name: pdb.Name, Namespace: pdb.Namespace},
 		Spec: pdb.Spec,
 		Status: policy.PodDisruptionBudgetStatus{
-			PodDisruptionAllowed: true,
+			PodDisruptionsAllowed: 1,
 			CurrentHealthy: 3,
 			DesiredHealthy: 3,
 			ExpectedPods:   3,
 		},
 	}

@@ -101,10 +101,10 @@ func TestPodDisruptionBudgetStatusStrategy(t *testing.T) {
 			MinAvailable: intstr.FromInt(3),
 		},
 		Status: policy.PodDisruptionBudgetStatus{
-			PodDisruptionAllowed: true,
+			PodDisruptionsAllowed: 1,
 			CurrentHealthy: 3,
 			DesiredHealthy: 3,
 			ExpectedPods:   3,
 		},
 	}
 	newPdb := &policy.PodDisruptionBudget{
@@ -114,10 +114,10 @@ func TestPodDisruptionBudgetStatusStrategy(t *testing.T) {
 			MinAvailable: intstr.FromInt(2),
 		},
 		Status: policy.PodDisruptionBudgetStatus{
-			PodDisruptionAllowed: false,
+			PodDisruptionsAllowed: 0,
 			CurrentHealthy: 2,
 			DesiredHealthy: 3,
 			ExpectedPods:   3,
 		},
 	}
 	StatusStrategy.PrepareForUpdate(ctx, newPdb, oldPdb)
@@ -19,7 +19,7 @@ package rest
 import (
 	"k8s.io/kubernetes/pkg/api/rest"
 	"k8s.io/kubernetes/pkg/apis/policy"
-	policyapiv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
+	policyapiv1beta1 "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
 	"k8s.io/kubernetes/pkg/genericapiserver"
 	poddisruptionbudgetetcd "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/etcd"
 )
@@ -31,17 +31,15 @@ var _ genericapiserver.RESTStorageProvider = &RESTStorageProvider{}
 func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource genericapiserver.APIResourceConfigSource, restOptionsGetter genericapiserver.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) {
 	apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(policy.GroupName)

-	if apiResourceConfigSource.AnyResourcesForVersionEnabled(policyapiv1alpha1.SchemeGroupVersion) {
-		apiGroupInfo.VersionedResourcesStorageMap[policyapiv1alpha1.SchemeGroupVersion.Version] = p.v1alpha1Storage(apiResourceConfigSource, restOptionsGetter)
-		apiGroupInfo.GroupMeta.GroupVersion = policyapiv1alpha1.SchemeGroupVersion
+	if apiResourceConfigSource.AnyResourcesForVersionEnabled(policyapiv1beta1.SchemeGroupVersion) {
+		apiGroupInfo.VersionedResourcesStorageMap[policyapiv1beta1.SchemeGroupVersion.Version] = p.v1beta1Storage(apiResourceConfigSource, restOptionsGetter)
+		apiGroupInfo.GroupMeta.GroupVersion = policyapiv1beta1.SchemeGroupVersion
 	}

 	return apiGroupInfo, true
 }

-func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource genericapiserver.APIResourceConfigSource, restOptionsGetter genericapiserver.RESTOptionsGetter) map[string]rest.Storage {
-	version := policyapiv1alpha1.SchemeGroupVersion
+func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource genericapiserver.APIResourceConfigSource, restOptionsGetter genericapiserver.RESTOptionsGetter) map[string]rest.Storage {
+	version := policyapiv1beta1.SchemeGroupVersion

 	storage := map[string]rest.Storage{}
 	if apiResourceConfigSource.ResourceEnabled(version.WithResource("poddisruptionbudgets")) {
 		poddisruptionbudgetStorage, poddisruptionbudgetStatusStorage := poddisruptionbudgetetcd.NewREST(restOptionsGetter(policy.Resource("poddisruptionbudgets")))
@@ -26,7 +26,7 @@ import (
 	"k8s.io/client-go/pkg/api/unversioned"
 	api "k8s.io/client-go/pkg/api/v1"
 	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
-	policy "k8s.io/client-go/pkg/apis/policy/v1alpha1"
+	policy "k8s.io/client-go/pkg/apis/policy/v1beta1"
 	"k8s.io/client-go/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"