Introduce networking/v1alpha1 api, ClusterCIDR type
Introduce the networking/v1alpha1 API group and add the `ClusterCIDR` type to it. This type enables the NodeIPAM controller to support multiple ClusterCIDRs.
parent 51ea7b2169
commit 7093b10416
@@ -393,6 +393,8 @@ API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RBDPool
API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RadosUser
API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,CephFS
API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,StorageOS
API rule violation: names_match,k8s.io/api/networking/v1alpha1,ClusterCIDRSpec,IPv4
API rule violation: names_match,k8s.io/api/networking/v1alpha1,ClusterCIDRSpec,IPv6
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSON,Raw
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
@@ -260,6 +260,7 @@ var apiVersionPriorities = map[schema.GroupVersion]priority{
    {Group: "batch", Version: "v2alpha1"}:                {group: 17400, version: 9},
    {Group: "certificates.k8s.io", Version: "v1"}:        {group: 17300, version: 15},
    {Group: "networking.k8s.io", Version: "v1"}:          {group: 17200, version: 15},
    {Group: "networking.k8s.io", Version: "v1alpha1"}:    {group: 17200, version: 1},
    {Group: "policy", Version: "v1"}:                     {group: 17100, version: 15},
    {Group: "policy", Version: "v1beta1"}:                {group: 17100, version: 9},
    {Group: "rbac.authorization.k8s.io", Version: "v1"}:  {group: 17000, version: 15},
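In this map the group value keeps all networking.k8s.io versions together (17200) while the version value orders them within the group, higher being preferred; giving v1alpha1 a version priority of 1 keeps it sorted after the stable v1 (15) in discovery.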
@@ -92,6 +92,7 @@ events.k8s.io/v1 \
events.k8s.io/v1beta1 \
imagepolicy.k8s.io/v1alpha1 \
networking.k8s.io/v1 \
networking.k8s.io/v1alpha1 \
networking.k8s.io/v1beta1 \
node.k8s.io/v1 \
node.k8s.io/v1alpha1 \
@@ -370,3 +370,62 @@ func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorR
        selector = selector.Add(*r)
    return selector, nil
}

// nodeSelectorRequirementsAsLabelRequirements converts the NodeSelectorRequirement
// type to a labels.Requirement type.
func nodeSelectorRequirementsAsLabelRequirements(nsr v1.NodeSelectorRequirement) (*labels.Requirement, error) {
    var op selection.Operator
    switch nsr.Operator {
    case v1.NodeSelectorOpIn:
        op = selection.In
    case v1.NodeSelectorOpNotIn:
        op = selection.NotIn
    case v1.NodeSelectorOpExists:
        op = selection.Exists
    case v1.NodeSelectorOpDoesNotExist:
        op = selection.DoesNotExist
    case v1.NodeSelectorOpGt:
        op = selection.GreaterThan
    case v1.NodeSelectorOpLt:
        op = selection.LessThan
    default:
        return nil, fmt.Errorf("%q is not a valid node selector operator", nsr.Operator)
    }
    return labels.NewRequirement(nsr.Key, op, nsr.Values)
}

// NodeSelectorAsSelector converts the NodeSelector api type into a struct that
// implements labels.Selector
// Note: This function should be kept in sync with the selector methods in
// pkg/labels/selector.go
func NodeSelectorAsSelector(ns *v1.NodeSelector) (labels.Selector, error) {
    if ns == nil {
        return labels.Nothing(), nil
    }
    if len(ns.NodeSelectorTerms) == 0 {
        return labels.Everything(), nil
    }
    var requirements []labels.Requirement

    for _, nsTerm := range ns.NodeSelectorTerms {
        for _, expr := range nsTerm.MatchExpressions {
            req, err := nodeSelectorRequirementsAsLabelRequirements(expr)
            if err != nil {
                return nil, err
            }
            requirements = append(requirements, *req)
        }

        for _, field := range nsTerm.MatchFields {
            req, err := nodeSelectorRequirementsAsLabelRequirements(field)
            if err != nil {
                return nil, err
            }
            requirements = append(requirements, *req)
        }
    }

    selector := labels.NewSelector()
    selector = selector.Add(requirements...)
    return selector, nil
}
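To make the helper above concrete, here is a small self-contained sketch (not part of this diff; the label key and values are illustrative) that builds the same kind of labels.Selector by hand and matches it against a node's labels, which is how a node-selector-bearing ClusterCIDR would be judged applicable to a node:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/selection"
)

func main() {
    // Equivalent of one MatchExpressions entry: {key: "node-role", operator: In, values: ["edge"]}.
    req, err := labels.NewRequirement("node-role", selection.In, []string{"edge"})
    if err != nil {
        panic(err)
    }
    sel := labels.NewSelector().Add(*req)

    // Labels of a hypothetical node.
    nodeLabels := labels.Set{"node-role": "edge", "zone": "zone-a"}
    fmt.Println(sel.Matches(nodeLabels)) // true: the selector applies to this node
}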
@@ -24,6 +24,7 @@ import (
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/apis/networking"
    "k8s.io/kubernetes/pkg/apis/networking/v1"
    "k8s.io/kubernetes/pkg/apis/networking/v1alpha1"
    "k8s.io/kubernetes/pkg/apis/networking/v1beta1"
)

@@ -36,5 +37,6 @@ func Install(scheme *runtime.Scheme) {
    utilruntime.Must(networking.AddToScheme(scheme))
    utilruntime.Must(v1.AddToScheme(scheme))
    utilruntime.Must(v1beta1.AddToScheme(scheme))
    utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
    utilruntime.Must(v1alpha1.AddToScheme(scheme))
    utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion))
}
@@ -52,6 +52,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
        &IngressList{},
        &IngressClass{},
        &IngressClassList{},
        &ClusterCIDR{},
        &ClusterCIDRList{},
    )
    return nil
}
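As a quick sanity check of what this registration buys, a minimal sketch (not part of this diff; it uses the staging k8s.io/api/networking/v1alpha1 package introduced later in this commit) showing that a scheme with the v1alpha1 types added can resolve the kind of a ClusterCIDR object:

package main

import (
    "fmt"

    networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
    "k8s.io/apimachinery/pkg/runtime"
)

func main() {
    scheme := runtime.NewScheme()
    if err := networkingv1alpha1.AddToScheme(scheme); err != nil {
        panic(err)
    }
    // Look up the GroupVersionKind the scheme now knows for ClusterCIDR.
    gvks, _, err := scheme.ObjectKinds(&networkingv1alpha1.ClusterCIDR{})
    if err != nil {
        panic(err)
    }
    fmt.Println(gvks[0]) // networking.k8s.io/v1alpha1, Kind=ClusterCIDR
}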
@@ -583,3 +583,67 @@ type ServiceBackendPort struct {
    // +optional
    Number int32
}

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterCIDR represents a single configuration for per-Node Pod CIDR
// allocations when the MultiCIDRRangeAllocator is enabled (see the config for
// kube-controller-manager). A cluster may have any number of ClusterCIDR
// resources, all of which will be considered when allocating a CIDR for a
// Node. A ClusterCIDR is eligible to be used for a given Node when the node
// selector matches the node in question and has free CIDRs to allocate. In
// case of multiple matching ClusterCIDR resources, the allocator will attempt
// to break ties using internal heuristics, but any ClusterCIDR whose node
// selector matches the Node may be used.
type ClusterCIDR struct {
    metav1.TypeMeta
    metav1.ObjectMeta

    Spec ClusterCIDRSpec
}

// ClusterCIDRSpec defines the desired state of ClusterCIDR.
type ClusterCIDRSpec struct {
    // NodeSelector defines which nodes the config is applicable to.
    // An empty or nil NodeSelector selects all nodes.
    // This field is immutable.
    // +optional
    NodeSelector *api.NodeSelector

    // PerNodeHostBits defines the number of host bits to be configured per node.
    // A subnet mask determines how much of the address is used for network bits
    // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the
    // address into 24 bits for the network portion and 8 bits for the host portion.
    // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6).
    // Minimum value is 4 (16 IPs).
    // This field is immutable.
    // +required
    PerNodeHostBits int32

    // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8").
    // At least one of IPv4 and IPv6 must be specified.
    // This field is immutable.
    // +optional
    IPv4 string

    // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "fd12:3456:789a:1::/64").
    // At least one of IPv4 and IPv6 must be specified.
    // This field is immutable.
    // +optional
    IPv6 string
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterCIDRList contains a list of ClusterCIDRs.
type ClusterCIDRList struct {
    metav1.TypeMeta

    // +optional
    metav1.ListMeta

    // Items is the list of ClusterCIDRs.
    Items []ClusterCIDR
}
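The PerNodeHostBits comment implies straightforward arithmetic; a back-of-the-envelope sketch (not part of this diff; the helper name is made up) of the per-node mask and how many per-node CIDRs a single ClusterCIDR can hand out:

package main

import (
    "fmt"
    "net"
)

// perNodeCapacity mirrors the arithmetic described in the PerNodeHostBits comment.
func perNodeCapacity(cidr string, perNodeHostBits int) (perNodeMask, nodes int) {
    _, ipNet, err := net.ParseCIDR(cidr)
    if err != nil {
        panic(err)
    }
    clusterMask, familyBits := ipNet.Mask.Size() // e.g. 16 and 32 for "10.1.0.0/16"
    perNodeMask = familyBits - perNodeHostBits   // e.g. /24 when perNodeHostBits is 8
    nodes = 1 << (perNodeMask - clusterMask)     // number of per-node CIDRs that fit in the block
    return perNodeMask, nodes
}

func main() {
    mask, nodes := perNodeCapacity("10.1.0.0/16", 8)
    fmt.Printf("per-node CIDRs are /%d; %d nodes can be served\n", mask, nodes) // /24; 256 nodes
}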
25 pkg/apis/networking/v1alpha1/defaults.go Normal file
@@ -0,0 +1,25 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
func addDefaultingFuncs(scheme *runtime.Scheme) error {
|
||||
return RegisterDefaults(scheme)
|
||||
}
|
23 pkg/apis/networking/v1alpha1/doc.go Normal file
@@ -0,0 +1,23 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/networking
|
||||
// +k8s:conversion-gen-external-types=k8s.io/api/networking/v1alpha1
|
||||
// +k8s:defaulter-gen=TypeMeta
|
||||
// +k8s:defaulter-gen-input=k8s.io/api/networking/v1alpha1
|
||||
// +groupName=networking.k8s.io
|
||||
|
||||
package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/networking/v1alpha1"
|
45 pkg/apis/networking/v1alpha1/register.go Normal file
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// GroupName is the group name use in this package.
|
||||
const GroupName = "networking.k8s.io"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects.
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource.
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
|
||||
var (
|
||||
localSchemeBuilder = &networkingv1alpha1.SchemeBuilder
|
||||
AddToScheme = localSchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
func init() {
|
||||
// We only register manually written functions here. The registration of the
|
||||
// generated functions takes place in the generated files. The separation
|
||||
// makes the code compile even when the generated files are missing.
|
||||
localSchemeBuilder.Register(addDefaultingFuncs)
|
||||
}
|
@@ -20,6 +20,7 @@ import (
    "fmt"
    "strings"

    v1 "k8s.io/api/core/v1"
    apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
    pathvalidation "k8s.io/apimachinery/pkg/api/validation/path"
    unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"

@@ -602,3 +603,89 @@ func allowInvalidWildcardHostRule(oldIngress *networking.Ingress) bool {
    }
    return false
}

// ValidateClusterCIDRName validates that the given name can be used as an
// ClusterCIDR name.
var ValidateClusterCIDRName = apimachineryvalidation.NameIsDNSLabel

// ValidateClusterCIDR validates a ClusterCIDR.
func ValidateClusterCIDR(cc *networking.ClusterCIDR) field.ErrorList {
    allErrs := apivalidation.ValidateObjectMeta(&cc.ObjectMeta, false, ValidateClusterCIDRName, field.NewPath("metadata"))
    allErrs = append(allErrs, ValidateClusterCIDRSpec(&cc.Spec, field.NewPath("spec"))...)
    return allErrs
}

// ValidateClusterCIDRSpec validates ClusterCIDR Spec.
func ValidateClusterCIDRSpec(spec *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList {
    var allErrs field.ErrorList
    if spec.NodeSelector != nil {
        allErrs = append(allErrs, apivalidation.ValidateNodeSelector(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
    }

    // Validate if CIDR is specified for at least one IP Family(IPv4/IPv6).
    if spec.IPv4 == "" && spec.IPv6 == "" {
        allErrs = append(allErrs, field.Required(fldPath, "one or both of `ipv4` and `ipv6` must be specified"))
        return allErrs
    }

    // Validate specified IPv4 CIDR and PerNodeHostBits.
    if spec.IPv4 != "" {
        allErrs = append(allErrs, validateCIDRConfig(spec.IPv4, spec.PerNodeHostBits, 32, v1.IPv4Protocol, fldPath)...)
    }

    // Validate specified IPv6 CIDR and PerNodeHostBits.
    if spec.IPv6 != "" {
        allErrs = append(allErrs, validateCIDRConfig(spec.IPv6, spec.PerNodeHostBits, 128, v1.IPv6Protocol, fldPath)...)
    }

    return allErrs
}

func validateCIDRConfig(configCIDR string, perNodeHostBits, maxMaskSize int32, ipFamily v1.IPFamily, fldPath *field.Path) field.ErrorList {
    var allErrs field.ErrorList
    minPerNodeHostBits := int32(4)

    ip, ipNet, err := netutils.ParseCIDRSloppy(configCIDR)
    if err != nil {
        allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, fmt.Sprintf("must be a valid CIDR: %s", configCIDR)))
        return allErrs
    }

    if ipFamily == v1.IPv4Protocol && !netutils.IsIPv4(ip) {
        allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv4 CIDR"))
    }
    if ipFamily == v1.IPv6Protocol && !netutils.IsIPv6(ip) {
        allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv6 CIDR"))
    }

    // Validate PerNodeHostBits
    maskSize, _ := ipNet.Mask.Size()
    maxPerNodeHostBits := maxMaskSize - int32(maskSize)

    if perNodeHostBits < minPerNodeHostBits {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be greater than or equal to %d", minPerNodeHostBits)))
    }
    if perNodeHostBits > maxPerNodeHostBits {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be less than or equal to %d", maxPerNodeHostBits)))
    }
    return allErrs
}

// ValidateClusterCIDRUpdate tests if an update to a ClusterCIDR is valid.
func ValidateClusterCIDRUpdate(update, old *networking.ClusterCIDR) field.ErrorList {
    var allErrs field.ErrorList
    allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
    allErrs = append(allErrs, validateClusterCIDRUpdateSpec(&update.Spec, &old.Spec, field.NewPath("spec"))...)
    return allErrs
}

func validateClusterCIDRUpdateSpec(update, old *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList {
    var allErrs field.ErrorList

    allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.NodeSelector, old.NodeSelector, fldPath.Child("nodeSelector"))...)
    allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.PerNodeHostBits, old.PerNodeHostBits, fldPath.Child("perNodeHostBits"))...)
    allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv4, old.IPv4, fldPath.Child("ipv4"))...)
    allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv6, old.IPv6, fldPath.Child("ipv6"))...)

    return allErrs
}
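A usage sketch in the style of the existing validation tests (illustrative, not part of this diff): putting an IPv4 block into the IPv6 field should surface as an error from ValidateClusterCIDRSpec.

package validation

import (
    "testing"

    "k8s.io/apimachinery/pkg/util/validation/field"
    "k8s.io/kubernetes/pkg/apis/networking"
)

func TestIPv4BlockInIPv6FieldIsRejected(t *testing.T) {
    spec := &networking.ClusterCIDRSpec{
        PerNodeHostBits: 8,
        IPv6:            "10.2.0.0/16", // wrong family on purpose
    }
    if errs := ValidateClusterCIDRSpec(spec, field.NewPath("spec")); len(errs) == 0 {
        t.Fatalf("expected a field error on spec.IPv6, got none")
    }
}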
@ -1982,3 +1982,216 @@ func TestValidateIngressStatusUpdate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func makeNodeSelector(key string, op api.NodeSelectorOperator, values []string) *api.NodeSelector {
|
||||
return &api.NodeSelector{
|
||||
NodeSelectorTerms: []api.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []api.NodeSelectorRequirement{
|
||||
{
|
||||
Key: key,
|
||||
Operator: op,
|
||||
Values: values,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func makeClusterCIDR(perNodeHostBits int32, ipv4, ipv6 string, nodeSelector *api.NodeSelector) *networking.ClusterCIDR {
|
||||
return &networking.ClusterCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
ResourceVersion: "9",
|
||||
},
|
||||
Spec: networking.ClusterCIDRSpec{
|
||||
PerNodeHostBits: perNodeHostBits,
|
||||
IPv4: ipv4,
|
||||
IPv6: ipv6,
|
||||
NodeSelector: nodeSelector,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateClusterCIDR(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
cc *networking.ClusterCIDR
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid SingleStack IPv4 ClusterCIDR",
|
||||
cc: makeClusterCIDR(8, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits = maxPerNodeHostBits",
|
||||
cc: makeClusterCIDR(16, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits > minPerNodeHostBits",
|
||||
cc: makeClusterCIDR(4, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid SingleStack IPv6 ClusterCIDR",
|
||||
cc: makeClusterCIDR(8, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits = maxPerNodeHostBit",
|
||||
cc: makeClusterCIDR(64, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits > minPerNodeHostBit",
|
||||
cc: makeClusterCIDR(4, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid SingleStack IPv6 ClusterCIDR perNodeHostBits=100",
|
||||
cc: makeClusterCIDR(100, "", "fd00:1:1::/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid DualStack ClusterCIDR",
|
||||
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid DualStack ClusterCIDR, no NodeSelector",
|
||||
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", nil),
|
||||
expectErr: false,
|
||||
},
|
||||
// Failure cases.
|
||||
{
|
||||
name: "invalid ClusterCIDR, no IPv4 or IPv6 CIDR",
|
||||
cc: makeClusterCIDR(8, "", "", nil),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid ClusterCIDR, invalid nodeSelector",
|
||||
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("NoUppercaseOrSpecialCharsLike=Equals", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
// IPv4 tests.
|
||||
{
|
||||
name: "invalid SingleStack IPv4 ClusterCIDR, invalid spec.IPv4",
|
||||
cc: makeClusterCIDR(8, "test", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid Singlestack IPv4 ClusterCIDR, perNodeHostBits > maxPerNodeHostBits",
|
||||
cc: makeClusterCIDR(100, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid SingleStack IPv4 ClusterCIDR, perNodeHostBits < minPerNodeHostBits",
|
||||
cc: makeClusterCIDR(2, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
// IPv6 tests.
|
||||
{
|
||||
name: "invalid SingleStack IPv6 ClusterCIDR, invalid spec.IPv6",
|
||||
cc: makeClusterCIDR(8, "", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid SingleStack IPv6 ClusterCIDR, valid IPv4 CIDR in spec.IPv6",
|
||||
cc: makeClusterCIDR(8, "", "10.2.0.0/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits",
|
||||
cc: makeClusterCIDR(12, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits < minPerNodeHostBits",
|
||||
cc: makeClusterCIDR(3, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
// DualStack tests
|
||||
{
|
||||
name: "invalid DualStack ClusterCIDR, valid spec.IPv4, invalid spec.IPv6",
|
||||
cc: makeClusterCIDR(8, "10.1.0.0/16", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid DualStack ClusterCIDR, valid spec.IPv6, invalid spec.IPv4",
|
||||
cc: makeClusterCIDR(8, "testv4", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid DualStack ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits",
|
||||
cc: makeClusterCIDR(24, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid DualStack ClusterCIDR, valid IPv6 CIDR in spec.IPv4",
|
||||
cc: makeClusterCIDR(8, "fd00::/120", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
err := ValidateClusterCIDR(testCase.cc)
|
||||
if !testCase.expectErr && err != nil {
|
||||
t.Errorf("ValidateClusterCIDR(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err)
|
||||
}
|
||||
if testCase.expectErr && err == nil {
|
||||
t.Errorf("ValidateClusterCIDR(%+v) must return an error for test: %s, but got nil", testCase.cc, testCase.name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateClusterConfigUpdate(t *testing.T) {
|
||||
oldCCC := makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}))
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
cc *networking.ClusterCIDR
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "Successful update, no changes to ClusterCIDR.Spec",
|
||||
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "Failed update, update spec.PerNodeHostBits",
|
||||
cc: makeClusterCIDR(12, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "Failed update, update spec.IPv4",
|
||||
cc: makeClusterCIDR(8, "10.2.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "Failed update, update spec.IPv6",
|
||||
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:2:/112", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "Failed update, update spec.NodeSelector",
|
||||
cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar2"})),
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
err := ValidateClusterCIDRUpdate(testCase.cc, oldCCC)
|
||||
if !testCase.expectErr && err != nil {
|
||||
t.Errorf("ValidateClusterCIDRUpdate(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err)
|
||||
}
|
||||
if testCase.expectErr && err == nil {
|
||||
t.Errorf("ValidateClusterCIDRUpdate(%+v) must return error for test: %s, but got nil", testCase.cc, testCase.name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@@ -45,6 +45,7 @@ import (
    eventsv1beta1 "k8s.io/api/events/v1beta1"
    flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
    networkingapiv1 "k8s.io/api/networking/v1"
    networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1"
    nodev1 "k8s.io/api/node/v1"
    nodev1beta1 "k8s.io/api/node/v1beta1"
    policyapiv1 "k8s.io/api/policy/v1"

@@ -689,6 +690,7 @@ var (
    // alphaAPIGroupVersionsDisabledByDefault holds the alpha APIs we have. They are always disabled by default.
    alphaAPIGroupVersionsDisabledByDefault = []schema.GroupVersion{
        apiserverinternalv1alpha1.SchemeGroupVersion,
        networkingapiv1alpha1.SchemeGroupVersion,
        storageapiv1alpha1.SchemeGroupVersion,
        flowcontrolv1alpha1.SchemeGroupVersion,
    }
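Since networking.k8s.io/v1alpha1 lands in alphaAPIGroupVersionsDisabledByDefault, clustercidrs are not served out of the box; the usual opt-in (not shown in this diff) is the kube-apiserver flag --runtime-config=networking.k8s.io/v1alpha1=true, together with whatever kube-controller-manager configuration selects the MultiCIDRRangeAllocator referenced in the ClusterCIDR doc comment above.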
@@ -71,6 +71,7 @@ func NewStorageFactoryConfig() *StorageFactoryConfig {
        //
        // TODO (https://github.com/kubernetes/kubernetes/issues/108451): remove the override in 1.25.
        // apisstorage.Resource("csistoragecapacities").WithVersion("v1beta1"),
        networking.Resource("clustercidrs").WithVersion("v1alpha1"),
    }

    return &StorageFactoryConfig{
17 pkg/registry/networking/clustercidr/doc.go Normal file
@@ -0,0 +1,17 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package clustercidr // import "k8s.io/kubernetes/pkg/registry/networking/clustercidr"
|
63 pkg/registry/networking/clustercidr/storage/storage.go Normal file
@@ -0,0 +1,63 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/generic"
|
||||
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
networkingapi "k8s.io/kubernetes/pkg/apis/networking"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
|
||||
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
|
||||
"k8s.io/kubernetes/pkg/registry/networking/clustercidr"
|
||||
)
|
||||
|
||||
// REST implements a RESTStorage for ClusterCIDRs against etcd.
|
||||
type REST struct {
|
||||
*genericregistry.Store
|
||||
}
|
||||
|
||||
// NewREST returns a RESTStorage object that will work against ClusterCIDRs.
|
||||
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
|
||||
store := &genericregistry.Store{
|
||||
NewFunc: func() runtime.Object { return &networkingapi.ClusterCIDR{} },
|
||||
NewListFunc: func() runtime.Object { return &networkingapi.ClusterCIDRList{} },
|
||||
DefaultQualifiedResource: networkingapi.Resource("clustercidrs"),
|
||||
|
||||
CreateStrategy: clustercidr.Strategy,
|
||||
UpdateStrategy: clustercidr.Strategy,
|
||||
DeleteStrategy: clustercidr.Strategy,
|
||||
|
||||
TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
|
||||
}
|
||||
options := &generic.StoreOptions{RESTOptions: optsGetter}
|
||||
if err := store.CompleteWithOptions(options); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &REST{store}, nil
|
||||
}
|
||||
|
||||
// Implement ShortNamesProvider.
|
||||
var _ rest.ShortNamesProvider = &REST{}
|
||||
|
||||
// ShortNames implements the ShortNamesProvider interface. Returns a list of short names for a resource.
|
||||
func (r *REST) ShortNames() []string {
|
||||
return []string{"cc"}
|
||||
}
|
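Because the REST storage above registers the short name "cc", the resource should be reachable as kubectl get cc once the v1alpha1 group is enabled (assuming kubectl's normal discovery-based short-name resolution; not part of this diff).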
196 pkg/registry/networking/clustercidr/storage/storage_test.go Normal file
@@ -0,0 +1,196 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/generic"
|
||||
genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
|
||||
etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/apis/networking"
|
||||
_ "k8s.io/kubernetes/pkg/apis/networking/install"
|
||||
"k8s.io/kubernetes/pkg/registry/registrytest"
|
||||
)
|
||||
|
||||
func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) {
|
||||
etcdStorage, server := registrytest.NewEtcdStorageForResource(t, networking.Resource("clustercidrs"))
|
||||
restOptions := generic.RESTOptions{
|
||||
StorageConfig: etcdStorage,
|
||||
Decorator: generic.UndecoratedStorage,
|
||||
DeleteCollectionWorkers: 1,
|
||||
ResourcePrefix: "clustercidrs",
|
||||
}
|
||||
clusterCIDRStorage, err := NewREST(restOptions)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error from REST storage: %v", err)
|
||||
}
|
||||
return clusterCIDRStorage, server
|
||||
}
|
||||
|
||||
var (
|
||||
namespace = metav1.NamespaceNone
|
||||
name = "foo-clustercidr"
|
||||
)
|
||||
|
||||
func newClusterCIDR() *networking.ClusterCIDR {
|
||||
return &networking.ClusterCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: networking.ClusterCIDRSpec{
|
||||
PerNodeHostBits: int32(8),
|
||||
IPv4: "10.1.0.0/16",
|
||||
IPv6: "fd00:1:1::/64",
|
||||
NodeSelector: &api.NodeSelector{
|
||||
NodeSelectorTerms: []api.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []api.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "foo",
|
||||
Operator: api.NodeSelectorOpIn,
|
||||
Values: []string{"bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func validClusterCIDR() *networking.ClusterCIDR {
|
||||
return newClusterCIDR()
|
||||
}
|
||||
|
||||
func TestCreate(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
|
||||
test := genericregistrytest.New(t, storage.Store)
|
||||
test = test.ClusterScope()
|
||||
validCC := validClusterCIDR()
|
||||
noCIDRCC := validClusterCIDR()
|
||||
noCIDRCC.Spec.IPv4 = ""
|
||||
noCIDRCC.Spec.IPv6 = ""
|
||||
invalidCCPerNodeHostBits := validClusterCIDR()
|
||||
invalidCCPerNodeHostBits.Spec.PerNodeHostBits = 100
|
||||
invalidCCCIDR := validClusterCIDR()
|
||||
invalidCCCIDR.Spec.IPv6 = "10.1.0.0/16"
|
||||
|
||||
test.TestCreate(
|
||||
// valid
|
||||
validCC,
|
||||
//invalid
|
||||
noCIDRCC,
|
||||
invalidCCPerNodeHostBits,
|
||||
invalidCCCIDR,
|
||||
)
|
||||
}
|
||||
|
||||
func TestUpdate(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
test := genericregistrytest.New(t, storage.Store)
|
||||
test = test.ClusterScope()
|
||||
test.TestUpdate(
|
||||
// valid
|
||||
validClusterCIDR(),
|
||||
// updateFunc
|
||||
func(obj runtime.Object) runtime.Object {
|
||||
object := obj.(*networking.ClusterCIDR)
|
||||
object.Finalizers = []string{"test.k8s.io/test-finalizer"}
|
||||
return object
|
||||
},
|
||||
// invalid updateFunc: ObjectMeta is not to be tampered with.
|
||||
func(obj runtime.Object) runtime.Object {
|
||||
object := obj.(*networking.ClusterCIDR)
|
||||
object.Name = ""
|
||||
return object
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
test := genericregistrytest.New(t, storage.Store)
|
||||
test = test.ClusterScope()
|
||||
test.TestDelete(validClusterCIDR())
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
test := genericregistrytest.New(t, storage.Store)
|
||||
test = test.ClusterScope()
|
||||
test.TestGet(validClusterCIDR())
|
||||
}
|
||||
|
||||
func TestList(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
test := genericregistrytest.New(t, storage.Store)
|
||||
test = test.ClusterScope()
|
||||
test.TestList(validClusterCIDR())
|
||||
}
|
||||
|
||||
func TestWatch(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
test := genericregistrytest.New(t, storage.Store)
|
||||
test = test.ClusterScope()
|
||||
test.TestWatch(
|
||||
validClusterCIDR(),
|
||||
// matching labels
|
||||
[]labels.Set{},
|
||||
// not matching labels
|
||||
[]labels.Set{
|
||||
{"a": "c"},
|
||||
{"foo": "bar"},
|
||||
},
|
||||
// matching fields
|
||||
[]fields.Set{
|
||||
{"metadata.name": name},
|
||||
},
|
||||
// not matching fields
|
||||
[]fields.Set{
|
||||
{"metadata.name": "bar"},
|
||||
{"name": name},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestShortNames(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
expected := []string{"cc"}
|
||||
registrytest.AssertShortNames(t, storage, expected)
|
||||
}
|
82 pkg/registry/networking/clustercidr/strategy.go Normal file
@@ -0,0 +1,82 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package clustercidr
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/apiserver/pkg/storage/names"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/apis/networking"
|
||||
"k8s.io/kubernetes/pkg/apis/networking/validation"
|
||||
)
|
||||
|
||||
// clusterCIDRStrategy implements verification logic for ClusterCIDRs.
|
||||
type clusterCIDRStrategy struct {
|
||||
runtime.ObjectTyper
|
||||
names.NameGenerator
|
||||
}
|
||||
|
||||
// Strategy is the default logic that applies when creating and updating clusterCIDR objects.
|
||||
var Strategy = clusterCIDRStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}
|
||||
|
||||
// NamespaceScoped returns false because all clusterCIDRs do not need to be within a namespace.
|
||||
func (clusterCIDRStrategy) NamespaceScoped() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (clusterCIDRStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {}
|
||||
|
||||
func (clusterCIDRStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {}
|
||||
|
||||
// Validate validates a new ClusterCIDR.
|
||||
func (clusterCIDRStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
|
||||
clusterCIDR := obj.(*networking.ClusterCIDR)
|
||||
return validation.ValidateClusterCIDR(clusterCIDR)
|
||||
}
|
||||
|
||||
// WarningsOnCreate returns warnings for the creation of the given object.
|
||||
func (clusterCIDRStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Canonicalize normalizes the object after validation.
|
||||
func (clusterCIDRStrategy) Canonicalize(obj runtime.Object) {}
|
||||
|
||||
// AllowCreateOnUpdate is false for ClusterCIDR; this means POST is needed to create one.
|
||||
func (clusterCIDRStrategy) AllowCreateOnUpdate() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// ValidateUpdate is the default update validation for an end user.
|
||||
func (clusterCIDRStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
|
||||
validationErrorList := validation.ValidateClusterCIDR(obj.(*networking.ClusterCIDR))
|
||||
updateErrorList := validation.ValidateClusterCIDRUpdate(obj.(*networking.ClusterCIDR), old.(*networking.ClusterCIDR))
|
||||
return append(validationErrorList, updateErrorList...)
|
||||
}
|
||||
|
||||
// WarningsOnUpdate returns warnings for the given update.
|
||||
func (clusterCIDRStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AllowUnconditionalUpdate is the default update policy for ClusterCIDR objects.
|
||||
func (clusterCIDRStrategy) AllowUnconditionalUpdate() bool {
|
||||
return true
|
||||
}
|
86 pkg/registry/networking/clustercidr/strategy_test.go Normal file
@@ -0,0 +1,86 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package clustercidr
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/apis/networking"
|
||||
)
|
||||
|
||||
func newClusterCIDR() networking.ClusterCIDR {
|
||||
return networking.ClusterCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
},
|
||||
Spec: networking.ClusterCIDRSpec{
|
||||
PerNodeHostBits: int32(8),
|
||||
IPv4: "10.1.0.0/16",
|
||||
IPv6: "fd00:1:1::/64",
|
||||
NodeSelector: &api.NodeSelector{
|
||||
NodeSelectorTerms: []api.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []api.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "foo",
|
||||
Operator: api.NodeSelectorOpIn,
|
||||
Values: []string{"bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterCIDRStrategy(t *testing.T) {
|
||||
ctx := genericapirequest.NewDefaultContext()
|
||||
apiRequest := genericapirequest.RequestInfo{APIGroup: "networking.k8s.io",
|
||||
APIVersion: "v1alpha1",
|
||||
Resource: "clustercidrs",
|
||||
}
|
||||
ctx = genericapirequest.WithRequestInfo(ctx, &apiRequest)
|
||||
if Strategy.NamespaceScoped() {
|
||||
t.Errorf("ClusterCIDRs must be cluster scoped")
|
||||
}
|
||||
if Strategy.AllowCreateOnUpdate() {
|
||||
t.Errorf("ClusterCIDRs should not allow create on update")
|
||||
}
|
||||
|
||||
ccc := newClusterCIDR()
|
||||
Strategy.PrepareForCreate(ctx, &ccc)
|
||||
|
||||
errs := Strategy.Validate(ctx, &ccc)
|
||||
if len(errs) != 0 {
|
||||
t.Errorf("Unexpected error validating %v", errs)
|
||||
}
|
||||
invalidCCC := newClusterCIDR()
|
||||
invalidCCC.ResourceVersion = "4"
|
||||
invalidCCC.Spec = networking.ClusterCIDRSpec{}
|
||||
Strategy.PrepareForUpdate(ctx, &invalidCCC, &ccc)
|
||||
errs = Strategy.ValidateUpdate(ctx, &invalidCCC, &ccc)
|
||||
if len(errs) == 0 {
|
||||
t.Errorf("Expected a validation error")
|
||||
}
|
||||
if invalidCCC.ResourceVersion != "4" {
|
||||
t.Errorf("Incoming resource version on update should not be mutated")
|
||||
}
|
||||
}
|
@@ -18,12 +18,14 @@ package rest

import (
    networkingapiv1 "k8s.io/api/networking/v1"
    networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1"
    "k8s.io/apiserver/pkg/registry/generic"
    "k8s.io/apiserver/pkg/registry/rest"
    genericapiserver "k8s.io/apiserver/pkg/server"
    serverstorage "k8s.io/apiserver/pkg/server/storage"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/apis/networking"
    clustercidrstore "k8s.io/kubernetes/pkg/registry/networking/clustercidr/storage"
    ingressstore "k8s.io/kubernetes/pkg/registry/networking/ingress/storage"
    ingressclassstore "k8s.io/kubernetes/pkg/registry/networking/ingressclass/storage"
    networkpolicystore "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage"

@@ -36,6 +38,12 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag
    // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities.
    // TODO refactor the plumbing to provide the information in the APIGroupInfo

    if storageMap, err := p.v1alpha1Storage(apiResourceConfigSource, restOptionsGetter); err != nil {
        return genericapiserver.APIGroupInfo{}, err
    } else if len(storageMap) > 0 {
        apiGroupInfo.VersionedResourcesStorageMap[networkingapiv1alpha1.SchemeGroupVersion.Version] = storageMap
    }

    if storageMap, err := p.v1Storage(apiResourceConfigSource, restOptionsGetter); err != nil {
        return genericapiserver.APIGroupInfo{}, err
    } else if len(storageMap) > 0 {

@@ -80,6 +88,20 @@ func (p RESTStorageProvider) v1Storage(apiResourceConfigSource serverstorage.API
    return storage, nil
}

func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (map[string]rest.Storage, error) {
    storage := map[string]rest.Storage{}
    // clustercidrs
    if resource := "clustercidrs"; apiResourceConfigSource.ResourceEnabled(networkingapiv1alpha1.SchemeGroupVersion.WithResource(resource)) {
        clusterCIDRCStorage, err := clustercidrstore.NewREST(restOptionsGetter)
        if err != nil {
            return storage, err
        }
        storage[resource] = clusterCIDRCStorage
    }

    return storage, nil
}

func (p RESTStorageProvider) GroupName() string {
    return networking.GroupName
}
@@ -250,6 +250,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
            // used for pod deletion
            rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
            rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
            rbacv1helpers.NewRule("get", "list", "create", "update").Groups(networkingGroup).Resources("clustercidrs").RuleOrDie(),
            eventsRule(),
        },
    }
@@ -907,6 +907,15 @@ items:
    verbs:
    - delete
    - list
  - apiGroups:
    - networking.k8s.io
    resources:
    - clustercidrs
    verbs:
    - create
    - get
    - list
    - update
  - apiGroups:
    - ""
    - events.k8s.io
23 staging/src/k8s.io/api/networking/v1alpha1/doc.go Normal file
@@ -0,0 +1,23 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +k8s:protobuf-gen=package
|
||||
// +k8s:openapi-gen=true
|
||||
// +k8s:prerelease-lifecycle-gen=true
|
||||
// +groupName=networking.k8s.io
|
||||
|
||||
package v1alpha1 // import "k8s.io/api/networking/v1alpha1"
|
56 staging/src/k8s.io/api/networking/v1alpha1/register.go Normal file
@@ -0,0 +1,56 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// GroupName is the group name use in this package.
|
||||
const GroupName = "networking.k8s.io"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects.
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource.
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
|
||||
var (
|
||||
// SchemeBuilder holds functions that add things to a scheme.
|
||||
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
|
||||
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
|
||||
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||
localSchemeBuilder = &SchemeBuilder
|
||||
|
||||
// AddToScheme adds the types of this group into the given scheme.
|
||||
AddToScheme = localSchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
// Adds the list of known types to the given scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&ClusterCIDR{},
|
||||
&ClusterCIDRList{},
|
||||
)
|
||||
// Add the watch version that applies.
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
}
|
95 staging/src/k8s.io/api/networking/v1alpha1/types.go Normal file
@@ -0,0 +1,95 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.25
|
||||
|
||||
// ClusterCIDR represents a single configuration for per-Node Pod CIDR
|
||||
// allocations when the MultiCIDRRangeAllocator is enabled (see the config for
|
||||
// kube-controller-manager). A cluster may have any number of ClusterCIDR
|
||||
// resources, all of which will be considered when allocating a CIDR for a
|
||||
// Node. A ClusterCIDR is eligible to be used for a given Node when the node
|
||||
// selector matches the node in question and has free CIDRs to allocate. In
|
||||
// case of multiple matching ClusterCIDR resources, the allocator will attempt
|
||||
// to break ties using internal heuristics, but any ClusterCIDR whose node
|
||||
// selector matches the Node may be used.
|
||||
type ClusterCIDR struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Spec is the desired state of the ClusterCIDR.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// ClusterCIDRSpec defines the desired state of ClusterCIDR.
|
||||
type ClusterCIDRSpec struct {
|
||||
// NodeSelector defines which nodes the config is applicable to.
|
||||
// An empty or nil NodeSelector selects all nodes.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
|
||||
|
||||
// PerNodeHostBits defines the number of host bits to be configured per node.
|
||||
// A subnet mask determines how much of the address is used for network bits
|
||||
// and host bits. For example an IPv4 address of 192.168.0.0/24, splits the
|
||||
// address into 24 bits for the network portion and 8 bits for the host portion.
|
||||
// To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6).
|
||||
// Minimum value is 4 (16 IPs).
|
||||
// This field is immutable.
|
||||
// +required
|
||||
PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"`
|
||||
|
||||
// IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8").
|
||||
// At least one of IPv4 and IPv6 must be specified.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"`
|
||||
|
||||
// IPv6 defines an IPv6 IP block in CIDR notation(e.g. "fd12:3456:789a:1::/64").
|
||||
// At least one of IPv4 and IPv6 must be specified.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.25
|
||||
|
||||
// ClusterCIDRList contains a list of ClusterCIDR.
|
||||
type ClusterCIDRList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Items is the list of ClusterCIDRs.
|
||||
Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
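For completeness, a hedged sketch (not part of this diff) of creating a ClusterCIDR through the generated clientset; it assumes the typed client under k8s.io/client-go/kubernetes/typed/networking/v1alpha1 listed in vendor/modules.txt below follows the usual clientset pattern:

package main

import (
    "context"

    networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // Field values are illustrative.
    cc := &networkingv1alpha1.ClusterCIDR{
        ObjectMeta: metav1.ObjectMeta{Name: "example-clustercidr"},
        Spec: networkingv1alpha1.ClusterCIDRSpec{
            PerNodeHostBits: 8,
            IPv4:            "10.1.0.0/16",
        },
    }
    // ClusterCIDR is cluster scoped, so there is no namespace argument.
    if _, err := client.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), cc, metav1.CreateOptions{}); err != nil {
        panic(err)
    }
}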
@@ -234,6 +234,13 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes
        },
        // --

        // k8s.io/kubernetes/pkg/apis/networking/v1alpha1
        gvr("networking.k8s.io", "v1alpha1", "clustercidrs"): {
            Stub:             `{"metadata": {"name": "clustercidr1"}, "spec": {"perNodeHostBits": 8, "ipv4": "192.168.4.0/24", "ipv6": "fd00:1::/120", "nodeSelector": null}}`,
            ExpectedEtcdPath: "/registry/clustercidrs/clustercidr1",
        },
        // --

        // k8s.io/kubernetes/pkg/apis/policy/v1
        gvr("policy", "v1", "poddisruptionbudgets"): {
            Stub: `{"metadata": {"name": "pdbv1"}, "spec": {"selector": {"matchLabels": {"anokkey": "anokvalue"}}}}`,
6 vendor/modules.txt vendored
@@ -1393,6 +1393,7 @@ k8s.io/api/flowcontrol/v1beta1
k8s.io/api/flowcontrol/v1beta2
k8s.io/api/imagepolicy/v1alpha1
k8s.io/api/networking/v1
k8s.io/api/networking/v1alpha1
k8s.io/api/networking/v1beta1
k8s.io/api/node/v1
k8s.io/api/node/v1alpha1
@@ -1718,6 +1719,7 @@ k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1
k8s.io/client-go/applyconfigurations/internal
k8s.io/client-go/applyconfigurations/meta/v1
k8s.io/client-go/applyconfigurations/networking/v1
k8s.io/client-go/applyconfigurations/networking/v1alpha1
k8s.io/client-go/applyconfigurations/networking/v1beta1
k8s.io/client-go/applyconfigurations/node/v1
k8s.io/client-go/applyconfigurations/node/v1alpha1
@@ -1783,6 +1785,7 @@ k8s.io/client-go/informers/flowcontrol/v1beta2
k8s.io/client-go/informers/internalinterfaces
k8s.io/client-go/informers/networking
k8s.io/client-go/informers/networking/v1
k8s.io/client-go/informers/networking/v1alpha1
k8s.io/client-go/informers/networking/v1beta1
k8s.io/client-go/informers/node
k8s.io/client-go/informers/node/v1
@@ -1866,6 +1869,8 @@ k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2
k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake
k8s.io/client-go/kubernetes/typed/networking/v1
k8s.io/client-go/kubernetes/typed/networking/v1/fake
k8s.io/client-go/kubernetes/typed/networking/v1alpha1
k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake
k8s.io/client-go/kubernetes/typed/networking/v1beta1
k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake
k8s.io/client-go/kubernetes/typed/node/v1
@@ -1922,6 +1927,7 @@ k8s.io/client-go/listers/flowcontrol/v1alpha1
k8s.io/client-go/listers/flowcontrol/v1beta1
k8s.io/client-go/listers/flowcontrol/v1beta2
k8s.io/client-go/listers/networking/v1
k8s.io/client-go/listers/networking/v1alpha1
k8s.io/client-go/listers/networking/v1beta1
k8s.io/client-go/listers/node/v1
k8s.io/client-go/listers/node/v1alpha1