Fix the nodeSelector key creation mechanism
Fixes an issue seen when multiple ClusterCIDR objects have the same nodeSelector values: the order of the requirements in the nodeSelector is not preserved when the nodeSelector is marshalled and converted to a string, so equivalent selectors can produce different keys.
This commit is contained in:
parent 9875c1b661
commit c791d69b3e
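
To illustrate the failure mode, here is a minimal Go sketch; the helper name, label keys, and values are hypothetical and not taken from this commit. Marshal() serializes a v1.NodeSelector's requirements in declaration order, so two selectors that match exactly the same nodes can yield different map keys, while v1helper.NodeSelectorAsSelector builds a labels.Selector whose String() form sorts requirements by key and is therefore a stable key:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)

// nodeSelectorFrom builds a NodeSelector with the requirements in the
// given order (hypothetical helper, for illustration only).
func nodeSelectorFrom(reqs ...v1.NodeSelectorRequirement) *v1.NodeSelector {
	return &v1.NodeSelector{
		NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: reqs}},
	}
}

func main() {
	// Hypothetical requirements; any two distinct keys show the effect.
	a := v1.NodeSelectorRequirement{Key: "zone", Operator: v1.NodeSelectorOpIn, Values: []string{"us-east-1"}}
	b := v1.NodeSelectorRequirement{Key: "node-role", Operator: v1.NodeSelectorOpIn, Values: []string{"worker"}}

	s1 := nodeSelectorFrom(a, b) // same requirements ...
	s2 := nodeSelectorFrom(b, a) // ... in a different order

	// Marshal() emits the requirements in declaration order, so the raw
	// bytes differ even though s1 and s2 select exactly the same nodes.
	m1, _ := s1.Marshal()
	m2, _ := s2.Marshal()
	fmt.Println("marshalled keys equal:", string(m1) == string(m2)) // false

	// NodeSelectorAsSelector sorts requirements by key when building the
	// labels.Selector, so String() yields an order-independent map key.
	l1, _ := v1helper.NodeSelectorAsSelector(s1)
	l2, _ := v1helper.NodeSelectorAsSelector(s2)
	fmt.Println("selector keys equal:", l1.String() == l2.String()) // true
}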
--- a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go
+++ b/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go
@@ -732,8 +732,8 @@ func (r *multiCIDRRangeAllocator) updateCIDRsAllocation(data multiCIDRNodeReserv
 // defaultNodeSelector generates a label with defaultClusterCIDRKey as the key and
 // defaultClusterCIDRValue as the value, it is an internal nodeSelector matching all
 // nodes. Only used if no ClusterCIDR selects the node.
-func defaultNodeSelector() ([]byte, error) {
-	nodeSelector := &v1.NodeSelector{
+func defaultNodeSelector() *v1.NodeSelector {
+	return &v1.NodeSelector{
 		NodeSelectorTerms: []v1.NodeSelectorTerm{
 			{
 				MatchExpressions: []v1.NodeSelectorRequirement{
@@ -746,13 +746,6 @@ func defaultNodeSelector() ([]byte, error) {
 			},
 		},
 	}
-
-	marshalledSelector, err := nodeSelector.Marshal()
-	if err != nil {
-		return nil, err
-	}
-
-	return marshalledSelector, nil
 }
 
 // prioritizedCIDRs returns a list of CIDRs to be allocated to the node.
@@ -883,7 +876,7 @@ func (r *multiCIDRRangeAllocator) orderedMatchingClusterCIDRs(node *v1.Node, occ
 	pq := make(PriorityQueue, 0)
 
 	for label, clusterCIDRList := range r.cidrMap {
-		labelsMatch, matchCnt, err := r.matchCIDRLabels(node, []byte(label))
+		labelsMatch, matchCnt, err := r.matchCIDRLabels(node, label)
 		if err != nil {
 			return nil, err
 		}
@@ -916,11 +909,11 @@ func (r *multiCIDRRangeAllocator) orderedMatchingClusterCIDRs(node *v1.Node, occ
 	}
 
 	// Append the catch all CIDR config.
-	defaultSelector, err := defaultNodeSelector()
+	defaultSelector, err := v1helper.NodeSelectorAsSelector(defaultNodeSelector())
 	if err != nil {
 		return nil, err
 	}
-	if clusterCIDRList, ok := r.cidrMap[string(defaultSelector)]; ok {
+	if clusterCIDRList, ok := r.cidrMap[defaultSelector.String()]; ok {
 		matchingCIDRs = append(matchingCIDRs, clusterCIDRList...)
 	}
 	return matchingCIDRs, nil
@@ -928,21 +921,14 @@ func (r *multiCIDRRangeAllocator) orderedMatchingClusterCIDRs(node *v1.Node, occ
 
 // matchCIDRLabels Matches the Node labels to CIDR Configs.
 // Returns true only if all the labels match, also returns the count of matching labels.
-func (r *multiCIDRRangeAllocator) matchCIDRLabels(node *v1.Node, label []byte) (bool, int, error) {
+func (r *multiCIDRRangeAllocator) matchCIDRLabels(node *v1.Node, label string) (bool, int, error) {
 	var labelSet labels.Set
 	var matchCnt int
 
 	labelsMatch := false
-	selector := &v1.NodeSelector{}
-	err := selector.Unmarshal(label)
-	if err != nil {
-		klog.Errorf("Unable to unmarshal node selector for label %v: %v", label, err)
-		return labelsMatch, 0, err
-	}
-
-	ls, err := v1helper.NodeSelectorAsSelector(selector)
+	ls, err := labels.Parse(label)
 	if err != nil {
-		klog.Errorf("Unable to convert NodeSelector to labels.Selector: %v", err)
+		klog.Errorf("Unable to parse label %s to labels.Selector: %v", label, err)
 		return labelsMatch, 0, err
 	}
 	reqs, selectable := ls.Requirements()
@@ -959,7 +945,7 @@ func (r *multiCIDRRangeAllocator) matchCIDRLabels(node *v1.Node, label []byte) (
 			labelsMatch = true
 		}
 	}
-	return labelsMatch, matchCnt, err
+	return labelsMatch, matchCnt, nil
 }
 
 // Methods for handling ClusterCIDRs.
@@ -1234,20 +1220,20 @@ func (r *multiCIDRRangeAllocator) deleteClusterCIDR(clusterCIDR *networkingv1alp
 }
 
 func (r *multiCIDRRangeAllocator) nodeSelectorKey(clusterCIDR *networkingv1alpha1.ClusterCIDR) (string, error) {
-	var nodeSelector []byte
+	var nodeSelector labels.Selector
 	var err error
 
 	if clusterCIDR.Spec.NodeSelector != nil {
-		nodeSelector, err = clusterCIDR.Spec.NodeSelector.Marshal()
+		nodeSelector, err = v1helper.NodeSelectorAsSelector(clusterCIDR.Spec.NodeSelector)
 	} else {
-		nodeSelector, err = defaultNodeSelector()
+		nodeSelector, err = v1helper.NodeSelectorAsSelector(defaultNodeSelector())
 	}
 
 	if err != nil {
 		return "", err
 	}
 
-	return string(nodeSelector), nil
+	return nodeSelector.String(), nil
 }
 
 func listClusterCIDRs(kubeClient clientset.Interface) (*networkingv1alpha1.ClusterCIDRList, error) {
--- a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go
+++ b/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go
@@ -34,6 +34,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	k8stesting "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
+	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/controller"
 	cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset"
 	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
@@ -82,8 +83,8 @@ func getTestNodeSelector(requirements []testNodeSelectorRequirement) string {
 		testNodeSelector.NodeSelectorTerms = append(testNodeSelector.NodeSelectorTerms, nst)
 	}
 
-	marshalledSelector, _ := testNodeSelector.Marshal()
-	return string(marshalledSelector)
+	selector, _ := v1helper.NodeSelectorAsSelector(testNodeSelector)
+	return selector.String()
 }
 
 func getTestCidrMap(testClusterCIDRMap map[string][]*testClusterCIDR) map[string][]*cidrset.ClusterCIDR {
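
Because the cidrMap key is now the canonical labels.Selector string, it can be parsed back into a selector, which is what the reworked matchCIDRLabels does via labels.Parse. A minimal sketch of that round trip follows; the key and the node label set are hypothetical, not taken from the commit:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Hypothetical cidrMap key in canonical selector string form.
	key := "node-role in (worker),zone in (us-east-1)"

	// Round-trip the key back into a labels.Selector.
	ls, err := labels.Parse(key)
	if err != nil {
		panic(err)
	}

	// Hypothetical node labels.
	nodeLabels := labels.Set{"zone": "us-east-1", "node-role": "worker"}

	// Count how many individual requirements the node satisfies,
	// mirroring the per-requirement matching in matchCIDRLabels.
	reqs, selectable := ls.Requirements()
	matchCnt := 0
	if selectable {
		for _, req := range reqs {
			if req.Matches(nodeLabels) {
				matchCnt++
			}
		}
	}
	fmt.Printf("matched %d of %d requirements; all match: %v\n",
		matchCnt, len(reqs), matchCnt == len(reqs))
}

Counting per-requirement matches, rather than only checking whether the whole selector matches, mirrors how the allocator ranks candidate ClusterCIDRs by how specifically they select the node.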