Dynamic provisioning allowed topologies scheduler work
commit dffbd75f86
parent 77d996b278
@@ -24,6 +24,7 @@ import (
 
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	storageinformers "k8s.io/client-go/informers/storage/v1"
@@ -481,7 +482,11 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
 			return false, nil
 		}
 
-		// TODO: Check if the node can satisfy the topology requirement in the class
+		// Check if the node can satisfy the topology requirement in the class
+		if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
+			glog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, getPVCName(claim))
+			return false, nil
+		}
 
 		// TODO: Check if capacity of the node domain in the storage class
 		// can satisfy resource requirement of given claim
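For orientation (not part of the change): the new check accepts a node when its labels satisfy at least one of the StorageClass's AllowedTopologies terms, and a term is satisfied only if every one of its label requirements finds the node's value in Values. The following is a minimal, self-contained sketch of those semantics; it is an illustrative approximation of what v1helper.MatchTopologySelectorTerms does here, not the helper itself.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// matchAllowedTopologies approximates v1helper.MatchTopologySelectorTerms:
// a node matches when at least one term is fully satisfied by its labels.
func matchAllowedTopologies(terms []v1.TopologySelectorTerm, nodeLabels map[string]string) bool {
	if len(terms) == 0 {
		return true // in this sketch, no AllowedTopologies means no restriction
	}
	for _, term := range terms {
		if termSatisfied(term, nodeLabels) {
			return true
		}
	}
	return false
}

// termSatisfied reports whether every label requirement in the term is met:
// the key must exist on the node and its value must be one of req.Values.
func termSatisfied(term v1.TopologySelectorTerm, nodeLabels map[string]string) bool {
	for _, req := range term.MatchLabelExpressions {
		value, ok := nodeLabels[req.Key]
		if !ok {
			return false
		}
		allowed := false
		for _, v := range req.Values {
			if v == value {
				allowed = true
				break
			}
		}
		if !allowed {
			return false
		}
	}
	return true
}

func main() {
	terms := []v1.TopologySelectorTerm{{
		MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
			{Key: "nodeKey", Values: []string{"node1"}},
		},
	}}
	fmt.Println(matchAllowedTopologies(terms, map[string]string{"nodeKey": "node1"})) // true
	fmt.Println(matchAllowedTopologies(terms, map[string]string{"nodeKey": "node2"})) // false
}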
@@ -50,6 +50,7 @@ var (
 	provisionedPVC2 = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "1", &waitClass)
 	provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "2", &waitClass)
 	noProvisionerPVC = makeTestPVC("no-provisioner-pvc", "1Gi", pvcUnbound, "", "1", &provisionNotSupportClass)
+	topoMismatchPVC = makeTestPVC("topo-mismatch-pvc", "1Gi", pvcUnbound, "", "1", &topoMismatchClass)
 
 	pvNoNode = makeTestPV("pv-no-node", "", "1G", "1", nil, waitClass)
 	pvNode1a = makeTestPV("pv-node1a", "node1", "5G", "1", nil, waitClass)
@@ -74,6 +75,7 @@ var (
 	waitClass = "waitClass"
 	immediateClass = "immediateClass"
 	provisionNotSupportClass = "provisionNotSupportedClass"
+	topoMismatchClass = "topoMismatchClass"
 
 	nodeLabelKey = "nodeKey"
 	nodeLabelValue = "node1"
@@ -112,6 +114,16 @@ func newTestBinder(t *testing.T) *testEnv {
 			},
 			VolumeBindingMode: &waitMode,
 			Provisioner:       "test-provisioner",
+			AllowedTopologies: []v1.TopologySelectorTerm{
+				{
+					MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
+						{
+							Key:    nodeLabelKey,
+							Values: []string{nodeLabelValue, "reference-value"},
+						},
+					},
+				},
+			},
 		},
 		{
 			ObjectMeta: metav1.ObjectMeta{
@@ -126,6 +138,23 @@ func newTestBinder(t *testing.T) *testEnv {
 			VolumeBindingMode: &waitMode,
 			Provisioner:       "kubernetes.io/no-provisioner",
 		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: topoMismatchClass,
+			},
+			VolumeBindingMode: &waitMode,
+			Provisioner:       "test-provisioner",
+			AllowedTopologies: []v1.TopologySelectorTerm{
+				{
+					MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
+						{
+							Key:    nodeLabelKey,
+							Values: []string{"reference-value"},
+						},
+					},
+				},
+			},
+		},
 	}
 	for _, class := range classes {
 		if err := classInformer.Informer().GetIndexer().Add(class); err != nil {
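Side note, not part of the diff: the test node used by this file is presumably labeled with nodeLabelKey/nodeLabelValue above, i.e. nodeKey=node1. Against that label set the first class's term (Values: node1, reference-value) is satisfiable, while topoMismatchClass (Values: reference-value only) is not, which is exactly the split that the new "volume-topology-unsatisfied" scenario in the next hunk relies on.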
@@ -740,6 +769,11 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
 			expectedUnbound: false,
 			expectedBound:   true,
 		},
+		"volume-topology-unsatisfied": {
+			podPVCs:         []*v1.PersistentVolumeClaim{topoMismatchPVC},
+			expectedUnbound: false,
+			expectedBound:   true,
+		},
 	}
 
 	// Set VolumeScheduling and DynamicProvisioningScheduling feature gate
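The diff view is truncated after the comment above, so the lines that actually toggle the gates are not shown here. For context, a hedged sketch of how the VolumeScheduling and DynamicProvisioningScheduling feature gates were typically flipped in tests of this era, using the utilfeature package this change already imports; the helper name below is hypothetical and not taken from the test file.

package example

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

// enableProvisioningGates turns both gates on and returns a func that
// restores them, so a test can do: defer enableProvisioningGates(t)().
// (Hypothetical helper; shown only to illustrate the gate-setting pattern.)
func enableProvisioningGates(t *testing.T) func() {
	if err := utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,DynamicProvisioningScheduling=true"); err != nil {
		t.Fatalf("failed to enable feature gates: %v", err)
	}
	return func() {
		if err := utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,DynamicProvisioningScheduling=false"); err != nil {
			t.Fatalf("failed to reset feature gates: %v", err)
		}
	}
}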