remove FakeVolumeBinderConfig and test new statuses and states

This commit is contained in:
Yecheng Fu 2020-06-08 22:23:52 +08:00
parent c4138361e4
commit 814a6f2acd
3 changed files with 248 additions and 55 deletions

View File

@ -256,8 +256,6 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*
// Check PV node affinity on bound volumes
if len(boundClaims) > 0 {
// TODO if node affinity does not match, we should return an
// UnschedulableAndUnresolvable status back to the scheduler framework
boundVolumesSatisfied, err = b.checkBoundClaims(boundClaims, node, podName)
if err != nil {
return nil, err

View File

@ -36,8 +36,15 @@ go_test(
srcs = ["volume_binding_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/controller/volume/persistentvolume/util:go_default_library",
"//pkg/controller/volume/scheduling:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

View File

@ -18,99 +18,287 @@ package volumebinding
import (
"context"
"fmt"
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
"k8s.io/kubernetes/pkg/controller/volume/scheduling"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/utils/pointer"
)
func TestVolumeBinding(t *testing.T) {
findErr := fmt.Errorf("find err")
volState := v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{},
},
},
// Storage classes shared by the test cases: one per supported volume
// binding mode, so cases can exercise both Immediate and
// WaitForFirstConsumer claims.
var (
	immediate            = storagev1.VolumeBindingImmediate
	waitForFirstConsumer = storagev1.VolumeBindingWaitForFirstConsumer
	// immediateSC binds volumes as soon as the PVC is created.
	immediateSC = &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "immediate-sc",
		},
		VolumeBindingMode: &immediate,
	}
	// waitSC delays binding until a pod consuming the PVC is scheduled.
	waitSC = &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "wait-sc",
		},
		VolumeBindingMode: &waitForFirstConsumer,
	}
)
// makePV builds a minimal PersistentVolume fixture carrying only the
// given name; callers decorate it further (e.g. addPVNodeAffinity).
func makePV(name string) *v1.PersistentVolume {
	pv := &v1.PersistentVolume{}
	pv.ObjectMeta = metav1.ObjectMeta{Name: name}
	return pv
}
// addPVNodeAffinity sets the node affinity on pv and returns the same
// object so fixture-building calls can be chained.
func addPVNodeAffinity(pv *v1.PersistentVolume, volumeNodeAffinity *v1.VolumeNodeAffinity) *v1.PersistentVolume {
	pv.Spec.NodeAffinity = volumeNodeAffinity
	return pv
}
// makePVC builds a PersistentVolumeClaim fixture in the default
// namespace using the given storage class. When boundPVName is
// non-empty the claim is marked bound: its volumeName is set and the
// bind-completed annotation is added.
func makePVC(name string, boundPVName string, storageClassName string) *v1.PersistentVolumeClaim {
	claim := &v1.PersistentVolumeClaim{}
	claim.Name = name
	claim.Namespace = v1.NamespaceDefault
	claim.Spec.StorageClassName = pointer.StringPtr(storageClassName)
	if boundPVName != "" {
		claim.Spec.VolumeName = boundPVName
		metav1.SetMetaDataAnnotation(&claim.ObjectMeta, pvutil.AnnBindCompleted, "true")
	}
	return claim
}
// makePod builds a pod fixture in the default namespace whose volumes
// reference the given PVCs by name. A nil/empty pvcNames yields a pod
// with an empty (non-nil) volume list.
func makePod(name string, pvcNames []string) *v1.Pod {
	volumes := make([]v1.Volume, 0, len(pvcNames))
	for _, pvcName := range pvcNames {
		src := v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: pvcName,
			},
		}
		volumes = append(volumes, v1.Volume{VolumeSource: src})
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: v1.NamespaceDefault,
		},
	}
	pod.Spec.Volumes = volumes
	return pod
}
func TestVolumeBinding(t *testing.T) {
table := []struct {
name string
pod *v1.Pod
node *v1.Node
volumeBinderConfig *scheduling.FakeVolumeBinderConfig
wantStatus *framework.Status
name string
pod *v1.Pod
node *v1.Node
pvcs []*v1.PersistentVolumeClaim
pvs []*v1.PersistentVolume
wantPreFilterStatus *framework.Status
wantStateAfterPreFilter *stateData
wantFilterStatus *framework.Status
}{
{
name: "nothing",
pod: &v1.Pod{},
node: &v1.Node{},
wantStatus: nil,
name: "pod has not pvcs",
pod: makePod("pod-a", nil),
node: &v1.Node{},
wantStateAfterPreFilter: &stateData{
skip: true,
},
},
{
name: "all bound",
pod: &v1.Pod{Spec: volState},
pod: makePod("pod-a", []string{"pvc-a"}),
node: &v1.Node{},
volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
AllBound: true,
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-a", "pv-a", waitSC.Name),
},
pvs: []*v1.PersistentVolume{
makePV("pv-a"),
},
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{
makePVC("pvc-a", "pv-a", waitSC.Name),
},
claimsToBind: []*v1.PersistentVolumeClaim{},
},
wantStatus: nil,
},
{
name: "unbound/no matches",
pod: &v1.Pod{Spec: volState},
name: "immediate claims not bound",
pod: makePod("pod-a", []string{"pvc-a"}),
node: &v1.Node{},
volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
FindReasons: []scheduling.ConflictReason{scheduling.ErrReasonBindConflict},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-a", "", immediateSC.Name),
},
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonBindConflict)),
wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "pod has unbound immediate PersistentVolumeClaims"),
},
{
name: "unbound claims no matches",
pod: makePod("pod-a", []string{"pvc-a"}),
node: &v1.Node{},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-a", "", waitSC.Name),
},
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{},
claimsToBind: []*v1.PersistentVolumeClaim{
makePVC("pvc-a", "", waitSC.Name),
},
},
wantFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonBindConflict)),
},
{
name: "bound and unbound unsatisfied",
pod: &v1.Pod{Spec: volState},
node: &v1.Node{},
volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
FindReasons: []scheduling.ConflictReason{scheduling.ErrReasonBindConflict, scheduling.ErrReasonNodeConflict},
pod: makePod("pod-a", []string{"pvc-a", "pvc-b"}),
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "barbar",
},
},
},
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonBindConflict), string(scheduling.ErrReasonNodeConflict)),
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-a", "pv-a", waitSC.Name),
makePVC("pvc-b", "", waitSC.Name),
},
pvs: []*v1.PersistentVolume{
addPVNodeAffinity(makePV("pv-a"), &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "foo",
Operator: v1.NodeSelectorOpIn,
Values: []string{"bar"},
},
},
},
},
},
}),
},
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{
makePVC("pvc-a", "pv-a", waitSC.Name),
},
claimsToBind: []*v1.PersistentVolumeClaim{
makePVC("pvc-b", "", waitSC.Name),
},
},
wantFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonNodeConflict), string(scheduling.ErrReasonBindConflict)),
},
{
name: "unbound/found matches/bind succeeds",
pod: &v1.Pod{Spec: volState},
node: &v1.Node{},
volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{},
wantStatus: nil,
name: "pvc not found",
pod: makePod("pod-a", []string{"pvc-a"}),
node: &v1.Node{},
wantPreFilterStatus: framework.NewStatus(framework.Error, `error getting PVC "default/pvc-a": could not find v1.PersistentVolumeClaim "default/pvc-a"`),
wantFilterStatus: nil,
},
{
name: "predicate error",
pod: &v1.Pod{Spec: volState},
name: "pv not found",
pod: makePod("pod-a", []string{"pvc-a"}),
node: &v1.Node{},
volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
FindErr: findErr,
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-a", "pv-a", waitSC.Name),
},
wantStatus: framework.NewStatus(framework.Error, findErr.Error()),
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{
makePVC("pvc-a", "pv-a", waitSC.Name),
},
claimsToBind: []*v1.PersistentVolumeClaim{},
},
wantFilterStatus: framework.NewStatus(framework.Error, `could not find v1.PersistentVolume "pv-a"`),
},
}
for _, item := range table {
t.Run(item.name, func(t *testing.T) {
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(item.node)
fakeVolumeBinder := scheduling.NewFakeVolumeBinder(item.volumeBinderConfig)
p := &VolumeBinding{
Binder: fakeVolumeBinder,
ctx := context.Background()
client := fake.NewSimpleClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0)
opts := []framework.Option{
framework.WithClientSet(client),
framework.WithInformerFactory(informerFactory),
}
state := framework.NewCycleState()
p.PreFilter(context.Background(), state, item.pod)
gotStatus := p.Filter(context.Background(), state, item.pod, nodeInfo)
if !reflect.DeepEqual(gotStatus, item.wantStatus) {
t.Errorf("status does not match: %v, want: %v", gotStatus, item.wantStatus)
fh, err := framework.NewFramework(nil, nil, nil, opts...)
if err != nil {
t.Fatal(err)
}
pl, err := New(&config.VolumeBindingArgs{
BindTimeoutSeconds: 300,
}, fh)
if err != nil {
t.Fatal(err)
}
// Start informer factory after initialization
informerFactory.Start(ctx.Done())
// Feed testing data and wait for them to be synced
client.StorageV1().StorageClasses().Create(ctx, immediateSC, metav1.CreateOptions{})
client.StorageV1().StorageClasses().Create(ctx, waitSC, metav1.CreateOptions{})
if item.node != nil {
client.CoreV1().Nodes().Create(ctx, item.node, metav1.CreateOptions{})
}
if len(item.pvcs) > 0 {
for _, pvc := range item.pvcs {
client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
}
}
if len(item.pvs) > 0 {
for _, pv := range item.pvs {
client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
}
}
caches := informerFactory.WaitForCacheSync(ctx.Done())
for _, synced := range caches {
if !synced {
t.Errorf("error waiting for informer cache sync")
}
}
// Verify
p := pl.(*VolumeBinding)
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(item.node)
state := framework.NewCycleState()
t.Logf("call PreFilter and check status")
gotPreFilterStatus := p.PreFilter(ctx, state, item.pod)
if !reflect.DeepEqual(gotPreFilterStatus, item.wantPreFilterStatus) {
t.Errorf("filter prefilter status does not match: %v, want: %v", gotPreFilterStatus, item.wantPreFilterStatus)
}
if !gotPreFilterStatus.IsSuccess() {
// scheduler framework will skip Filter if PreFilter fails
return
}
t.Logf("check state after prefilter phase")
stateData, err := getStateData(state)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(stateData, item.wantStateAfterPreFilter) {
t.Errorf("state got after prefilter does not match: %v, want: %v", stateData, item.wantStateAfterPreFilter)
}
t.Logf("call Filter and check status")
gotStatus := p.Filter(ctx, state, item.pod, nodeInfo)
if !reflect.DeepEqual(gotStatus, item.wantFilterStatus) {
t.Errorf("filter status does not match: %v, want: %v", gotStatus, item.wantFilterStatus)
}
})
}
}