Migrated the volumezone scheduler plugin to use contextual logging

Author: Mengjiao Liu
Date:   2023-03-22 11:09:31 +08:00
Parent: 9c6414cdfe
Commit: 27f84b755e
2 changed files with 11 additions and 5 deletions

--- a/pkg/scheduler/framework/plugins/volumezone/volume_zone.go
+++ b/pkg/scheduler/framework/plugins/volumezone/volume_zone.go

@@ -107,6 +107,7 @@ func (pl *VolumeZone) PreFilter(ctx context.Context, cs *framework.CycleState, p
 }
 
 func (pl *VolumeZone) getPVbyPod(ctx context.Context, pod *v1.Pod) ([]pvTopology, *framework.Status) {
+	logger := klog.FromContext(ctx)
 	podPVTopologies := make([]pvTopology, 0)
 	for i := range pod.Spec.Volumes {
@@ -154,7 +155,7 @@ func (pl *VolumeZone) getPVbyPod(ctx context.Context, pod *v1.Pod) ([]pvTopolog
 			if value, ok := pv.ObjectMeta.Labels[key]; ok {
 				volumeVSet, err := volumehelpers.LabelZonesToSet(value)
 				if err != nil {
-					klog.InfoS("Failed to parse label, ignoring the label", "label", fmt.Sprintf("%s:%s", key, value), "err", err)
+					logger.Info("Failed to parse label, ignoring the label", "label", fmt.Sprintf("%s:%s", key, value), "err", err)
 					continue
 				}
 				podPVTopologies = append(podPVTopologies, pvTopology{
@@ -190,6 +191,7 @@ func (pl *VolumeZone) PreFilterExtensions() framework.PreFilterExtensions {
 // require calling out to the cloud provider. It seems that we are moving away
 // from inline volume declarations anyway.
 func (pl *VolumeZone) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
+	logger := klog.FromContext(ctx)
 	// If a pod doesn't have any volume attached to it, the predicate will always be true.
 	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
 	if len(pod.Spec.Volumes) == 0 {
@@ -226,7 +228,7 @@ func (pl *VolumeZone) Filter(ctx context.Context, cs *framework.CycleState, pod
 	for _, pvTopology := range podPVTopologies {
 		v, ok := node.Labels[pvTopology.key]
 		if !ok || !pvTopology.values.Has(v) {
-			klog.V(10).InfoS("Won't schedule pod onto node due to volume (mismatch on label key)", "pod", klog.KObj(pod), "node", klog.KObj(node), "PV", klog.KRef("", pvTopology.pvName), "PVLabelKey", pvTopology.key)
+			logger.V(10).Info("Won't schedule pod onto node due to volume (mismatch on label key)", "pod", klog.KObj(pod), "node", klog.KObj(node), "PV", klog.KRef("", pvTopology.pvName), "PVLabelKey", pvTopology.key)
 			return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonConflict)
 		}
 	}
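For context, a minimal, self-contained sketch of the contextual-logging pattern these hunks apply (the function processItem, the item name, and the logger name "volumezone" are hypothetical, for illustration only): the logger is retrieved from the context once via klog.FromContext, then replaces the global klog.InfoS / klog.V(n).InfoS calls, so a caller-supplied logger is honored wherever the context is passed.

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// processItem is a hypothetical function written in the migrated style:
// it derives its logger from the context instead of calling the global
// klog helpers.
func processItem(ctx context.Context, name string) {
	logger := klog.FromContext(ctx)

	// Replaces the old klog.InfoS(...) form.
	logger.Info("Processing item", "item", name)

	// Replaces the old klog.V(10).InfoS(...) form; only emitted at
	// verbosity >= 10.
	logger.V(10).Info("Detailed trace", "item", name)
}

func main() {
	// A caller can attach a named logger to the context; processItem
	// picks it up via klog.FromContext.
	logger := klog.Background().WithName("volumezone")
	ctx := klog.NewContext(context.Background(), logger)
	processItem(ctx, "pv-1")
}

The gain over the global calls is that per-request loggers, with names or extra key/value pairs attached, flow through automatically without any new function parameters.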

--- a/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go
+++ b/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go

@@ -26,6 +26,7 @@ import (
 	storagev1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/klog/v2/ktesting"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake"
@@ -232,7 +233,8 @@ func TestSingleZone(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			ctx, cancel := context.WithCancel(context.Background())
+			_, ctx := ktesting.NewTestContext(t)
+			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
 			state := framework.NewCycleState()
@@ -363,7 +365,8 @@ func TestMultiZone(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			ctx, cancel := context.WithCancel(context.Background())
+			_, ctx := ktesting.NewTestContext(t)
+			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
 			state := framework.NewCycleState()
@@ -487,7 +490,8 @@ func TestWithBinding(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			ctx, cancel := context.WithCancel(context.Background())
+			_, ctx := ktesting.NewTestContext(t)
+			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
 			state := framework.NewCycleState()
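And a minimal sketch of the test-side pattern used in the three hunks above (the test name and log message are hypothetical): ktesting.NewTestContext returns a logger together with a context carrying it, so anything the code under test logs via klog.FromContext is routed to the per-test output.

package volumezone_test

import (
	"context"
	"testing"

	"k8s.io/klog/v2/ktesting"
)

func TestContextualLoggingSketch(t *testing.T) {
	// NewTestContext returns a logger plus a context that carries it;
	// log output lands in t.Log, shown only on failure or with go test -v.
	logger, ctx := ktesting.NewTestContext(t)

	// The context can be wrapped further, exactly as in the hunks above.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	logger.Info("Running hypothetical test", "plugin", "VolumeZone")

	_ = ctx // pass ctx to the code under test here
}

This is why the tests replace context.Background(): with a plain background context, klog.FromContext falls back to the global logger, and the plugin's log output is not associated with the test that produced it.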