Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-02 16:29:21 +00:00
Merge pull request #118071 from kerthcet/cleanup/use-contextual-logging-in-statefulset
Chore: Apply to use contextual logging for all loggers in statefulSet
Commit a874f587b4
@@ -26,7 +26,7 @@ structured k8s.io/apiserver/pkg/server/options/encryptionconfig/.*
 # The following packages have been migrated to contextual logging.
 # Packages matched here do not have to be listed above because
 # "contextual" implies "structured".
 # TODO next: contextual k8s.io/kubernetes/pkg/scheduler/.*
 # A few files involved in startup migrated already to contextual
 # We can't enable contextual logcheck until all are migrated
 contextual k8s.io/dynamic-resource-allocation/.*
@@ -50,7 +50,6 @@ contextual k8s.io/kubernetes/test/e2e/dra/.*
 -contextual k8s.io/kubernetes/pkg/controller/nodeipam/.*
 -contextual k8s.io/kubernetes/pkg/controller/podgc/.*
 -contextual k8s.io/kubernetes/pkg/controller/replicaset/.*
--contextual k8s.io/kubernetes/pkg/controller/statefulset/.*
 -contextual k8s.io/kubernetes/pkg/controller/testutil/.*
 -contextual k8s.io/kubernetes/pkg/controller/util/.*
 -contextual k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go
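These configuration hunks appear to come from the logcheck configuration (presumably hack/logcheck.conf), where `structured` and `contextual` entries list packages that must satisfy the corresponding logcheck rule and a leading `-` exempts a package that has not been migrated yet; dropping the statefulset exemption makes contextual logging mandatory for the StatefulSet controller. As a rough illustration that is not part of this PR, the difference between the two styles with the k8s.io/klog/v2 API looks roughly like this:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

// structuredOnly uses the global klog functions with key/value pairs.
// This satisfies the "structured" check but not the "contextual" one.
func structuredOnly(podName string) {
	klog.InfoS("Pod observed", "pod", podName)
}

// contextualStyle retrieves the logger from the context and logs through it,
// which is what a "contextual" logcheck entry requires.
func contextualStyle(ctx context.Context, podName string) {
	logger := klog.FromContext(ctx)
	logger.Info("Pod observed", "pod", podName)
}

func main() {
	// Callers attach a (possibly named) logger to the context they pass down.
	ctx := klog.NewContext(context.Background(), klog.Background().WithName("demo"))
	structuredOnly("web-0")
	contextualStyle(ctx, "web-0")
}
```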
@@ -22,7 +22,7 @@ import (
 	"strings"

 	apps "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	errorutils "k8s.io/apimachinery/pkg/util/errors"
@@ -208,6 +208,7 @@ func (spc *StatefulPodControl) DeleteStatefulPod(set *apps.StatefulSet, pod *v1.
 // An error is returned if something is not consistent. This is expected if the pod is being otherwise updated,
 // but a problem otherwise (see usage of this method in UpdateStatefulPod).
 func (spc *StatefulPodControl) ClaimsMatchRetentionPolicy(ctx context.Context, set *apps.StatefulSet, pod *v1.Pod) (bool, error) {
+	logger := klog.FromContext(ctx)
 	ordinal := getOrdinal(pod)
 	templates := set.Spec.VolumeClaimTemplates
 	for i := range templates {
@@ -219,7 +220,7 @@ func (spc *StatefulPodControl) ClaimsMatchRetentionPolicy(ctx context.Context, s
 		case err != nil:
 			return false, fmt.Errorf("Could not retrieve claim %s for %s when checking PVC deletion policy", claimName, pod.Name)
 		default:
-			if !claimOwnerMatchesSetAndPod(claim, set, pod) {
+			if !claimOwnerMatchesSetAndPod(logger, claim, set, pod) {
 				return false, nil
 			}
 		}
@@ -241,9 +242,9 @@ func (spc *StatefulPodControl) UpdatePodClaimForRetentionPolicy(ctx context.Cont
 		case err != nil:
 			return fmt.Errorf("Could not retrieve claim %s not found for %s when checking PVC deletion policy: %w", claimName, pod.Name, err)
 		default:
-			if !claimOwnerMatchesSetAndPod(claim, set, pod) {
+			if !claimOwnerMatchesSetAndPod(logger, claim, set, pod) {
 				claim = claim.DeepCopy() // Make a copy so we don't mutate the shared cache.
-				needsUpdate := updateClaimOwnerRefForSetAndPod(claim, set, pod)
+				needsUpdate := updateClaimOwnerRefForSetAndPod(logger, claim, set, pod)
 				if needsUpdate {
 					err := spc.objectMgr.UpdateClaim(claim)
 					if err != nil {
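The hunks above show the core migration pattern in the pod-control code: a method that already receives a context.Context derives its logger once with klog.FromContext and passes it explicitly to helpers that have no context of their own. A minimal, self-contained sketch of that pattern, with hypothetical helper names (reconcileClaim, claimMatches) standing in for the real ones:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

// claimMatches is a hypothetical helper: it has no context of its own, so it
// logs through the logger handed to it by the caller.
func claimMatches(logger klog.Logger, claimName string) bool {
	logger.V(4).Info("Checking claim", "claim", claimName)
	return claimName != ""
}

// reconcileClaim is a hypothetical method body: the logger is derived once
// from the incoming context and threaded into the context-free helper.
func reconcileClaim(ctx context.Context, claimName string) error {
	logger := klog.FromContext(ctx)
	if !claimMatches(logger, claimName) {
		logger.Info("Claim needs attention", "claim", claimName)
	}
	return nil
}

func main() {
	_ = reconcileClaim(context.Background(), "datadir-web-0")
}
```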
@@ -122,7 +122,7 @@ func (ssc *defaultStatefulSetControl) performUpdate(

 	switch {
 	case err != nil && statusErr != nil:
-		klog.ErrorS(statusErr, "Could not update status", "statefulSet", klog.KObj(set))
+		logger.Error(statusErr, "Could not update status", "statefulSet", klog.KObj(set))
 		return currentRevision, updateRevision, currentStatus, err
 	case err != nil:
 		return currentRevision, updateRevision, currentStatus, err
@@ -450,11 +450,8 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(

 		// If the Pod is in pending state then trigger PVC creation to create missing PVCs
 		if isPending(replicas[i]) {
-			klog.V(4).Infof(
-				"StatefulSet %s/%s is triggering PVC creation for pending Pod %s",
-				set.Namespace,
-				set.Name,
-				replicas[i].Name)
+			logger.V(4).Info("StatefulSet is triggering PVC Creation for pending Pod",
+				"statefulSet", klog.KObj(set), "pod", klog.KObj(replicas[i]))
 			if err := ssc.podControl.createMissingPersistentVolumeClaims(ctx, set, replicas[i]); err != nil {
 				return &status, err
 			}
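The call-site rewrites above replace printf-style messages (klog.V(4).Infof, klog.ErrorS) with logger methods that take a constant message plus key/value pairs, using klog.KObj to render namespace/name references. A hedged before/after sketch of that conversion, independent of the StatefulSet controller:

```go
package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func logPVCTrigger(ctx context.Context, pod *v1.Pod) {
	// Old style: the message text varies per object and is hard to filter on.
	klog.V(4).Infof("Pod %s/%s is triggering PVC creation", pod.Namespace, pod.Name)

	// New style: a fixed message plus structured key/value pairs; klog.KObj
	// renders the object as namespace/name.
	logger := klog.FromContext(ctx)
	logger.V(4).Info("Pod is triggering PVC creation", "pod", klog.KObj(pod))
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web-0"}}
	logPVCTrigger(context.Background(), pod)
}
```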
@@ -172,13 +172,13 @@ func getPersistentVolumeClaimRetentionPolicy(set *apps.StatefulSet) apps.Statefu

 // claimOwnerMatchesSetAndPod returns false if the ownerRefs of the claim are not set consistently with the
 // PVC deletion policy for the StatefulSet.
-func claimOwnerMatchesSetAndPod(claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
+func claimOwnerMatchesSetAndPod(logger klog.Logger, claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
 	policy := getPersistentVolumeClaimRetentionPolicy(set)
 	const retain = apps.RetainPersistentVolumeClaimRetentionPolicyType
 	const delete = apps.DeletePersistentVolumeClaimRetentionPolicyType
 	switch {
 	default:
-		klog.Errorf("Unknown policy %v; treating as Retain", set.Spec.PersistentVolumeClaimRetentionPolicy)
+		logger.Error(nil, "Unknown policy, treating as Retain", "policy", set.Spec.PersistentVolumeClaimRetentionPolicy)
 		fallthrough
 	case policy.WhenScaled == retain && policy.WhenDeleted == retain:
 		if hasOwnerRef(claim, set) ||
@@ -214,7 +214,7 @@ func claimOwnerMatchesSetAndPod(claim *v1.PersistentVolumeClaim, set *apps.State

 // updateClaimOwnerRefForSetAndPod updates the ownerRefs for the claim according to the deletion policy of
 // the StatefulSet. Returns true if the claim was changed and should be updated and false otherwise.
-func updateClaimOwnerRefForSetAndPod(claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
+func updateClaimOwnerRefForSetAndPod(logger klog.Logger, claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
 	needsUpdate := false
 	// Sometimes the version and kind are not set {pod,set}.TypeMeta. These are necessary for the ownerRef.
 	// This is the case both in real clusters and the unittests.
@@ -240,7 +240,7 @@ func updateClaimOwnerRefForSetAndPod(claim *v1.PersistentVolumeClaim, set *apps.
 	const delete = apps.DeletePersistentVolumeClaimRetentionPolicyType
 	switch {
 	default:
-		klog.Errorf("Unknown policy %v, treating as Retain", set.Spec.PersistentVolumeClaimRetentionPolicy)
+		logger.Error(nil, "Unknown policy, treating as Retain", "policy", set.Spec.PersistentVolumeClaimRetentionPolicy)
 		fallthrough
 	case policy.WhenScaled == retain && policy.WhenDeleted == retain:
 		needsUpdate = removeOwnerRef(claim, set) || needsUpdate
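In these utility hunks, pure helper functions gain a klog.Logger as their first parameter instead of calling the global klog.Errorf, and a message at error severity with no underlying error object is emitted as logger.Error(nil, ...). A small sketch of that signature style, with a stand-in policy type rather than the real StatefulSet API:

```go
package main

import "k8s.io/klog/v2"

// retentionPolicy is a hypothetical stand-in type, not the real API.
type retentionPolicy string

// ownerRefsMatchPolicy takes the logger as its first parameter instead of
// calling the global klog.Errorf.
func ownerRefsMatchPolicy(logger klog.Logger, policy retentionPolicy) bool {
	switch policy {
	case "Retain", "Delete":
		return true
	default:
		// logger.Error takes the error first; nil means there is no wrapped
		// error, only an error-severity message.
		logger.Error(nil, "Unknown policy, treating as Retain", "policy", policy)
		return true
	}
}

func main() {
	logger := klog.Background().WithName("statefulset")
	_ = ownerRefsMatchPolicy(logger, "Whatever")
}
```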
@@ -31,6 +31,8 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/klog/v2"
+	"k8s.io/klog/v2/ktesting"

 	apps "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
@@ -317,6 +319,8 @@ func TestClaimOwnerMatchesSetAndPod(t *testing.T) {
 	for _, useOtherRefs := range []bool{false, true} {
 		for _, setPodRef := range []bool{false, true} {
 			for _, setSetRef := range []bool{false, true} {
+				_, ctx := ktesting.NewTestContext(t)
+				logger := klog.FromContext(ctx)
 				claim := v1.PersistentVolumeClaim{}
 				claim.Name = "target-claim"
 				pod := v1.Pod{}
@@ -347,7 +351,7 @@ func TestClaimOwnerMatchesSetAndPod(t *testing.T) {
 					setOwnerRef(&claim, &randomObject2, &randomObject2.TypeMeta)
 				}
 				shouldMatch := setPodRef == tc.needsPodRef && setSetRef == tc.needsSetRef
-				if claimOwnerMatchesSetAndPod(&claim, &set, &pod) != shouldMatch {
+				if claimOwnerMatchesSetAndPod(logger, &claim, &set, &pod) != shouldMatch {
 					t.Errorf("Bad match for %s with pod=%v,set=%v,others=%v", tc.name, setPodRef, setSetRef, useOtherRefs)
 				}
 			}
@@ -417,6 +421,8 @@ func TestUpdateClaimOwnerRefForSetAndPod(t *testing.T) {
 	for _, tc := range testCases {
 		for _, hasPodRef := range []bool{true, false} {
 			for _, hasSetRef := range []bool{true, false} {
+				_, ctx := ktesting.NewTestContext(t)
+				logger := klog.FromContext(ctx)
 				set := apps.StatefulSet{}
 				set.Name = "ss"
 				numReplicas := int32(5)
@@ -441,7 +447,7 @@ func TestUpdateClaimOwnerRefForSetAndPod(t *testing.T) {
 					setOwnerRef(&claim, &set, &set.TypeMeta)
 				}
 				needsUpdate := hasPodRef != tc.needsPodRef || hasSetRef != tc.needsSetRef
-				shouldUpdate := updateClaimOwnerRefForSetAndPod(&claim, &set, &pod)
+				shouldUpdate := updateClaimOwnerRefForSetAndPod(logger, &claim, &set, &pod)
 				if shouldUpdate != needsUpdate {
 					t.Errorf("Bad update for %s hasPodRef=%v hasSetRef=%v", tc.name, hasPodRef, hasSetRef)
 				}
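On the test side, each subtest iteration obtains a per-test context from ktesting.NewTestContext, whose logger writes through the testing.T, and recovers a klog.Logger to hand to helpers that now require one. A sketch of that usage, with helperUnderTest as a hypothetical stand-in:

```go
package statefulset_test

import (
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// helperUnderTest stands in for a helper that now takes a logger explicitly.
func helperUnderTest(logger klog.Logger, name string) bool {
	logger.V(4).Info("checking", "name", name)
	return name != ""
}

func TestHelperUnderTest(t *testing.T) {
	for _, name := range []string{"web-0", "web-1"} {
		// NewTestContext returns a context whose logger routes output to t.Log;
		// klog.FromContext recovers it as a klog.Logger for the helper.
		_, ctx := ktesting.NewTestContext(t)
		logger := klog.FromContext(ctx)
		if !helperUnderTest(logger, name) {
			t.Errorf("expected %q to be accepted", name)
		}
	}
}
```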