kubelet: pullmanager: write to pulled record if secret matches during query

This commit is contained in:
Stanislav Láznička 2024-11-07 13:42:57 +01:00
parent 788b7abe40
commit 146515ac4a
No known key found for this signature in database
GPG Key ID: F8D8054395A1D157
2 changed files with 98 additions and 22 deletions

View File

@ -34,6 +34,11 @@ import (
var _ ImagePullManager = &PullManager{}
// writeRecordWhileMatchingLimit is a limit at which we stop writing yet-uncached
// records that we found when we were checking if an image pull must be attempted.
// This is to prevent unbounded writes in cases of high namespace turnover.
const writeRecordWhileMatchingLimit = 100
// PullManager is an implementation of the ImagePullManager. It
// tracks images pulled by the kubelet by creating records about ongoing and
// successful pulls.
@ -256,16 +261,33 @@ func (f *PullManager) MustAttemptImagePull(image, imageRef string, podSecrets []
for _, cachedSecret := range cachedCreds.KubernetesSecrets {
// we need to check hash len in case hashing failed while storing the record in the keyring
if len(cachedSecret.CredentialHash) > 0 && podSecret.CredentialHash == cachedSecret.CredentialHash {
// TODO: should we record the new secret in case it has different coordinates? If the secret rotates, we will pull unnecessarily otherwise
hashesMatch := len(cachedSecret.CredentialHash) > 0 && podSecret.CredentialHash == cachedSecret.CredentialHash
secretCoordinatesMatch := podSecret.UID == cachedSecret.UID &&
podSecret.Namespace == cachedSecret.Namespace &&
podSecret.Name == cachedSecret.Name
if hashesMatch {
if !secretCoordinatesMatch && len(cachedCreds.KubernetesSecrets) < writeRecordWhileMatchingLimit {
// While we're only matching at this point, we want to ensure this secret is considered valid in the future
// and so we make an additional write to the cache.
// writePulledRecord() is a noop in case the secret with the updated hash already appears in the cache.
if err := f.writePulledRecordIfChanged(image, imageRef, &kubeletconfiginternal.ImagePullCredentials{KubernetesSecrets: []kubeletconfiginternal.ImagePullSecret{podSecret}}); err != nil {
klog.ErrorS(err, "failed to write an image pulled record", "image", image, "imageRef", imageRef)
}
}
return false
}
if podSecret.UID == cachedSecret.UID &&
podSecret.Namespace == cachedSecret.Namespace &&
podSecret.Name == cachedSecret.Name {
// TODO: should we record the new creds in this case so that we don't pull if these are present in a different secret?
return false
if secretCoordinatesMatch {
if !hashesMatch && len(cachedCreds.KubernetesSecrets) < writeRecordWhileMatchingLimit {
// While we're only matching at this point, we want to ensure the updated credentials are considered valid in the future
// and so we make an additional write to the cache.
// writePulledRecord() is a noop in case the hash got updated in the meantime.
if err := f.writePulledRecordIfChanged(image, imageRef, &kubeletconfiginternal.ImagePullCredentials{KubernetesSecrets: []kubeletconfiginternal.ImagePullSecret{podSecret}}); err != nil {
klog.ErrorS(err, "failed to write an image pulled record", "image", image, "imageRef", imageRef)
}
return false
}
}
}
}

View File

@ -199,14 +199,16 @@ func Test_pulledRecordMergeNewCreds(t *testing.T) {
func TestFileBasedImagePullManager_MustAttemptImagePull(t *testing.T) {
tests := []struct {
name string
imagePullPolicy ImagePullPolicyEnforcer
podSecrets []kubeletconfiginternal.ImagePullSecret
image string
imageRef string
pulledFiles []string
pullingFiles []string
want bool
name string
imagePullPolicy ImagePullPolicyEnforcer
podSecrets []kubeletconfiginternal.ImagePullSecret
image string
imageRef string
pulledFiles []string
pullingFiles []string
expectedPullRecord *kubeletconfiginternal.ImagePulledRecord
want bool
expectedCacheWrite bool
}{
{
name: "image exists and is recorded with pod's exact secret",
@ -240,7 +242,18 @@ func TestFileBasedImagePullManager_MustAttemptImagePull(t *testing.T) {
image: "docker.io/testing/test:latest",
imageRef: "testimageref",
pulledFiles: []string{"sha256-b3c0cc4278800b03a308ceb2611161430df571ca733122f0a40ac8b9792a9064"},
want: false,
expectedPullRecord: &kubeletconfiginternal.ImagePulledRecord{
ImageRef: "testimageref",
CredentialMapping: map[string]kubeletconfiginternal.ImagePullCredentials{
"docker.io/testing/test": {
KubernetesSecrets: []kubeletconfiginternal.ImagePullSecret{
{UID: "testsecretuid", Namespace: "default", Name: "pull-secret", CredentialHash: "differenthash"},
},
},
},
},
want: false,
expectedCacheWrite: true,
},
{
name: "image exists and is recorded with a different secret with a different UID",
@ -279,7 +292,19 @@ func TestFileBasedImagePullManager_MustAttemptImagePull(t *testing.T) {
image: "docker.io/testing/test:latest",
imageRef: "testimageref",
pulledFiles: []string{"sha256-b3c0cc4278800b03a308ceb2611161430df571ca733122f0a40ac8b9792a9064"},
want: false,
expectedPullRecord: &kubeletconfiginternal.ImagePulledRecord{
ImageRef: "testimageref",
CredentialMapping: map[string]kubeletconfiginternal.ImagePullCredentials{
"docker.io/testing/test": {
KubernetesSecrets: []kubeletconfiginternal.ImagePullSecret{
{UID: "testsecretuid", Namespace: "default", Name: "pull-secret", CredentialHash: "testsecrethash"},
{UID: "testsecretuid", Namespace: "differentns", Name: "pull-secret", CredentialHash: "testsecrethash"},
},
},
},
},
want: false,
expectedCacheWrite: true,
},
{
name: "image exists but the pull is recorded with a different image name but with the exact same secret",
@ -409,11 +434,13 @@ func TestFileBasedImagePullManager_MustAttemptImagePull(t *testing.T) {
copyTestData(t, pullingDir, "pulling", tt.pullingFiles)
copyTestData(t, pulledDir, "pulled", tt.pulledFiles)
fsRecordAccessor := &fsPullRecordsAccessor{
pullingDir: pullingDir,
pulledDir: pulledDir,
encoder: encoder,
decoder: decoder,
fsRecordAccessor := &testWriteCountingFSPullRecordsAccessor{
fsPullRecordsAccessor: fsPullRecordsAccessor{
pullingDir: pullingDir,
pulledDir: pulledDir,
encoder: encoder,
decoder: decoder,
},
}
f := &PullManager{
@ -426,10 +453,37 @@ func TestFileBasedImagePullManager_MustAttemptImagePull(t *testing.T) {
if got := f.MustAttemptImagePull(tt.image, tt.imageRef, tt.podSecrets); got != tt.want {
t.Errorf("FileBasedImagePullManager.MustAttemptImagePull() = %v, want %v", got, tt.want)
}
if tt.expectedCacheWrite != (fsRecordAccessor.imagePulledRecordsWrites != 0) {
t.Errorf("expected zero cache writes, got: %v", fsRecordAccessor.imagePulledRecordsWrites)
}
if tt.expectedPullRecord != nil {
got, found, err := fsRecordAccessor.GetImagePulledRecord(tt.imageRef)
if err != nil && !found {
t.Fatalf("failed to get an expected ImagePulledRecord")
}
got.LastUpdatedTime = tt.expectedPullRecord.LastUpdatedTime
if !reflect.DeepEqual(got, tt.expectedPullRecord) {
t.Errorf("expected ImagePulledRecord != got; diff: %s", cmp.Diff(tt.expectedPullRecord, got))
}
}
})
}
}
// testWriteCountingFSPullRecordsAccessor decorates fsPullRecordsAccessor so
// that tests can observe how many times an ImagePulledRecord was written to
// the backing store.
type testWriteCountingFSPullRecordsAccessor struct {
	fsPullRecordsAccessor

	// imagePulledRecordsWrites counts the calls to WriteImagePulledRecord.
	imagePulledRecordsWrites int
}
// WriteImagePulledRecord bumps the write counter and then forwards the call
// to the embedded fsPullRecordsAccessor implementation, returning its error
// unchanged.
func (w *testWriteCountingFSPullRecordsAccessor) WriteImagePulledRecord(pulledRecord *kubeletconfiginternal.ImagePulledRecord) error {
	w.imagePulledRecordsWrites++
	return w.fsPullRecordsAccessor.WriteImagePulledRecord(pulledRecord)
}
func TestFileBasedImagePullManager_RecordPullIntent(t *testing.T) {
tests := []struct {
name string