Compare commits

...

9 Commits

Author SHA1 Message Date
Miloslav Trmač
fd3ba47e00 Merge pull request #2800 from TomSweeneyRedHat/dev/tsweeney/skopeo-v1.22.0-1
[release-1.22] Skopeo v1.22.0
2026-02-11 18:33:16 +01:00
Tom Sweeney
23dddaad3d [release-1.22] Bump Skopeo to v1.22.0
Bump Skopeo to v1.22.0.  This will go out alongside
Buildah v1.43.0 and Podman v5.8.0 in RHEL 9.8/10.2

Signed-off-by: Tom Sweeney <tomsweney@redhat.com>
2026-02-10 16:32:38 -05:00
Miloslav Trmač
0b40dee09d [release-1.22] Update tests for a changed error message
008d971bc0
changed the text we were looking for.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
(cherry picked from commit a64f780f83)
Signed-off-by: Tom Sweeney <tomsweney@redhat.com>
2026-02-10 16:29:30 -05:00
Miloslav Trmač
3b6f25390b [release-1.22] Document the default of --retry-times
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Signed-off-by: Tom Sweeney <tomsweney@redhat.com>
2026-02-10 16:26:49 -05:00
promalert
99298b6a80 [release-1.22] chore: fix function name in comment
Signed-off-by: promalert <promalert@outlook.com>
Signed-off-by: Tom Sweeney <tomsweney@redhat.com>
2026-02-10 16:26:49 -05:00
Tom Sweeney
d0289b2540 [release-1.22] Bump common 0.67.0, image 5.39.1, storage 1.62.0
Bump the following in preparation of the Skopeo v1.22.0 release in
conjunction with Podman v5.8 and Buildah v1.43

c/common v0.67.0
c/image v5.39.1
c/storage v1.62.0

Signed-off-by: Tom Sweeney <tomsweney@redhat.com>
2026-02-10 16:14:35 -05:00
Miloslav Trmač
48a05d71f0 Merge pull request #2767 from lsm5/release-1.21-packit-backport
[release-1.21] Packit: use `post-modifications` hook to update downstream TMT plan
2025-12-08 19:15:56 +01:00
Lokesh Mandvekar
dec587b480 Packit: use post-modifications hook to update downstream TMT plan
`prepare-files` action was interfering with spec file update which caused
https://github.com/containers/skopeo/issues/2760 .

`post-modifications` needs to be limited to the propose_downstream job or
else it will interfere with upstream PR copr builds.

Also, s/PACKIT_PROJECT_TAG/PACKIT_PROJECT_VERSION/ .

Co-authored-by: Nikola Forró <nforro@redhat.com>
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>

(cherry picked from commit e26a4237fc)
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2025-12-08 09:03:51 -05:00
Miloslav Trmač
8bd9c541f0 Bump to 1.21.0
- New support for creating "simple signing" signatures using Sequoia-PGP,
  dependent on a build tag that enables it
- New option (skopeo copy --force-compression-format)
- New option --user-agent-prefix
- TLS options on the command line of (skopeo sync) take precedence
  over options in YAML

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2025-11-28 11:28:17 +01:00
31 changed files with 700 additions and 281 deletions

View File

@@ -22,8 +22,6 @@ packages:
# https://packit.dev/docs/configuration/actions
actions:
get-current-version: bash -c 'grep "^const Version" version/version.go | cut -f2 -d\" | tr \- \~'
prepare-files: >-
bash -c "sed -i 's/^\(\s*\)ref: .*/\1ref: \"${PACKIT_PROJECT_TAG}\"/' ${PACKIT_DOWNSTREAM_REPO}/plans/main.fmf"
srpm_build_deps:
- make
@@ -121,6 +119,9 @@ jobs:
update_release: false
dist_git_branches: &fedora_targets
- fedora-all
actions:
post-modifications: >-
bash -c "sed -i 's/^\(\s*\)ref: .*/\1ref: \"v${PACKIT_PROJECT_VERSION}\"/' ${PACKIT_DOWNSTREAM_REPO}/plans/main.fmf"
# Sync to CentOS Stream
# FIXME: Switch trigger whenever we're ready to update CentOS Stream via

View File

@@ -214,7 +214,7 @@ Precompute digests to ensure layers are not uploaded that already exist on the d
**--retry-times**
The number of times to retry.
The number of times to retry. By default, no retries are attempted.
**--retry-delay**

View File

@@ -70,7 +70,7 @@ Bearer token for accessing the registry.
**--retry-times**
The number of times to retry.
The number of times to retry. By default, no retries are attempted.
**--retry-delay**

View File

@@ -69,7 +69,7 @@ Registry token for accessing the registry.
**--retry-times**
The number of times to retry.
The number of times to retry. By default, no retries are attempted.
**--retry-delay**

View File

@@ -43,7 +43,7 @@ Bearer token for accessing the registry.
**--retry-times**
The number of times to retry.
The number of times to retry. By default, no retries are attempted.
**--retry-delay**

View File

@@ -134,7 +134,7 @@ Only the first line will be read. A passphrase stored in a file is of questionab
**--retry-times**
The number of times to retry.
The number of times to retry. By default, no retries are attempted.
**--retry-delay**

6
go.mod
View File

@@ -17,9 +17,9 @@ require (
github.com/spf13/cobra v1.10.1
github.com/spf13/pflag v1.0.10
github.com/stretchr/testify v1.11.1
go.podman.io/common v0.66.0
go.podman.io/image/v5 v5.38.0
go.podman.io/storage v1.61.0
go.podman.io/common v0.67.0
go.podman.io/image/v5 v5.39.1
go.podman.io/storage v1.62.0
golang.org/x/term v0.36.0
gopkg.in/yaml.v3 v3.0.1
)

12
go.sum
View File

@@ -276,12 +276,12 @@ go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKr
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.podman.io/common v0.66.0 h1:KElE3HKLFdMdJL+jv5ExBiX2Dh4Qcv8ovmzaBGRsyZM=
go.podman.io/common v0.66.0/go.mod h1:aNd2a0S7pY+fx1X5kpQYuF4hbwLU8ZOccuVrhu7h1Xc=
go.podman.io/image/v5 v5.38.0 h1:aUKrCANkPvze1bnhLJsaubcfz0d9v/bSDLnwsXJm6G4=
go.podman.io/image/v5 v5.38.0/go.mod h1:hSIoIUzgBnmc4DjoIdzk63aloqVbD7QXDMkSE/cvG90=
go.podman.io/storage v1.61.0 h1:5hD/oyRYt1f1gxgvect+8syZBQhGhV28dCw2+CZpx0Q=
go.podman.io/storage v1.61.0/go.mod h1:A3UBK0XypjNZ6pghRhuxg62+2NIm5lcUGv/7XyMhMUI=
go.podman.io/common v0.67.0 h1:6Ci5oU1ek08OAxBLkHEqSyWmjNh5zf03PRqZ04cPdwU=
go.podman.io/common v0.67.0/go.mod h1:sB9L8LMtmf5Hpek2qkEyRrcSzpb+gYpG3vq5Khima3U=
go.podman.io/image/v5 v5.39.1 h1:loIw4qHzZzBlUguYZau40u8HbR5MrTPQhwT4Hy6sCm0=
go.podman.io/image/v5 v5.39.1/go.mod h1:SlaR6Pra1ATIx4BcuZ16oafb3QcCHISaKcJbtlN/G/0=
go.podman.io/storage v1.62.0 h1:0QjX1XlzVmbiaulb+aR/CG6p9+pzaqwIeZPe3tEjHbY=
go.podman.io/storage v1.62.0/go.mod h1:A3UBK0XypjNZ6pghRhuxg62+2NIm5lcUGv/7XyMhMUI=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=

View File

@@ -780,10 +780,10 @@ func (s *copySuite) TestCopySignatures() {
// Verify that mis-signed images are rejected
assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/personal:personal", "atomic:localhost:5006/myns/official:attack")
assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/personal:attack")
// "Invalid GPG signature" is reported by the gpgme mechanism; "Missing key: $fingerprint" by Sequoia.
assertSkopeoFails(t, ".*Source image rejected: (Invalid GPG signature|Missing key:).*",
// "Invalid GPG signature" is reported by the gpgme mechanism; "Missing key: $fingerprint" or "Missing key $fingerprint" by Sequoia.
assertSkopeoFails(t, ".*Source image rejected: (Invalid GPG signature|Missing key).*",
"--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/personal:attack", dirDest)
assertSkopeoFails(t, ".*Source image rejected: (Invalid GPG signature|Missing key:).*",
assertSkopeoFails(t, ".*Source image rejected: (Invalid GPG signature|Missing key).*",
"--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/official:attack", dirDest)
// Verify that signed identity is verified.
@@ -796,8 +796,8 @@ func (s *copySuite) TestCopySignatures() {
// Verify that cosigning requirements are enforced
assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/cosigned:cosigned")
// "Invalid GPG signature" is reported by the gpgme mechanism; "Missing key: $fingerprint" by Sequoia.
assertSkopeoFails(t, ".*Source image rejected: (Invalid GPG signature|Missing key:).*",
// "Invalid GPG signature" is reported by the gpgme mechanism; "Missing key: $fingerprint" or "Missing key $fingerprint" by Sequoia.
assertSkopeoFails(t, ".*Source image rejected: (Invalid GPG signature|Missing key).*",
"--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/cosigned:cosigned", dirDest)
assertSkopeoSucceeds(t, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/cosigned:cosigned")
@@ -842,8 +842,8 @@ func (s *copySuite) TestCopyDirSignatures() {
// Verify that correct images are accepted
assertSkopeoSucceeds(t, "", "--policy", policy, "copy", topDirDest+"/restricted/official", topDirDest+"/dest")
// ... and that mis-signed images are rejected.
// "Invalid GPG signature" is reported by the gpgme mechanism; "Missing key: $fingerprint" by Sequoia.
assertSkopeoFails(t, ".*Source image rejected: (Invalid GPG signature|Missing key:).*",
// "Invalid GPG signature" is reported by the gpgme mechanism; "Missing key: $fingerprint" or "Missing key $fingerprint" by Sequoia.
assertSkopeoFails(t, ".*Source image rejected: (Invalid GPG signature|Missing key).*",
"--policy", policy, "copy", topDirDest+"/restricted/personal", topDirDest+"/dest")
// Verify that the signed identity is verified.

View File

@@ -207,7 +207,7 @@ func (cluster *openshiftCluster) startRegistry(t *testing.T) {
cluster.processes = append(cluster.processes, cluster.startRegistryProcess(t, 5006, schema2Config))
}
// ocLogin runs (oc login) and (oc new-project) on the cluster, or terminates on failure.
// ocLoginToProject runs (oc login) and (oc new-project) on the cluster, or terminates on failure.
func (cluster *openshiftCluster) ocLoginToProject(t *testing.T) {
t.Logf("oc login")
cmd := cluster.clusterCmd(nil, "oc", "login", "--certificate-authority=openshift.local.config/master/ca.crt", "-u", "myuser", "-p", "mypw", "https://localhost:8443")

View File

@@ -137,7 +137,7 @@ END_PUSH
# The table below lists the paths to fetch, and the expected errors (or
# none, if we expect them to pass).
#
# "Invalid GPG signature" is reported by the gpgme mechanism; "Missing key: $fingerprint" by Sequoia.
# "Invalid GPG signature" is reported by the gpgme mechanism; "Missing key: $fingerprint" or "Missing key $fingerprint" by Sequoia.
while read path expected_error; do
expected_rc=
if [[ -n $expected_error ]]; then
@@ -156,7 +156,7 @@ END_PUSH
fi
done <<END_TESTS
/myns/alice:signed
/myns/bob:signedbyalice (Invalid GPG signature|Missing key:)
/myns/bob:signedbyalice (Invalid GPG signature|Missing key)
/myns/alice:unsigned Signature for identity \\\\\\\\"localhost:5000/myns/alice:signed\\\\\\\\" is not accepted
/myns/carol:latest Running image docker://localhost:5000/myns/carol:latest is rejected by policy.
/open/forall:latest

View File

@@ -386,14 +386,14 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,
destImageSource, err := ic.c.dest.Reference().NewImageSource(ctx, ic.c.options.DestinationCtx)
if err != nil {
logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err)
logrus.Debugf("Unable to create destination image %s source: %v", transports.ImageName(ic.c.dest.Reference()), err)
return nil, nil
}
defer destImageSource.Close()
destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
if err != nil {
logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
logrus.Debugf("Unable to get destination image %s/%s manifest: %v", transports.ImageName(destImageSource.Reference()), targetInstance, err)
return nil, nil
}

View File

@@ -916,6 +916,11 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
if c.sys != nil && c.sys.DockerProxyURL != nil {
tr.Proxy = http.ProxyURL(c.sys.DockerProxyURL)
}
if c.sys != nil && c.sys.DockerProxy != nil {
tr.Proxy = func(request *http.Request) (*url.URL, error) {
return c.sys.DockerProxy(request.URL)
}
}
c.client = &http.Client{Transport: tr}
ping := func(scheme string) error {

View File

@@ -50,9 +50,8 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (private.Im
}
} else {
index = &imgspecv1.Index{
Versioned: imgspec.Versioned{
SchemaVersion: 2,
},
Versioned: imgspec.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageIndex,
Annotations: make(map[string]string),
}
}

View File

@@ -179,9 +179,8 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
// Populate a manifest with the configuration blob and the layers.
manifest := imgspecv1.Manifest{
Versioned: imgspecs.Versioned{
SchemaVersion: 2,
},
Versioned: imgspecs.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageManifest,
Config: imgspecv1.Descriptor{
Digest: configID,
Size: int64(len(configBytes)),

View File

@@ -668,6 +668,10 @@ type SystemContext struct {
DockerRegistryPushPrecomputeDigests bool
// DockerProxyURL specifies proxy configuration schema (like socks5://username:password@ip:port)
DockerProxyURL *url.URL
// DockerProxy is a function that determines the proxy URL for a given request URL.
// If set, this takes precedence over DockerProxyURL. The function should return the proxy URL to use,
// or nil if no proxy should be used for the given request.
DockerProxy func(reqURL *url.URL) (*url.URL, error)
// === docker/daemon.Transport overrides ===
// A directory containing a CA certificate (ending with ".crt"),

View File

@@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 38
VersionMinor = 39
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""

View File

@@ -1 +1 @@
1.61.0
1.62.0

View File

@@ -47,7 +47,6 @@ type CreateOpts struct {
MountLabel string
StorageOpt map[string]string
*idtools.IDMappings
ignoreChownErrors bool
}
// MountOpts contains optional arguments for Driver.Get() methods.
@@ -184,7 +183,7 @@ type DiffDriver interface {
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The io.Reader must be an uncompressed stream.
ApplyDiff(id string, parent string, options ApplyDiffOpts) (size int64, err error)
ApplyDiff(id string, options ApplyDiffOpts) (size int64, err error)
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
@@ -299,6 +298,19 @@ type DriverWithDiffer interface {
DifferTarget(id string) (string, error)
}
// ApplyDiffStaging is an interface for driver who can apply the diff without holding the main storage lock.
// This API is experimental and can be changed without bumping the major version number.
type ApplyDiffStaging interface {
// StartStagingDiffToApply applies the new layer into a temporary directory.
// It returns a CleanupTempDirFunc which can be nil or set regardless of whether the function returns an error or not.
// StagedAddition is only set when there is no error returned and the int64 value returns the size of the layer.
// This can be done without holding the storage lock, if a parent is given the caller must check for existence
// beforehand while holding a lock.
StartStagingDiffToApply(parent string, options ApplyDiffOpts) (tempdir.CleanupTempDirFunc, *tempdir.StagedAddition, int64, error)
// CommitStagedLayer commits the staged layer from StartStagingDiffToApply(). This must be done while holding the storage lock.
CommitStagedLayer(id string, commit *tempdir.StagedAddition) error
}
// Capabilities defines a list of capabilities a driver may implement.
// These capabilities are not required; however, they do determine how a
// graphdriver can be used.

View File

@@ -151,7 +151,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) (int64, error) {
func (gdw *NaiveDiffDriver) ApplyDiff(id string, options ApplyDiffOpts) (int64, error) {
driver := gdw.ProtoDriver
if options.Mappings == nil {

View File

@@ -995,6 +995,49 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return d.create(id, parent, opts, true)
}
// getLayerPermissions returns the base permissions to use for the layer directories.
// The first return value is the idPair to create the possible parent directories with.
// The second return value is the mode how it should be stored on disk.
// The third return value is the mode the layer expects to have which may be stored
// in an xattr when using forceMask, without forceMask both values are the same.
func (d *Driver) getLayerPermissions(parent string, uidMaps, gidMaps []idtools.IDMap) (idtools.IDPair, idtools.Stat, idtools.Stat, error) {
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return idtools.IDPair{}, idtools.Stat{}, idtools.Stat{}, err
}
idPair := idtools.IDPair{
UID: rootUID,
GID: rootGID,
}
st := idtools.Stat{IDs: idPair, Mode: defaultPerms}
if parent != "" {
parentBase := d.dir(parent)
parentDiff := filepath.Join(parentBase, "diff")
if xSt, err := idtools.GetContainersOverrideXattr(parentDiff); err == nil {
st = xSt
} else {
systemSt, err := system.Stat(parentDiff)
if err != nil {
return idtools.IDPair{}, idtools.Stat{}, idtools.Stat{}, err
}
st.IDs.UID = int(systemSt.UID())
st.IDs.GID = int(systemSt.GID())
st.Mode = os.FileMode(systemSt.Mode())
}
}
forcedSt := st
if d.options.forceMask != nil {
forcedSt.IDs = idPair
forcedSt.Mode = *d.options.forceMask
}
return idPair, forcedSt, st, nil
}
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
dir, homedir, _ := d.dir2(id, readOnly)
@@ -1013,38 +1056,15 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
return err
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
idPair, forcedSt, st, err := d.getLayerPermissions(parent, uidMaps, gidMaps)
if err != nil {
return err
}
idPair := idtools.IDPair{
UID: rootUID,
GID: rootGID,
}
if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil {
return err
}
st := idtools.Stat{IDs: idPair, Mode: defaultPerms}
if parent != "" {
parentBase := d.dir(parent)
parentDiff := filepath.Join(parentBase, "diff")
if xSt, err := idtools.GetContainersOverrideXattr(parentDiff); err == nil {
st = xSt
} else {
systemSt, err := system.Stat(parentDiff)
if err != nil {
return err
}
st.IDs.UID = int(systemSt.UID())
st.IDs.GID = int(systemSt.GID())
st.Mode = os.FileMode(systemSt.Mode())
}
}
if err := fileutils.Lexists(dir); err == nil {
logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir)
// Don't just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir,
@@ -1088,12 +1108,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
}
}
forcedSt := st
if d.options.forceMask != nil {
forcedSt.IDs = idPair
forcedSt.Mode = *d.options.forceMask
}
diff := path.Join(dir, "diff")
if err := idtools.MkdirAs(diff, forcedSt.Mode, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil {
return err
@@ -1356,6 +1370,14 @@ func (d *Driver) getTempDirRoot(id string) string {
return filepath.Join(d.home, tempDirName)
}
// getTempDirRootForNewLayer returns the correct temp directory root based on where
// the layer should be created.
//
// This must be kept in sync with GetTempDirRootDirs().
func (d *Driver) getTempDirRootForNewLayer() string {
return filepath.Join(d.homeDirForImageStore(), tempDirName)
}
func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) {
tempDirRoot := d.getTempDirRoot(id)
t, err := tempdir.NewTempDir(tempDirRoot)
@@ -2369,31 +2391,94 @@ func (d *Driver) DifferTarget(id string) (string, error) {
return d.getDiffPath(id)
}
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
if !d.isParent(id, parent) {
if d.options.ignoreChownErrors {
options.IgnoreChownErrors = d.options.ignoreChownErrors
}
if d.options.forceMask != nil {
options.ForceMask = d.options.forceMask
}
return d.naiveDiff.ApplyDiff(id, parent, options)
// StartStagingDiffToApply applies the new layer into a temporary directory.
// It returns a CleanupTempDirFunc which can be nil or set regardless of whether the function returns an error or not.
// StagedAddition is only set when there is no error returned and the int64 value returns the size of the layer.
// This can be done without holding the storage lock, if a parent is given the caller must check for existence
// beforehand while holding a lock.
//
// This API is experimental and can be changed without bumping the major version number.
func (d *Driver) StartStagingDiffToApply(parent string, options graphdriver.ApplyDiffOpts) (tempdir.CleanupTempDirFunc, *tempdir.StagedAddition, int64, error) {
tempDirRoot := d.getTempDirRootForNewLayer()
t, err := tempdir.NewTempDir(tempDirRoot)
if err != nil {
return nil, nil, -1, err
}
sa, err := t.StageAddition()
if err != nil {
return t.Cleanup, nil, -1, err
}
_, forcedSt, st, err := d.getLayerPermissions(parent, options.Mappings.UIDs(), options.Mappings.GIDs())
if err != nil {
// If we have a ENOENT it means the parent was removed which can happen as we are unlocked here.
// In this case also wrap ErrLayerUnknown which some callers can handle to retry after recreating the parent.
if errors.Is(err, fs.ErrNotExist) {
err = fmt.Errorf("parent layer %q: %w: %w", parent, graphdriver.ErrLayerUnknown, err)
}
return t.Cleanup, nil, -1, err
}
if err := idtools.MkdirAs(sa.Path, forcedSt.Mode, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil {
return t.Cleanup, nil, -1, err
}
if d.options.forceMask != nil {
st.Mode |= os.ModeDir
if err := idtools.SetContainersOverrideXattr(sa.Path, st); err != nil {
return t.Cleanup, nil, -1, err
}
}
size, err := d.applyDiff(sa.Path, options)
if err != nil {
return t.Cleanup, nil, -1, err
}
return t.Cleanup, sa, size, nil
}
// CommitStagedLayer that was created with StartStagingDiffToApply().
//
// This API is experimental and can be changed without bumping the major version number.
func (d *Driver) CommitStagedLayer(id string, sa *tempdir.StagedAddition) error {
applyDir, err := d.getDiffPath(id)
if err != nil {
return err
}
// The os.Rename() function used by CommitFunc errors when the target directory already
// exists, as such delete the dir. The create() function creates it and it would be more
// complicated to code in a way that it didn't create it.
if err := os.Remove(applyDir); err != nil {
return err
}
return sa.Commit(applyDir)
}
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
applyDir, err := d.getDiffPath(id)
if err != nil {
return 0, err
}
return d.applyDiff(applyDir, options)
}
// ApplyDiff applies the new layer into a root.
// This can run concurrently with any other driver operations, as such it is the
// callers responsibility to ensure the target path passed is safe to use if that is the case.
func (d *Driver) applyDiff(target string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
idMappings := options.Mappings
if idMappings == nil {
idMappings = &idtools.IDMappings{}
}
applyDir, err := d.getDiffPath(id)
if err != nil {
return 0, err
}
logrus.Debugf("Applying tar in %s", applyDir)
logrus.Debugf("Applying tar in %s", target)
// Overlay doesn't need the parent id to apply the diff
if err := untar(options.Diff, applyDir, &archive.TarOptions{
if err := untar(options.Diff, target, &archive.TarOptions{
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
IgnoreChownErrors: d.options.ignoreChownErrors,
@@ -2404,7 +2489,7 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
return 0, err
}
return directory.Size(applyDir)
return directory.Size(target)
}
func (d *Driver) getComposefsData(id string) string {

View File

@@ -1,52 +0,0 @@
package graphdriver
import (
"github.com/sirupsen/logrus"
"go.podman.io/storage/pkg/idtools"
)
// TemplateDriver is just barely enough of a driver that we can implement a
// naive version of CreateFromTemplate on top of it.
type TemplateDriver interface {
DiffDriver
CreateReadWrite(id, parent string, opts *CreateOpts) error
Create(id, parent string, opts *CreateOpts) error
Remove(id string) error
}
// CreateFromTemplate creates a layer with the same contents and parent as
// another layer. Internally, it may even depend on that other layer
// continuing to exist, as if it were actually a child of the child layer.
func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error {
var err error
if readWrite {
err = d.CreateReadWrite(id, parent, opts)
} else {
err = d.Create(id, parent, opts)
}
if err != nil {
return err
}
diff, err := d.Diff(template, templateIDMappings, parent, parentIDMappings, opts.MountLabel)
if err != nil {
if err2 := d.Remove(id); err2 != nil {
logrus.Errorf("Removing layer %q: %v", id, err2)
}
return err
}
defer diff.Close()
applyOptions := ApplyDiffOpts{
Diff: diff,
Mappings: templateIDMappings,
MountLabel: opts.MountLabel,
IgnoreChownErrors: opts.ignoreChownErrors,
}
if _, err = d.ApplyDiff(id, parent, applyOptions); err != nil {
if err2 := d.Remove(id); err2 != nil {
logrus.Errorf("Removing layer %q: %v", id, err2)
}
return err
}
return nil
}

View File

@@ -132,11 +132,11 @@ func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idt
}
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
func (d *Driver) ApplyDiff(id string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
if d.ignoreChownErrors {
options.IgnoreChownErrors = d.ignoreChownErrors
}
return d.naiveDiff.ApplyDiff(id, parent, options)
return d.naiveDiff.ApplyDiff(id, options)
}
// CreateReadWrite creates a layer that is writable for use as a container

View File

@@ -6,6 +6,7 @@ import (
"io/fs"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/sirupsen/logrus"
@@ -91,6 +92,26 @@ type TempDir struct {
counter uint64
}
// StagedAddition is a temporary object which holds the information of where to
// put the data into and then use Commit() to move the data into the final location.
type StagedAddition struct {
// Path is the temporary path. The path is not created so caller must create
// a file or directory on it in order to use Commit(). The path is only valid
// until Commit() is called or until the TempDir instance Cleanup() method is used.
Path string
}
// Commit the staged content into its final destination by using os.Rename().
// That means the dest must be on the same fs as the root directory
// that was given to NewTempDir() and the dest must not exist yet.
// Commit must only be called once per instance returned from the
// StagedAddition() call.
func (s *StagedAddition) Commit(destination string) error {
err := os.Rename(s.Path, destination)
s.Path = "" // invalidate Path to avoid reuse
return err
}
// CleanupTempDirFunc is a function type that can be returned by operations
// which need to perform cleanup actions later.
type CleanupTempDirFunc func() error
@@ -190,6 +211,23 @@ func NewTempDir(rootDir string) (*TempDir, error) {
return td, nil
}
// StageAddition creates a new temporary path that is returned as field in the StagedAddition
// struct. The returned type StagedAddition has a Commit() function to move the content from
// the temporary location to the final one.
//
// The caller MUST call Commit() before Cleanup() is called on the TempDir, otherwise the
// staged content will be deleted and the Commit() will fail.
// If the TempDir has been cleaned up already, this method will return an error.
func (td *TempDir) StageAddition() (*StagedAddition, error) {
if td.tempDirLock == nil {
return nil, fmt.Errorf("temp dir instance not initialized or already cleaned up")
}
fileName := strconv.FormatUint(td.counter, 10) + "-addition"
tmpAddPath := filepath.Join(td.tempDirPath, fileName)
td.counter++
return &StagedAddition{Path: tmpAddPath}, nil
}
// StageDeletion moves the specified file into the instance's temporary directory.
// The temporary directory must already exist (created during NewTempDir).
// Files are renamed with a counter-based prefix (e.g., "0-filename", "1-filename") to ensure uniqueness.

408
vendor/go.podman.io/storage/layers.go generated vendored
View File

@@ -31,6 +31,7 @@ import (
"go.podman.io/storage/pkg/ioutils"
"go.podman.io/storage/pkg/lockfile"
"go.podman.io/storage/pkg/mount"
"go.podman.io/storage/pkg/pools"
"go.podman.io/storage/pkg/stringid"
"go.podman.io/storage/pkg/system"
"go.podman.io/storage/pkg/tarlog"
@@ -195,11 +196,53 @@ type DiffOptions struct {
Compression *archive.Compression
}
// stagedLayerOptions are the options passed to .create to populate a staged
// layerCreationContents are the options passed to .create to populate a staged
// layer
type stagedLayerOptions struct {
type layerCreationContents struct {
// These are used via the zstd:chunked pull paths
DiffOutput *drivers.DriverWithDifferOutput
DiffOptions *drivers.ApplyDiffWithDifferOpts
// stagedLayerExtraction is used by the normal tar layer extraction.
stagedLayerExtraction *maybeStagedLayerExtraction
}
// maybeStagedLayerExtraction is a helper to encapsulate details around extracting
// a layer potentially before we even take a look if the driver implements the
// ApplyDiffStaging interface.
// This should be initialized with layerStore.newMaybeStagedLayerExtraction()
type maybeStagedLayerExtraction struct {
// diff contains the tar archive, can be compressed, must be non nil, but can be at EOF when the content was already staged
diff io.Reader
// staging interface of the storage driver, set when the driver supports staging and nil otherwise
staging drivers.ApplyDiffStaging
// result is a placeholder for the applyDiff() result so we can pass that down the stack easily.
// If result is not nil the layer was staged successfully, if this is set stagedTarSplit and
// stagedLayer must be set as well.
result *applyDiffResult
// stagedTarSplit is the temp file where we staged the tar split file
stagedTarSplit *tempdir.StagedAddition
// stagedLayer is the temp directory where we staged the extracted layer content
stagedLayer *tempdir.StagedAddition
// cleanupFuncs contains the set of tempdir cleanup function that get executed in cleanup()
cleanupFuncs []tempdir.CleanupTempDirFunc
}
type applyDiffResult struct {
compressedDigest digest.Digest
compressedSize int64
compressionType archive.Compression
uncompressedDigest digest.Digest
uncompressedSize int64
// size of the data, including the full size of sparse files, and excluding all metadata
// It is neither compressedSize nor uncompressedSize.
// The use case for this seems unclear, it gets returned in PutLayer() but in the Podman
// stack at least that value is never used so maybe we can look into removing this.
size int64
uids []uint32
gids []uint32
}
// roLayerStore wraps a graph driver, adding the ability to refer to layers by
@@ -216,6 +259,11 @@ type roLayerStore interface {
// stopReading releases locks obtained by startReading.
stopReading()
// checkIdOrNameConflict checks if the id or names are already in use and returns an
// error in that case. As Special case if the layer already exists it returns it as
// well together with the error.
checkIdOrNameConflict(id string, names []string) (*Layer, error)
// Exists checks if a layer with the specified name or ID is known.
Exists(id string) bool
@@ -288,7 +336,7 @@ type rwLayerStore interface {
// underlying drivers do not themselves distinguish between writeable
// and read-only layers. Returns the new layer structure and the size of the
// diff which was applied to its parent to initialize its contents.
create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error)
create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, contents *layerCreationContents) (*Layer, int64, error)
// updateNames modifies names associated with a layer based on (op, names).
updateNames(id string, names []string, op updateNameOperation) error
@@ -354,6 +402,14 @@ type rwLayerStore interface {
// Dedup deduplicates layers in the store.
dedup(drivers.DedupArgs) (drivers.DedupResult, error)
// newMaybeStagedLayerExtraction initializes a new maybeStagedLayerExtraction. The caller
// must call maybeStagedLayerExtraction.cleanup() to remove any temporary files.
newMaybeStagedLayerExtraction(diff io.Reader) *maybeStagedLayerExtraction
// stageWithUnlockedStore stages the layer content without needing the store locked.
// If the driver does not support stage addition then this is a NOP and does nothing.
stageWithUnlockedStore(m *maybeStagedLayerExtraction, parent string, options *LayerOptions) error
}
type multipleLockFile struct {
@@ -1307,13 +1363,8 @@ func (r *layerStore) Status() ([][2]string, error) {
// Requires startWriting.
func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) {
if duplicateLayer, idInUse := r.byid[id]; idInUse {
return duplicateLayer, ErrDuplicateID
}
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, ErrDuplicateName
}
if layer, err := r.checkIdOrNameConflict(id, names); err != nil {
return layer, err
}
parent := ""
@@ -1378,8 +1429,25 @@ func (r *layerStore) pickStoreLocation(volatile, writeable bool) layerLocations
}
}
// checkIdOrNameConflict checks if the id or names are already in use and returns an
// error in that case. As Special case if the layer already exists it returns it as
// well together with the error.
//
// Requires startReading or startWriting.
func (r *layerStore) checkIdOrNameConflict(id string, names []string) (*Layer, error) {
if duplicateLayer, idInUse := r.byid[id]; idInUse {
return duplicateLayer, ErrDuplicateID
}
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, ErrDuplicateName
}
}
return nil, nil
}
// Requires startWriting.
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) {
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, contents *layerCreationContents) (layer *Layer, size int64, err error) {
if moreOptions == nil {
moreOptions = &LayerOptions{}
}
@@ -1400,14 +1468,8 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
_, idInUse = r.byid[id]
}
}
if duplicateLayer, idInUse := r.byid[id]; idInUse {
return duplicateLayer, -1, ErrDuplicateID
}
names = dedupeStrings(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, -1, ErrDuplicateName
}
if layer, err := r.checkIdOrNameConflict(id, names); err != nil {
return layer, -1, err
}
parent := ""
if parentLayer != nil {
@@ -1568,18 +1630,31 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
}
size = -1
if diff != nil {
if size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff); err != nil {
cleanupFailureContext = "applying layer diff"
return nil, -1, err
}
} else if slo != nil {
if err := r.applyDiffFromStagingDirectory(layer.ID, slo.DiffOutput, slo.DiffOptions); err != nil {
cleanupFailureContext = "applying staged directory diff"
return nil, -1, err
if contents != nil {
if contents.stagedLayerExtraction != nil {
if contents.stagedLayerExtraction.result != nil {
// The layer is staged, just commit it and update the metadata.
if err := contents.stagedLayerExtraction.commitLayer(r, layer.ID); err != nil {
cleanupFailureContext = "committing staged layer diff"
return nil, -1, err
}
r.applyDiffResultToLayer(layer, contents.stagedLayerExtraction.result)
} else {
// The diff was not staged, apply it now here instead.
if size, err = r.applyDiffWithOptions(layer.ID, moreOptions, contents.stagedLayerExtraction.diff); err != nil {
cleanupFailureContext = "applying layer diff"
return nil, -1, err
}
}
} else {
// staging logic for the chunked pull path
if err := r.applyDiffFromStagingDirectory(layer.ID, contents.DiffOutput, contents.DiffOptions); err != nil {
cleanupFailureContext = "applying staged directory diff"
return nil, -1, err
}
}
} else {
// applyDiffWithOptions() would have updated r.bycompressedsum
// The layer creation content above would have updated r.bycompressedsum
// and r.byuncompressedsum for us, but if we used a template
// layer, we didn't call it, so add the new layer as candidates
// for searches for layers by checksum
@@ -2398,37 +2473,118 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
return r.applyDiffWithOptions(to, nil, diff)
}
func createTarSplitFile(r *layerStore, layerID string) (*os.File, error) {
if err := os.MkdirAll(filepath.Dir(r.tspath(layerID)), 0o700); err != nil {
return nil, err
}
return os.OpenFile(r.tspath(layerID), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600)
}
// newMaybeStagedLayerExtraction initializes a new maybeStagedLayerExtraction. The caller
// must call maybeStagedLayerExtraction.cleanup() to remove any temporary files.
func (r *layerStore) newMaybeStagedLayerExtraction(diff io.Reader) *maybeStagedLayerExtraction {
m := &maybeStagedLayerExtraction{
diff: diff,
}
if d, ok := r.driver.(drivers.ApplyDiffStaging); ok {
m.staging = d
}
return m
}
func (sl *maybeStagedLayerExtraction) cleanup() error {
return tempdir.CleanupTemporaryDirectories(sl.cleanupFuncs...)
}
// stageWithUnlockedStore stages the layer content without needing the store locked.
// If the driver does not support stage addition then this is a NOP and does nothing.
// This should be done without holding the storage lock, if a parent is given the caller
// must check for existence beforehand while holding a lock.
func (r *layerStore) stageWithUnlockedStore(sl *maybeStagedLayerExtraction, parent string, layerOptions *LayerOptions) (retErr error) {
if sl.staging == nil {
return nil
}
td, err := tempdir.NewTempDir(filepath.Join(r.layerdir, tempDirPath))
if err != nil {
return err
}
sl.cleanupFuncs = append(sl.cleanupFuncs, td.Cleanup)
stagedTarSplit, err := td.StageAddition()
if err != nil {
return err
}
sl.stagedTarSplit = stagedTarSplit
f, err := os.OpenFile(stagedTarSplit.Path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600)
if err != nil {
return err
}
// make sure to check for errors on close and return that one.
defer func() {
closeErr := f.Close()
if retErr == nil {
retErr = closeErr
}
}()
result, err := applyDiff(layerOptions, sl.diff, f, func(payload io.Reader) (int64, error) {
cleanup, stagedLayer, size, err := sl.staging.StartStagingDiffToApply(parent, drivers.ApplyDiffOpts{
Diff: payload,
Mappings: idtools.NewIDMappingsFromMaps(layerOptions.UIDMap, layerOptions.GIDMap),
// MountLabel is not supported for the unlocked extraction, see the comment in (*store).PutLayer()
MountLabel: "",
})
sl.cleanupFuncs = append(sl.cleanupFuncs, cleanup)
sl.stagedLayer = stagedLayer
return size, err
})
if err != nil {
return err
}
if err := f.Sync(); err != nil {
return fmt.Errorf("sync staged tar-split file: %w", err)
}
sl.result = result
return nil
}
// commitLayer() commits the content that was staged in stageWithUnlockedStore()
//
// Requires startWriting.
func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) {
if !r.lockfile.IsReadWrite() {
return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(to)
if !ok {
return -1, ErrLayerUnknown
func (sl *maybeStagedLayerExtraction) commitLayer(r *layerStore, layerID string) error {
err := sl.stagedTarSplit.Commit(r.tspath(layerID))
if err != nil {
return err
}
return sl.staging.CommitStagedLayer(layerID, sl.stagedLayer)
}
// applyDiff can be called without holding any store locks so if the supplied
// applyDriverFunc requires locking the caller must ensure proper locking.
func applyDiff(layerOptions *LayerOptions, diff io.Reader, tarSplitFile *os.File, applyDriverFunc func(io.Reader) (int64, error)) (*applyDiffResult, error) {
header := make([]byte, 10240)
n, err := diff.Read(header)
if err != nil && err != io.EOF {
return -1, err
return nil, err
}
compression := archive.DetectCompression(header[:n])
defragmented := io.MultiReader(bytes.NewReader(header[:n]), diff)
// Decide if we need to compute digests
var compressedDigest, uncompressedDigest digest.Digest // = ""
result := applyDiffResult{}
var compressedDigester, uncompressedDigester digest.Digester // = nil
if layerOptions != nil && layerOptions.OriginalDigest != "" &&
layerOptions.OriginalDigest.Algorithm() == digest.Canonical {
compressedDigest = layerOptions.OriginalDigest
result.compressedDigest = layerOptions.OriginalDigest
} else {
compressedDigester = digest.Canonical.Digester()
}
if layerOptions != nil && layerOptions.UncompressedDigest != "" &&
layerOptions.UncompressedDigest.Algorithm() == digest.Canonical {
uncompressedDigest = layerOptions.UncompressedDigest
result.uncompressedDigest = layerOptions.UncompressedDigest
} else if compression != archive.Uncompressed {
uncompressedDigester = digest.Canonical.Digester()
}
@@ -2442,13 +2598,15 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
compressedCounter := ioutils.NewWriteCounter(compressedWriter)
defragmented = io.TeeReader(defragmented, compressedCounter)
tsdata := bytes.Buffer{}
tarSplitWriter := pools.BufioWriter32KPool.Get(tarSplitFile)
defer pools.BufioWriter32KPool.Put(tarSplitWriter)
uidLog := make(map[uint32]struct{})
gidLog := make(map[uint32]struct{})
var uncompressedCounter *ioutils.WriteCounter
size, err = func() (int64, error) { // A scope for defer
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
size, err := func() (int64, error) { // A scope for defer
compressor, err := pgzip.NewWriterLevel(tarSplitWriter, pgzip.BestSpeed)
if err != nil {
return -1, err
}
@@ -2481,62 +2639,108 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
if err != nil {
return -1, err
}
return applyDriverFunc(payload)
}()
if err != nil {
return nil, err
}
if err := tarSplitWriter.Flush(); err != nil {
return nil, fmt.Errorf("failed to flush tar-split writer buffer: %w", err)
}
if compressedDigester != nil {
result.compressedDigest = compressedDigester.Digest()
}
if uncompressedDigester != nil {
result.uncompressedDigest = uncompressedDigester.Digest()
}
if result.uncompressedDigest == "" && compression == archive.Uncompressed {
result.uncompressedDigest = result.compressedDigest
}
if layerOptions != nil && layerOptions.OriginalDigest != "" && layerOptions.OriginalSize != nil {
result.compressedSize = *layerOptions.OriginalSize
} else {
result.compressedSize = compressedCounter.Count
}
result.uncompressedSize = uncompressedCounter.Count
result.compressionType = compression
result.uids = make([]uint32, 0, len(uidLog))
for uid := range uidLog {
result.uids = append(result.uids, uid)
}
slices.Sort(result.uids)
result.gids = make([]uint32, 0, len(gidLog))
for gid := range gidLog {
result.gids = append(result.gids, gid)
}
slices.Sort(result.gids)
result.size = size
return &result, err
}
// Requires startWriting.
func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (_ int64, retErr error) {
if !r.lockfile.IsReadWrite() {
return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(to)
if !ok {
return -1, ErrLayerUnknown
}
tarSplitFile, err := createTarSplitFile(r, layer.ID)
if err != nil {
return -1, err
}
// make sure to check for errors on close and return that one.
defer func() {
closeErr := tarSplitFile.Close()
if retErr == nil {
retErr = closeErr
}
}()
result, err := applyDiff(layerOptions, diff, tarSplitFile, func(payload io.Reader) (int64, error) {
options := drivers.ApplyDiffOpts{
Diff: payload,
Mappings: r.layerMappings(layer),
MountLabel: layer.MountLabel,
}
size, err := r.driver.ApplyDiff(layer.ID, layer.Parent, options)
if err != nil {
return -1, err
}
return size, err
}()
return r.driver.ApplyDiff(layer.ID, options)
})
if err != nil {
return -1, err
}
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil {
return -1, err
}
if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil {
return -1, err
}
if compressedDigester != nil {
compressedDigest = compressedDigester.Digest()
}
if uncompressedDigester != nil {
uncompressedDigest = uncompressedDigester.Digest()
}
if uncompressedDigest == "" && compression == archive.Uncompressed {
uncompressedDigest = compressedDigest
if err := tarSplitFile.Sync(); err != nil {
return -1, fmt.Errorf("sync tar-split file: %w", err)
}
updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID)
layer.CompressedDigest = compressedDigest
if layerOptions != nil && layerOptions.OriginalDigest != "" && layerOptions.OriginalSize != nil {
layer.CompressedSize = *layerOptions.OriginalSize
} else {
layer.CompressedSize = compressedCounter.Count
}
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID)
layer.UncompressedDigest = uncompressedDigest
layer.UncompressedSize = uncompressedCounter.Count
layer.CompressionType = compression
layer.UIDs = make([]uint32, 0, len(uidLog))
for uid := range uidLog {
layer.UIDs = append(layer.UIDs, uid)
}
slices.Sort(layer.UIDs)
layer.GIDs = make([]uint32, 0, len(gidLog))
for gid := range gidLog {
layer.GIDs = append(layer.GIDs, gid)
}
slices.Sort(layer.GIDs)
r.applyDiffResultToLayer(layer, result)
err = r.saveFor(layer)
return size, err
return result.size, err
}
// Requires startWriting.
func (r *layerStore) applyDiffResultToLayer(layer *Layer, result *applyDiffResult) {
updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, result.compressedDigest, layer.ID)
layer.CompressedDigest = result.compressedDigest
layer.CompressedSize = result.compressedSize
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, result.uncompressedDigest, layer.ID)
layer.UncompressedDigest = result.uncompressedDigest
layer.UncompressedSize = result.uncompressedSize
layer.CompressionType = result.compressionType
layer.UIDs = result.uids
layer.GIDs = result.gids
}
// Requires (startReading or?) startWriting.
@@ -2553,7 +2757,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) {
}
// Requires startWriting.
func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) (retErr error) {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
return ErrNotSupported
@@ -2597,10 +2801,23 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
}
if diffOutput.TarSplit != nil {
tsdata := bytes.Buffer{}
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
tarSplitFile, err := createTarSplitFile(r, layer.ID)
if err != nil {
compressor = pgzip.NewWriter(&tsdata)
return err
}
// make sure to check for errors on close and return that one.
defer func() {
closeErr := tarSplitFile.Close()
if retErr == nil {
retErr = closeErr
}
}()
tarSplitWriter := pools.BufioWriter32KPool.Get(tarSplitFile)
defer pools.BufioWriter32KPool.Put(tarSplitWriter)
compressor, err := pgzip.NewWriterLevel(tarSplitWriter, pgzip.BestSpeed)
if err != nil {
compressor = pgzip.NewWriter(tarSplitWriter)
}
if _, err := diffOutput.TarSplit.Seek(0, io.SeekStart); err != nil {
return err
@@ -2614,11 +2831,12 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
return err
}
compressor.Close()
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil {
return err
if err := tarSplitWriter.Flush(); err != nil {
return fmt.Errorf("failed to flush tar-split writer buffer: %w", err)
}
if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil {
return err
if err := tarSplitFile.Sync(); err != nil {
return fmt.Errorf("sync tar-split file: %w", err)
}
}
for k, v := range diffOutput.BigData {

View File

@@ -20,6 +20,12 @@ struct subid_range get_range(struct subid_range *ranges, int i)
return ranges[i];
}
// helper for stderr to avoid referencing C.stderr from Go code,
// which breaks cgo on musl due to stderr being declared as FILE *const
static FILE *subid_stderr(void) {
return stderr;
}
#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
# define subid_init libsubid_init
# define subid_get_uid_ranges get_subuid_ranges
@@ -44,7 +50,7 @@ func readSubid(username string, isUser bool) (ranges, error) {
}
onceInit.Do(func() {
C.subid_init(C.CString("storage"), C.stderr)
C.subid_init(C.CString("storage"), C.subid_stderr())
})
cUsername := C.CString(username)

View File

@@ -420,7 +420,10 @@ func (l *LockFile) tryLock(lType rawfilelock.LockType) error {
if !success {
return fmt.Errorf("resource temporarily unavailable")
}
l.stateMutex.Lock()
if !l.stateMutex.TryLock() {
rwMutexUnlocker()
return fmt.Errorf("resource temporarily unavailable")
}
defer l.stateMutex.Unlock()
if l.counter == 0 {
// If we're the first reference on the lock, we need to open the file again.

175
vendor/go.podman.io/storage/store.go generated vendored
View File

@@ -1449,12 +1449,48 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
return s.graphDriver.SupportsShifting(uidmap, gidmap)
}
// On entry:
// - rlstore must be locked for reading or writing
// - rlstores MUST NOT be locked
// Returns an extra unlock function to unlock any potentially read locked rlstores by this function.
// The unlock function is always set and thus must always be called.
func getParentLayer(rlstore roLayerStore, rlstores []roLayerStore, parent string) (*Layer, func(), error) {
// function we return to the caller so the caller gets the right stores locked and can unlock at the proper time themselves
var lockedLayerStores []roLayerStore
unlock := func() {
for _, i := range lockedLayerStores {
i.stopReading()
}
}
for _, l := range append([]roLayerStore{rlstore}, rlstores...) {
lstore := l
if lstore != rlstore {
if err := lstore.startReading(); err != nil {
return nil, unlock, err
}
lockedLayerStores = append(lockedLayerStores, lstore)
}
if l, err := lstore.Get(parent); err == nil && l != nil {
return l, unlock, nil
}
}
return nil, unlock, ErrLayerUnknown
}
// On entry:
// - rlstore must be locked for writing
// - rlstores MUST NOT be locked
func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) {
//
// Returns the new copied LayerOptions with mappings set, the parent Layer and
// an extra unlock function to unlock any potentially read locked rlstores by this function.
// The unlock function is always set and thus must always be called.
func populateLayerOptions(s *store, rlstore rwLayerStore, rlstores []roLayerStore, parent string, lOptions *LayerOptions) (*LayerOptions, *Layer, func(), error) {
// WARNING: Update also the freshLayer checks in store.PutLayer if adding more logic here.
var parentLayer *Layer
var options LayerOptions
// make sure we always return a valid func instead of nil so the caller can call it without checking
unlock := func() {}
if lOptions != nil {
options = *lOptions
options.BigData = slices.Clone(lOptions.BigData)
@@ -1469,53 +1505,32 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
uidMap := options.UIDMap
gidMap := options.GIDMap
if parent != "" {
var ilayer *Layer
for _, l := range append([]roLayerStore{rlstore}, rlstores...) {
lstore := l
if lstore != rlstore {
if err := lstore.startReading(); err != nil {
return nil, -1, err
}
defer lstore.stopReading()
}
if l, err := lstore.Get(parent); err == nil && l != nil {
ilayer = l
parent = ilayer.ID
break
}
var err error
parentLayer, unlock, err = getParentLayer(rlstore, rlstores, parent)
if err != nil {
return nil, nil, unlock, err
}
if ilayer == nil {
return nil, -1, ErrLayerUnknown
}
parentLayer = ilayer
if err := s.containerStore.startWriting(); err != nil {
return nil, -1, err
return nil, nil, unlock, err
}
defer s.containerStore.stopWriting()
containers, err := s.containerStore.Containers()
if err != nil {
return nil, -1, err
return nil, nil, unlock, err
}
for _, container := range containers {
if container.LayerID == parent {
return nil, -1, ErrParentIsContainer
return nil, nil, unlock, ErrParentIsContainer
}
}
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = ilayer.UIDMap
uidMap = parentLayer.UIDMap
}
if !options.HostGIDMapping && len(options.GIDMap) == 0 {
gidMap = ilayer.GIDMap
gidMap = parentLayer.GIDMap
}
} else {
// FIXME? Its unclear why we are holding containerStore locked here at all
// (and because we are not modifying it, why it is a write lock, not a read lock).
if err := s.containerStore.startWriting(); err != nil {
return nil, -1, err
}
defer s.containerStore.stopWriting()
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = s.uidMap
}
@@ -1533,7 +1548,7 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
GIDMap: copySlicePreferringNil(gidMap),
}
}
return rlstore.create(id, parentLayer, names, mountLabel, nil, &options, writeable, diff, slo)
return &options, parentLayer, unlock, nil
}
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
@@ -1541,11 +1556,92 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
if err != nil {
return nil, -1, err
}
var (
contents *layerCreationContents
options *LayerOptions
parentLayer *Layer
)
if diff != nil {
m := rlstore.newMaybeStagedLayerExtraction(diff)
defer func() {
if err := m.cleanup(); err != nil {
logrus.Errorf("Error cleaning up temporary directories: %v", err)
}
}()
// driver can do unlocked staging so do that without holding the layer lock
// Special case we only support it when no mount label is used. c/image doesn't set it for layers
// and the overlay driver doesn't use it for extract today so it would be safe even when set but
// that is not exactly obvious and if someone would implement the ApplyDiffStaging interface for
// another driver that may be no longer true. So for now simply fall back to the locked extract path
// to ensure we don't cause any weird issues here.
if m.staging != nil && mountLabel == "" {
// func so we have a scope for defer, we don't want to hold the lock for stageWithUnlockedStore()
layer, err := func() (*Layer, error) {
if err := rlstore.startWriting(); err != nil {
return nil, err
}
defer rlstore.stopWriting()
if layer, err := rlstore.checkIdOrNameConflict(id, names); err != nil {
return layer, err
}
var unlockLayerStores func()
options, parentLayer, unlockLayerStores, err = populateLayerOptions(s, rlstore, rlstores, parent, lOptions)
unlockLayerStores()
return nil, err
}()
if err != nil {
return layer, -1, err
}
// make sure to use the resolved full ID if there is a parent
if parentLayer != nil {
parent = parentLayer.ID
}
if err := rlstore.stageWithUnlockedStore(m, parent, options); err != nil {
return nil, -1, err
}
}
contents = &layerCreationContents{
stagedLayerExtraction: m,
}
}
if err := rlstore.startWriting(); err != nil {
return nil, -1, err
}
defer rlstore.stopWriting()
return s.putLayer(rlstore, rlstores, id, parent, names, mountLabel, writeable, lOptions, diff, nil)
if options == nil {
var unlockLayerStores func()
options, parentLayer, unlockLayerStores, err = populateLayerOptions(s, rlstore, rlstores, parent, lOptions)
defer unlockLayerStores()
if err != nil {
return nil, -1, err
}
} else if parent != "" {
// We used the staged extraction without holding the lock.
// Check again that the parent layer is still valid and exists.
freshLayer, unlockLayerStores, err := getParentLayer(rlstore, rlstores, parent)
defer unlockLayerStores()
if err != nil {
return nil, -1, err
}
// In populateLayerOptions() we get the ID mappings in order to extract correctly, ensure the freshly
// looked up parent Layer still has the same mappings to prevent silent UID/GID corruption.
if !slices.Equal(freshLayer.UIDMap, parentLayer.UIDMap) || !slices.Equal(freshLayer.GIDMap, parentLayer.GIDMap) {
// Fatal problem. Mappings changed so the parent must be considered different now.
// Since we consumed the diff there is no we to recover, return error to caller. The caller would need to retry.
// How likely is that and would need to return a special error so c/image could do the retries?
return nil, -1, fmt.Errorf("error during staged layer apply, parent layer %q changed id mappings while the content was extracted, must retry layer creation", parent)
}
}
return rlstore.create(id, parentLayer, names, mountLabel, nil, options, writeable, contents)
}
func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
@@ -1753,7 +1849,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst
}
}
layerOptions.TemplateLayer = layer.ID
mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil)
if err != nil {
return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
}
@@ -1924,7 +2020,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
options.Flags[mountLabelFlag] = mountLabel
}
clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil, nil)
clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil)
if err != nil {
return nil, err
}
@@ -3182,11 +3278,16 @@ func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) {
// if the layer doesn't exist yet, try to create it.
slo := stagedLayerOptions{
contents := layerCreationContents{
DiffOutput: args.DiffOutput,
DiffOptions: args.DiffOptions,
}
layer, _, err = s.putLayer(rlstore, rlstores, args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo)
options, parentLayer, unlockLayerStores, err := populateLayerOptions(s, rlstore, rlstores, args.ParentLayer, args.LayerOptions)
defer unlockLayerStores()
if err != nil {
return nil, err
}
layer, _, err = rlstore.create(args.ID, parentLayer, args.Names, args.MountLabel, nil, options, args.Writeable, &contents)
return layer, err
}

View File

@@ -197,7 +197,7 @@ outer:
// We need to create a temporary layer so we can mount it and lookup the
// maximum IDs used.
clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil, nil)
clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil)
if err != nil {
return 0, err
}

6
vendor/modules.txt vendored
View File

@@ -351,7 +351,7 @@ go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/internal/telemetry
go.opentelemetry.io/otel/trace/noop
# go.podman.io/common v0.66.0
# go.podman.io/common v0.67.0
## explicit; go 1.24.2
go.podman.io/common/pkg/auth
go.podman.io/common/pkg/capabilities
@@ -361,7 +361,7 @@ go.podman.io/common/pkg/password
go.podman.io/common/pkg/report
go.podman.io/common/pkg/report/camelcase
go.podman.io/common/pkg/retry
# go.podman.io/image/v5 v5.38.0
# go.podman.io/image/v5 v5.39.1
## explicit; go 1.24.0
go.podman.io/image/v5/copy
go.podman.io/image/v5/directory
@@ -433,7 +433,7 @@ go.podman.io/image/v5/transports
go.podman.io/image/v5/transports/alltransports
go.podman.io/image/v5/types
go.podman.io/image/v5/version
# go.podman.io/storage v1.61.0
# go.podman.io/storage v1.62.0
## explicit; go 1.24.0
go.podman.io/storage
go.podman.io/storage/drivers

View File

@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
const Version = "1.21.0-dev"
const Version = "1.22.0"