7 Commits

Author SHA1 Message Date
Avi Deitcher
940c1b7b3b simplify cache locking (#4136)
Signed-off-by: Avi Deitcher <avi@deitcher.net>
2025-06-30 20:58:50 +03:00
Daniel S.
818bccf20f docs: Add instructions for OCI export from Docker (#4135)
Signed-off-by: Daniel Smith <daniel@razorsecure.com>
2025-06-30 16:27:54 +03:00
Avi Deitcher
50120bce2d ensure that new index does not break on missing lock file (#4134)
Signed-off-by: Avi Deitcher <avi@deitcher.net>
2025-06-27 11:01:43 +03:00
Avi Deitcher
254aefc953 check for dirty tree without update-index, which is not parallel-safe (#4133)
Signed-off-by: Avi Deitcher <avi@deitcher.net>
2025-06-26 19:53:13 +03:00
Avi Deitcher
4df360d62d Centralize safe cache writes (#4132)
* centralize all writing of the index.json to one place

Signed-off-by: Avi Deitcher <avi@deitcher.net>

* create filelock utility

Signed-off-by: Avi Deitcher <avi@deitcher.net>

* leverage file locks for cache index.json

Signed-off-by: Avi Deitcher <avi@deitcher.net>

---------

Signed-off-by: Avi Deitcher <avi@deitcher.net>
2025-06-26 19:02:49 +03:00
christoph-zededa
3f54a80824 git: synchronize update-index with a mutex (#4130)
if `pkglib.NewFromConfig` is used in parallel, it calls
```
git -C /some/directory update-index -q --refresh
```
in parallel.

But `git` does not like this and exits with 128.

This can easily be reproduced with:
```
git -C /some/dir update-index -q --refresh & \
git -C /some/dir update-index -q --refresh
```
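A minimal sketch of the mutex approach this change takes (the `gitMu` variable and `runGitUpdateIndex` helper below are illustrative, not the actual code):
```go
package pkglib

import (
	"os/exec"
	"sync"
)

// gitMu serializes git invocations that fail when run concurrently,
// such as `git update-index -q --refresh`.
var gitMu sync.Mutex

// runGitUpdateIndex refreshes the index for dir, one caller at a time.
func runGitUpdateIndex(dir string) error {
	gitMu.Lock()
	defer gitMu.Unlock()
	return exec.Command("git", "-C", dir, "update-index", "-q", "--refresh").Run()
}
```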

Signed-off-by: Christoph Ostarek <christoph@zededa.com>
2025-06-25 21:59:47 +03:00
Avi Deitcher
d45d3e8c6e more builder race condition fixes: retry if the container could not be stopped or removed, and reference it only by container ID (#4129)
Signed-off-by: Avi Deitcher <avi@deitcher.net>
2025-06-25 21:21:51 +03:00
16 changed files with 368 additions and 107 deletions


@@ -59,3 +59,31 @@ is provided, it always will pull, independent of what is in the cache.
The read process is smart enough to check each blob in the local cache before downloading
it from a registry.
## Imports from local Docker instance
To import an image from your local Docker daemon into LinuxKit, you'll need to ensure the image is exported in the [OCI image format](https://docs.docker.com/build/exporters/oci-docker/), which LinuxKit understands.
This requires using a `docker-container` [buildx driver](https://docs.docker.com/build/builders/drivers/docker-container/), rather than the default.
Set it up like so:
```shell
docker buildx create --driver docker-container --driver-opt image=moby/buildkit:latest --name=ocibuilder --bootstrap
```
Then build and export your image using the OCI format:
```shell
docker buildx build --builder=ocibuilder --output type=oci,name=foo . > foo.tar
```
You can now import it into LinuxKit with:
```shell
linuxkit cache import foo.tar
```
Note that this process, as described, will only produce images for the platform/architecture you're currently on. Producing multi-platform images requires extra docker build flags and either an external builder or QEMU support; see [here](https://docs.docker.com/build/building/multi-platform/).
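For example, assuming QEMU binfmt emulation is available and reusing the `ocibuilder` created above, a multi-platform build might look like:
```shell
docker buildx build --builder=ocibuilder --platform linux/amd64,linux/arm64 --output type=oci,name=foo . > foo.tar
```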
This workaround is only necessary when working with the local Docker daemon. If you're pulling from Docker Hub or another registry, you don't need to do any of this.

src/cmd/linuxkit/cache/const.go vendored Normal file

@@ -0,0 +1,5 @@
package cache
const (
lockfile = ".lock"
)


@@ -2,20 +2,44 @@ package cache
import (
"fmt"
"os"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/layout"
)
// Get get or initialize the cache
func Get(cache string) (layout.Path, error) {
func (p *Provider) Get(cache string) (layout.Path, error) {
// ensure the dir exists
if err := os.MkdirAll(cache, os.ModePerm); err != nil {
return "", fmt.Errorf("unable to create cache directory %s: %v", cache, err)
}
// first try to read the layout from the path
// if it exists, we can use it
// if it does not exist, we will initialize it
//
// do not lock for first read, because we do not need the lock except for initialization
// and future writes, so why slow down reads?
l, err := layout.FromPath(cache)
// initialize the cache path if needed
p, err := layout.FromPath(cache)
if err != nil {
p, err = layout.Write(cache, empty.Index)
if err := p.Lock(); err != nil {
return "", fmt.Errorf("unable to lock cache %s: %v", cache, err)
}
defer p.Unlock()
// after lock, try to read the layout again
// in case another process initialized it while we were waiting for the lock
// if it still does not exist, we will initialize it
l, err = layout.FromPath(cache)
if err != nil {
return p, fmt.Errorf("could not initialize cache at path %s: %v", cache, err)
l, err = layout.Write(cache, empty.Index)
if err != nil {
return l, fmt.Errorf("could not initialize cache at path %s: %v", cache, err)
}
}
}
return p, nil
return l, nil
}
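Disentangling the old and new lines above, the new `Get` reads roughly as follows (a reconstruction from the diff, not necessarily the merged code verbatim):
```go
// Get gets or initializes the cache layout at the given directory.
func (p *Provider) Get(cache string) (layout.Path, error) {
	// ensure the dir exists
	if err := os.MkdirAll(cache, os.ModePerm); err != nil {
		return "", fmt.Errorf("unable to create cache directory %s: %v", cache, err)
	}
	// Unlocked first read: the lock is only needed for initialization and later writes.
	l, err := layout.FromPath(cache)
	if err != nil {
		if err := p.Lock(); err != nil {
			return "", fmt.Errorf("unable to lock cache %s: %v", cache, err)
		}
		defer p.Unlock()
		// Re-read after taking the lock, in case another process initialized it meanwhile.
		l, err = layout.FromPath(cache)
		if err != nil {
			l, err = layout.Write(cache, empty.Index)
			if err != nil {
				return l, fmt.Errorf("could not initialize cache at path %s: %v", cache, err)
			}
		}
	}
	return l, nil
}
```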


@@ -1,20 +1,30 @@
package cache
import (
"fmt"
"path/filepath"
"sync"
"github.com/containerd/containerd/v2/core/content"
"github.com/containerd/containerd/v2/plugins/content/local"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/util"
log "github.com/sirupsen/logrus"
)
// Provider cache implementation of cacheProvider
type Provider struct {
cache layout.Path
store content.Store
cache layout.Path
store content.Store
dir string
lock *util.FileLock
lockMut sync.Mutex
}
// NewProvider create a new CacheProvider based in the provided directory
func NewProvider(dir string) (*Provider, error) {
p, err := Get(dir)
p := &Provider{dir: dir, lockMut: sync.Mutex{}}
layout, err := p.Get(dir)
if err != nil {
return nil, err
}
@@ -22,5 +32,39 @@ func NewProvider(dir string) (*Provider, error) {
if err != nil {
return nil, err
}
return &Provider{p, store}, nil
p.cache = layout
p.store = store
return p, nil
}
// Lock locks the cache directory to prevent concurrent access
func (p *Provider) Lock() error {
// if the lock is already set, we do not need to do anything
if p.lock != nil {
return nil
}
p.lockMut.Lock()
defer p.lockMut.Unlock()
var lockFile = filepath.Join(p.dir, lockfile)
lock, err := util.Lock(lockFile)
if err != nil {
return fmt.Errorf("unable to retrieve cache lock %s: %v", lockFile, err)
}
p.lock = lock
return nil
}
// Unlock releases the lock on the cache directory
func (p *Provider) Unlock() {
p.lockMut.Lock()
defer p.lockMut.Unlock()
// if the lock is not set, we do not need to do anything
if p.lock == nil {
return
}
var lockFile = filepath.Join(p.dir, lockfile)
if err := p.lock.Unlock(); err != nil {
log.Errorf("unable to close lock for cache %s: %v", lockFile, err)
}
p.lock = nil
}


@@ -9,8 +9,6 @@ import (
"github.com/google/go-containerregistry/pkg/authn"
namepkg "github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/match"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/validate"
@@ -182,10 +180,11 @@ func (p *Provider) Pull(name string, withArchReferences bool) error {
return fmt.Errorf("error getting manifest for trusted image %s: %v", name, err)
}
// use the original image name in the annotation
annotations := map[string]string{
imagespec.AnnotationRefName: fullname,
// lock the cache so we can write to it
if err := p.Lock(); err != nil {
return fmt.Errorf("unable to lock cache for writing: %v", err)
}
defer p.Unlock()
// first attempt as an index
ii, err := desc.ImageIndex()
@@ -195,7 +194,7 @@ func (p *Provider) Pull(name string, withArchReferences bool) error {
if err := p.cache.WriteIndex(ii); err != nil {
return fmt.Errorf("unable to write index: %v", err)
}
if err := p.DescriptorWrite(&v1ref, desc.Descriptor); err != nil {
if err := p.DescriptorWrite(v1ref.String(), desc.Descriptor); err != nil {
return fmt.Errorf("unable to write index descriptor to cache: %v", err)
}
if withArchReferences {
@@ -206,11 +205,10 @@ func (p *Provider) Pull(name string, withArchReferences bool) error {
for _, m := range im.Manifests {
if m.MediaType.IsImage() && m.Platform != nil && m.Platform.Architecture != unknown && m.Platform.OS != unknown {
archSpecific := fmt.Sprintf("%s-%s", ref.String(), m.Platform.Architecture)
archRef, err := reference.Parse(archSpecific)
if err != nil {
if _, err := reference.Parse(archSpecific); err != nil {
return fmt.Errorf("unable to parse arch-specific reference %s: %v", archSpecific, err)
}
if err := p.DescriptorWrite(&archRef, m); err != nil {
if err := p.DescriptorWrite(archSpecific, m); err != nil {
return fmt.Errorf("unable to write index descriptor to cache: %v", err)
}
}
@@ -224,9 +222,12 @@ func (p *Provider) Pull(name string, withArchReferences bool) error {
return fmt.Errorf("provided image is neither an image nor an index: %s", name)
}
log.Debugf("ImageWrite retrieved %s is image, saving", fullname)
if err = p.cache.ReplaceImage(im, match.Name(fullname), layout.WithAnnotations(annotations)); err != nil {
if err = p.cache.WriteImage(im); err != nil {
return fmt.Errorf("unable to save image to cache: %v", err)
}
if err = p.DescriptorWrite(fullname, desc.Descriptor); err != nil {
return fmt.Errorf("unable to write updated descriptor to cache: %v", err)
}
}
return nil


@@ -12,7 +12,6 @@ import (
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/util"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
log "github.com/sirupsen/logrus"
)
@@ -131,11 +130,7 @@ func (p *Provider) Push(name, remoteName string, withArchSpecificTags, override
// it might not have existed, so we can add it locally
// use the original image name in the annotation
desc := m.DeepCopy()
if desc.Annotations == nil {
desc.Annotations = map[string]string{}
}
desc.Annotations[imagespec.AnnotationRefName] = archTag
if err := p.cache.AppendDescriptor(*desc); err != nil {
if err := p.DescriptorWrite(archTag, *desc); err != nil {
return fmt.Errorf("error appending descriptor for %s to layout index: %v", archTag, err)
}
img, err = p.cache.Image(m.Digest)


@@ -65,7 +65,7 @@ func (p *Provider) Remove(name string) error {
log.Warnf("unable to remove blob %s for %s: %v", blob, name, err)
}
}
return p.cache.RemoveDescriptors(match.Name(name))
return p.RemoveDescriptors(match.Name(name))
}
func blobsForImage(img v1.Image) ([]v1.Hash, error) {


@@ -14,7 +14,6 @@ import (
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/match"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote"
@@ -77,10 +76,11 @@ func (p *Provider) ImagePull(ref *reference.Spec, platforms []imagespec.Platform
return fmt.Errorf("error getting manifest for image %s: %v", pullImageName, err)
}
// use the original image name in the annotation
annotations := map[string]string{
imagespec.AnnotationRefName: image,
// get our lock
if err := p.Lock(); err != nil {
return fmt.Errorf("unable to lock cache for removing descriptors: %v", err)
}
defer p.Unlock()
// first attempt as an index
ii, err := desc.ImageIndex()
@@ -120,7 +120,7 @@ func (p *Provider) ImagePull(ref *reference.Spec, platforms []imagespec.Platform
if err := p.cache.WriteIndex(ii); err != nil {
return fmt.Errorf("unable to write index: %v", err)
}
if err := p.DescriptorWrite(ref, desc.Descriptor); err != nil {
if err := p.DescriptorWrite(ref.String(), desc.Descriptor); err != nil {
return fmt.Errorf("unable to write index descriptor to cache: %v", err)
}
} else {
@@ -131,8 +131,11 @@ func (p *Provider) ImagePull(ref *reference.Spec, platforms []imagespec.Platform
return fmt.Errorf("provided image is neither an image nor an index: %s", image)
}
log.Debugf("ImageWrite retrieved %s is image, saving", pullImageName)
if err = p.cache.ReplaceImage(im, match.Name(image), layout.WithAnnotations(annotations)); err != nil {
return fmt.Errorf("unable to save image to cache: %v", err)
if err := p.cache.WriteImage(im); err != nil {
return fmt.Errorf("error writing image %s to cache: %v", pullImageName, err)
}
if err := p.DescriptorWrite(image, desc.Descriptor); err != nil {
return fmt.Errorf("unable to write image descriptor to cache: %v", err)
}
}
return nil
@@ -149,6 +152,11 @@ func (p *Provider) ImageLoad(r io.Reader) ([]v1.Descriptor, error) {
index bytes.Buffer
)
log.Debugf("ImageWriteTar to cache")
// get our lock
if err := p.Lock(); err != nil {
return nil, fmt.Errorf("unable to lock cache: %v", err)
}
defer p.Unlock()
for {
header, err := tr.Next()
if err == io.EOF {
@@ -208,20 +216,11 @@ func (p *Provider) ImageLoad(r io.Reader) ([]v1.Descriptor, error) {
// each of these is either an image or an index
// either way, it gets added directly to the linuxkit cache index.
for _, desc := range im.Manifests {
if imgName, ok := desc.Annotations[images.AnnotationImageName]; ok {
// remove the old descriptor, if it exists
if err := p.cache.RemoveDescriptors(match.Name(imgName)); err != nil {
return nil, fmt.Errorf("unable to remove old descriptors for %s: %v", imgName, err)
imgName, ok := desc.Annotations[images.AnnotationImageName]
if ok {
if err := p.DescriptorWrite(imgName, desc); err != nil {
return nil, fmt.Errorf("error writing descriptor for %s: %v", imgName, err)
}
// save the image name under our proper annotation
if desc.Annotations == nil {
desc.Annotations = map[string]string{}
}
desc.Annotations[imagespec.AnnotationRefName] = imgName
}
log.Debugf("appending descriptor %#v", desc)
if err := p.cache.AppendDescriptor(desc); err != nil {
return nil, fmt.Errorf("error appending descriptor to layout index: %v", err)
}
descs = append(descs, desc)
}
@@ -256,6 +255,11 @@ func (p *Provider) IndexWrite(ref *reference.Spec, descriptors ...v1.Descriptor)
return fmt.Errorf("error parsing index: %v", err)
}
var im v1.IndexManifest
// get our lock
if err := p.Lock(); err != nil {
return fmt.Errorf("unable to lock cache: %v", err)
}
defer p.Unlock()
// do we update an existing one? Or create a new one?
if len(indexes) > 0 {
// we already had one, so update just the referenced index and return
@@ -366,9 +370,6 @@ func (p *Provider) IndexWrite(ref *reference.Spec, descriptors ...v1.Descriptor)
return fmt.Errorf("error writing new index to json: %v", err)
}
// finally update the descriptor in the root
if err := p.cache.RemoveDescriptors(match.Name(image)); err != nil {
return fmt.Errorf("unable to remove old descriptor from index.json: %v", err)
}
desc := v1.Descriptor{
MediaType: types.OCIImageIndex,
Size: size,
@@ -377,36 +378,7 @@ func (p *Provider) IndexWrite(ref *reference.Spec, descriptors ...v1.Descriptor)
imagespec.AnnotationRefName: image,
},
}
if err := p.cache.AppendDescriptor(desc); err != nil {
return fmt.Errorf("unable to append new descriptor to index.json: %v", err)
}
return nil
}
// DescriptorWrite writes a descriptor to the cache index; it validates that it has a name
// and replaces any existing one
func (p *Provider) DescriptorWrite(ref *reference.Spec, desc v1.Descriptor) error {
if ref == nil {
return errors.New("cannot write descriptor without reference name")
}
image := ref.String()
if desc.Annotations == nil {
desc.Annotations = map[string]string{}
}
desc.Annotations[imagespec.AnnotationRefName] = image
log.Debugf("writing descriptor for image %s", image)
// do we update an existing one? Or create a new one?
if err := p.cache.RemoveDescriptors(match.Name(image)); err != nil {
return fmt.Errorf("unable to remove old descriptors for %s: %v", image, err)
}
if err := p.cache.AppendDescriptor(desc); err != nil {
return fmt.Errorf("unable to append new descriptor for %s: %v", image, err)
}
return nil
return p.DescriptorWrite(ref.String(), desc)
}
func (p *Provider) ImageInCache(ref *reference.Spec, trustedRef, architecture string) (bool, error) {
@@ -496,3 +468,46 @@ func (p *Provider) ImageInRegistry(ref *reference.Spec, trustedRef, architecture
}
return false, nil
}
// DescriptorWrite writes a descriptor to the cache index; it validates that it has a name
// and replaces any existing one
func (p *Provider) DescriptorWrite(image string, desc v1.Descriptor) error {
if image == "" {
return errors.New("cannot write descriptor without reference name")
}
if desc.Annotations == nil {
desc.Annotations = map[string]string{}
}
desc.Annotations[imagespec.AnnotationRefName] = image
log.Debugf("writing descriptor for image %s", image)
// get our lock
if err := p.Lock(); err != nil {
return fmt.Errorf("unable to lock cache for writing descriptors: %v", err)
}
defer p.Unlock()
// do we update an existing one? Or create a new one?
if err := p.cache.RemoveDescriptors(match.Name(image)); err != nil {
return fmt.Errorf("unable to remove old descriptors for %s: %v", image, err)
}
if err := p.cache.AppendDescriptor(desc); err != nil {
return fmt.Errorf("unable to append new descriptor for %s: %v", image, err)
}
return nil
}
// RemoveDescriptors removes all descriptors that match the provided matcher.
// It does so in a parallel-access-safe way
func (p *Provider) RemoveDescriptors(matcher match.Matcher) error {
// get our lock
if err := p.Lock(); err != nil {
return fmt.Errorf("unable to lock cache for removing descriptors: %v", err)
}
defer p.Unlock()
return p.cache.RemoveDescriptors(matcher)
}


@@ -561,7 +561,7 @@ func (p Pkg) Build(bos ...BuildOpt) error {
if err != nil {
return err
}
if err := c.DescriptorWrite(&ref, *desc); err != nil {
if err := c.DescriptorWrite(fullRelTag, *desc); err != nil {
return err
}
if err := c.Push(fullRelTag, "", bo.manifest, true); err != nil {


@@ -407,13 +407,12 @@ func (c *cacheMocker) Push(name, remoteName string, withManifest, override bool)
return nil
}
func (c *cacheMocker) DescriptorWrite(ref *reference.Spec, desc v1.Descriptor) error {
func (c *cacheMocker) DescriptorWrite(image string, desc v1.Descriptor) error {
if !c.enabledDescriptorWrite {
return errors.New("descriptor disabled")
}
var (
image = ref.String()
im = v1.IndexManifest{
im = v1.IndexManifest{
MediaType: types.OCIImageIndex,
Manifests: []v1.Descriptor{desc},
SchemaVersion: 2,


@@ -276,6 +276,7 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(ctx context.Context, name, im
)
for range buildKitCheckRetryCount {
var b bytes.Buffer
var cid string
if err := dr.command(nil, &b, io.Discard, "--context", dockerContext, "container", "inspect", name); err == nil {
// we already have a container named "linuxkit-builder" in the provided context.
// get its state and config
@@ -284,6 +285,7 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(ctx context.Context, name, im
return nil, fmt.Errorf("unable to read results of 'container inspect %s': %v", name, err)
}
cid = containerJSON[0].ID
existingImage := containerJSON[0].Config.Image
isRunning := containerJSON[0].State.Status == "running"
@@ -326,13 +328,25 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(ctx context.Context, name, im
// if we made it here, we need to stop and remove the container, either because of a config mismatch,
// or because we received the CLI option
if stop {
if err := dr.command(nil, io.Discard, io.Discard, "--context", dockerContext, "container", "stop", name); err != nil {
return nil, fmt.Errorf("failed to stop existing container %s", name)
if cid == "" {
// we don't have a container ID, so we can't stop it
return nil, fmt.Errorf("unable to stop existing container %s, no ID found", name)
}
if err := dr.command(nil, io.Discard, io.Discard, "--context", dockerContext, "container", "stop", cid); err != nil {
// if we failed, do a retry; maybe it does not even exist anymore
time.Sleep(buildkitCheckInterval)
continue
}
}
if remove {
if err := dr.command(nil, io.Discard, io.Discard, "--context", dockerContext, "container", "rm", name); err != nil {
return nil, fmt.Errorf("failed to remove existing container %s", name)
if cid == "" {
// we don't have a container ID, so we can't remove it
return nil, fmt.Errorf("unable to remove existing container %s, no ID found", name)
}
if err := dr.command(nil, io.Discard, io.Discard, "--context", dockerContext, "container", "rm", cid); err != nil {
// if we failed, do a retry; maybe it does not even exist anymore
time.Sleep(buildkitCheckInterval)
continue
}
}
if recreate {


@@ -180,36 +180,36 @@ func (g git) commitTag(commit string) (string, error) {
}
func (g git) isDirty(pkg, commit string) (bool, error) {
// If it isn't HEAD it can't be dirty
// Only makes sense to check for HEAD
if commit != "HEAD" {
return false, nil
}
// Update cache, otherwise files which have an updated
// timestamp but no actual changes are marked as changes
// because `git diff-index` only uses the `lstat` result and
// not the actual file contents. Running `git update-index
// --refresh` updates the cache.
if err := g.command("update-index", "-q", "--refresh"); err != nil {
// 1. Check for changes in tracked files (without using update-index)
// --no-ext-diff disables any external diff tool
// --exit-code makes it return 1 if differences are found
err := g.command("diff", "--no-ext-diff", "--exit-code", "--quiet", commit, "--", pkg)
if err != nil {
if _, ok := err.(*exec.ExitError); ok {
// Changes found in tracked files
return true, nil
}
// Some actual failure
return false, err
}
// diff-index works pretty well, except that
err := g.command("diff-index", "--quiet", commit, "--", pkg)
// 2. Check for untracked files
_, err = g.commandStdout(nil, "ls-files", "--exclude-standard", "--others", "--error-unmatch", "--", pkg)
if err == nil {
// this returns an error if there are *no* untracked files, which is strange, but we can work with it
if _, err := g.commandStdout(nil, "ls-files", "--exclude-standard", "--others", "--error-unmatch", "--", pkg); err != nil {
return false, nil
}
// Untracked files found
return true, nil
}
switch err.(type) {
case *exec.ExitError:
// diff-index exits with an error if there are differences
return true, nil
default:
return false, err
if _, ok := err.(*exec.ExitError); ok {
// No untracked files — clean
return false, nil
}
// Unexpected error
return false, err
}
// goPkgVersion return a version that is compliant with go package versioning.
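For reference, the two checks performed by the new `isDirty` correspond roughly to the following shell commands (the repository and package paths are illustrative):
```shell
# Tracked changes: exits non-zero if the working tree differs from HEAD for this path.
git -C /some/dir diff --no-ext-diff --exit-code --quiet HEAD -- pkg/foo
# Untracked files: exits zero only if untracked files exist under this path.
git -C /some/dir ls-files --exclude-standard --others --error-unmatch -- pkg/foo
```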


@@ -37,7 +37,7 @@ type CacheProvider interface {
ImageLoad(r io.Reader) ([]v1.Descriptor, error)
// DescriptorWrite writes a descriptor to the cache index; it validates that it has a name
// and replaces any existing one
DescriptorWrite(ref *reference.Spec, descriptors v1.Descriptor) error
DescriptorWrite(image string, descriptors v1.Descriptor) error
// Push an image along with a multi-arch index from local cache to remote registry.
// name is the name as referenced in the local cache, remoteName is the name to give it remotely.
// If remoteName is empty, it is the same as name.


@@ -0,0 +1,9 @@
package util
import (
"os"
)
type FileLock struct {
file *os.File
}


@@ -0,0 +1,19 @@
//go:build !unix
package util
// Lock opens the file (creating it if needed) and sets an exclusive lock.
// Returns a FileLock that can later be unlocked.
func Lock(path string) (*FileLock, error) {
return &FileLock{}, nil
}
// Unlock releases the lock and closes the file.
func (l *FileLock) Unlock() error {
return nil
}
// CheckLock attempts to detect if the file is locked by another process.
func CheckLock(path string) (locked bool, holderPID int, err error) {
return false, 0, nil
}


@@ -0,0 +1,108 @@
//go:build unix
package util
import (
"fmt"
"io"
"os"
"golang.org/x/sys/unix"
)
// Lock opens the file (creating it if needed) and sets an exclusive lock.
// Returns a FileLock that can later be unlocked.
func Lock(path string) (*FileLock, error) {
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return nil, fmt.Errorf("open file: %w", err)
}
flock := unix.Flock_t{
Type: unix.F_WRLCK,
Whence: int16(io.SeekStart),
Start: 0,
Len: 0,
}
if err := unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &flock); err != nil {
_ = f.Close()
return nil, fmt.Errorf("set lock: %w", err)
}
return &FileLock{file: f}, nil
}
// Unlock releases the lock and closes the file.
func (l *FileLock) Unlock() error {
if l == nil || l.file == nil {
return fmt.Errorf("unlock: file handle is nil")
}
flock := unix.Flock_t{
Type: unix.F_UNLCK,
Whence: int16(io.SeekStart),
Start: 0,
Len: 0,
}
if err := unix.FcntlFlock(l.file.Fd(), unix.F_SETLKW, &flock); err != nil {
return fmt.Errorf("unlock: %w", err)
}
if err := l.file.Close(); err != nil {
return fmt.Errorf("close lock file: %w", err)
}
l.file = nil // Prevent further use of the file handle
return nil
}
// CheckLock attempts to detect if the file is locked by another process.
func CheckLock(path string) (locked bool, holderPID int, err error) {
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return false, 0, fmt.Errorf("open file: %w", err)
}
defer func() {
_ = f.Close()
}()
check := unix.Flock_t{
Type: unix.F_WRLCK,
Whence: int16(io.SeekStart),
Start: 0,
Len: 0,
}
if err := unix.FcntlFlock(f.Fd(), unix.F_GETLK, &check); err != nil {
return false, 0, fmt.Errorf("get lock: %w", err)
}
if check.Type == unix.F_UNLCK {
return false, 0, nil
}
return true, int(check.Pid), nil
}
// WaitUnlocked waits until the file is unlocked by another process, and uses it for reading but not writing.
func WaitUnlocked(path string) error {
f, err := os.OpenFile(path, os.O_RDONLY, 0644)
if err != nil {
return fmt.Errorf("open file: %w", err)
}
defer func() {
_ = f.Close()
}()
flock := unix.Flock_t{
Type: unix.F_RDLCK,
Whence: int16(io.SeekStart),
Start: 0,
Len: 0,
}
if err := unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &flock); err != nil {
_ = f.Close()
return fmt.Errorf("set lock: %w", err)
}
fileRef := &FileLock{file: f}
_ = fileRef.Unlock()
return nil
}