Make docker image repositories actually work

Several changes are included:
- Expose EnsureDir in helpers, and call it in the Docker client. In the
  other client implementations that was handled by CopyFile behind the
  scenes, but that's not the case here.
- Add an accessor to Artifact for creating Artifact objects from plain
  files (see the sketch below). This is handy when we have to carry
  downloaded package content over into the cache when artifacts are
  already verified.
- Fix various issues around the imagePush flag, so trees are now pushed
  forcefully each time.
- Take the real artifact name into account when pushing single files as
  docker images. This behavior should be changed eventually, because
  single files which aren't repository packages now end up in their own
  docker image, whereas we should have a single image that bundles all
  the required metadata together.
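
For context, a condensed sketch of the new publishing flow for the tree tarball, using the names from the repository diff below (the surrounding genDockerRepo variables imagePrefix, a and r come from the existing code, and the error messages are paraphrased):

    // Repackage the generated tree tarball into its own single-file archive
    // and publish it as a docker image tagged after the artifact file name.
    treeArchive, err := compiler.CreateArtifactForFile(a.GetPath())
    if err != nil {
        return errors.Wrap(err, "failed creating archive for "+a.GetPath())
    }
    imageTree := fmt.Sprintf("%s:%s", imagePrefix, a.GetFileName())
    if opts, err := treeArchive.GenerateFinalImage(imageTree, r.GetBackend(), false); err != nil {
        return errors.Wrap(err, "failed generating image "+opts.ImageName)
    }
    if r.PushImages {
        // the trailing 'true' forces the push, so the tag is refreshed on every publish
        if err := pushImage(r.GetBackend(), imageTree, true); err != nil {
            return errors.Wrapf(err, "failed while pushing image: '%s'", imageTree)
        }
    }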
Ettore Di Giacinto
2021-01-20 12:31:36 +01:00
parent b27b146b45
commit 91ea2ed99f
7 changed files with 75 additions and 202 deletions

View File

@@ -237,6 +237,10 @@ func (a *PackageArtifact) GetPath() string {
   return a.Path
 }
+func (a *PackageArtifact) GetFileName() string {
+  return path.Base(a.GetPath())
+}
 func (a *PackageArtifact) SetPath(p string) {
   a.Path = p
 }
@@ -247,6 +251,29 @@ FROM scratch
 COPY * /`
 }
+// CreateArtifactForFile creates a new artifact from the given file
+func CreateArtifactForFile(s string, opts ...func(*PackageArtifact)) (*PackageArtifact, error) {
+  fileName := path.Base(s)
+  archive, err := LuetCfg.GetSystem().TempDir("archive")
+  if err != nil {
+    return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
+  }
+  defer os.RemoveAll(archive) // clean up
+  helpers.CopyFile(s, filepath.Join(archive, fileName))
+  artifact, err := LuetCfg.GetSystem().TempDir("artifact")
+  if err != nil {
+    return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
+  }
+  a := &PackageArtifact{Path: filepath.Join(artifact, fileName)}
+  for _, o := range opts {
+    o(a)
+  }
+  return a, a.Compress(archive, 1)
+}
 // GenerateFinalImage takes an artifact and builds a Docker image with its content
 func (a *PackageArtifact) GenerateFinalImage(imageName string, b CompilerBackend, keepPerms bool) (CompilerBackendOptions, error) {
   builderOpts := CompilerBackendOptions{}
@@ -281,7 +308,9 @@ func (a *PackageArtifact) GenerateFinalImage(imageName string, b CompilerBackend
   return builderOpts, b.BuildImage(builderOpts)
 }
-// Compress Archives and compress (TODO) to the artifact path
+// Compress is responsible to archive and compress to the artifact Path.
+// It accepts a source path, which is the content to be archived/compressed
+// and a concurrency parameter.
 func (a *PackageArtifact) Compress(src string, concurrency int) error {
   switch a.CompressionType {
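
CreateArtifactForFile also accepts functional options, so callers can tweak the artifact before it gets compressed. A hypothetical example (the compiler.GZip constant and the file path are illustrative assumptions; only the CompressionType field is actually referenced by the Compress switch above):

    // Illustrative only: wrap a single file into a gzip-compressed artifact.
    a, err := compiler.CreateArtifactForFile("/tmp/repository.yaml",
        func(pa *compiler.PackageArtifact) {
            pa.CompressionType = compiler.GZip // assumed compression constant
        })
    if err != nil {
        return err
    }
    fmt.Println("archive written to", a.GetPath(), "named", a.GetFileName())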

View File

@@ -111,6 +111,7 @@ type Artifact interface {
   SetFiles(f []string)
   GetFiles() []string
+  GetFileName() string
   GetChecksums() Checksums
   SetChecksums(c Checksums)

View File

@@ -117,14 +117,15 @@ func Read(file string) (string, error) {
   return string(dat), nil
 }
-func ensureDir(fileName string) {
+func EnsureDir(fileName string) error {
   dirName := filepath.Dir(fileName)
   if _, serr := os.Stat(dirName); serr != nil {
     merr := os.MkdirAll(dirName, os.ModePerm) // FIXME: It should preserve permissions from src to dst instead
     if merr != nil {
-      panic(merr)
+      return merr
     }
   }
+  return nil
 }
 // CopyFile copies the contents of the file named src to the file named
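
The rename and the new error return exist so callers can react to a failed directory creation instead of panicking. A minimal sketch of how the Docker client is expected to use it, simplified from the hunk further down:

    cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName)
    // EnsureDir creates the parent directory of cacheFile if it is missing.
    if err := helpers.EnsureDir(cacheFile); err != nil {
        return nil, errors.Wrapf(err, "could not create cache folder for %s", cacheFile)
    }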

View File

@@ -21,20 +21,10 @@ import (
"path" "path"
"path/filepath" "path/filepath"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/docker/docker/pkg/archive"
"github.com/docker/go-units" "github.com/docker/go-units"
"github.com/moby/buildkit/session"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/genuinetools/img/client" imgworker "github.com/mudler/luet/pkg/installer/client/imgworker"
"github.com/genuinetools/img/types"
"github.com/moby/buildkit/util/appcontext"
"github.com/mudler/luet/pkg/compiler" "github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/config" "github.com/mudler/luet/pkg/config"
@@ -57,193 +47,24 @@ func downloadAndExtractDockerImage(image, dest string) error {
   }
   defer os.RemoveAll(temp)
   Debug("Temporary directory", temp)
-  c, err := client.New(temp, types.NativeBackend, nil)
+  c, err := imgworker.New(temp)
   if err != nil {
     return errors.Wrapf(err, "failed creating client")
   }
   defer c.Close()
-  // Slightly adapted from https://github.com/genuinetools/img/blob/54d0ca981c1260546d43961a538550eef55c87cf/pull.go
+  // FROM Slightly adapted from genuinetools/img https://github.com/genuinetools/img/blob/54d0ca981c1260546d43961a538550eef55c87cf/pull.go
-  var listedImage *client.ListedImage
-  // Create the context.
-  ctx := appcontext.Context()
-  sess, sessDialer, err := c.Session(ctx)
-  if err != nil {
-    return errors.Wrapf(err, "failed creating Session")
-  }
-  ctx = session.NewContext(ctx, sess.ID())
-  ctx = namespaces.WithNamespace(ctx, "buildkit")
-  Debug("Starting session")
-  go func() {
-    sess.Run(ctx, sessDialer)
-  }()
-  defer func() {
-    Debug("Closing session")
-    sess.Close()
-    Debug("Session closed")
-  }()
   Debug("Pulling image", image)
-  listedImage, err = c.Pull(ctx, image)
+  listedImage, err := c.Pull(image)
   if err != nil {
     return errors.Wrapf(err, "failed listing images")
   }
   Debug("Pulled:", listedImage.Target.Digest)
   Debug("Size:", units.BytesSize(float64(listedImage.ContentSize)))
   Debug("Unpacking", image, "to", dest)
   os.RemoveAll(dest)
+  return c.Unpack(image, dest)
-  // XXX: Unpacking stalls. See why calling img works, and with luet doesn't. Shall we unpack by reimplementing the client here?
-  // err = c.Unpack(ctx, image, dest)
-  // Debug("Finished Unpacking")
-  // opt, err := c.createWorkerOpt(false)
-  // if err != nil {
-  //   return fmt.Errorf("creating worker opt failed: %v", err)
-  // }
-  img, err := opt.ImageStore.Get(ctx, image)
-  if err != nil {
-    return fmt.Errorf("getting image %s from image store failed: %v", image, err)
-  }
-  manifest, err := images.Manifest(ctx, opt.ContentStore, img.Target, platforms.Default())
-  if err != nil {
-    return fmt.Errorf("getting image manifest failed: %v", err)
-  }
-  for _, desc := range manifest.Layers {
-    logrus.Debugf("Unpacking layer %s", desc.Digest.String())
-    // Read the blob from the content store.
-    layer, err := opt.ContentStore.ReaderAt(ctx, desc)
-    if err != nil {
-      return fmt.Errorf("getting reader for digest %s failed: %v", desc.Digest.String(), err)
-    }
-    // Unpack the tarfile to the rootfs path.
-    // FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions
-    if err := archive.Untar(content.NewReader(layer), dest, &archive.TarOptions{
-      NoLchown: true,
-    }); err != nil {
-      return fmt.Errorf("extracting tar for %s to directory %s failed: %v", desc.Digest.String(), dest, err)
-    }
-  }
-  return errors.Wrapf(err, "failed unpacking images")
-  // eg, ctx := errgroup.WithContext(ctx)
-  // eg.Go(func() error {
-  //   return sess.Run(ctx, sessDialer)
-  // })
-  // eg.Go(func() error {
-  //   defer sess.Close()
-  //   var err error
-  //   listedImage, err = c.Pull(ctx, image)
-  //   if err != nil {
-  //     return errors.Wrapf(err, "failed listing images")
-  //   }
-  //   os.RemoveAll(dest)
-  //   return errors.Wrapf(c.Unpack(ctx, image, dest), "failed unpacking images")
-  // })
-  // if err := eg.Wait(); err != nil {
-  //   return err
-  // }
-  //Debug("Pulled:", listedImage.Target.Digest)
-  // Debug("Size:", units.BytesSize(float64(listedImage.ContentSize)))
-  // Get the identifier for the image.
-  // id, err := source.NewImageIdentifier(image)
-  // if err != nil {
-  //   return err
-  // }
-  // Debug("Image identifier", id.ID())
-  // named, err := reference.ParseNormalizedNamed(image)
-  // if err != nil {
-  //   return fmt.Errorf("parsing image name %q failed: %v", image, err)
-  // }
-  // // Add the latest lag if they did not provide one.
-  // named = reference.TagNameOnly(named)
-  // image = named.String()
-  // ctx := appcontext.Context()
-  // sess, sessDialer, err := c.Session(ctx)
-  // if err != nil {
-  //   return err
-  // }
-  // ctx = session.NewContext(ctx, sess.ID())
-  // ctx = namespaces.WithNamespace(ctx, "buildkit")
-  // snapshotRoot := filepath.Join(temp, "snapshots")
-  // XXX: We force native backend. Our docker images will have just one layer as they are created from scratch.
-  // No need to depend on FUSE/overlayfs available in the system
-  // s, err := native.NewSnapshotter(snapshotRoot)
-  // contentStore, err := local.NewStore(filepath.Join(temp, "content"))
-  // if err != nil {
-  //   return err
-  // }
-  // // Open the bolt database for metadata.
-  // db, err := bolt.Open(filepath.Join(temp, "containerdmeta.db"), 0644, nil)
-  // if err != nil {
-  //   return err
-  // }
-  // // Create the new database for metadata.
-  // mdb := ctdmetadata.NewDB(db, contentStore, map[string]ctdsnapshot.Snapshotter{
-  //   types.NativeBackend: s,
-  // })
-  // if err := mdb.Init(ctx); err != nil {
-  //   return err
-  // }
-  // // Create the image store.
-  // imageStore := ctdmetadata.NewImageStore(mdb)
-  // contentStore = containerdsnapshot.NewContentStore(mdb.ContentStore(), "buildkit")
-  // Debug("Getting image", image)
-  // img, err := imageStore.Get(ctx, image)
-  // if err != nil {
-  //   return fmt.Errorf("getting image %s from image store failed: %v", image, err)
-  // }
-  // manifest, err := images.Manifest(ctx, contentStore, img.Target, platforms.Default())
-  // if err != nil {
-  //   return fmt.Errorf("getting image manifest failed: %v", err)
-  // }
-  // for _, desc := range manifest.Layers {
-  //   Debug("Unpacking layer %s", desc.Digest.String())
-  //   // Read the blob from the content store.
-  //   layer, err := contentStore.ReaderAt(ctx, desc)
-  //   if err != nil {
-  //     return fmt.Errorf("getting reader for digest %s failed: %v", desc.Digest.String(), err)
-  //   }
-  //   // Unpack the tarfile to the rootfs path.
-  //   // FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions
-  //   if err := archive.Untar(content.NewReader(layer), dest, &archive.TarOptions{
-  //     NoLchown:        true,
-  //     ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
-  //   }); err != nil {
-  //     return fmt.Errorf("extracting tar for %s to directory %s failed: %v", desc.Digest.String(), dest, err)
-  //   }
-  // }
-  return nil
 }
 func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) {
@@ -253,6 +74,10 @@ func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Ar
   var resultingArtifact compiler.Artifact
   artifactName := path.Base(artifact.GetPath())
   cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName)
+  if err := helpers.EnsureDir(cacheFile); err != nil {
+    return nil, errors.Wrapf(err, "could not create cache folder %s for %s", config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), cacheFile)
+  }
   ok := false
   // TODO:
@@ -264,7 +89,10 @@ func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Ar
   // Check if file is already in cache
   if helpers.Exists(cacheFile) {
-    Info("Use artifact", artifactName, "from cache.")
+    Debug("Use artifact", artifactName, "from cache.")
+    resultingArtifact = artifact
+    resultingArtifact.SetPath(cacheFile)
+    resultingArtifact.SetChecksums(compiler.Checksums{})
   } else {
     temp, err = config.LuetCfg.GetSystem().TempDir("tree")
@@ -284,14 +112,15 @@ func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Ar
Debug("Failed download of image", imageName) Debug("Failed download of image", imageName)
continue continue
} }
Debug("\nCompressing result ", filepath.Join(temp, artifactName), "to", cacheFile) Debug("\nCompressing result ", filepath.Join(temp), "to", cacheFile)
// We discard checksum, that are checked while during pull and unpack
newart := artifact newart := artifact
// We discard checksum, that are checked while during pull and unpack
newart.SetChecksums(compiler.Checksums{})
newart.SetPath(cacheFile) newart.SetPath(cacheFile)
err = newart.Compress(temp, 1) err = newart.Compress(temp, 1)
if err != nil { if err != nil {
Debug("Failed compressing package", imageName) Error(fmt.Sprintf("Failed compressing package %s: %s", imageName, err.Error()))
continue continue
} }
resultingArtifact = newart resultingArtifact = newart
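
To summarize the new download path when an artifact is not in the cache yet: the per-file docker image is pulled and unpacked with the imgworker client (which replaces the direct genuinetools/img usage), the unpacked content is re-compressed into the cache file, and its checksums are reset since verification already happened during pull and unpack. The pull itself, condensed from the hunk above:

    c, err := imgworker.New(temp)
    if err != nil {
        return errors.Wrap(err, "failed creating client")
    }
    defer c.Close()
    listedImage, err := c.Pull(image)
    if err != nil {
        return errors.Wrap(err, "failed pulling image")
    }
    Debug("Pulled:", listedImage.Target.Digest)
    os.RemoveAll(dest)
    return c.Unpack(image, dest)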

View File

@@ -78,7 +78,7 @@ func (c *HttpClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Arti
   // Check if file is already in cache
   if helpers.Exists(cacheFile) {
-    Info("Use artifact", artifactName, "from cache.")
+    Debug("Use artifact", artifactName, "from cache.")
   } else {
     temp, err = config.LuetCfg.GetSystem().TempDir("tree")

View File

@@ -51,7 +51,7 @@ func (c *LocalClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Art
   // Check if file is already in cache
   if helpers.Exists(cacheFile) {
-    Info("Use artifact", artifactName, "from cache.")
+    Debug("Use artifact", artifactName, "from cache.")
   } else {
     ok := false
     for _, uri := range c.RepoData.Urls {

View File

@@ -693,7 +693,7 @@ func (r *LuetSystemRepository) genDockerRepo(imagePrefix string, resetRevision,
   }
   // Update the tree name with the name created by compression selected.
-  treeFile.SetFileName(path.Base(a.GetPath()))
+  treeFile.SetFileName(a.GetFileName())
   err = a.Hash()
   if err != nil {
     return errors.Wrap(err, "Failed generating checksums for tree")
@@ -701,12 +701,18 @@ func (r *LuetSystemRepository) genDockerRepo(imagePrefix string, resetRevision,
   treeFile.SetChecksums(a.GetChecksums())
   r.SetRepositoryFile(REPOFILE_TREE_KEY, treeFile)
-  imageTree := fmt.Sprintf("%s:%s", imagePrefix, TREE_TARBALL)
+  // we generate a new archive containing the required compressed file.
+  // TODO: Bundle all the extra files in 1 docker image only, instead of an image for each file
+  treeArchive, err := compiler.CreateArtifactForFile(a.GetPath())
+  if err != nil {
+    return errors.Wrap(err, "Failed generating checksums for tree")
+  }
+  imageTree := fmt.Sprintf("%s:%s", imagePrefix, a.GetFileName())
   Debug("Generating image", imageTree)
-  if opts, err := a.GenerateFinalImage(imageTree, r.GetBackend(), false); err != nil {
+  if opts, err := treeArchive.GenerateFinalImage(imageTree, r.GetBackend(), false); err != nil {
     return errors.Wrap(err, "Failed generating metadata tree "+opts.ImageName)
   }
-  if r.ForcePush {
+  if r.PushImages {
     if err := pushImage(r.GetBackend(), imageTree, true); err != nil {
       return errors.Wrapf(err, "Failed while pushing image: '%s'", imageTree)
     }
@@ -749,7 +755,7 @@ func (r *LuetSystemRepository) genDockerRepo(imagePrefix string, resetRevision,
return errors.Wrap(err, "Error met while archiving repository metadata") return errors.Wrap(err, "Error met while archiving repository metadata")
} }
metaFile.SetFileName(path.Base(a.GetPath())) metaFile.SetFileName(a.GetFileName())
r.SetRepositoryFile(REPOFILE_META_KEY, metaFile) r.SetRepositoryFile(REPOFILE_META_KEY, metaFile)
err = a.Hash() err = a.Hash()
if err != nil { if err != nil {
@@ -757,11 +763,18 @@ func (r *LuetSystemRepository) genDockerRepo(imagePrefix string, resetRevision,
   }
   metaFile.SetChecksums(a.GetChecksums())
-  imageMetaTree := fmt.Sprintf("%s:%s", imagePrefix, REPOSITORY_METAFILE)
-  if opts, err := a.GenerateFinalImage(imageMetaTree, r.GetBackend(), false); err != nil {
+  // Files are downloaded as-is from docker images
+  // we generate a new archive containing the required compressed file.
+  // TODO: Bundle all the extra files in 1 docker image only, instead of an image for each file
+  metaArchive, err := compiler.CreateArtifactForFile(a.GetPath())
+  if err != nil {
+    return errors.Wrap(err, "Failed generating checksums for tree")
+  }
+  imageMetaTree := fmt.Sprintf("%s:%s", imagePrefix, a.GetFileName())
+  if opts, err := metaArchive.GenerateFinalImage(imageMetaTree, r.GetBackend(), false); err != nil {
     return errors.Wrap(err, "Failed generating metadata tree"+opts.ImageName)
   }
-  if r.ForcePush {
+  if r.PushImages {
     if err := pushImage(r.GetBackend(), imageMetaTree, true); err != nil {
       return errors.Wrapf(err, "Failed while pushing image: '%s'", imageMetaTree)
     }
@@ -785,7 +798,7 @@ func (r *LuetSystemRepository) genDockerRepo(imagePrefix string, resetRevision,
   if opts, err := a.GenerateFinalImage(imageRepo, r.GetBackend(), false); err != nil {
     return errors.Wrap(err, "Failed generating repository image"+opts.ImageName)
   }
-  if r.ForcePush {
+  if r.PushImages {
     if err := pushImage(r.GetBackend(), imageRepo, true); err != nil {
       return errors.Wrapf(err, "Failed while pushing image: '%s'", imageRepo)
     }