luet (mirror of https://github.com/mudler/luet.git)

Commit: use containerd to uncompress
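Summary: package unpacking and file listing stop hand-rolling zstd/gzip decompression and instead feed the archive to containerd's format-detecting DecompressStream, while a keepPerms flag is threaded through image.ExtractReader/Extract/ExtractTo and their callers so permission restoration becomes opt-in. The sketch below illustrates the decompress-and-walk pattern the change adopts; it is a minimal, illustrative example (the in-memory archive and file names are not from the repository):

package main

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"log"

	"github.com/containerd/containerd/archive/compression"
)

func main() {
	// Build a small gzip-compressed tar in memory; any format that
	// DecompressStream understands (gzip, zstd, plain tar, ...) would do.
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	tw := tar.NewWriter(gz)
	data := "hello\n"
	tw.WriteHeader(&tar.Header{Name: "usr/share/doc/hello.txt", Mode: 0o644, Size: int64(len(data))})
	io.WriteString(tw, data)
	tw.Close()
	gz.Close()

	// DecompressStream sniffs the compression format, so the caller no longer
	// needs a switch over zstd/gzip/none as the old Unpack and FileList did.
	decompressed, err := compression.DecompressStream(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer decompressed.Close()

	// Walk the tar entries from the decompressed stream.
	tr := tar.NewReader(decompressed)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(hdr.Name)
	}
}

The diff follows.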
@@ -57,14 +57,6 @@ func NewConfigProtect(annotationDir string) *ConfigProtect {
 	}
 }
 
-func (c *ConfigProtect) AddAnnotationDir(d string) {
-	c.AnnotationDir = d
-}
-
-func (c *ConfigProtect) GetAnnotationDir() string {
-	return c.AnnotationDir
-}
-
 func (c *ConfigProtect) Map(files []string, protected []ConfigProtectConfFile) {
 
 	for _, file := range files {
@@ -72,6 +72,7 @@ var _ = Describe("Delta", func() {
 			_, tmpdir, err := Extract(
 				ctx,
 				img2,
+				true,
 				ExtractDeltaFiles(ctx, diff, []string{}, []string{}),
 			)
 			Expect(err).ToNot(HaveOccurred())
@@ -87,6 +88,7 @@ var _ = Describe("Delta", func() {
 			_, tmpdir, err := Extract(
 				ctx,
 				img2,
+				true,
 				ExtractDeltaFiles(ctx, diff, []string{}, []string{"usr/local/go"}),
 			)
 			Expect(err).ToNot(HaveOccurred())
@@ -98,6 +100,7 @@ var _ = Describe("Delta", func() {
 			_, tmpdir, err := Extract(
 				ctx,
 				img2,
+				true,
 				ExtractDeltaFiles(ctx, diff, []string{"usr/local/go"}, []string{"usr/local/go/bin"}),
 			)
 			Expect(err).ToNot(HaveOccurred())
@@ -110,6 +113,7 @@ var _ = Describe("Delta", func() {
 			_, tmpdir, err := Extract(
 				ctx,
 				img2,
+				true,
 				ExtractDeltaFiles(ctx, diff, []string{"usr/local/go"}, []string{}),
 			)
 			Expect(err).ToNot(HaveOccurred())
@@ -166,7 +166,7 @@ func ExtractFiles(
 		}
 	}
 
-func ExtractReader(ctx *types.Context, reader io.ReadCloser, output string, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
+func ExtractReader(ctx *types.Context, reader io.ReadCloser, output string, keepPerms bool, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
 	defer reader.Close()
 
 	perms := map[string][]int{}
@@ -190,6 +190,8 @@ func ExtractReader(ctx *types.Context, reader io.ReadCloser, output string, filt
 		return 0, "", err
 	}
 
+	// TODO: Parametrize this
+	if keepPerms {
 	for f, p := range perms {
 		ff := filepath.Join(output, f)
 		if _, err := os.Lstat(ff); err == nil {
@@ -211,18 +213,18 @@ func ExtractReader(ctx *types.Context, reader io.ReadCloser, output string, filt
 			}
 		}
 	}
 
 	}
 	return c, output, nil
 }
 
-func Extract(ctx *types.Context, img v1.Image, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
+func Extract(ctx *types.Context, img v1.Image, keepPerms bool, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
 	tmpdiffs, err := ctx.Config.GetSystem().TempDir("extraction")
 	if err != nil {
 		return 0, "", errors.Wrap(err, "Error met while creating tempdir for rootfs")
 	}
-	return ExtractReader(ctx, mutate.Extract(img), tmpdiffs, filter, opts...)
+	return ExtractReader(ctx, mutate.Extract(img), tmpdiffs, keepPerms, filter, opts...)
 }
 
-func ExtractTo(ctx *types.Context, img v1.Image, output string, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
-	return ExtractReader(ctx, mutate.Extract(img), output, filter, opts...)
+func ExtractTo(ctx *types.Context, img v1.Image, output string, keepPerms bool, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
+	return ExtractReader(ctx, mutate.Extract(img), output, keepPerms, filter, opts...)
 }
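Note: ExtractReader now takes keepPerms and only reapplies the ownership it collected from tar headers when the flag is set. A rough, hypothetical sketch of that gating idea; recordPerm and restorePerms are illustrative names, not part of the luet API:

package main

import (
	"archive/tar"
	"fmt"
	"os"
	"path/filepath"
)

// recordPerm remembers a header's uid/gid keyed by its path, as ExtractReader
// does while the filter walks the archive.
func recordPerm(perms map[string][]int, h *tar.Header) {
	perms[h.Name] = []int{h.Uid, h.Gid}
}

// restorePerms reapplies the recorded ownership, but only when keepPerms is
// set; otherwise extracted files keep whatever ownership extraction produced.
func restorePerms(output string, perms map[string][]int, keepPerms bool) {
	if !keepPerms {
		return
	}
	for f, p := range perms {
		ff := filepath.Join(output, f)
		if _, err := os.Lstat(ff); err == nil {
			if err := os.Lchown(ff, p[0], p[1]); err != nil {
				fmt.Fprintln(os.Stderr, "chown failed:", err)
			}
		}
	}
}

func main() {
	perms := map[string][]int{}
	recordPerm(perms, &tar.Header{Name: "usr/bin/tool", Uid: 0, Gid: 0})
	restorePerms(os.TempDir(), perms, false) // keepPerms=false: nothing is touched
}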
@@ -58,6 +58,7 @@ var _ = Describe("Extract", func() {
 			_, tmpdir, err := Extract(
 				ctx,
 				img,
+				true,
 				ExtractFiles(ctx, "", []string{}, []string{}),
 			)
 			Expect(err).ToNot(HaveOccurred())
@@ -71,6 +72,7 @@ var _ = Describe("Extract", func() {
 			_, tmpdir, err := Extract(
 				ctx,
 				img,
+				true,
 				ExtractFiles(ctx, "/usr", []string{}, []string{}),
 			)
 			Expect(err).ToNot(HaveOccurred())
@@ -84,6 +86,7 @@ var _ = Describe("Extract", func() {
 			_, tmpdir, err := Extract(
 				ctx,
 				img,
+				true,
 				ExtractFiles(ctx, "/usr", []string{"bin"}, []string{"sbin"}),
 			)
 			Expect(err).ToNot(HaveOccurred())
@@ -98,6 +101,7 @@ var _ = Describe("Extract", func() {
 			_, tmpdir, err := Extract(
 				ctx,
 				img,
+				true,
 				ExtractFiles(ctx, "", []string{"/usr|/usr/bin"}, []string{"^/bin"}),
 			)
 			Expect(err).ToNot(HaveOccurred())
@@ -28,6 +28,7 @@ import (
 	"path"
 	"path/filepath"
 
+	"github.com/docker/docker/pkg/pools"
 	v1 "github.com/google/go-containerregistry/pkg/v1"
 	zstd "github.com/klauspost/compress/zstd"
 	gzip "github.com/klauspost/pgzip"
@@ -35,6 +36,7 @@ import (
 	//"strconv"
 	"strings"
 
+	containerdCompression "github.com/containerd/containerd/archive/compression"
 	config "github.com/mudler/luet/pkg/api/core/config"
 	"github.com/mudler/luet/pkg/api/core/image"
 	types "github.com/mudler/luet/pkg/api/core/types"
@@ -67,8 +69,8 @@ type PackageArtifact struct {
 	Runtime *pkg.DefaultPackage `json:"runtime,omitempty"`
 }
 
-func ImageToArtifact(ctx *types.Context, img v1.Image, t compression.Implementation, output string, filter func(h *tar.Header) (bool, error)) (*PackageArtifact, error) {
-	_, tmpdiffs, err := image.Extract(ctx, img, filter)
+func ImageToArtifact(ctx *types.Context, img v1.Image, t compression.Implementation, output string, keepPerms bool, filter func(h *tar.Header) (bool, error)) (*PackageArtifact, error) {
+	_, tmpdiffs, err := image.Extract(ctx, img, keepPerms, filter)
 	if err != nil {
 		return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
 	}
@@ -385,6 +387,85 @@ func hashFileContent(path string) (string, error) {
 	return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
 }
 
+func replaceFileTarWrapper(dst string, inputTarStream io.ReadCloser, mods []string, fn func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)) io.ReadCloser {
+	pipeReader, pipeWriter := io.Pipe()
+
+	go func() {
+		tarReader := tar.NewReader(inputTarStream)
+		tarWriter := tar.NewWriter(pipeWriter)
+		defer inputTarStream.Close()
+		defer tarWriter.Close()
+
+		modify := func(name string, original *tar.Header, tarReader io.Reader) error {
+			header, data, err := fn(dst, name, original, tarReader)
+			switch {
+			case err != nil:
+				return err
+			case header == nil:
+				return nil
+			}
+
+			if header.Name == "" {
+				header.Name = name
+			}
+			header.Size = int64(len(data))
+			if err := tarWriter.WriteHeader(header); err != nil {
+				return err
+			}
+			if len(data) != 0 {
+				if _, err := tarWriter.Write(data); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+		var remaining []string
+		var err error
+		var originalHeader *tar.Header
+		for {
+			originalHeader, err = tarReader.Next()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				pipeWriter.CloseWithError(err)
+				return
+			}
+
+			if helpers.Contains(mods, originalHeader.Name) {
+				// No modifiers for this file, copy the header and data
+				if err := tarWriter.WriteHeader(originalHeader); err != nil {
+					pipeWriter.CloseWithError(err)
+					return
+				}
+				if _, err := pools.Copy(tarWriter, tarReader); err != nil {
+					pipeWriter.CloseWithError(err)
+					return
+				}
+				remaining = append(remaining, originalHeader.Name)
+				continue
+			}
+
+			if err := modify(originalHeader.Name, originalHeader, tarReader); err != nil {
+				pipeWriter.CloseWithError(err)
+				return
+			}
+		}
+
+		// Apply the modifiers that haven't matched any files in the archive
+		for _, name := range remaining {
+			if err := modify(name, nil, nil); err != nil {
+				pipeWriter.CloseWithError(err)
+				return
+			}
+		}
+
+		pipeWriter.Close()
+
+	}()
+	return pipeReader
+}
+
 func tarModifierWrapperFunc(ctx *types.Context) func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
 	return func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
 		// If the destination path already exists I rename target file name with postfix.
@@ -448,8 +529,7 @@ func tarModifierWrapperFunc(ctx *types.Context) func(dst, path string, header *t
 		}
 	}
 
-func (a *PackageArtifact) GetProtectFiles(ctx *types.Context) []string {
-	ans := []string{}
+func (a *PackageArtifact) GetProtectFiles(ctx *types.Context) (res []string) {
 	annotationDir := ""
 
 	if !ctx.Config.ConfigProtectSkip {
@@ -468,159 +548,75 @@ func (a *PackageArtifact) GetProtectFiles(ctx *types.Context) []string {
 		cp.Map(a.Files, ctx.Config.GetConfigProtectConfFiles())
 
 		// NOTE: for unpack we need files path without initial /
-		ans = cp.GetProtectFiles(false)
+		res = cp.GetProtectFiles(false)
 	}
 
-	return ans
+	return
 }
 
 // Unpack Untar and decompress (TODO) to the given path
 func (a *PackageArtifact) Unpack(ctx *types.Context, dst string, keepPerms bool) error {
-	if !strings.HasPrefix(dst, "/") {
+
+	if !strings.HasPrefix(dst, string(os.PathSeparator)) {
 		return errors.New("destination must be an absolute path")
 	}
 
 	// Create
 	protectedFiles := a.GetProtectFiles(ctx)
 
-	tarModifier := helpers.NewTarModifierWrapper(dst, tarModifierWrapperFunc(ctx))
+	mod := tarModifierWrapperFunc(ctx)
+	//tarModifier := helpers.NewTarModifierWrapper(dst, mod)
 
-	switch a.CompressionType {
-	case compression.Zstandard:
-		// Create the uncompressed archive
-		archive, err := os.Create(a.Path + ".uncompressed")
-		if err != nil {
-			return err
-		}
-		defer os.RemoveAll(a.Path + ".uncompressed")
-		defer archive.Close()
-
-		original, err := os.Open(a.Path)
+	archiveFile, err := os.Open(a.Path)
 	if err != nil {
 		return errors.Wrap(err, "Cannot open "+a.Path)
 	}
-		defer original.Close()
+	defer archiveFile.Close()
 
-		bufferedReader := bufio.NewReader(original)
-
-		d, err := zstd.NewReader(bufferedReader)
-		if err != nil {
-			return err
-		}
-		defer d.Close()
-
-		_, err = io.Copy(archive, d)
-		if err != nil {
-			return errors.Wrap(err, "Cannot copy to "+a.Path+".uncompressed")
-		}
-
-		err = helpers.UntarProtect(a.Path+".uncompressed", dst,
-			ctx.Config.GetGeneral().SameOwner, protectedFiles, tarModifier)
-		if err != nil {
-			return err
-		}
-		return nil
-	case compression.GZip:
-		// Create the uncompressed archive
-		archive, err := os.Create(a.Path + ".uncompressed")
-		if err != nil {
-			return err
-		}
-		defer os.RemoveAll(a.Path + ".uncompressed")
-		defer archive.Close()
-
-		original, err := os.Open(a.Path)
+	decompressed, err := containerdCompression.DecompressStream(archiveFile)
 	if err != nil {
 		return errors.Wrap(err, "Cannot open "+a.Path)
 	}
-		defer original.Close()
+	defer decompressed.Close()
 
-		bufferedReader := bufio.NewReader(original)
-		r, err := gzip.NewReader(bufferedReader)
-		if err != nil {
+	replacerArchive := replaceFileTarWrapper(dst, decompressed, protectedFiles, mod)
+	defer replacerArchive.Close()
+
+	// or with filter?
+	// func(header *tar.Header) (bool, error) {
+	// 	if helpers.Contains(protectedFiles, header.Name) {
+	// 		newHead, _, err := mod(dst, header.Name, header, decompressed)
+	// 		if err != nil {
+	// 			return false, err
+	// 		}
+	// 		header.Name = newHead.Name
+	// 		// Override target path
+	// 		//target = filepath.Join(dest, header.Name)
+	// 	}
+	// 	// tarModifier.Modifier()
+	// 	return true, nil
+	// },
+	_, _, err = image.ExtractReader(ctx, replacerArchive, dst, ctx.Config.GetGeneral().SameOwner, nil)
 	return err
 }
-		defer r.Close()
-
-		_, err = io.Copy(archive, r)
-		if err != nil {
-			return errors.Wrap(err, "Cannot copy to "+a.Path+".uncompressed")
-		}
-
-		err = helpers.UntarProtect(a.Path+".uncompressed", dst,
-			ctx.Config.GetGeneral().SameOwner, protectedFiles, tarModifier)
-		if err != nil {
-			return err
-		}
-		return nil
-	// Defaults to tar only (covers when "none" is supplied)
-	default:
-		return helpers.UntarProtect(a.Path, dst, ctx.Config.GetGeneral().SameOwner,
-			protectedFiles, tarModifier)
-	}
-	return errors.New("Compression type must be supplied")
-}
 
 // FileList generates the list of file of a package from the local archive
 func (a *PackageArtifact) FileList() ([]string, error) {
-	var tr *tar.Reader
-	switch a.CompressionType {
-	case compression.Zstandard:
-		archive, err := os.Create(a.Path + ".uncompressed")
-		if err != nil {
-			return []string{}, err
-		}
-		defer os.RemoveAll(a.Path + ".uncompressed")
-		defer archive.Close()
-
-		original, err := os.Open(a.Path)
-		if err != nil {
-			return []string{}, errors.Wrap(err, "Cannot open "+a.Path)
-		}
-		defer original.Close()
-
-		bufferedReader := bufio.NewReader(original)
-		r, err := zstd.NewReader(bufferedReader)
-		if err != nil {
-			return []string{}, err
-		}
-		defer r.Close()
-		tr = tar.NewReader(r)
-	case compression.GZip:
-		// Create the uncompressed archive
-		archive, err := os.Create(a.Path + ".uncompressed")
-		if err != nil {
-			return []string{}, err
-		}
-		defer os.RemoveAll(a.Path + ".uncompressed")
-		defer archive.Close()
-
-		original, err := os.Open(a.Path)
-		if err != nil {
-			return []string{}, errors.Wrap(err, "Cannot open "+a.Path)
-		}
-		defer original.Close()
-
-		bufferedReader := bufio.NewReader(original)
-		r, err := gzip.NewReader(bufferedReader)
-		if err != nil {
-			return []string{}, err
-		}
-		defer r.Close()
-		tr = tar.NewReader(r)
-
-	// Defaults to tar only (covers when "none" is supplied)
-	default:
-		tarFile, err := os.Open(a.Path)
-		if err != nil {
-			return []string{}, errors.Wrap(err, "Could not open package archive")
-		}
-		defer tarFile.Close()
-		tr = tar.NewReader(tarFile)
-
-	}
-
 	var files []string
 
+	archiveFile, err := os.Open(a.Path)
+	if err != nil {
+		return files, errors.Wrap(err, "Cannot open "+a.Path)
+	}
+	defer archiveFile.Close()
+
+	decompressed, err := containerdCompression.DecompressStream(archiveFile)
+	if err != nil {
+		return files, errors.Wrap(err, "Cannot open "+a.Path)
+	}
+	defer decompressed.Close()
+	tr := tar.NewReader(decompressed)
+
 	// untar each segment
 	for {
 		hdr, err := tr.Next()
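Note: the replaceFileTarWrapper added above streams the decompressed archive through an io.Pipe and lets a callback rewrite protected entries on the fly, replacing the old unpack-to-.uncompressed temp file. A simplified, self-contained sketch of the same streaming-rewrite pattern follows; it only renames matching entries, whereas the real modifier implements luet's config-protect handling:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
	"strings"
)

// renameEntries copies a tar stream, renaming entries listed in protected and
// passing everything else through untouched. It mirrors the shape of
// replaceFileTarWrapper, not its exact behaviour.
func renameEntries(in io.Reader, protected map[string]bool) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		tr := tar.NewReader(in)
		tw := tar.NewWriter(pw)
		for {
			hdr, err := tr.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				pw.CloseWithError(err)
				return
			}
			if protected[hdr.Name] {
				hdr.Name = hdr.Name + "._cfg_new" // illustrative suffix
			}
			if err := tw.WriteHeader(hdr); err != nil {
				pw.CloseWithError(err)
				return
			}
			if _, err := io.Copy(tw, tr); err != nil {
				pw.CloseWithError(err)
				return
			}
		}
		// Flush the tar trailer before closing the pipe so readers see a clean EOF.
		if err := tw.Close(); err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.Close()
	}()
	return pr
}

func main() {
	// Build a tiny in-memory archive with one "protected" file.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	data := "key=value\n"
	tw.WriteHeader(&tar.Header{Name: "etc/app.conf", Mode: 0o644, Size: int64(len(data))})
	io.Copy(tw, strings.NewReader(data))
	tw.Close()

	out := renameEntries(&buf, map[string]bool{"etc/app.conf": true})
	defer out.Close()
	tr := tar.NewReader(out)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(hdr.Name) // prints etc/app.conf._cfg_new
	}
}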
@@ -162,6 +162,7 @@ RUN echo bar > /test2`))
 				ctx,
 				img,
 				result,
+				false,
 				nil,
 			)
 			Expect(err).ToNot(HaveOccurred())
@@ -211,6 +212,7 @@ RUN echo bar > /test2`))
 				ctx,
 				img,
 				result,
+				false,
 				nil,
 			)
 			Expect(err).ToNot(HaveOccurred())
@@ -248,6 +248,7 @@ func (cs *LuetCompiler) unpackFs(concurrency int, keepPermissions bool, p *compi
 	_, rootfs, err := image.Extract(
 		cs.Options.Context,
 		img,
+		keepPermissions,
 		image.ExtractFiles(
 			cs.Options.Context,
 			p.GetPackageDir(),
@@ -316,6 +317,7 @@ func (cs *LuetCompiler) unpackDelta(concurrency int, keepPermissions bool, p *co
 		ref2,
 		cs.Options.CompressionType,
 		p.Rel(fmt.Sprintf("%s%s", p.GetPackage().GetFingerPrint(), ".package.tar")),
+		keepPermissions,
 		image.ExtractDeltaFiles(cs.Options.Context, diff, p.GetIncludes(), p.GetExcludes()),
 	)
 	if err != nil {
@@ -213,12 +213,8 @@ func DownloadAndExtractDockerImage(ctx *luettypes.Context, image, dest string, a
 		ctx,
 		img,
 		dest,
-		luetimages.ExtractFiles(
-			ctx,
-			"",
-			[]string{},
-			[]string{},
-		),
+		true,
+		nil,
 	)
 	if err != nil {
 		return nil, err
@@ -348,6 +348,7 @@ func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, systemLock
 	defer wg.Done()
 
 	for p := range c {
+
 		if p.Uninstall.Package != nil {
 			l.Options.Context.Debug("Replacing package inplace")
 			toUninstall, uninstall, err := l.generateUninstallFn(p.Uninstall.Option, s, p.Uninstall.Package)
@@ -356,8 +357,10 @@ func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, systemLock
 				continue
 				//return errors.Wrap(err, "while computing uninstall")
 			}
 
+			systemLock.Lock()
 			err = uninstall()
+			systemLock.Unlock()
 
 			if err != nil {
 				l.Options.Context.Error("Failed uninstall for ", packsToList(toUninstall))
 				continue
@@ -929,7 +929,11 @@ func (r *LuetSystemRepository) Sync(ctx *types.Context, force bool) (*LuetSystem
 	}
 	ctx.Debug("Decompress tree of the repository " + r.Name + "...")
 
-	err = treeFileArtifact.Unpack(ctx, treefs, true)
+	if _, err := os.Lstat(treefs); os.IsNotExist(err) {
+		os.MkdirAll(treefs, 0600)
+	}
+
+	err = treeFileArtifact.Unpack(ctx, treefs, false)
 	if err != nil {
 		return nil, errors.Wrap(err, "Error met while unpacking tree")
 	}
@@ -937,7 +941,7 @@ func (r *LuetSystemRepository) Sync(ctx *types.Context, force bool) (*LuetSystem
 	// FIXME: It seems that tar with only one file doesn't create destination
 	// directory. I create directory directly for now.
 	os.MkdirAll(metafs, os.ModePerm)
-	err = metaFileArtifact.Unpack(ctx, metafs, true)
+	err = metaFileArtifact.Unpack(ctx, metafs, false)
 	if err != nil {
 		return nil, errors.Wrap(err, "Error met while unpacking metadata")
 	}
@@ -204,6 +204,7 @@ func (d *dockerRepositoryGenerator) Generate(r *LuetSystemRepository, imagePrefi
 		d.context,
 		img,
 		repoTemp,
+		d.context.Config.GetGeneral().SameOwner,
 		nil,
 	)
 	if err != nil {