2019-11-04 16:16:13 +00:00
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler
2019-11-10 09:46:57 +00:00
import (
2019-12-28 15:32:32 +00:00
"archive/tar"
2019-12-30 11:53:32 +00:00
"bufio"
2019-12-28 15:32:32 +00:00
"io"
2019-11-10 09:46:57 +00:00
"io/ioutil"
"os"
2019-11-22 20:01:29 +00:00
"path"
2019-11-10 09:46:57 +00:00
"path/filepath"
2019-11-14 16:43:47 +00:00
"regexp"
2019-12-01 20:04:22 +00:00
2019-12-30 11:53:32 +00:00
gzip "github.com/klauspost/pgzip"
2019-11-30 10:47:39 +00:00
//"strconv"
2019-11-10 09:46:57 +00:00
"strings"
"sync"
2019-12-30 11:53:32 +00:00
"github.com/mudler/luet/pkg/helpers"
2019-11-10 09:46:57 +00:00
. "github.com/mudler/luet/pkg/logger"
2019-11-15 17:11:26 +00:00
"github.com/mudler/luet/pkg/solver"
2019-12-30 11:53:32 +00:00
"github.com/pkg/errors"
2019-11-22 20:01:29 +00:00
yaml "gopkg.in/yaml.v2"
2019-12-30 11:53:32 +00:00
)
2019-11-10 09:46:57 +00:00
2019-12-30 11:53:32 +00:00
// CompressionImplementation identifies the compression algorithm applied to
// a package artifact archive when it is written to (and read from) disk.
type CompressionImplementation string

const (
	None CompressionImplementation = "none" // e.g. tar for standard packages
	GZip CompressionImplementation = "gzip"
)
2019-11-22 20:01:29 +00:00
type ArtifactIndex [ ] Artifact
func ( i ArtifactIndex ) CleanPath ( ) ArtifactIndex {
2019-11-29 18:01:59 +00:00
newIndex := ArtifactIndex { }
for _ , n := range i {
art := n . ( * PackageArtifact )
2019-12-30 15:32:05 +00:00
// FIXME: This is a dup and makes difficult to add attributes to artifacts
newIndex = append ( newIndex , & PackageArtifact {
Path : path . Base ( n . GetPath ( ) ) ,
SourceAssertion : art . SourceAssertion ,
CompileSpec : art . CompileSpec ,
Dependencies : art . Dependencies ,
CompressionType : art . CompressionType ,
Checksums : art . Checksums ,
} )
2019-11-22 20:01:29 +00:00
}
return newIndex
//Update if exists, otherwise just create
}
// When compiling, we write also a fingerprint.metadata.yaml file with PackageArtifact. In this way we can have another command to create the repository
// which will consist in just of an repository.yaml which is just the repository structure with the list of package artifact.
// In this way a generic client can fetch the packages and, after unpacking the tree, performing queries to install packages.
type PackageArtifact struct {
	// Path is the location of the artifact archive on disk.
	Path string `json:"path"`

	// Dependencies are the artifacts this package was built against.
	Dependencies []*PackageArtifact `json:"dependencies"`
	// CompileSpec is the specification used to build this artifact.
	CompileSpec *LuetCompilationSpec `json:"compilationspec"`
	// Checksums holds the content checksums, filled by Hash().
	Checksums Checksums `json:"checksums"`

	// SourceAssertion carries the solver assertions; excluded from JSON.
	SourceAssertion solver.PackagesAssertions `json:"-"`

	// CompressionType selects how the archive is (de)compressed.
	CompressionType CompressionImplementation `json:"compressiontype"`
}
func NewPackageArtifact ( path string ) Artifact {
2019-12-30 11:53:32 +00:00
return & PackageArtifact { Path : path , Dependencies : [ ] * PackageArtifact { } , Checksums : Checksums { } , CompressionType : None }
2019-11-15 17:11:26 +00:00
}
2019-11-22 20:01:29 +00:00
func NewPackageArtifactFromYaml ( data [ ] byte ) ( Artifact , error ) {
2019-12-29 12:56:52 +00:00
p := & PackageArtifact { Checksums : Checksums { } }
2019-11-22 20:01:29 +00:00
err := yaml . Unmarshal ( data , & p )
if err != nil {
2019-11-22 23:29:24 +00:00
return p , err
2019-11-22 20:01:29 +00:00
}
2019-11-22 23:29:24 +00:00
return p , err
2019-11-22 20:01:29 +00:00
}
2020-01-05 14:30:16 +00:00
func LoadArtifactFromYaml ( spec CompilationSpec ) ( Artifact , error ) {
metaFile := spec . GetPackage ( ) . GetFingerPrint ( ) + ".metadata.yaml"
dat , err := ioutil . ReadFile ( spec . Rel ( metaFile ) )
if err != nil {
return nil , errors . Wrap ( err , "Error reading file " + metaFile )
}
2020-01-05 14:51:48 +00:00
art , err := NewPackageArtifactFromYaml ( dat )
if err != nil {
return nil , errors . Wrap ( err , "Error writing file " + metaFile )
}
// It is relative, set it back to abs
art . SetPath ( spec . Rel ( art . GetPath ( ) ) )
return art , nil
2020-01-05 14:30:16 +00:00
}
// SetCompressionType selects the compression algorithm used by
// Compress/Unpack/FileList for this artifact.
func (a *PackageArtifact) SetCompressionType(t CompressionImplementation) {
	a.CompressionType = t
}
// GetChecksums returns the checksums currently recorded on the artifact.
func (a *PackageArtifact) GetChecksums() Checksums {
	return a.Checksums
}
// SetChecksums overwrites the checksums recorded on the artifact.
func (a *PackageArtifact) SetChecksums(c Checksums) {
	a.Checksums = c
}
// Hash computes the artifact checksums and stores them on the artifact itself.
func (a *PackageArtifact) Hash() error {
	return a.Checksums.Generate(a)
}
// Verify recomputes the artifact checksums and compares them against the
// ones recorded on the artifact, returning an error on any mismatch.
func (a *PackageArtifact) Verify() error {
	computed := Checksums{}
	if err := computed.Generate(a); err != nil {
		return err
	}
	if err := computed.Compare(a.Checksums); err != nil {
		return err
	}
	return nil
}
2019-11-22 20:01:29 +00:00
func ( a * PackageArtifact ) WriteYaml ( dst string ) error {
2019-12-29 12:56:52 +00:00
// First compute checksum of artifact. When we write the yaml we want to write up-to-date informations.
err := a . Hash ( )
if err != nil {
return errors . Wrap ( err , "Failed generating checksums for artifact" )
}
2019-11-23 15:28:50 +00:00
//p := a.CompileSpec.GetPackage().GetPath()
//a.CompileSpec.GetPackage().SetPath("")
// for _, ass := range a.CompileSpec.GetSourceAssertion() {
// ass.Package.SetPath("")
// }
data , err := yaml . Marshal ( a )
if err != nil {
return errors . Wrap ( err , "While marshalling for PackageArtifact YAML" )
}
mangle , err := NewPackageArtifactFromYaml ( data )
if err != nil {
return errors . Wrap ( err , "Generated invalid artifact" )
}
//p := a.CompileSpec.GetPackage().GetPath()
mangle . GetCompileSpec ( ) . GetPackage ( ) . SetPath ( "" )
for _ , ass := range mangle . GetCompileSpec ( ) . GetSourceAssertion ( ) {
2019-11-22 20:01:29 +00:00
ass . Package . SetPath ( "" )
}
2019-11-23 15:28:50 +00:00
data , err = yaml . Marshal ( mangle )
2019-11-22 20:01:29 +00:00
if err != nil {
return errors . Wrap ( err , "While marshalling for PackageArtifact YAML" )
}
2019-11-23 15:28:50 +00:00
2019-11-22 20:01:29 +00:00
err = ioutil . WriteFile ( filepath . Join ( dst , a . GetCompileSpec ( ) . GetPackage ( ) . GetFingerPrint ( ) + ".metadata.yaml" ) , data , os . ModePerm )
if err != nil {
return errors . Wrap ( err , "While writing PackageArtifact YAML" )
}
2019-11-23 15:28:50 +00:00
//a.CompileSpec.GetPackage().SetPath(p)
2019-11-22 20:01:29 +00:00
return nil
}
// GetSourceAssertion returns the solver assertions attached to the artifact.
func (a *PackageArtifact) GetSourceAssertion() solver.PackagesAssertions {
	return a.SourceAssertion
}
// SetCompileSpec attaches the compilation spec that produced this artifact.
// The argument is asserted to the concrete *LuetCompilationSpec.
func (a *PackageArtifact) SetCompileSpec(as CompilationSpec) {
	a.CompileSpec = as.(*LuetCompilationSpec)
}
// GetCompileSpec returns the compilation spec that produced this artifact.
func (a *PackageArtifact) GetCompileSpec() CompilationSpec {
	return a.CompileSpec
}
// SetSourceAssertion attaches solver assertions to the artifact.
func (a *PackageArtifact) SetSourceAssertion(as solver.PackagesAssertions) {
	a.SourceAssertion = as
}
func ( a * PackageArtifact ) GetDependencies ( ) [ ] Artifact {
2019-11-22 23:29:24 +00:00
ret := [ ] Artifact { }
for _ , d := range a . Dependencies {
ret = append ( ret , d )
}
return ret
2019-11-15 17:11:26 +00:00
}
func ( a * PackageArtifact ) SetDependencies ( d [ ] Artifact ) {
2019-11-22 23:29:24 +00:00
ret := [ ] * PackageArtifact { }
for _ , dd := range d {
ret = append ( ret , dd . ( * PackageArtifact ) )
}
a . Dependencies = ret
2019-11-08 18:57:04 +00:00
}
// GetPath returns the on-disk location of the artifact archive.
func (a *PackageArtifact) GetPath() string {
	return a.Path
}
// SetPath updates the on-disk location of the artifact archive.
func (a *PackageArtifact) SetPath(p string) {
	a.Path = p
}
2019-11-10 09:46:57 +00:00
2019-12-28 15:32:32 +00:00
// Compress Archives and compress (TODO) to the artifact path
2019-12-30 11:53:32 +00:00
func ( a * PackageArtifact ) Compress ( src string , concurrency int ) error {
switch a . CompressionType {
case GZip :
err := helpers . Tar ( src , a . Path )
if err != nil {
return err
}
original , err := os . Open ( a . Path )
if err != nil {
return err
}
defer original . Close ( )
gzipfile := a . Path + ".gz"
bufferedReader := bufio . NewReader ( original )
// Open a file for writing.
dst , err := os . Create ( gzipfile )
if err != nil {
return err
}
// Create gzip writer.
w := gzip . NewWriter ( dst )
w . SetConcurrency ( concurrency , 10 )
defer w . Close ( )
defer dst . Close ( )
_ , err = io . Copy ( w , bufferedReader )
if err != nil {
return err
}
w . Close ( )
os . RemoveAll ( a . Path ) // Remove original
2019-12-30 15:32:39 +00:00
// a.CompressedPath = gzipfile
a . Path = gzipfile
2019-12-30 14:28:54 +00:00
return nil
2019-12-30 13:46:17 +00:00
//a.Path = gzipfile
2019-12-31 11:29:53 +00:00
// Defaults to tar only (covers when "none" is supplied)
default :
return helpers . Tar ( src , a . Path )
2019-12-30 11:53:32 +00:00
}
return errors . New ( "Compression type must be supplied" )
2019-12-28 15:32:32 +00:00
}
// Unpack Untar and decompress (TODO) to the given path
func ( a * PackageArtifact ) Unpack ( dst string , keepPerms bool ) error {
2019-12-30 11:53:32 +00:00
switch a . CompressionType {
case GZip :
// Create the uncompressed archive
2019-12-30 15:32:39 +00:00
archive , err := os . Create ( a . GetPath ( ) + ".uncompressed" )
2019-12-30 11:53:32 +00:00
if err != nil {
return err
}
2019-12-30 15:32:39 +00:00
defer os . RemoveAll ( a . GetPath ( ) + ".uncompressed" )
defer archive . Close ( )
2019-12-30 11:53:32 +00:00
2019-12-30 15:32:39 +00:00
original , err := os . Open ( a . Path )
2019-12-30 11:53:32 +00:00
if err != nil {
2019-12-30 15:32:39 +00:00
return errors . Wrap ( err , "Cannot open " + a . Path )
2019-12-30 11:53:32 +00:00
}
defer original . Close ( )
bufferedReader := bufio . NewReader ( original )
r , err := gzip . NewReader ( bufferedReader )
if err != nil {
return err
}
defer r . Close ( )
_ , err = io . Copy ( archive , r )
if err != nil {
2019-12-30 15:32:39 +00:00
return errors . Wrap ( err , "Cannot copy to " + a . GetPath ( ) + ".uncompressed" )
2019-12-30 11:53:32 +00:00
}
2019-12-30 15:32:39 +00:00
err = helpers . Untar ( a . GetPath ( ) + ".uncompressed" , dst , keepPerms )
2019-12-30 11:53:32 +00:00
if err != nil {
return err
}
2019-12-30 14:28:54 +00:00
return nil
2019-12-31 11:29:53 +00:00
// Defaults to tar only (covers when "none" is supplied)
default :
return helpers . Untar ( a . GetPath ( ) , dst , keepPerms )
2019-12-30 11:53:32 +00:00
}
return errors . New ( "Compression type must be supplied" )
2019-12-28 15:32:32 +00:00
}
func ( a * PackageArtifact ) FileList ( ) ( [ ] string , error ) {
2019-12-30 15:32:39 +00:00
var tr * tar . Reader
switch a . CompressionType {
case GZip :
// Create the uncompressed archive
archive , err := os . Create ( a . GetPath ( ) + ".uncompressed" )
if err != nil {
return [ ] string { } , err
}
defer os . RemoveAll ( a . GetPath ( ) + ".uncompressed" )
defer archive . Close ( )
original , err := os . Open ( a . Path )
if err != nil {
return [ ] string { } , errors . Wrap ( err , "Cannot open " + a . Path )
}
defer original . Close ( )
bufferedReader := bufio . NewReader ( original )
r , err := gzip . NewReader ( bufferedReader )
if err != nil {
return [ ] string { } , err
}
defer r . Close ( )
tr = tar . NewReader ( r )
2019-12-28 15:32:32 +00:00
2019-12-31 11:29:53 +00:00
// Defaults to tar only (covers when "none" is supplied)
default :
tarFile , err := os . Open ( a . GetPath ( ) )
if err != nil {
return [ ] string { } , errors . Wrap ( err , "Could not open package archive" )
}
defer tarFile . Close ( )
tr = tar . NewReader ( tarFile )
2019-12-28 15:32:32 +00:00
}
var files [ ] string
// untar each segment
for {
hdr , err := tr . Next ( )
if err == io . EOF {
break
}
if err != nil {
return [ ] string { } , err
}
// determine proper file path info
finfo := hdr . FileInfo ( )
fileName := hdr . Name
if finfo . Mode ( ) . IsDir ( ) {
continue
}
files = append ( files , fileName )
// if a dir, create it, then go to next segment
}
return files , nil
}
// CopyJob describes a single file copy from a source tree into the archive
// staging directory. Artifact is the file name as it appears inside the package.
type CopyJob struct {
	Src, Dst string
	Artifact string
}
func worker ( i int , wg * sync . WaitGroup , s <- chan CopyJob ) {
defer wg . Done ( )
for job := range s {
2019-11-30 10:47:39 +00:00
//Info("#"+strconv.Itoa(i), "copying", job.Src, "to", job.Dst)
2019-12-01 18:10:40 +00:00
// if dir, err := helpers.IsDirectory(job.Src); err == nil && dir {
// err = helpers.CopyDir(job.Src, job.Dst)
// if err != nil {
// Warning("Error copying dir", job, err)
// }
// continue
// }
2019-11-10 09:46:57 +00:00
if ! helpers . Exists ( job . Dst ) {
if err := helpers . CopyFile ( job . Src , job . Dst ) ; err != nil {
2019-11-30 10:47:39 +00:00
Warning ( "Error copying" , job , err )
2019-11-10 09:46:57 +00:00
}
}
}
}
// ExtractArtifactFromDelta extracts deltas from ArtifactLayer from an image in tar format.
// Layer additions (optionally filtered by the `includes` regular expressions)
// are copied concurrently into a staging directory, which is then archived
// into a new artifact at dst with compression type t.
func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, t CompressionImplementation) (Artifact, error) {

	// Staging area where the selected files are gathered before archiving.
	archive, err := ioutil.TempDir(os.TempDir(), "archive")
	if err != nil {
		return nil, errors.Wrap(err, "Error met while creating tempdir for archive")
	}
	defer os.RemoveAll(archive) // clean up

	// If src is a tar archive, unpack it to a temporary rootfs first so the
	// individual files can be copied out by path.
	if strings.HasSuffix(src, ".tar") {
		rootfs, err := ioutil.TempDir(os.TempDir(), "rootfs")
		if err != nil {
			return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
		}
		defer os.RemoveAll(rootfs) // clean up
		err = helpers.Untar(src, rootfs, keepPerms)
		if err != nil {
			return nil, errors.Wrap(err, "Error met while unpacking rootfs")
		}
		src = rootfs
	}

	// Spawn the copy workers; jobs are streamed through an unbuffered channel.
	toCopy := make(chan CopyJob)
	var wg = new(sync.WaitGroup)
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go worker(i, wg, toCopy)
	}

	// Handle includes in spec. If specified they filter what gets in the package
	if len(includes) > 0 {
		var includeRegexp []*regexp.Regexp
		for _, i := range includes {
			r, e := regexp.Compile(i)
			if e != nil {
				// A bad pattern is skipped (with a warning) rather than
				// failing the whole extraction.
				Warning("Failed compiling regex:", e)
				continue
			}
			includeRegexp = append(includeRegexp, r)
		}
		for _, l := range layers {
			// Consider d.Additions (and d.Changes? - warn at least) only
		ADDS:
			for _, a := range l.Diffs.Additions {
				// Enqueue the file on the first matching pattern only.
				for _, i := range includeRegexp {
					if i.MatchString(a.Name) {
						toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
						continue ADDS
					}
				}
			}
		}
	} else {
		// Otherwise just grab all
		for _, l := range layers {
			// Consider d.Additions (and d.Changes? - warn at least) only
			for _, a := range l.Diffs.Additions {
				toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
			}
		}
	}

	// No more jobs: closing the channel lets the workers drain and exit.
	close(toCopy)
	wg.Wait()

	// Archive the staging area into the destination artifact.
	a := NewPackageArtifact(dst)
	a.SetCompressionType(t)
	err = a.Compress(archive, concurrency)
	if err != nil {
		return nil, errors.Wrap(err, "Error met while creating package archive")
	}

	return a, nil
}
2020-01-05 17:05:48 +00:00
func ComputeArtifactLayerSummary ( diffs [ ] ArtifactLayer ) ArtifactLayersSummary {
ans := ArtifactLayersSummary {
Layers : make ( [ ] ArtifactLayerSummary , 0 ) ,
}
for _ , layer := range diffs {
sum := ArtifactLayerSummary {
FromImage : layer . FromImage ,
ToImage : layer . ToImage ,
AddFiles : 0 ,
AddSizes : 0 ,
DelFiles : 0 ,
DelSizes : 0 ,
ChangeFiles : 0 ,
ChangeSizes : 0 ,
}
for _ , a := range layer . Diffs . Additions {
sum . AddFiles ++
sum . AddSizes += int64 ( a . Size )
}
for _ , d := range layer . Diffs . Deletions {
sum . DelFiles ++
sum . DelSizes += int64 ( d . Size )
}
for _ , c := range layer . Diffs . Changes {
sum . ChangeFiles ++
sum . ChangeSizes += int64 ( c . Size )
}
ans . Layers = append ( ans . Layers , sum )
}
return ans
}