write to cache

Signed-off-by: Avi Deitcher <avi@deitcher.net>
This commit is contained in:
Avi Deitcher
2021-02-04 16:08:09 +02:00
parent 4cdf6bc56d
commit 9633d23d37
178 changed files with 2788 additions and 380 deletions

View File

@@ -23,7 +23,7 @@ We do not want the builds to happen with each CI run for two reasons:
1. It is slower to do a package build than to just pull the latest image.
2. If any of the steps of the build fails, e.g. a `curl` download that depends on an intermittent target, it can cause all of CI to fail.
Thus, if, as a maintainer, you merge any commits into a `pkg/`, even if the change is documentation alone, please do a `linuxkit package push`.
Thus, if, as a maintainer, you merge any commits into a `pkg/`, even if the change is documentation alone, please do a `linuxkit pkg push`.
## Package source
@@ -54,8 +54,8 @@ A package source consists of a directory containing at least two files:
### Prerequisites
Before you can build packages you need:
- Docker version 17.06 or newer. If you are on a Mac you also need
`docker-credential-osxkeychain.bin`, which comes with Docker for Mac.
- Docker version 19.03 or newer, which includes [buildx](https://docs.docker.com/buildx/working-with-buildx/)
- If you are on a Mac you also need `docker-credential-osxkeychain.bin`, which comes with Docker for Mac.
- `make`, `notary`, `base64`, `jq`, and `expect`
- A *recent* version of `manifest-tool` which you can build with `make
bin/manifest-tool`, or `go get github.com/estesp/manifest-tool`, or
@@ -67,6 +67,35 @@ Further, when building packages you need to be logged into hub with
`docker login` as some of the tooling extracts your hub credentials
during the build.
### Build Targets
LinuxKit builds packages as docker images. It deposits the built package as a docker image in one of two targets:
* the linuxkit cache `~/.linuxkit/` (configurable) - default option
* the docker image cache
If you want to build images and test and run them _in a standalone_ fashion locally, then you should pick the docker image cache. Otherwise, you should use the default linuxkit cache. LinuxKit defaults to building OS images using docker images from this cache,
only looking in the docker cache if instructed to via `linuxkit build --docker`.
When using the linuxkit cache as the package build target, it creates all of the layers, the manifest that can be uploaded
to a registry, and the multi-architecture index. If an image already exists for a different architecture in the cache,
it updates the index to include additional manifests created.
As of this writing, `linuxkit pkg build` only builds packages for the platform on which it is running; it does not (yet) support cross-building the packages for other architectures.
Note that the local docker option is available _only_ when building without pushing to a remote registry, i.e.:
```
linuxkit pkg build
linuxkit pkg build --docker
```
If you push to a registry, it _always_ uses the linuxkit cache only:
```
linuxkit pkg push
```
### Build packages as a maintainer
If you have write access to the `linuxkit` organisation on hub, you

View File

@@ -48,3 +48,25 @@ func findImage(p layout.Path, imageName, architecture string) (v1.Image, error)
}
return nil, fmt.Errorf("no image found for %s", imageName)
}
// FindDescriptor returns the first descriptor in the cache's root index whose
// name annotation matches the given name. It returns (nil, nil) when the cache
// is valid but contains no matching descriptor.
func FindDescriptor(dir string, name string) (*v1.Descriptor, error) {
	layoutPath, err := Get(dir)
	if err != nil {
		return nil, err
	}
	// if there is no root index, we are broken
	rootIndex, err := layoutPath.ImageIndex()
	if err != nil {
		return nil, fmt.Errorf("invalid image cache: %v", err)
	}
	matches, err := partial.FindManifests(rootIndex, match.Name(name))
	if err != nil {
		return nil, err
	}
	if len(matches) == 0 {
		return nil, nil
	}
	return &matches[0], nil
}

View File

@@ -0,0 +1,22 @@
package cache
import (
"io"
)
type nopCloserWriter struct {
writer io.Writer
}
func (n nopCloserWriter) Write(b []byte) (int, error) {
return n.writer.Write(b)
}
func (n nopCloserWriter) Close() error {
return nil
}
// NopCloserWriter wrap an io.Writer with a no-op Closer
func NopCloserWriter(writer io.Writer) io.WriteCloser {
return nopCloserWriter{writer}
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/containerd/containerd/reference"
"github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/validate"
)
@@ -15,6 +16,7 @@ func ValidateImage(ref *reference.Spec, cacheDir, architecture string) (ImageSou
imageIndex v1.ImageIndex
image v1.Image
imageName = ref.String()
desc *v1.Descriptor
)
// next try the local cache
root, err := FindRoot(cacheDir, imageName)
@@ -22,10 +24,16 @@ func ValidateImage(ref *reference.Spec, cacheDir, architecture string) (ImageSou
img, err := root.Image()
if err == nil {
image = img
if desc, err = partial.Descriptor(img); err != nil {
return ImageSource{}, errors.New("image could not create valid descriptor")
}
} else {
ii, err := root.ImageIndex()
if err == nil {
imageIndex = ii
if desc, err = partial.Descriptor(ii); err != nil {
return ImageSource{}, errors.New("index could not create valid descriptor")
}
}
}
}
@@ -45,6 +53,7 @@ func ValidateImage(ref *reference.Spec, cacheDir, architecture string) (ImageSou
ref,
cacheDir,
architecture,
desc,
), nil
}
return ImageSource{}, errors.New("invalid index")
@@ -55,6 +64,7 @@ func ValidateImage(ref *reference.Spec, cacheDir, architecture string) (ImageSou
ref,
cacheDir,
architecture,
desc,
), nil
}
return ImageSource{}, errors.New("invalid image")

85
src/cmd/linuxkit/cache/push.go vendored Normal file
View File

@@ -0,0 +1,85 @@
package cache
import (
"fmt"
"github.com/google/go-containerregistry/pkg/authn"
namepkg "github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/registry"
)
// PushWithManifest push an image along with, optionally, a multi-arch index.
func PushWithManifest(dir string, name, suffix string, pushImage, pushManifest, trust, sign bool) error {
var (
digest string
l int
err error
options []remote.Option
)
p, err := Get(dir)
if err != nil {
return err
}
imageName := name + suffix
ref, err := namepkg.ParseReference(imageName)
if err != nil {
return err
}
if pushImage {
fmt.Printf("Pushing %s\n", imageName)
// do we even have the given one?
root, err := findRootFromLayout(p, imageName)
if err != nil {
return err
}
options = append(options, remote.WithAuthFromKeychain(authn.DefaultKeychain))
img, err1 := root.Image()
ii, err2 := root.ImageIndex()
switch {
case err1 == nil:
if err := remote.Write(ref, img, options...); err != nil {
return err
}
fmt.Printf("Pushed image %s\n", imageName)
case err2 == nil:
if err := remote.WriteIndex(ref, ii, options...); err != nil {
return err
}
fmt.Printf("Pushed index %s\n", imageName)
default:
return fmt.Errorf("name %s unknown in cache", imageName)
}
} else {
fmt.Print("Image push disabled, skipping...\n")
}
auth, err := registry.GetDockerAuth()
if err != nil {
return fmt.Errorf("failed to get auth: %v", err)
}
if pushManifest {
fmt.Printf("Pushing %s to manifest %s\n", imageName, name)
digest, l, err = registry.PushManifest(imageName, auth)
if err != nil {
return err
}
} else {
fmt.Print("Manifest push disabled, skipping...\n")
}
// if trust is not enabled, nothing more to do
if !trust {
fmt.Println("trust disabled, not signing")
return nil
}
if !sign {
fmt.Println("signing disabled, not signing")
return nil
}
fmt.Printf("Signing manifest for %s\n", imageName)
return registry.SignTag(name, digest, l, auth)
}

View File

@@ -6,6 +6,7 @@ import (
"io"
"github.com/containerd/containerd/reference"
"github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/mutate"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
@@ -17,16 +18,18 @@ type ImageSource struct {
ref *reference.Spec
cache layout.Path
architecture string
descriptor *v1.Descriptor
}
// NewSource return an ImageSource for a specific ref and architecture in the given
// cache directory.
func NewSource(ref *reference.Spec, dir string, architecture string) ImageSource {
func NewSource(ref *reference.Spec, dir string, architecture string, descriptor *v1.Descriptor) ImageSource {
	// NOTE(review): the Get error is deliberately discarded here; an invalid dir
	// yields a zero-value layout path whose operations fail later — confirm
	// callers always pass a valid cache directory
	cachePath, _ := Get(dir)
	src := ImageSource{
		ref:          ref,
		cache:        cachePath,
		architecture: architecture,
		descriptor:   descriptor,
	}
	return src
}
@@ -67,3 +70,8 @@ func (c ImageSource) TarReader() (io.ReadCloser, error) {
return mutate.Extract(image), nil
}
// Descriptor return the descriptor of the image. It may be nil when the
// source was constructed without one.
func (c ImageSource) Descriptor() *v1.Descriptor {
	d := c.descriptor
	return d
}

View File

@@ -1,7 +1,15 @@
package cache
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strings"
"github.com/containerd/containerd/reference"
"github.com/google/go-containerregistry/pkg/authn"
@@ -9,8 +17,15 @@ import (
"github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/match"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/types"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
log "github.com/sirupsen/logrus"
)
const (
linux = "linux"
)
// ImageWrite takes an image name and pulls it down, writing it locally. It should be
@@ -26,6 +41,7 @@ func ImageWrite(dir string, ref *reference.Spec, trustedRef, architecture string
if trustedRef != "" {
pullImageName = trustedRef
}
log.Debugf("ImageWrite to cache %s trusted reference %s", image, pullImageName)
remoteRef, err := name.ParseReference(pullImageName)
if err != nil {
return ImageSource{}, fmt.Errorf("invalid image name %s: %v", pullImageName, err)
@@ -44,6 +60,7 @@ func ImageWrite(dir string, ref *reference.Spec, trustedRef, architecture string
// first attempt as an index
ii, err := desc.ImageIndex()
if err == nil {
log.Debugf("ImageWrite retrieved %s is index, saving", pullImageName)
err = p.ReplaceIndex(ii, match.Name(image), layout.WithAnnotations(annotations))
} else {
var im v1.Image
@@ -52,6 +69,7 @@ func ImageWrite(dir string, ref *reference.Spec, trustedRef, architecture string
if err != nil {
return ImageSource{}, fmt.Errorf("provided image is neither an image nor an index: %s", image)
}
log.Debugf("ImageWrite retrieved %s is image, saving", pullImageName)
err = p.ReplaceImage(im, match.Name(image), layout.WithAnnotations(annotations))
}
if err != nil {
@@ -61,5 +79,343 @@ func ImageWrite(dir string, ref *reference.Spec, trustedRef, architecture string
ref,
dir,
architecture,
&desc.Descriptor,
), nil
}
// ImageWriteTar takes an OCI format image tar stream and writes it locally. It should be
// efficient and only write missing blobs, based on their content hash. The image is
// stored in the cache under an architecture-suffixed name (e.g. "image-amd64"); only
// amd64, arm64 and s390x are accepted. Returns an ImageSource whose descriptor carries
// the (in-memory) platform for the given architecture.
func ImageWriteTar(dir string, ref *reference.Spec, architecture string, r io.Reader) (ImageSource, error) {
	p, err := Get(dir)
	if err != nil {
		return ImageSource{}, err
	}
	var (
		tr    = tar.NewReader(r)
		index bytes.Buffer
	)
	var suffix string
	switch architecture {
	case "amd64", "arm64", "s390x":
		suffix = "-" + architecture
	default:
		return ImageSource{}, fmt.Errorf("Unknown arch %q", architecture)
	}
	imageName := ref.String() + suffix
	log.Debugf("ImageWriteTar to cache %s", imageName)
	for {
		header, err := tr.Next()
		if err == io.EOF {
			break // End of archive
		}
		if err != nil {
			return ImageSource{}, err
		}
		// get the filename and decide what to do with the file on that basis
		// there are only a few kinds of files in an oci archive:
		//   blobs/sha256/<hash> - these we write out to our cache unless it already exists
		//   index.json - we just take the data out of it and append to our index.json
		//   manifest.json - not interested
		//   oci-layout - not interested
		filename := header.Name
		switch {
		case filename == "manifest.json":
			log.Debugf("ignoring %s", filename)
		case filename == "oci-layout":
			log.Debugf("ignoring %s", filename)
		case header.Typeflag == tar.TypeDir:
			log.Debugf("ignoring directory %s", filename)
		case filename == "index.json":
			log.Debugf("saving %s to memory to parse", filename)
			// any errors should stop and get reported
			if _, err := io.Copy(&index, tr); err != nil {
				return ImageSource{}, fmt.Errorf("error reading data for file %s : %v", filename, err)
			}
		case strings.HasPrefix(filename, "blobs/sha256/"):
			// must have a file named blob/sha256/<hash>
			parts := strings.Split(filename, "/")
			// if we had a file that is just the directory, ignore it
			if len(parts) != 3 {
				log.Debugf("ignoring %s", filename)
				continue
			}
			hash, err := v1.NewHash(fmt.Sprintf("%s:%s", parts[1], parts[2]))
			if err != nil {
				// malformed file
				return ImageSource{}, fmt.Errorf("invalid hash filename for %s: %v", filename, err)
			}
			log.Debugf("writing %s as hash %s", filename, hash)
			if err := p.WriteBlob(hash, ioutil.NopCloser(tr)); err != nil {
				return ImageSource{}, fmt.Errorf("error reading data for file %s : %v", filename, err)
			}
		}
	}
	// update the index in the cache directory
	var descriptor *v1.Descriptor
	if index.Len() != 0 {
		im, err := v1.ParseIndexManifest(&index)
		if err != nil {
			return ImageSource{}, fmt.Errorf("error reading index.json: %v", err)
		}
		// in theory, we should support a tar stream with multiple images in it. However, how would we
		// know which one gets the single name annotation we have? We will find some way in the future.
		if len(im.Manifests) != 1 {
			return ImageSource{}, fmt.Errorf("currently only support OCI tar stream that has a single image")
		}
		if err := p.RemoveDescriptors(match.Name(imageName)); err != nil {
			return ImageSource{}, fmt.Errorf("unable to remove old descriptors for %s: %v", imageName, err)
		}
		for i := range im.Manifests {
			// take an explicit copy so the pointer we keep does not alias the range
			// loop variable; were multiple manifests ever allowed, the previous
			// `descriptor = &desc` would leave descriptor pointing at the last element
			desc := im.Manifests[i]
			// make sure that we have the correct image name annotation
			if desc.Annotations == nil {
				desc.Annotations = map[string]string{}
			}
			desc.Annotations[imagespec.AnnotationRefName] = imageName
			descriptor = &desc
			log.Debugf("appending descriptor %#v", descriptor)
			if err := p.AppendDescriptor(desc); err != nil {
				return ImageSource{}, fmt.Errorf("error appending descriptor to layout index: %v", err)
			}
		}
	}
	// NOTE(review): the platform is filled in only on the returned in-memory
	// descriptor, after AppendDescriptor has already been called, so the on-disk
	// index entry has no platform — confirm this is intended
	if descriptor != nil && descriptor.Platform == nil {
		descriptor.Platform = &v1.Platform{
			OS:           linux,
			Architecture: architecture,
		}
	}
	return NewSource(
		ref,
		dir,
		architecture,
		descriptor,
	), nil
}
// IndexWrite takes an image name and creates an index for the targets to which it
// points. It does not pull down any images; it entirely assumes that the subjects of
// the provided manifest descriptors are already present in the cache. If a reference
// with the provided name already exists and it is an index, the manifests in the
// existing index are updated (deduplicated by platform); a name that refers to a
// plain image is an error.
func IndexWrite(dir string, ref *reference.Spec, descriptors ...v1.Descriptor) (ImageSource, error) {
	p, err := Get(dir)
	if err != nil {
		return ImageSource{}, err
	}
	image := ref.String()
	log.Debugf("writing an index for %s", image)

	ii, err := p.ImageIndex()
	if err != nil {
		return ImageSource{}, fmt.Errorf("unable to get root index at %s: %v", dir, err)
	}
	// the name must not already refer to a plain image; we only know how to update indexes
	images, err := partial.FindImages(ii, match.Name(image))
	if err != nil {
		return ImageSource{}, fmt.Errorf("error parsing index at %s: %v", dir, err)
	}
	if len(images) > 0 {
		return ImageSource{}, fmt.Errorf("image named %s already exists in cache at %s and is not an index", image, dir)
	}
	indexes, err := partial.FindIndexes(ii, match.Name(image))
	if err != nil {
		return ImageSource{}, fmt.Errorf("error parsing index at %s: %v", dir, err)
	}
	// platformKey identifies a platform for deduplication; a nil platform maps to the
	// same key as an all-empty one instead of panicking on dereference
	platformKey := func(platform *v1.Platform) string {
		if platform == nil {
			return "//"
		}
		return fmt.Sprintf("%s/%s/%s", platform.OS, platform.Architecture, platform.OSVersion)
	}
	var im v1.IndexManifest
	// do we update an existing one? Or create a new one?
	if len(indexes) > 0 {
		// we already had one, so update just the referenced index and return
		manifest, err := indexes[0].IndexManifest()
		if err != nil {
			return ImageSource{}, fmt.Errorf("unable to convert index for %s into its manifest: %v", image, err)
		}
		oldhash, err := indexes[0].Digest()
		if err != nil {
			return ImageSource{}, fmt.Errorf("unable to get hash of existing index: %v", err)
		}
		// we only care about avoiding duplicate os/architecture/osversion entries
		descReplace := map[string]v1.Descriptor{}
		for _, desc := range descriptors {
			descReplace[platformKey(desc.Platform)] = desc
		}
		// now we can go through each one and see if it already exists, and, if so, replace it
		var manifests []v1.Descriptor
		for _, m := range manifest.Manifests {
			if m.Platform != nil {
				lookup := platformKey(m.Platform)
				if desc, ok := descReplace[lookup]; ok {
					manifests = append(manifests, desc)
					// already added, so do not need it in the lookup list any more
					delete(descReplace, lookup)
					continue
				}
			}
			manifests = append(manifests, m)
		}
		// any left get added
		for _, desc := range descReplace {
			manifests = append(manifests, desc)
		}
		manifest.Manifests = manifests
		im = *manifest
		// remove the old index blob; the root descriptor is replaced below
		if err := p.RemoveBlob(oldhash); err != nil {
			return ImageSource{}, fmt.Errorf("unable to remove old index file: %v", err)
		}
	} else {
		// we did not have one, so create an index, store it, update the root index.json, and return
		im = v1.IndexManifest{
			MediaType:     types.OCIImageIndex,
			Manifests:     descriptors,
			SchemaVersion: 2,
		}
	}
	// write the updated index as a new blob
	b, err := json.Marshal(im)
	if err != nil {
		return ImageSource{}, fmt.Errorf("unable to marshal new index to json: %v", err)
	}
	hash, size, err := v1.SHA256(bytes.NewReader(b))
	if err != nil {
		return ImageSource{}, fmt.Errorf("error calculating hash of index json: %v", err)
	}
	if err := p.WriteBlob(hash, ioutil.NopCloser(bytes.NewReader(b))); err != nil {
		return ImageSource{}, fmt.Errorf("error writing new index to json: %v", err)
	}
	// finally update the descriptor in the root index.json to point at the new blob
	if err := p.RemoveDescriptors(match.Name(image)); err != nil {
		return ImageSource{}, fmt.Errorf("unable to remove old descriptor from index.json: %v", err)
	}
	desc := v1.Descriptor{
		MediaType: types.OCIImageIndex,
		Size:      size,
		Digest:    hash,
		Annotations: map[string]string{
			imagespec.AnnotationRefName: image,
		},
	}
	if err := p.AppendDescriptor(desc); err != nil {
		return ImageSource{}, fmt.Errorf("unable to append new descriptor to index.json: %v", err)
	}
	// an index is not architecture-specific, so the architecture is left empty
	return NewSource(
		ref,
		dir,
		"",
		&desc,
	), nil
}
// DescriptorWrite writes a name for a given descriptor, creating or updating the
// index that the name points at. It does not require that the descriptors' targets
// exist in the cache; it simply records them. If the name already refers to an index,
// entries are merged (deduplicated by platform); a name that refers to a plain image
// is an error.
func DescriptorWrite(dir string, ref *reference.Spec, descriptors ...v1.Descriptor) (ImageSource, error) {
	p, err := Get(dir)
	if err != nil {
		return ImageSource{}, err
	}
	image := ref.String()
	log.Debugf("writing descriptors for image %s: %v", image, descriptors)
	ii, err := p.ImageIndex()
	if err != nil {
		return ImageSource{}, fmt.Errorf("unable to get root index at %s: %v", dir, err)
	}
	images, err := partial.FindImages(ii, match.Name(image))
	if err != nil {
		return ImageSource{}, fmt.Errorf("error parsing index at %s: %v", dir, err)
	}
	if len(images) > 0 {
		return ImageSource{}, fmt.Errorf("image named %s already exists in cache at %s and is not an index", image, dir)
	}
	indexes, err := partial.FindIndexes(ii, match.Name(image))
	if err != nil {
		return ImageSource{}, fmt.Errorf("error parsing index at %s: %v", dir, err)
	}
	// platformKey identifies a platform for deduplication; a nil platform maps to the
	// same key as an all-empty one instead of panicking on dereference
	platformKey := func(platform *v1.Platform) string {
		if platform == nil {
			return "//"
		}
		return fmt.Sprintf("%s/%s/%s", platform.OS, platform.Architecture, platform.OSVersion)
	}
	var im v1.IndexManifest
	// do we update an existing one? Or create a new one?
	if len(indexes) > 0 {
		// we already had one, so update just the referenced index and return.
		// BUGFIX: the previous code declared a new `im` here with `:=`, shadowing the
		// outer one, so the merged manifest was lost and an empty index was marshaled
		// and written below. Copy into the outer `im` instead.
		manifest, err := indexes[0].IndexManifest()
		if err != nil {
			return ImageSource{}, fmt.Errorf("unable to convert index for %s into its manifest: %v", image, err)
		}
		oldhash, err := indexes[0].Digest()
		if err != nil {
			return ImageSource{}, fmt.Errorf("unable to get hash of existing index: %v", err)
		}
		// we only care about avoiding duplicate os/architecture/osversion entries
		descReplace := map[string]v1.Descriptor{}
		for _, desc := range descriptors {
			descReplace[platformKey(desc.Platform)] = desc
		}
		// now we can go through each one and see if it already exists, and, if so, replace it
		var manifests []v1.Descriptor
		for _, m := range manifest.Manifests {
			lookup := platformKey(m.Platform)
			if desc, ok := descReplace[lookup]; ok {
				manifests = append(manifests, desc)
				// already added, so do not need it in the lookup list any more
				delete(descReplace, lookup)
				continue
			}
			manifests = append(manifests, m)
		}
		// any left get added
		for _, desc := range descReplace {
			manifests = append(manifests, desc)
		}
		manifest.Manifests = manifests
		im = *manifest
		// BUGFIX: remove the old index blob via the layout, as IndexWrite does. The
		// previous manual os.RemoveAll on dir/<alg>/<hex> omitted the "blobs"
		// directory, so it silently removed nothing and orphaned the old blob.
		if err := p.RemoveBlob(oldhash); err != nil {
			return ImageSource{}, fmt.Errorf("unable to remove old index file: %v", err)
		}
	} else {
		// we did not have one, so create an index, store it, update the root index.json, and return
		im = v1.IndexManifest{
			MediaType:     types.OCIImageIndex,
			Manifests:     descriptors,
			SchemaVersion: 2,
		}
	}
	// write the updated index as a new blob
	b, err := json.Marshal(im)
	if err != nil {
		return ImageSource{}, fmt.Errorf("unable to marshal new index to json: %v", err)
	}
	hash, size, err := v1.SHA256(bytes.NewReader(b))
	if err != nil {
		return ImageSource{}, fmt.Errorf("error calculating hash of index json: %v", err)
	}
	if err := p.WriteBlob(hash, ioutil.NopCloser(bytes.NewReader(b))); err != nil {
		return ImageSource{}, fmt.Errorf("error writing new index to json: %v", err)
	}
	// finally update the descriptor in the root index.json to point at the new blob
	if err := p.RemoveDescriptors(match.Name(image)); err != nil {
		return ImageSource{}, fmt.Errorf("unable to remove old descriptor from index.json: %v", err)
	}
	desc := v1.Descriptor{
		MediaType: types.OCIImageIndex,
		Size:      size,
		Digest:    hash,
		Annotations: map[string]string{
			imagespec.AnnotationRefName: image,
		},
	}
	if err := p.AppendDescriptor(desc); err != nil {
		return ImageSource{}, fmt.Errorf("unable to append new descriptor to index.json: %v", err)
	}
	// an index is not architecture-specific, so the architecture is left empty
	return NewSource(
		ref,
		dir,
		"",
		&desc,
	), nil
}

View File

@@ -60,7 +60,7 @@ require (
github.com/gogo/googleapis v1.3.2 // indirect
github.com/gogo/protobuf v1.3.1 // indirect
github.com/golang/protobuf v1.4.2 // indirect
github.com/google/go-containerregistry v0.4.0
github.com/google/go-containerregistry v0.4.1-0.20210208222243-cbafe638a7a9
github.com/google/uuid v1.1.1
github.com/gophercloud/gophercloud v0.1.0
github.com/gophercloud/utils v0.0.0-20181029231510-34f5991525d1
@@ -69,7 +69,7 @@ require (
github.com/gorilla/mux v1.7.4-0.20190830121156-884b5ffcbd3a // indirect
github.com/gorilla/websocket v1.4.0 // indirect
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/hashicorp/go-version v1.2.0 // indirect
github.com/hashicorp/go-version v1.2.0
github.com/hashicorp/golang-lru v0.5.3 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jinzhu/gorm v1.9.16 // indirect
@@ -81,7 +81,7 @@ require (
github.com/mattn/go-shellwords v1.0.10 // indirect
github.com/mattn/go-sqlite3 v1.14.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 // indirect
github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0 // indirect
github.com/miekg/pkcs11 v1.0.3 // indirect
github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936 // indirect
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f // indirect
@@ -92,8 +92,6 @@ require (
github.com/moul/gotty-client v1.7.1-0.20180526075433-e5589f6df359
github.com/ncw/swift v1.0.47 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/onsi/ginkgo v1.12.0 // indirect
github.com/onsi/gomega v1.9.0 // indirect
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc v1.0.0-rc90.0.20200409211037-ccbb3364d49d // indirect
@@ -130,19 +128,19 @@ require (
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 // indirect
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee
golang.org/x/mod v0.4.1 // indirect
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
golang.org/x/sys v0.0.0-20210113181707-4bcb84eeeb78 // indirect
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c // indirect
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect
golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3 // indirect
golang.org/x/tools v0.1.0 // indirect
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e // indirect
google.golang.org/api v0.22.0
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 // indirect
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece // indirect
google.golang.org/grpc v1.30.0-dev.0.20200410230105-27096e8260a4 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
gopkg.in/gcfg.v1 v1.2.0 // indirect
@@ -151,8 +149,10 @@ require (
gopkg.in/yaml.v2 v2.3.0
gotest.tools/v3 v3.0.3 // indirect
k8s.io/apiserver v0.18.8 // indirect
k8s.io/code-generator v0.20.1 // indirect
k8s.io/code-generator v0.20.2 // indirect
k8s.io/csi-translation-lib v0.18.8 // indirect
k8s.io/gengo v0.0.0-20210203185629-de9496dff47b // indirect
k8s.io/klog/v2 v2.5.0 // indirect
rsc.io/letsencrypt v0.0.3 // indirect
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e // indirect
vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787 // indirect

View File

@@ -315,7 +315,10 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -390,6 +393,8 @@ github.com/google/go-containerregistry v0.3.0 h1:+vqpHdgIbD7xSeufHJq0iuAx7ILcEeh
github.com/google/go-containerregistry v0.3.0/go.mod h1:BJ7VxR1hAhdiZBGGnvGETHEmFs1hzXc4VM1xjOPO9wA=
github.com/google/go-containerregistry v0.4.0 h1:45axtqLd66llqD8R9XgiCQ64foc7I2xkAG40NwR5YFw=
github.com/google/go-containerregistry v0.4.0/go.mod h1:TX4KwzBRckt63iM22ZNHzUGqXMdLE1UFJuEQnC/14fE=
github.com/google/go-containerregistry v0.4.1-0.20210208222243-cbafe638a7a9 h1:mUV1z+Rwa0uuEo+FTrKTK9Eu85vBgcfYaN3t3nmpyzY=
github.com/google/go-containerregistry v0.4.1-0.20210208222243-cbafe638a7a9/go.mod h1:GU9FUA/X9rd2cV3ZoUNaWihp27tki6/38EsVzL2Dyzc=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -441,6 +446,7 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -524,7 +530,10 @@ github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A
github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE=
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0 h1:8E6DrFvII6QR4eJ3PkFvV+lc03P+2qwqTPLm1ax7694=
github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0/go.mod h1:fcEyUyXZXoV4Abw8DX0t7wyL8mCDxXyU4iAFZfT3IHw=
github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -559,6 +568,7 @@ github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -567,11 +577,13 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -656,6 +668,7 @@ github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdh
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.6 h1:C1/pvkxkGN/H03mDxLzItaceYJDBk1HdClgR15suAzI=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.6/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.3/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -833,7 +846,10 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180124060956-0ed95abb35c4/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -867,7 +883,10 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20170313201147-1611bb46e67a/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -886,6 +905,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -912,6 +932,7 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -938,6 +959,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210113181707-4bcb84eeeb78 h1:nVuTkr9L6Bq62qpUqKo/RnZCFfzDBL0bYo6w9OJUqZY=
golang.org/x/sys v0.0.0-20210113181707-4bcb84eeeb78/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -994,7 +1019,11 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3 h1:DywqrEscRX7O2phNjkT0L6lhHKGBoMLCNX+XcAe7t6s=
golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1081,6 +1110,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/dancannon/gorethink.v3 v3.0.5 h1:/g7PWP7zUS6vSNmHSDbjCHQh1Rqn8Jy6zSMQxAsBSMQ=
gopkg.in/dancannon/gorethink.v3 v3.0.5/go.mod h1:GXsi1e3N2OcKhcP6nsYABTiUejbWMFO4GY5a4pEaeEc=
@@ -1124,21 +1154,34 @@ k8s.io/apiserver v0.18.8/go.mod h1:12u5FuGql8Cc497ORNj79rhPdiXQC4bf53X/skR/1YM=
k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU=
k8s.io/cloud-provider v0.18.8/go.mod h1:cn9AlzMPVIXA4HHLVbgGUigaQlZyHSZ7WAwDEFNrQSs=
k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
k8s.io/code-generator v0.20.1 h1:kre3GNich5gbO3d1FyTT8fHI4ZJezZV217yFdWlQaRQ=
k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
k8s.io/code-generator v0.20.2 h1:SQaysped4EtUDk3u1zphnUJiOAwFdhHx9xS3WKAE0x8=
k8s.io/code-generator v0.20.2/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
k8s.io/component-base v0.18.8/go.mod h1:00frPRDas29rx58pPCxNkhUfPbwajlyyvu8ruNgSErU=
k8s.io/csi-translation-lib v0.18.8/go.mod h1:6cA6Btlzxy9s3QrS4BCZzQqclIWnTLr6Jx3H2ctAzY4=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201113003025-83324d819ded h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE=
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20210203185629-de9496dff47b h1:bAU8IlrMA6KbP0dIg/sVSJn95pDCUHDZx0DpTGrf2v4=
k8s.io/gengo v0.0.0-20210203185629-de9496dff47b/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.5.0 h1:8mOnjf1RmUPW6KRqQCfYSZq/K20Unmp3IhuZUhxl8KI=
k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/legacy-cloud-providers v0.15.7/go.mod h1:3kYzP7K+6Qj7DzBf7RR57cGOSpIaRL8W7GeB2jBJsiI=
@@ -1256,6 +1299,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

View File

@@ -20,6 +20,8 @@ func pkgBuild(args []string) {
}
force := flags.Bool("force", false, "Force rebuild")
docker := flags.Bool("docker", false, "Store the built image in the docker image cache instead of the default linuxkit cache")
buildCacheDir := flags.String("cache", defaultLinuxkitCache(), "Directory for storing built image, incompatible with --docker")
p, err := pkglib.NewFromCLI(flags, args...)
if err != nil {
@@ -33,6 +35,10 @@ func pkgBuild(args []string) {
if *force {
opts = append(opts, pkglib.WithBuildForce())
}
opts = append(opts, pkglib.WithBuildCacheDir(*buildCacheDir))
if *docker {
opts = append(opts, pkglib.WithBuildTargetDockerCache())
}
if err := p.Build(opts...); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)

View File

@@ -25,6 +25,7 @@ func pkgPush(args []string) {
manifest := flags.Bool("manifest", true, "Create and push multi-arch manifest")
image := flags.Bool("image", true, "Build and push image for the current platform")
sign := flags.Bool("sign", true, "sign the manifest, if a manifest is created; ignored if --manifest=false")
buildCacheDir := flags.String("cache", defaultLinuxkitCache(), "Directory for storing built image, incompatible with --docker")
p, err := pkglib.NewFromCLI(flags, args...)
if err != nil {
@@ -57,6 +58,7 @@ func pkgPush(args []string) {
if *sign && *manifest {
opts = append(opts, pkglib.WithBuildSign())
}
opts = append(opts, pkglib.WithBuildCacheDir(*buildCacheDir))
if *nobuild {
fmt.Printf("Pushing %q without building\n", p.Tag())

View File

@@ -3,14 +3,23 @@ package pkglib
import (
"archive/tar"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"github.com/containerd/containerd/reference"
"github.com/google/go-containerregistry/pkg/v1"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/cache"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/version"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
const (
minimumDockerVersion = "19.03"
)
type buildOpts struct {
@@ -21,6 +30,8 @@ type buildOpts struct {
manifest bool
sign bool
image bool
targetDocker bool
cache string
}
// BuildOpt allows callers to specify options to Build
@@ -82,6 +93,22 @@ func WithRelease(r string) BuildOpt {
}
}
// WithBuildTargetDockerCache puts the build target in the docker image cache
// instead of the default linuxkit cache.
func WithBuildTargetDockerCache() BuildOpt {
	return func(bo *buildOpts) error {
		// Applying this option cannot fail; it only flips the target flag.
		bo.targetDocker = true
		return nil
	}
}
// WithBuildCacheDir provides a build cache directory to use.
func WithBuildCacheDir(dir string) BuildOpt {
	return func(bo *buildOpts) error {
		// The directory path is recorded as-is; it is not validated here.
		bo.cache = dir
		return nil
	}
}
// Build builds the package
func (p Pkg) Build(bos ...BuildOpt) error {
var bo buildOpts
@@ -105,7 +132,10 @@ func (p Pkg) Build(bos ...BuildOpt) error {
return err
}
var suffix string
var (
desc *v1.Descriptor
suffix string
)
switch arch {
case "amd64", "arm64", "s390x":
suffix = "-" + arch
@@ -113,6 +143,12 @@ func (p Pkg) Build(bos ...BuildOpt) error {
return fmt.Errorf("Unknown arch %q", arch)
}
// did we have the build cache dir provided? Yes, there is a default, but that is at the CLI level,
// and expected to be provided at this function level
if bo.cache == "" && !bo.targetDocker {
return errors.New("must provide linuxkit build cache directory when not targeting docker")
}
if p.git != nil && bo.push && bo.release == "" {
r, err := p.git.commitTag("HEAD")
if err != nil {
@@ -127,14 +163,31 @@ func (p Pkg) Build(bos ...BuildOpt) error {
d := newDockerRunner(p.trust, p.cache, bo.sign)
if err := d.buildkitCheck(); err != nil {
return fmt.Errorf("buildkit not supported, check docker version: %v", err)
}
if !bo.force {
if bo.targetDocker {
ok, err := d.pull(p.Tag())
// any error returns
if err != nil {
return err
}
// if we already have it, do not bother building any more
if ok {
return nil
}
} else {
ref, err := reference.Parse(p.Tag())
if err != nil {
return fmt.Errorf("could not resolve references for image %s: %v", p.Tag(), err)
}
if _, err := cache.ImageWrite(bo.cache, &ref, "", arch); err == nil {
fmt.Printf("image already found %s", ref)
return nil
}
}
fmt.Println("No image pulled, continuing with build")
}
@@ -174,15 +227,75 @@ func (p Pkg) Build(bos ...BuildOpt) error {
d.ctx = &buildCtx{sources: p.sources}
if err := d.build(p.Tag()+suffix, p.path, args...); err != nil {
// set the target
var (
buildxOutput string
stdout io.WriteCloser
tag = p.Tag()
tagArch = tag + suffix
eg errgroup.Group
stdoutCloser = func() {
if stdout != nil {
stdout.Close()
}
}
)
ref, err := reference.Parse(tag)
if err != nil {
return fmt.Errorf("could not resolve references for image %s: %v", tagArch, err)
}
if bo.targetDocker {
buildxOutput = "type=docker"
stdout = nil
// there is no gofunc processing for simple output to docker
} else {
// we are writing to local, so we need to catch the tar output stream and place the right files in the right place
buildxOutput = "type=oci"
piper, pipew := io.Pipe()
stdout = pipew
eg.Go(func() error {
source, err := cache.ImageWriteTar(bo.cache, &ref, arch, piper)
// send the error down the channel
if err != nil {
fmt.Printf("cache.ImageWriteTar goroutine ended with error: %v\n", err)
}
desc = source.Descriptor()
piper.Close()
return err
})
}
args = append(args, fmt.Sprintf("--output=%s", buildxOutput))
if err := d.build(tagArch, p.path, stdout, args...); err != nil {
stdoutCloser()
return err
}
stdoutCloser()
// wait for the processor to finish
if err := eg.Wait(); err != nil {
return err
}
// create the arch-less image
switch {
case bo.targetDocker:
// if in docker, use a tag
if err := d.tag(tagArch, tag); err != nil {
return err
}
case desc == nil:
return errors.New("no valid descriptor returned for image")
default:
// if in the proper linuxkit cache, create a multi-arch index
if _, err := cache.IndexWrite(bo.cache, &ref, *desc); err != nil {
return err
}
}
if !bo.push {
if err := d.tag(p.Tag()+suffix, p.Tag()); err != nil {
return err
}
fmt.Printf("Build complete, not pushing, all done.\n")
return nil
}
@@ -198,9 +311,15 @@ func (p Pkg) Build(bos ...BuildOpt) error {
// matters given we do either pull or build above in the
// !force case.
if bo.targetDocker {
if err := d.pushWithManifest(p.Tag(), suffix, bo.image, bo.manifest, bo.sign); err != nil {
return err
}
} else {
if err := cache.PushWithManifest(bo.cache, p.Tag(), suffix, bo.image, bo.manifest, p.trust, bo.sign); err != nil {
return err
}
}
if bo.release == "" {
fmt.Printf("Build and push complete, not releasing, all done.\n")
@@ -212,6 +331,7 @@ func (p Pkg) Build(bos ...BuildOpt) error {
return err
}
if bo.targetDocker {
if err := d.tag(p.Tag()+suffix, relTag+suffix); err != nil {
return err
}
@@ -219,6 +339,25 @@ func (p Pkg) Build(bos ...BuildOpt) error {
if err := d.pushWithManifest(relTag, suffix, bo.image, bo.manifest, bo.sign); err != nil {
return err
}
} else {
// must make sure descriptor is available
if desc == nil {
desc, err = cache.FindDescriptor(bo.cache, p.Tag()+suffix)
if err != nil {
return err
}
}
ref, err := reference.Parse(relTag + suffix)
if err != nil {
return err
}
if _, err := cache.DescriptorWrite(bo.cache, &ref, *desc); err != nil {
return err
}
if err := cache.PushWithManifest(bo.cache, relTag, suffix, bo.image, bo.manifest, p.trust, bo.sign); err != nil {
return err
}
}
fmt.Printf("Build, push and release of %q complete, all done.\n", bo.release)

View File

@@ -5,20 +5,16 @@ package pkglib
//go:generate ./gen
import (
"encoding/base64"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path"
"strconv"
"strings"
"github.com/docker/cli/cli/config"
dockertypes "github.com/docker/docker/api/types"
"github.com/estesp/manifest-tool/pkg/registry"
"github.com/estesp/manifest-tool/pkg/types"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
versioncompare "github.com/hashicorp/go-version"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/registry"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
@@ -26,10 +22,7 @@ import (
const (
dctEnableEnv = "DOCKER_CONTENT_TRUST=1"
registryServer = "https://index.docker.io/v1/"
notaryServer = "https://notary.docker.io"
notaryDelegationPassphraseEnvVar = "NOTARY_DELEGATION_PASSPHRASE"
notaryAuthEnvVar = "NOTARY_AUTH"
dctEnvVar = "DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"
buildkitBuilderName = "linuxkit"
)
var platforms = []string{
@@ -77,10 +70,16 @@ var proxyEnvVars = []string{
"ALL_PROXY",
}
func (dr dockerRunner) command(args ...string) error {
func (dr dockerRunner) command(stdout, stderr io.Writer, args ...string) error {
cmd := exec.Command("docker", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if stdout == nil {
stdout = os.Stdout
}
if stderr == nil {
stderr = os.Stderr
}
cmd.Stdout = stdout
cmd.Stderr = stderr
cmd.Env = os.Environ()
dct := ""
@@ -94,7 +93,8 @@ func (dr dockerRunner) command(args ...string) error {
var eg errgroup.Group
if args[0] == "build" {
// special handling for build-args
if args[0] == "buildx" && args[1] == "build" {
buildArgs := []string{}
for _, proxyVarName := range proxyEnvVars {
if value, ok := os.LookupEnv(proxyVarName); ok {
@@ -134,8 +134,87 @@ func (dr dockerRunner) command(args ...string) error {
return eg.Wait()
}
// versionCheck returns the client version and server version, and compares them both
// against the minimum required version.
func (dr dockerRunner) versionCheck(version string) (string, string, error) {
	var out bytes.Buffer
	if err := dr.command(&out, nil, "version", "--format", "json"); err != nil {
		return "", "", err
	}
	// Decode only the two fields we care about (.Client.Version and
	// .Server.Version) instead of mirroring the whole schema.
	raw := out.Bytes()
	parsed := make(map[string]map[string]interface{})
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return "", "", fmt.Errorf("unable to parse docker version output: %v, output is: %s", err, string(raw))
	}
	clientSection, ok := parsed["Client"]
	if !ok {
		return "", "", errors.New("docker version output did not have 'Client' field")
	}
	clientRaw, ok := clientSection["Version"]
	if !ok {
		return "", "", errors.New("docker version output did not have 'Client.Version' field")
	}
	clientStr, ok := clientRaw.(string)
	if !ok {
		return "", "", errors.New("client version was not a string")
	}
	serverSection, ok := parsed["Server"]
	if !ok {
		return "", "", errors.New("docker version output did not have 'Server' field")
	}
	serverRaw, ok := serverSection["Version"]
	if !ok {
		// We at least know the client version at this point, so report it.
		return clientStr, "", errors.New("docker version output did not have 'Server.Version' field")
	}
	serverStr, ok := serverRaw.(string)
	if !ok {
		return clientStr, "", errors.New("server version was not a string")
	}
	// Parse all three versions, then require both client and server to be at
	// least the requested minimum.
	clientVer, err := versioncompare.NewVersion(clientStr)
	if err != nil {
		return clientStr, serverStr, fmt.Errorf("invalid client version %s: %v", clientStr, err)
	}
	serverVer, err := versioncompare.NewVersion(serverStr)
	if err != nil {
		return clientStr, serverStr, fmt.Errorf("invalid server version %s: %v", serverStr, err)
	}
	minVer, err := versioncompare.NewVersion(version)
	if err != nil {
		return clientStr, serverStr, fmt.Errorf("invalid provided version %s: %v", version, err)
	}
	if serverVer.LessThan(minVer) {
		return clientStr, serverStr, fmt.Errorf("server version %s less than compare version %s", serverVer, minVer)
	}
	if clientVer.LessThan(minVer) {
		return clientStr, serverStr, fmt.Errorf("client version %s less than compare version %s", clientVer, minVer)
	}
	return clientStr, serverStr, nil
}
// buildkitCheck checks if buildkit is supported. This is necessary because github uses some strange versions
// of docker in Actions, which makes it difficult to tell if buildkit is supported.
// See https://github.community/t/what-really-is-docker-3-0-6/16171
func (dr dockerRunner) buildkitCheck() error {
	// "docker buildx ls" succeeds only if the buildx plugin is available, so
	// its exit status doubles as a capability probe.
	return dr.command(nil, nil, "buildx", "ls")
}
// builder ensures that a buildx builder of the given name exists, creating it
// if necessary.
func (dr dockerRunner) builder(name string) error {
	// Probe for an existing builder first; "buildx inspect" fails when the
	// named builder does not exist.
	err := dr.command(nil, nil, "buildx", "inspect", name)
	if err != nil {
		// Not found: create a docker-container builder whose buildkitd
		// permits host networking inside builds.
		err = dr.command(nil, nil, "buildx", "create", "--name", name, "--driver", "docker-container", "--buildkitd-flags", "--allow-insecure-entitlement network.host")
	}
	return err
}
func (dr dockerRunner) pull(img string) (bool, error) {
err := dr.command("image", "pull", img)
err := dr.command(nil, nil, "image", "pull", img)
if err == nil {
return true, nil
}
@@ -148,7 +227,7 @@ func (dr dockerRunner) pull(img string) (bool, error) {
}
func (dr dockerRunner) push(img string) error {
return dr.command("image", "push", img)
return dr.command(nil, nil, "image", "push", img)
}
func (dr dockerRunner) pushWithManifest(img, suffix string, pushImage, pushManifest, sign bool) error {
@@ -166,14 +245,14 @@ func (dr dockerRunner) pushWithManifest(img, suffix string, pushImage, pushManif
fmt.Print("Image push disabled, skipping...\n")
}
auth, err := getDockerAuth()
auth, err := registry.GetDockerAuth()
if err != nil {
return fmt.Errorf("failed to get auth: %v", err)
}
if pushManifest {
fmt.Printf("Pushing %s to manifest %s\n", img+suffix, img)
digest, l, err = manifestPush(img, auth)
digest, l, err = registry.PushManifest(img, auth)
if err != nil {
return err
}
@@ -190,113 +269,31 @@ func (dr dockerRunner) pushWithManifest(img, suffix string, pushImage, pushManif
return nil
}
fmt.Printf("Signing manifest for %s\n", img)
return signManifest(img, digest, l, auth)
return registry.SignTag(img, digest, l, auth)
}
func (dr dockerRunner) tag(ref, tag string) error {
fmt.Printf("Tagging %s as %s\n", ref, tag)
return dr.command("image", "tag", ref, tag)
return dr.command(nil, nil, "image", "tag", ref, tag)
}
func (dr dockerRunner) build(tag, pkg string, opts ...string) error {
args := []string{"build"}
func (dr dockerRunner) build(tag, pkg string, stdout io.Writer, opts ...string) error {
// ensure we have a builder
if err := dr.builder(buildkitBuilderName); err != nil {
return fmt.Errorf("unable to ensure proper buildx builder: %v", err)
}
args := []string{"buildx", "build"}
if !dr.cache {
args = append(args, "--no-cache")
}
args = append(args, opts...)
args = append(args, fmt.Sprintf("--builder=%s", buildkitBuilderName))
args = append(args, "-t", tag, pkg)
return dr.command(args...)
return dr.command(stdout, nil, args...)
}
func (dr dockerRunner) save(tgt string, refs ...string) error {
args := append([]string{"image", "save", "-o", tgt}, refs...)
return dr.command(args...)
}
func getDockerAuth() (dockertypes.AuthConfig, error) {
cfgFile := config.LoadDefaultConfigFile(os.Stderr)
authconfig, err := cfgFile.GetAuthConfig(registryServer)
return dockertypes.AuthConfig(authconfig), err
}
func manifestPush(img string, auth dockertypes.AuthConfig) (hash string, length int, err error) {
srcImages := []types.ManifestEntry{}
for i, platform := range platforms {
osArchArr := strings.Split(platform, "/")
if len(osArchArr) != 2 && len(osArchArr) != 3 {
return hash, length, fmt.Errorf("platform argument %d is not of form 'os/arch': '%s'", i, platform)
}
variant := ""
os, arch := osArchArr[0], osArchArr[1]
if len(osArchArr) == 3 {
variant = osArchArr[2]
}
srcImages = append(srcImages, types.ManifestEntry{
Image: fmt.Sprintf("%s-%s", img, arch),
Platform: ocispec.Platform{
OS: os,
Architecture: arch,
Variant: variant,
},
})
}
yamlInput := types.YAMLInput{
Image: img,
Manifests: srcImages,
}
// push the manifest list with the auth as given, ignore missing, do not allow insecure
return registry.PushManifestList(auth.Username, auth.Password, yamlInput, true, false, false, "")
}
func signManifest(img, digest string, length int, auth dockertypes.AuthConfig) error {
imgParts := strings.Split(img, ":")
if len(imgParts) < 2 {
return fmt.Errorf("image not composed of <repo>:<tag> '%s'", img)
}
repo := imgParts[0]
tag := imgParts[1]
digestParts := strings.Split(digest, ":")
if len(digestParts) < 2 {
return fmt.Errorf("digest not composed of <algo>:<hash> '%s'", digest)
}
algo, hash := digestParts[0], digestParts[1]
if algo != "sha256" {
return fmt.Errorf("notary works with sha256 hash, not the provided %s", algo)
}
notaryAuth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", auth.Username, auth.Password)))
// run the notary command to sign
args := []string{
"-s",
notaryServer,
"-d",
path.Join(os.Getenv("HOME"), ".docker/trust"),
"addhash",
"-p",
fmt.Sprintf("docker.io/%s", repo),
tag,
strconv.Itoa(length),
"--sha256",
hash,
"-r",
"targets/releases",
}
cmd := exec.Command("notary", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", notaryDelegationPassphraseEnvVar, os.Getenv(dctEnvVar)), fmt.Sprintf("%s=%s", notaryAuthEnvVar, notaryAuth))
log.Debugf("Executing: %v", cmd.Args)
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to execute notary-tool: %v", err)
}
// report output
fmt.Printf("Signed manifest index: %s:%s\n", repo, tag)
return nil
return dr.command(nil, nil, args...)
}

View File

@@ -0,0 +1,19 @@
package registry
import (
"os"
"github.com/docker/cli/cli/config"
dockertypes "github.com/docker/docker/api/types"
)
const (
registryServer = "https://index.docker.io/v1/"
)
// GetDockerAuth gets an AuthConfig for the default registry server.
func GetDockerAuth() (dockertypes.AuthConfig, error) {
	// Load credentials from the standard docker CLI config file; any
	// warnings while loading are written to stderr.
	cfg := config.LoadDefaultConfigFile(os.Stderr)
	ac, err := cfg.GetAuthConfig(registryServer)
	// The CLI config auth struct converts directly to the engine API type.
	return dockertypes.AuthConfig(ac), err
}

View File

@@ -0,0 +1,114 @@
package registry
import (
"encoding/base64"
"fmt"
"os"
"os/exec"
"path"
"strconv"
"strings"
dockertypes "github.com/docker/docker/api/types"
"github.com/estesp/manifest-tool/pkg/registry"
"github.com/estesp/manifest-tool/pkg/types"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
log "github.com/sirupsen/logrus"
)
const (
notaryServer = "https://notary.docker.io"
notaryDelegationPassphraseEnvVar = "NOTARY_DELEGATION_PASSPHRASE"
notaryAuthEnvVar = "NOTARY_AUTH"
dctEnvVar = "DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"
)
var platforms = []string{
"linux/amd64", "linux/arm64", "linux/s390x",
}
// PushManifest creates a manifest list that supports each of the provided
// platforms and pushes it out.
//
// One entry is built per element of the package-level platforms list, naming
// the per-arch image "<img>-<arch>"; the list is then pushed under the plain
// img name using the given auth. It returns the hash and length values
// reported by manifest-tool's PushManifestList.
func PushManifest(img string, auth dockertypes.AuthConfig) (hash string, length int, err error) {
	// Pre-size: exactly one manifest entry per supported platform.
	srcImages := make([]types.ManifestEntry, 0, len(platforms))
	for i, platform := range platforms {
		osArchArr := strings.Split(platform, "/")
		if len(osArchArr) != 2 && len(osArchArr) != 3 {
			return hash, length, fmt.Errorf("platform argument %d is not of form 'os/arch': '%s'", i, platform)
		}
		variant := ""
		// Named goos rather than os to avoid shadowing the imported os package.
		goos, arch := osArchArr[0], osArchArr[1]
		if len(osArchArr) == 3 {
			variant = osArchArr[2]
		}
		srcImages = append(srcImages, types.ManifestEntry{
			Image: fmt.Sprintf("%s-%s", img, arch),
			Platform: ocispec.Platform{
				OS:           goos,
				Architecture: arch,
				Variant:      variant,
			},
		})
	}
	yamlInput := types.YAMLInput{
		Image:     img,
		Manifests: srcImages,
	}

	log.Debugf("pushing manifest list for %s -> %#v", img, yamlInput)

	// push the manifest list with the auth as given, ignore missing, do not allow insecure
	return registry.PushManifestList(auth.Username, auth.Password, yamlInput, true, false, false, "")
}
// SignTag signs a tag on a registry.
//
// It shells out to the external "notary" binary to add the given manifest
// digest hash and length to the trust data for docker.io/<repo> under the
// targets/releases delegation role, authenticating with auth.
//
// NOTE(review): the repo/tag split below uses the FIRST ':' in img, so a
// reference containing a registry host with a port (e.g.
// "localhost:5000/repo:tag") would parse incorrectly. Since the repo is
// later prefixed with "docker.io/", only Docker Hub references appear to be
// expected here — confirm with callers.
func SignTag(img, digest string, length int, auth dockertypes.AuthConfig) error {
	imgParts := strings.Split(img, ":")
	if len(imgParts) < 2 {
		return fmt.Errorf("image not composed of <repo>:<tag> '%s'", img)
	}
	repo := imgParts[0]
	tag := imgParts[1]
	// digest arrives as "<algo>:<hash>"; only sha256 is accepted below.
	digestParts := strings.Split(digest, ":")
	if len(digestParts) < 2 {
		return fmt.Errorf("digest not composed of <algo>:<hash> '%s'", digest)
	}
	algo, hash := digestParts[0], digestParts[1]
	if algo != "sha256" {
		return fmt.Errorf("notary works with sha256 hash, not the provided %s", algo)
	}
	// notary reads basic-auth credentials as base64("user:password") from the
	// NOTARY_AUTH environment variable.
	notaryAuth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", auth.Username, auth.Password)))
	// run the notary command to sign
	args := []string{
		"-s",
		notaryServer,
		"-d",
		path.Join(os.Getenv("HOME"), ".docker/trust"),
		"addhash",
		"-p",
		fmt.Sprintf("docker.io/%s", repo),
		tag,
		strconv.Itoa(length),
		"--sha256",
		hash,
		"-r",
		"targets/releases",
	}
	cmd := exec.Command("notary", args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// Pass the delegation passphrase (taken from the DCT env var) and the
	// encoded credentials through to the notary child process.
	cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", notaryDelegationPassphraseEnvVar, os.Getenv(dctEnvVar)), fmt.Sprintf("%s=%s", notaryAuthEnvVar, notaryAuth))
	log.Debugf("Executing: %v", cmd.Args)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to execute notary-tool: %v", err)
	}
	// report output
	fmt.Printf("Signed manifest index: %s:%s\n", repo, tag)
	return nil
}

View File

@@ -15,7 +15,7 @@
package authn
// AuthConfig contains authorization information for connecting to a Registry
// Inlined what we use from github.com/cli/cli/config/types
// Inlined what we use from github.com/docker/cli/cli/config/types
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`

View File

@@ -15,6 +15,7 @@
package gzip
import (
"bufio"
"bytes"
"compress/gzip"
"io"
@@ -38,11 +39,19 @@ func ReadCloser(r io.ReadCloser) io.ReadCloser {
func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
pr, pw := io.Pipe()
// For highly compressible layers, gzip.Writer will output a very small
// number of bytes per Write(). This is normally fine, but when pushing
// to a registry, we want to ensure that we're taking full advantage of
// the available bandwidth instead of sending tons of tiny writes over
// the wire.
// 64K ought to be small enough for anybody.
bw := bufio.NewWriterSize(pw, 2<<16)
// Returns err so we can pw.CloseWithError(err)
go func() error {
// TODO(go1.14): Just defer {pw,gw,r}.Close like you'd expect.
// Context: https://golang.org/issue/24283
gw, err := gzip.NewWriterLevel(pw, level)
gw, err := gzip.NewWriterLevel(bw, level)
if err != nil {
return pw.CloseWithError(err)
}
@@ -52,9 +61,20 @@ func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
defer gw.Close()
return pw.CloseWithError(err)
}
// Close gzip writer to Flush it and write gzip trailers.
if err := gw.Close(); err != nil {
return pw.CloseWithError(err)
}
// Flush bufio writer to ensure we write out everything.
if err := bw.Flush(); err != nil {
return pw.CloseWithError(err)
}
	// We don't really care if these fail.
defer pw.Close()
defer r.Close()
defer gw.Close()
return nil
}()

View File

@@ -264,6 +264,19 @@ func (l Path) writeLayer(layer v1.Layer) error {
return l.WriteBlob(d, r)
}
// RemoveBlob removes a file from the blobs directory in the Path
// at blobs/{hash.Algorithm}/{hash.Hex}
// It does *not* remove any reference to it from other manifests or indexes, or
// from the root index.json.
func (l Path) RemoveBlob(hash v1.Hash) error {
dir := l.path("blobs", hash.Algorithm)
err := os.Remove(filepath.Join(dir, hash.Hex))
if err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// WriteImage writes an image, including its manifest, config and all of its
// layers, to the blobs directory. If any blob already exists, as determined by
// the hash filename, does not write it.
@@ -314,6 +327,14 @@ func (l Path) WriteImage(img v1.Image) error {
return l.WriteBlob(d, ioutil.NopCloser(bytes.NewReader(manifest)))
}
type withLayer interface {
Layer(v1.Hash) (v1.Layer, error)
}
type withBlob interface {
Blob(v1.Hash) (io.ReadCloser, error)
}
func (l Path) writeIndexToFile(indexFile string, ii v1.ImageIndex) error {
index, err := ii.IndexManifest()
if err != nil {
@@ -343,6 +364,24 @@ func (l Path) writeIndexToFile(indexFile string, ii v1.ImageIndex) error {
default:
// TODO: The layout could reference arbitrary things, which we should
// probably just pass through.
var blob io.ReadCloser
// Workaround for #819.
if wl, ok := ii.(withLayer); ok {
layer, err := wl.Layer(desc.Digest)
if err != nil {
return err
}
blob, err = layer.Compressed()
} else if wb, ok := ii.(withBlob); ok {
blob, err = wb.Blob(desc.Digest)
}
if err != nil {
return err
}
if err := l.WriteBlob(desc.Digest, blob); err != nil {
return err
}
}
}

View File

@@ -46,11 +46,11 @@ e.g. to appease a registry with strict validation of images (_looking at you, GC
Rebase has [its own README](/cmd/crane/rebase.md).
This is the underlying implementation of [`crane rebase`](https://github.com/google/go-containerregistry/blob/master/cmd/crane/doc/crane_rebase.md).
This is the underlying implementation of [`crane rebase`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_rebase.md).
### `Extract`
Extract will flatten an image filesystem into a single tar stream,
respecting whiteout files.
This is the underlying implementation of [`crane export`](https://github.com/google/go-containerregistry/blob/master/cmd/crane/doc/crane_export.md).
This is the underlying implementation of [`crane export`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_export.md).

View File

@@ -16,6 +16,7 @@ package mutate
import (
"encoding/json"
"fmt"
"strings"
"github.com/google/go-containerregistry/pkg/logs"
@@ -65,6 +66,7 @@ type index struct {
mediaType *types.MediaType
imageMap map[v1.Hash]v1.Image
indexMap map[v1.Hash]v1.ImageIndex
layerMap map[v1.Hash]v1.Layer
}
var _ v1.ImageIndex = (*index)(nil)
@@ -86,6 +88,7 @@ func (i *index) compute() error {
i.imageMap = make(map[v1.Hash]v1.Image)
i.indexMap = make(map[v1.Hash]v1.ImageIndex)
i.layerMap = make(map[v1.Hash]v1.Layer)
m, err := i.base.IndexManifest()
if err != nil {
@@ -115,6 +118,8 @@ func (i *index) compute() error {
i.indexMap[desc.Digest] = idx
} else if img, ok := add.Add.(v1.Image); ok {
i.imageMap[desc.Digest] = img
} else if l, ok := add.Add.(v1.Layer); ok {
i.layerMap[desc.Digest] = l
} else {
logs.Warn.Printf("Unexpected index addendum: %T", add.Add)
}
@@ -151,6 +156,21 @@ func (i *index) ImageIndex(h v1.Hash) (v1.ImageIndex, error) {
return i.base.ImageIndex(h)
}
type withLayer interface {
Layer(v1.Hash) (v1.Layer, error)
}
// Workaround for #819.
func (i *index) Layer(h v1.Hash) (v1.Layer, error) {
if layer, ok := i.layerMap[h]; ok {
return layer, nil
}
if wl, ok := i.base.(withLayer); ok {
return wl.Layer(h)
}
return nil, fmt.Errorf("layer not found: %s", h)
}
// Digest returns the sha256 of this image's manifest.
func (i *index) Digest() (v1.Hash, error) {
if err := i.compute(); err != nil {

View File

@@ -76,7 +76,7 @@ which references an application/xml file from an image index.
That could look something like this:
![exotic image index anatomy](/images/index-anatomy-exotic.dot.svg)
![strange image index anatomy](/images/index-anatomy-strange.dot.svg)
Using a recursive index like this might not be possible with all registries,
but this flexibility allows for some interesting applications, e.g. the
@@ -114,4 +114,4 @@ however, it's possible to do _something_ useful with them via [`remote.Get`](htt
which doesn't try to interpret what is returned by the registry.
[`crane.Copy`](https://godoc.org/github.com/google/go-containerregistry/pkg/crane#Copy) takes advantage of this to implement support for copying schema 1 images,
see [here](https://github.com/google/go-containerregistry/blob/master/pkg/internal/legacy/copy.go).
see [here](https://github.com/google/go-containerregistry/blob/main/pkg/internal/legacy/copy.go).

View File

@@ -122,6 +122,30 @@ func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) {
return desc.ImageIndex()
}
// Workaround for #819.
func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) {
index, err := r.IndexManifest()
if err != nil {
return nil, err
}
for _, childDesc := range index.Manifests {
if h == childDesc.Digest {
l, err := partial.CompressedToLayer(&remoteLayer{
fetcher: r.fetcher,
digest: h,
})
if err != nil {
return nil, err
}
return &MountableLayer{
Layer: l,
Reference: r.Ref.Context().Digest(h.String()),
}, nil
}
}
return nil, fmt.Errorf("layer not found: %s", h)
}
func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) {
desc, err := r.childByPlatform(platform)
if err != nil {

View File

@@ -45,23 +45,27 @@ func MultiWrite(m map[name.Reference]Taggable, options ...Option) error {
}
}
o, err := makeOptions(repo, options...)
if err != nil {
return err
}
// Collect unique blobs (layers and config blobs).
blobs := map[v1.Hash]v1.Layer{}
newManifests := []map[name.Reference]Taggable{}
// Separate originally requested images and indexes, so we can push images first.
images, indexes := map[name.Reference]Taggable{}, map[name.Reference]Taggable{}
var err error
for ref, i := range m {
if img, ok := i.(v1.Image); ok {
images[ref] = i
if err := addImageBlobs(img, blobs); err != nil {
if err := addImageBlobs(img, blobs, o.allowNondistributableArtifacts); err != nil {
return err
}
continue
}
if idx, ok := i.(v1.ImageIndex); ok {
indexes[ref] = i
newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, 0)
newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, 0, o.allowNondistributableArtifacts)
if err != nil {
return err
}
@@ -70,10 +74,6 @@ func MultiWrite(m map[name.Reference]Taggable, options ...Option) error {
return fmt.Errorf("pushable resource was not Image or ImageIndex: %T", i)
}
o, err := makeOptions(repo, options...)
if err != nil {
return err
}
// Determine if any of the layers are Mountable, because if so we need
// to request Pull scope too.
ls := []v1.Layer{}
@@ -161,7 +161,7 @@ func MultiWrite(m map[name.Reference]Taggable, options ...Option) error {
// addIndexBlobs adds blobs to the set of blobs we intend to upload, and
// returns the latest copy of the ordered collection of manifests to upload.
func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repository, newManifests []map[name.Reference]Taggable, lvl int) ([]map[name.Reference]Taggable, error) {
func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repository, newManifests []map[name.Reference]Taggable, lvl int, allowNondistributableArtifacts bool) ([]map[name.Reference]Taggable, error) {
if lvl > len(newManifests)-1 {
newManifests = append(newManifests, map[name.Reference]Taggable{})
}
@@ -177,7 +177,7 @@ func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repo
if err != nil {
return nil, err
}
newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, lvl+1)
newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, lvl+1, allowNondistributableArtifacts)
if err != nil {
return nil, err
}
@@ -189,42 +189,59 @@ func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repo
if err != nil {
return nil, err
}
if err := addImageBlobs(img, blobs); err != nil {
if err := addImageBlobs(img, blobs, allowNondistributableArtifacts); err != nil {
return nil, err
}
// Also track the sub-image manifest to upload later by digest.
newManifests[lvl][repo.Digest(desc.Digest.String())] = img
default:
// Workaround for #819.
if wl, ok := idx.(withLayer); ok {
layer, err := wl.Layer(desc.Digest)
if err != nil {
return nil, err
}
if err := addLayerBlob(layer, blobs, allowNondistributableArtifacts); err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("unknown media type: %v", desc.MediaType)
}
}
}
return newManifests, nil
}
func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer) error {
func addLayerBlob(l v1.Layer, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error {
// Ignore foreign layers.
mt, err := l.MediaType()
if err != nil {
return err
}
if mt.IsDistributable() || allowNondistributableArtifacts {
d, err := l.Digest()
if err != nil {
return err
}
blobs[d] = l
}
return nil
}
func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error {
ls, err := img.Layers()
if err != nil {
return err
}
// Collect all layers.
for _, l := range ls {
d, err := l.Digest()
if err != nil {
if err := addLayerBlob(l, blobs, allowNondistributableArtifacts); err != nil {
return err
}
// Ignore foreign layers.
mt, err := l.MediaType()
if err != nil {
return err
}
if !mt.IsDistributable() {
// TODO(jonjohnsonjr): Add "allow-nondistributable-artifacts" option.
continue
}
blobs[d] = l
}
// Collect config blob.
@@ -232,10 +249,5 @@ func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer) error {
if err != nil {
return err
}
cld, err := cl.Digest()
if err != nil {
return err
}
blobs[cld] = cl
return nil
return addLayerBlob(cl, blobs, allowNondistributableArtifacts)
}

View File

@@ -36,6 +36,7 @@ type options struct {
context context.Context
jobs int
userAgent string
allowNondistributableArtifacts bool
}
var defaultPlatform = v1.Platform{
@@ -173,3 +174,13 @@ func WithUserAgent(ua string) Option {
return nil
}
}
// WithNondistributable includes non-distributable (foreign) layers
// when writing images, see:
// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers
//
// The default behaviour is to skip these layers
func WithNondistributable(o *options) error {
o.allowNondistributableArtifacts = true
return nil
}

View File

@@ -26,7 +26,7 @@ import (
// The set of query string keys that we expect to send as part of the registry
// protocol. Anything else is potentially dangerous to leak, as it's probably
// from a redirect. These redirects often included tokens or signed URLs.
var paramWhitelist = map[string]struct{}{
var paramAllowlist = map[string]struct{}{
// Token exchange
"scope": {},
"service": {},
@@ -105,8 +105,8 @@ func redactURL(original *url.URL) *url.URL {
qs := original.Query()
for k, v := range qs {
for i := range v {
if _, ok := paramWhitelist[k]; !ok {
// key is not in the whitelist
if _, ok := paramAllowlist[k]; !ok {
				// key is not in the allowlist
v[i] = "REDACTED"
}
}

View File

@@ -77,8 +77,7 @@ func Write(ref name.Reference, img v1.Image, options ...Option) error {
if err != nil {
return err
}
if !mt.IsDistributable() {
// TODO(jonjohnsonjr): Add "allow-nondistributable-artifacts" option.
if !mt.IsDistributable() && !o.allowNondistributableArtifacts {
continue
}
@@ -391,6 +390,10 @@ func (w *writer) uploadOne(l v1.Layer) error {
return retry.Retry(tryUpload, retry.IsTemporary, backoff)
}
type withLayer interface {
Layer(v1.Hash) (v1.Layer, error)
}
func (w *writer) writeIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) error {
index, err := ii.IndexManifest()
if err != nil {
@@ -430,6 +433,17 @@ func (w *writer) writeIndex(ref name.Reference, ii v1.ImageIndex, options ...Opt
if err := Write(ref, img, options...); err != nil {
return err
}
default:
// Workaround for #819.
if wl, ok := ii.(withLayer); ok {
layer, err := wl.Layer(desc.Digest)
if err != nil {
return err
}
if err := w.uploadOne(layer); err != nil {
return err
}
}
}
}

View File

@@ -15,6 +15,7 @@
package stream
import (
"bufio"
"compress/gzip"
"crypto/sha256"
"encoding/hex"
@@ -130,6 +131,7 @@ type compressedReader struct {
h, zh hash.Hash // collects digests of compressed and uncompressed stream.
pr io.Reader
bw *bufio.Writer
count *countWriter
l *Layer // stream.Layer to update upon Close.
@@ -144,7 +146,14 @@ func newCompressedReader(l *Layer) (*compressedReader, error) {
// capture compressed digest, and a countWriter to capture compressed
// size.
pr, pw := io.Pipe()
zw, err := gzip.NewWriterLevel(io.MultiWriter(pw, zh, count), l.compression)
// Write compressed bytes to be read by the pipe.Reader, hashed by zh, and counted by count.
mw := io.MultiWriter(pw, zh, count)
// Buffer the output of the gzip writer so we don't have to wait on pr to keep writing.
// 64K ought to be small enough for anybody.
bw := bufio.NewWriterSize(mw, 2<<16)
zw, err := gzip.NewWriterLevel(bw, l.compression)
if err != nil {
return nil, err
}
@@ -152,6 +161,7 @@ func newCompressedReader(l *Layer) (*compressedReader, error) {
cr := &compressedReader{
closer: newMultiCloser(zw, l.blob),
pr: pr,
bw: bw,
h: h,
zh: zh,
count: count,
@@ -183,6 +193,11 @@ func (cr *compressedReader) Close() error {
return err
}
// Flush the buffer.
if err := cr.bw.Flush(); err != nil {
return err
}
diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil)))
if err != nil {
return err

View File

@@ -44,6 +44,10 @@ func Index(idx v1.ImageIndex) error {
return nil
}
type withLayer interface {
Layer(v1.Hash) (v1.Layer, error)
}
func validateChildren(idx v1.ImageIndex) error {
manifest, err := idx.IndexManifest()
if err != nil {
@@ -76,9 +80,25 @@ func validateChildren(idx v1.ImageIndex) error {
errs = append(errs, fmt.Sprintf("failed to validate image MediaType[%d](%s): %v", i, desc.Digest, err))
}
default:
// Workaround for #819.
if wl, ok := idx.(withLayer); ok {
layer, err := wl.Layer(desc.Digest)
if err != nil {
return fmt.Errorf("failed to get layer Manifests[%d]: %v", i, err)
}
if err := Layer(layer); err != nil {
lerr := fmt.Sprintf("failed to validate layer Manifests[%d](%s): %v", i, desc.Digest, err)
if desc.MediaType.IsDistributable() {
errs = append(errs, lerr)
} else {
logs.Warn.Printf("nondistributable layer failure: %v", lerr)
}
}
} else {
logs.Warn.Printf("Unexpected manifest: %s", desc.MediaType)
}
}
}
if len(errs) != 0 {
return errors.New(strings.Join(errs, "\n"))

View File

@@ -0,0 +1,13 @@
language: go
go:
- 1.2
- 1.3
- 1.4
- 1.9
- "1.10"
- 1.11
- 1.12
script:
- go test

View File

@@ -0,0 +1,354 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributors Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third partys
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
partys negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a partys ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

View File

@@ -0,0 +1,65 @@
# Versioning Library for Go
[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version)
go-version is a library for parsing versions and version constraints,
and verifying versions against a set of constraints. go-version
can sort a collection of versions properly, handles prerelease/beta
versions, can increment versions, etc.
Versions used with go-version must follow [SemVer](http://semver.org/).
## Installation and Usage
Package documentation can be found on
[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
Installation can be done with a normal `go get`:
```
$ go get github.com/hashicorp/go-version
```
#### Version Parsing and Comparison
```go
v1, err := version.NewVersion("1.2")
v2, err := version.NewVersion("1.5+metadata")
// Comparison example. There is also GreaterThan, Equal, and just
// a simple Compare that returns an int allowing easy >=, <=, etc.
if v1.LessThan(v2) {
fmt.Printf("%s is less than %s", v1, v2)
}
```
#### Version Constraints
```go
v1, err := version.NewVersion("1.2")
// Constraints example.
constraints, err := version.NewConstraint(">= 1.0, < 1.4")
if constraints.Check(v1) {
fmt.Printf("%s satisfies constraints %s", v1, constraints)
}
```
#### Version Sorting
```go
versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
versions := make([]*version.Version, len(versionsRaw))
for i, raw := range versionsRaw {
v, _ := version.NewVersion(raw)
versions[i] = v
}
// After this, the versions are properly sorted
sort.Sort(version.Collection(versions))
```
## Issues and Contributing
If you find an issue with this library, please report an issue. If you'd
like, we welcome any contributions. Fork this library and submit a pull
request.

View File

@@ -0,0 +1,204 @@
package version
import (
"fmt"
"reflect"
"regexp"
"strings"
)
// Constraint represents a single constraint for a version, such as
// ">= 1.0". It pairs a comparison function with the version it was
// parsed against.
type Constraint struct {
	f        constraintFunc // comparison applied as f(candidate, check)
	check    *Version       // right-hand side of the operator
	original string         // the text this constraint was parsed from
}

// Constraints is a slice of constraints. We make a custom type so that
// we can add methods to it.
type Constraints []*Constraint

// constraintFunc reports whether version v satisfies a constraint
// whose reference version is c.
type constraintFunc func(v, c *Version) bool

// Operator lookup table and parser regexp; both populated once in init.
var constraintOperators map[string]constraintFunc

var constraintRegexp *regexp.Regexp
// init builds the operator dispatch table and compiles the constraint
// parser. The regexp matches an optional operator followed by a version
// string (VersionRegexpRaw), with surrounding whitespace allowed.
func init() {
	constraintOperators = map[string]constraintFunc{
		"":   constraintEqual,
		"=":  constraintEqual,
		"!=": constraintNotEqual,
		">":  constraintGreaterThan,
		"<":  constraintLessThan,
		">=": constraintGreaterThanEqual,
		"<=": constraintLessThanEqual,
		"~>": constraintPessimistic,
	}

	// Quote each operator so characters such as "~" are treated
	// literally inside the alternation.
	ops := make([]string, 0, len(constraintOperators))
	for k := range constraintOperators {
		ops = append(ops, regexp.QuoteMeta(k))
	}

	constraintRegexp = regexp.MustCompile(fmt.Sprintf(
		`^\s*(%s)\s*(%s)\s*$`,
		strings.Join(ops, "|"),
		VersionRegexpRaw))
}
// NewConstraint will parse one or more constraints from the given
// constraint string. The string must be a comma-separated list of
// constraints; parsing stops at the first malformed entry.
func NewConstraint(v string) (Constraints, error) {
	parts := strings.Split(v, ",")
	parsed := make([]*Constraint, 0, len(parts))
	for _, raw := range parts {
		c, err := parseSingle(raw)
		if err != nil {
			return nil, err
		}
		parsed = append(parsed, c)
	}
	return Constraints(parsed), nil
}
// Check tests if a version satisfies all the constraints.
// It short-circuits on the first constraint that fails.
func (cs Constraints) Check(v *Version) bool {
	for i := range cs {
		if !cs[i].Check(v) {
			return false
		}
	}
	return true
}
// String returns the comma-separated string form of the constraints.
func (cs Constraints) String() string {
	parts := make([]string, 0, len(cs))
	for _, c := range cs {
		parts = append(parts, c.String())
	}
	return strings.Join(parts, ",")
}
// Check tests if a constraint is validated by the given version.
func (c *Constraint) Check(v *Version) bool {
	return c.f(v, c.check)
}

// String returns the original text this constraint was parsed from.
func (c *Constraint) String() string {
	return c.original
}
// parseSingle parses a single constraint such as ">= 1.0" into a
// Constraint, returning an error for text the parser regexp rejects.
func parseSingle(v string) (*Constraint, error) {
	m := constraintRegexp.FindStringSubmatch(v)
	if m == nil {
		return nil, fmt.Errorf("Malformed constraint: %s", v)
	}

	operator, versionStr := m[1], m[2]
	check, err := NewVersion(versionStr)
	if err != nil {
		return nil, err
	}

	return &Constraint{
		f:        constraintOperators[operator],
		check:    check,
		original: v,
	}, nil
}
// prereleaseCheck applies the pre-release matching rules shared by the
// comparison operators: a pre-release constraint only matches a
// pre-release version with identical numeric segments, and a plain
// constraint never matches a pre-release version.
func prereleaseCheck(v, c *Version) bool {
	vPre := v.Prerelease() != ""
	cPre := c.Prerelease() != ""

	if cPre && vPre {
		// Both carry pre-release info: base segments must match exactly.
		return reflect.DeepEqual(c.Segments64(), v.Segments64())
	}
	if vPre {
		// The version is a pre-release but the constraint is not.
		return false
	}
	// Constraint-only pre-release, or neither: allowed here (the
	// pessimistic operator layers its own extra rule on top).
	return true
}
//-------------------------------------------------------------------
// Constraint functions
//-------------------------------------------------------------------

// constraintEqual reports v == c.
func constraintEqual(v, c *Version) bool {
	return v.Equal(c)
}

// constraintNotEqual reports v != c.
func constraintNotEqual(v, c *Version) bool {
	return !v.Equal(c)
}

// constraintGreaterThan reports v > c, subject to pre-release rules.
func constraintGreaterThan(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) > 0
}

// constraintLessThan reports v < c, subject to pre-release rules.
func constraintLessThan(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) < 0
}

// constraintGreaterThanEqual reports v >= c, subject to pre-release rules.
func constraintGreaterThanEqual(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) >= 0
}

// constraintLessThanEqual reports v <= c, subject to pre-release rules.
func constraintLessThanEqual(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) <= 0
}
// constraintPessimistic implements the "~>" operator: the version must
// be at least the constraint, match the constraint on every segment the
// constraint specifies except the last, and be no lower on that last
// specified segment.
func constraintPessimistic(v, c *Version) bool {
	// Using a pessimistic constraint with a pre-release, restricts versions to pre-releases
	if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") {
		return false
	}

	// If the version being checked is naturally less than the constraint, then there
	// is no way for the version to be valid against the constraint
	if v.LessThan(c) {
		return false
	}

	// We'll use this more than once, so grab the length now so it's a little cleaner
	// to write the later checks
	cs := len(c.segments)

	// If the version being checked has less specificity than the constraint, then there
	// is no way for the version to be valid against the constraint
	if cs > len(v.segments) {
		return false
	}

	// Check the segments in the constraint against those in the version. If the version
	// being checked, at any point, does not have the same values in each index of the
	// constraints segments, then it cannot be valid against the constraint.
	// NOTE(review): this loop bounds on c.si-1 (segments the user actually
	// wrote) while the final check below indexes by cs-1 (len(c.segments),
	// which newVersion pads to at least 3) — these differ for short inputs
	// like "~> 5"; confirm the asymmetry is intentional before changing it.
	for i := 0; i < c.si-1; i++ {
		if v.segments[i] != c.segments[i] {
			return false
		}
	}

	// Check the last part of the segment in the constraint. If the version segment at
	// this index is less than the constraints segment at this index, then it cannot
	// be valid against the constraint
	if c.segments[cs-1] > v.segments[cs-1] {
		return false
	}

	// If nothing has rejected the version by now, it's valid
	return true
}

View File

@@ -0,0 +1 @@
module github.com/hashicorp/go-version

View File

@@ -0,0 +1,380 @@
package version
import (
"bytes"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
)
// The compiled regular expression used to test the validity of a version.
var (
	versionRegexp *regexp.Regexp
	semverRegexp  *regexp.Regexp
)

// The raw regular expression string used for testing the validity
// of a version.
const (
	// VersionRegexpRaw matches an optional "v" prefix, dotted numeric
	// segments, an optional pre-release part (with or without a "-"
	// separator), and optional "+" build metadata.
	VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
		`(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
		`(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
		`?`

	// SemverRegexpRaw requires a separator between version and prerelease
	SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
		`(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
		`(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
		`?`
)

// Version represents a single version.
type Version struct {
	metadata string  // text after "+", if any
	pre      string  // pre-release text, if any
	segments []int64 // numeric segments, padded to at least three entries
	si       int     // number of segments actually present in the input
	original string  // the exact string this version was parsed from
}

// init anchors the raw patterns so a version must match in full.
func init() {
	versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
	semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
}
// NewVersion parses the given version and returns a new
// Version. It uses the permissive pattern, which allows a
// pre-release without a "-" separator.
func NewVersion(v string) (*Version, error) {
	return newVersion(v, versionRegexp)
}

// NewSemver parses the given version and returns a new
// Version that adheres strictly to SemVer specs
// https://semver.org/
func NewSemver(v string) (*Version, error) {
	return newVersion(v, semverRegexp)
}
// newVersion parses v with the supplied pattern (the permissive
// versionRegexp or the strict semverRegexp) and builds a Version.
// It returns an error when the pattern rejects v or a numeric
// segment does not fit in an int64.
func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
	matches := pattern.FindStringSubmatch(v)
	if matches == nil {
		return nil, fmt.Errorf("Malformed version: %s", v)
	}
	segmentsStr := strings.Split(matches[1], ".")
	segments := make([]int64, len(segmentsStr))
	si := 0
	for i, str := range segmentsStr {
		val, err := strconv.ParseInt(str, 10, 64)
		if err != nil {
			return nil, fmt.Errorf(
				"Error parsing version: %s", err)
		}

		// ParseInt already yields an int64; no conversion needed.
		segments[i] = val
		si++
	}

	// Even though we could support more than three segments, if we
	// got less than three, pad it with 0s. This is to cover the basic
	// default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum
	for i := len(segments); i < 3; i++ {
		segments = append(segments, 0)
	}

	// The pre-release capture group differs between the two patterns:
	// prefer the separator-less group, falling back to the "-" form.
	pre := matches[7]
	if pre == "" {
		pre = matches[4]
	}

	return &Version{
		metadata: matches[10],
		pre:      pre,
		segments: segments,
		si:       si,
		original: v,
	}, nil
}
// Must is a helper that wraps a call to a function returning (*Version, error)
// and panics if error is non-nil.
func Must(v *Version, err error) *Version {
	if err == nil {
		return v
	}
	panic(err)
}
// Compare compares this version to another version. This
// returns -1, 0, or 1 if this version is smaller, equal,
// or larger than the other version, respectively.
//
// If you want boolean results, use the LessThan, Equal,
// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods.
func (v *Version) Compare(other *Version) int {
	// A quick, efficient equality check
	if v.String() == other.String() {
		return 0
	}

	segmentsSelf := v.Segments64()
	segmentsOther := other.Segments64()

	// If the segments are the same, we must compare on prerelease info.
	// A version without a pre-release sorts after one that has it.
	if reflect.DeepEqual(segmentsSelf, segmentsOther) {
		preSelf := v.Prerelease()
		preOther := other.Prerelease()
		if preSelf == "" && preOther == "" {
			return 0
		}
		if preSelf == "" {
			return 1
		}
		if preOther == "" {
			return -1
		}
		return comparePrereleases(preSelf, preOther)
	}

	// Get the highest specificity (hS), or if they're equal, just use segmentSelf length
	lenSelf := len(segmentsSelf)
	lenOther := len(segmentsOther)
	hS := lenSelf
	if lenSelf < lenOther {
		hS = lenOther
	}
	// Compare the segments
	// Because a constraint could have more/less specificity than the version it's
	// checking, we need to account for a lopsided or jagged comparison
	for i := 0; i < hS; i++ {
		if i > lenSelf-1 {
			// This means Self had the lower specificity
			// Check to see if the remaining segments in Other are all zeros
			if !allZero(segmentsOther[i:]) {
				// if not, it means that Other has to be greater than Self
				return -1
			}
			break
		} else if i > lenOther-1 {
			// this means Other had the lower specificity
			// Check to see if the remaining segments in Self are all zeros -
			if !allZero(segmentsSelf[i:]) {
				//if not, it means that Self has to be greater than Other
				return 1
			}
			break
		}
		lhs := segmentsSelf[i]
		rhs := segmentsOther[i]

		if lhs == rhs {
			continue
		} else if lhs < rhs {
			return -1
		}
		// Otherwise lhs was > rhs; they're not equal
		return 1
	}

	// if we got this far, they're equal
	return 0
}
// allZero reports whether every entry in segs is zero.
// A nil or empty slice is vacuously all-zero.
func allZero(segs []int64) bool {
	for i := range segs {
		if segs[i] != 0 {
			return false
		}
	}
	return true
}
// comparePart compares one dot-separated pre-release part against
// another, returning -1, 0, or 1. Numeric parts sort below
// alphanumeric parts; an empty (missing) part sorts below a numeric
// part but above an alphanumeric one.
func comparePart(preSelf string, preOther string) int {
	if preSelf == preOther {
		return 0
	}

	selfInt, selfErr := strconv.ParseInt(preSelf, 10, 64)
	selfNumeric := selfErr == nil
	otherInt, otherErr := strconv.ParseInt(preOther, 10, 64)
	otherNumeric := otherErr == nil

	switch {
	case preSelf == "":
		// A missing part loses to a number, beats a string.
		if otherNumeric {
			return -1
		}
		return 1
	case preOther == "":
		if selfNumeric {
			return 1
		}
		return -1
	case selfNumeric && !otherNumeric:
		return -1
	case !selfNumeric && otherNumeric:
		return 1
	case selfNumeric:
		// Both numeric: compare the parsed values.
		if selfInt > otherInt {
			return 1
		}
		return -1
	default:
		// Both alphanumeric: compare lexically.
		if preSelf > preOther {
			return 1
		}
		return -1
	}
}
// comparePrereleases compares two pre-release strings part by part
// (parts separated by "."), returning -1, 0, or 1. The first part
// that differs decides the result; a missing part is treated as the
// empty string.
func comparePrereleases(v string, other string) int {
	// Identical pre-releases are equal without further work.
	if v == other {
		return 0
	}

	selfParts := strings.Split(v, ".")
	otherParts := strings.Split(other, ".")

	longest := len(selfParts)
	if len(otherParts) > longest {
		longest = len(otherParts)
	}

	for i := 0; i < longest; i++ {
		var selfPart, otherPart string
		if i < len(selfParts) {
			selfPart = selfParts[i]
		}
		if i < len(otherParts) {
			otherPart = otherParts[i]
		}

		if c := comparePart(selfPart, otherPart); c != 0 {
			return c
		}
	}

	return 0
}
// Equal tests if two versions are equal.
func (v *Version) Equal(o *Version) bool {
	return v.Compare(o) == 0
}

// GreaterThan tests if this version is greater than another version.
func (v *Version) GreaterThan(o *Version) bool {
	return v.Compare(o) > 0
}

// GreaterThanOrEqual tests if this version is greater than or equal to another version.
func (v *Version) GreaterThanOrEqual(o *Version) bool {
	return v.Compare(o) >= 0
}

// LessThan tests if this version is less than another version.
func (v *Version) LessThan(o *Version) bool {
	return v.Compare(o) < 0
}

// LessThanOrEqual tests if this version is less than or equal to another version.
func (v *Version) LessThanOrEqual(o *Version) bool {
	return v.Compare(o) <= 0
}
// Metadata returns any metadata that was part of the version
// string.
//
// Metadata is anything that comes after the "+" in the version.
// For example, with "1.2.3+beta", the metadata is "beta".
func (v *Version) Metadata() string {
	return v.metadata
}

// Prerelease returns any prerelease data that is part of the version,
// or blank if there is no prerelease data.
//
// Prerelease information is anything that comes after the "-" in the
// version (but before any metadata). For example, with "1.2.3-beta",
// the prerelease information is "beta".
func (v *Version) Prerelease() string {
	return v.pre
}
// Segments returns the numeric segments of the version as a slice of ints.
//
// This excludes any metadata or pre-release information. For example,
// for a version "1.2.3-beta", segments will return a slice of
// 1, 2, 3.
func (v *Version) Segments() []int {
	out := make([]int, 0, len(v.segments))
	for _, s := range v.segments {
		out = append(out, int(s))
	}
	return out
}
// Segments64 returns the numeric segments of the version as a slice of int64s.
//
// This excludes any metadata or pre-release information. For example,
// for a version "1.2.3-beta", segments will return a slice of
// 1, 2, 3. The returned slice is a fresh copy, so callers may mutate it.
func (v *Version) Segments64() []int64 {
	return append([]int64(nil), v.segments...)
}
// String returns the full version string included pre-release
// and metadata information.
//
// This value is rebuilt according to the parsed segments and other
// information. Therefore, ambiguities in the version string such as
// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and
// missing parts (1.0 => 1.0.0) will be made into a canonicalized form
// as shown in the parenthesized examples.
func (v *Version) String() string {
	var buf bytes.Buffer
	fmtParts := make([]string, len(v.segments))
	for i, s := range v.segments {
		// We can ignore err here since we've pre-parsed the values in segments
		str := strconv.FormatInt(s, 10)
		fmtParts[i] = str
	}
	// Write the joined segments directly. The previous code passed this
	// dynamic string as a Printf format (`fmt.Fprintf(&buf, join)`),
	// which trips the `go vet` printf check and would misinterpret any
	// '%' the string ever contained.
	buf.WriteString(strings.Join(fmtParts, "."))
	if v.pre != "" {
		fmt.Fprintf(&buf, "-%s", v.pre)
	}
	if v.metadata != "" {
		fmt.Fprintf(&buf, "+%s", v.metadata)
	}

	return buf.String()
}
// Original returns the original parsed version as-is, including any
// potential whitespace, `v` prefix, etc.
func (v *Version) Original() string {
	return v.original
}

View File

@@ -0,0 +1,17 @@
package version
// Collection is a type that implements the sort.Interface interface
// so that versions can be sorted.
type Collection []*Version

// Len returns the number of versions in the collection.
func (v Collection) Len() int {
	return len(v)
}

// Less reports whether the version at index i sorts before the one at j.
func (v Collection) Less(i, j int) bool {
	return v[i].LessThan(v[j])
}

// Swap exchanges the versions at indexes i and j.
func (v Collection) Swap(i, j int) {
	v[i], v[j] = v[j], v[i]
}

View File

@@ -22,6 +22,7 @@ type HWND uintptr
const (
InvalidHandle = ^Handle(0)
InvalidHWND = ^HWND(0)
// Flags for DefineDosDevice.
DDD_EXACT_MATCH_ON_REMOVE = 0x00000004
@@ -215,7 +216,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW
//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error)
//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW
//sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32) = user32.GetWindowThreadProcessId
//sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId
//sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow
//sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW
//sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx
@@ -264,6 +265,9 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect
//sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile
//sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW
//sys FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.FindFirstChangeNotificationW
//sys FindNextChangeNotification(handle Handle) (err error)
//sys FindCloseChangeNotification(handle Handle) (err error)
//sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW
//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) = crypt32.CertOpenStore
//sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore
@@ -277,6 +281,13 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext
//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext
//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy
//sys CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) = crypt32.CertGetNameStringW
//sys CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) = crypt32.CertFindExtension
//sys CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) = crypt32.CryptQueryObject
//sys CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) = crypt32.CryptDecodeObject
//sys CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptProtectData
//sys CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptUnprotectData
//sys WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) = wintrust.WinVerifyTrustEx
//sys RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW
//sys RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey
//sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW

View File

@@ -227,7 +227,7 @@ const (
)
const (
// filters for ReadDirectoryChangesW
// filters for ReadDirectoryChangesW and FindFirstChangeNotificationW
FILE_NOTIFY_CHANGE_FILE_NAME = 0x001
FILE_NOTIFY_CHANGE_DIR_NAME = 0x002
FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x004
@@ -427,6 +427,67 @@ const (
CERT_CLOSE_STORE_FORCE_FLAG = 0x00000001
CERT_CLOSE_STORE_CHECK_FLAG = 0x00000002
/* CryptQueryObject object type */
CERT_QUERY_OBJECT_FILE = 1
CERT_QUERY_OBJECT_BLOB = 2
/* CryptQueryObject content type flags */
CERT_QUERY_CONTENT_CERT = 1
CERT_QUERY_CONTENT_CTL = 2
CERT_QUERY_CONTENT_CRL = 3
CERT_QUERY_CONTENT_SERIALIZED_STORE = 4
CERT_QUERY_CONTENT_SERIALIZED_CERT = 5
CERT_QUERY_CONTENT_SERIALIZED_CTL = 6
CERT_QUERY_CONTENT_SERIALIZED_CRL = 7
CERT_QUERY_CONTENT_PKCS7_SIGNED = 8
CERT_QUERY_CONTENT_PKCS7_UNSIGNED = 9
CERT_QUERY_CONTENT_PKCS7_SIGNED_EMBED = 10
CERT_QUERY_CONTENT_PKCS10 = 11
CERT_QUERY_CONTENT_PFX = 12
CERT_QUERY_CONTENT_CERT_PAIR = 13
CERT_QUERY_CONTENT_PFX_AND_LOAD = 14
CERT_QUERY_CONTENT_FLAG_CERT = (1 << CERT_QUERY_CONTENT_CERT)
CERT_QUERY_CONTENT_FLAG_CTL = (1 << CERT_QUERY_CONTENT_CTL)
CERT_QUERY_CONTENT_FLAG_CRL = (1 << CERT_QUERY_CONTENT_CRL)
CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE = (1 << CERT_QUERY_CONTENT_SERIALIZED_STORE)
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT = (1 << CERT_QUERY_CONTENT_SERIALIZED_CERT)
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CTL = (1 << CERT_QUERY_CONTENT_SERIALIZED_CTL)
CERT_QUERY_CONTENT_FLAG_SERIALIZED_CRL = (1 << CERT_QUERY_CONTENT_SERIALIZED_CRL)
CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED = (1 << CERT_QUERY_CONTENT_PKCS7_SIGNED)
CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED = (1 << CERT_QUERY_CONTENT_PKCS7_UNSIGNED)
CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED = (1 << CERT_QUERY_CONTENT_PKCS7_SIGNED_EMBED)
CERT_QUERY_CONTENT_FLAG_PKCS10 = (1 << CERT_QUERY_CONTENT_PKCS10)
CERT_QUERY_CONTENT_FLAG_PFX = (1 << CERT_QUERY_CONTENT_PFX)
CERT_QUERY_CONTENT_FLAG_CERT_PAIR = (1 << CERT_QUERY_CONTENT_CERT_PAIR)
CERT_QUERY_CONTENT_FLAG_PFX_AND_LOAD = (1 << CERT_QUERY_CONTENT_PFX_AND_LOAD)
CERT_QUERY_CONTENT_FLAG_ALL = (CERT_QUERY_CONTENT_FLAG_CERT | CERT_QUERY_CONTENT_FLAG_CTL | CERT_QUERY_CONTENT_FLAG_CRL | CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CTL | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CRL | CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED | CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED | CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED | CERT_QUERY_CONTENT_FLAG_PKCS10 | CERT_QUERY_CONTENT_FLAG_PFX | CERT_QUERY_CONTENT_FLAG_CERT_PAIR)
CERT_QUERY_CONTENT_FLAG_ALL_ISSUER_CERT = (CERT_QUERY_CONTENT_FLAG_CERT | CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT | CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED | CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED)
/* CryptQueryObject format type flags */
CERT_QUERY_FORMAT_BINARY = 1
CERT_QUERY_FORMAT_BASE64_ENCODED = 2
CERT_QUERY_FORMAT_ASN_ASCII_HEX_ENCODED = 3
CERT_QUERY_FORMAT_FLAG_BINARY = (1 << CERT_QUERY_FORMAT_BINARY)
CERT_QUERY_FORMAT_FLAG_BASE64_ENCODED = (1 << CERT_QUERY_FORMAT_BASE64_ENCODED)
CERT_QUERY_FORMAT_FLAG_ASN_ASCII_HEX_ENCODED = (1 << CERT_QUERY_FORMAT_ASN_ASCII_HEX_ENCODED)
CERT_QUERY_FORMAT_FLAG_ALL = (CERT_QUERY_FORMAT_FLAG_BINARY | CERT_QUERY_FORMAT_FLAG_BASE64_ENCODED | CERT_QUERY_FORMAT_FLAG_ASN_ASCII_HEX_ENCODED)
/* CertGetNameString name types */
CERT_NAME_EMAIL_TYPE = 1
CERT_NAME_RDN_TYPE = 2
CERT_NAME_ATTR_TYPE = 3
CERT_NAME_SIMPLE_DISPLAY_TYPE = 4
CERT_NAME_FRIENDLY_DISPLAY_TYPE = 5
CERT_NAME_DNS_TYPE = 6
CERT_NAME_URL_TYPE = 7
CERT_NAME_UPN_TYPE = 8
/* CertGetNameString flags */
CERT_NAME_ISSUER_FLAG = 0x1
CERT_NAME_DISABLE_IE4_UTF8_FLAG = 0x10000
CERT_NAME_SEARCH_ALL_NAMES_FLAG = 0x2
CERT_NAME_STR_ENABLE_PUNYCODE_FLAG = 0x00200000
/* AuthType values for SSLExtraCertChainPolicyPara struct */
AUTHTYPE_CLIENT = 1
AUTHTYPE_SERVER = 2
@@ -437,6 +498,22 @@ const (
SECURITY_FLAG_IGNORE_WRONG_USAGE = 0x00000200
SECURITY_FLAG_IGNORE_CERT_CN_INVALID = 0x00001000
SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000
/* Flags for Crypt[Un]ProtectData */
CRYPTPROTECT_UI_FORBIDDEN = 0x1
CRYPTPROTECT_LOCAL_MACHINE = 0x4
CRYPTPROTECT_CRED_SYNC = 0x8
CRYPTPROTECT_AUDIT = 0x10
CRYPTPROTECT_NO_RECOVERY = 0x20
CRYPTPROTECT_VERIFY_PROTECTION = 0x40
CRYPTPROTECT_CRED_REGENERATE = 0x80
/* Flags for CryptProtectPromptStruct */
CRYPTPROTECT_PROMPT_ON_UNPROTECT = 1
CRYPTPROTECT_PROMPT_ON_PROTECT = 2
CRYPTPROTECT_PROMPT_RESERVED = 4
CRYPTPROTECT_PROMPT_STRONG = 8
CRYPTPROTECT_PROMPT_REQUIRE_STRONG = 16
)
const (
@@ -459,10 +536,58 @@ const (
REALTIME_PRIORITY_CLASS = 0x00000100
)
/* wintrust.h constants for WinVerifyTrustEx */
const (
WTD_UI_ALL = 1
WTD_UI_NONE = 2
WTD_UI_NOBAD = 3
WTD_UI_NOGOOD = 4
WTD_REVOKE_NONE = 0
WTD_REVOKE_WHOLECHAIN = 1
WTD_CHOICE_FILE = 1
WTD_CHOICE_CATALOG = 2
WTD_CHOICE_BLOB = 3
WTD_CHOICE_SIGNER = 4
WTD_CHOICE_CERT = 5
WTD_STATEACTION_IGNORE = 0x00000000
WTD_STATEACTION_VERIFY = 0x00000010
WTD_STATEACTION_CLOSE = 0x00000002
WTD_STATEACTION_AUTO_CACHE = 0x00000003
WTD_STATEACTION_AUTO_CACHE_FLUSH = 0x00000004
WTD_USE_IE4_TRUST_FLAG = 0x1
WTD_NO_IE4_CHAIN_FLAG = 0x2
WTD_NO_POLICY_USAGE_FLAG = 0x4
WTD_REVOCATION_CHECK_NONE = 0x10
WTD_REVOCATION_CHECK_END_CERT = 0x20
WTD_REVOCATION_CHECK_CHAIN = 0x40
WTD_REVOCATION_CHECK_CHAIN_EXCLUDE_ROOT = 0x80
WTD_SAFER_FLAG = 0x100
WTD_HASH_ONLY_FLAG = 0x200
WTD_USE_DEFAULT_OSVER_CHECK = 0x400
WTD_LIFETIME_SIGNING_FLAG = 0x800
WTD_CACHE_ONLY_URL_RETRIEVAL = 0x1000
WTD_DISABLE_MD2_MD4 = 0x2000
WTD_MOTW = 0x4000
WTD_UICONTEXT_EXECUTE = 0
WTD_UICONTEXT_INSTALL = 1
)
var (
OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00")
OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00")
OID_SGC_NETSCAPE = []byte("2.16.840.1.113730.4.1\x00")
WINTRUST_ACTION_GENERIC_VERIFY_V2 = GUID{
Data1: 0xaac56b,
Data2: 0xcd44,
Data3: 0x11d0,
Data4: [8]byte{0x8c, 0xc2, 0x0, 0xc0, 0x4f, 0xc2, 0x95, 0xee},
}
)
// Pointer represents a pointer to an arbitrary Windows type.
@@ -1051,7 +1176,57 @@ type MibIfRow struct {
}
type CertInfo struct {
// Not implemented
Version uint32
SerialNumber CryptIntegerBlob
SignatureAlgorithm CryptAlgorithmIdentifier
Issuer CertNameBlob
NotBefore Filetime
NotAfter Filetime
Subject CertNameBlob
SubjectPublicKeyInfo CertPublicKeyInfo
IssuerUniqueId CryptBitBlob
SubjectUniqueId CryptBitBlob
CountExtensions uint32
Extensions *CertExtension
}
type CertExtension struct {
ObjId *byte
Critical int32
Value CryptObjidBlob
}
type CryptAlgorithmIdentifier struct {
ObjId *byte
Parameters CryptObjidBlob
}
type CertPublicKeyInfo struct {
Algorithm CryptAlgorithmIdentifier
PublicKey CryptBitBlob
}
type DataBlob struct {
Size uint32
Data *byte
}
type CryptIntegerBlob DataBlob
type CryptUintBlob DataBlob
type CryptObjidBlob DataBlob
type CertNameBlob DataBlob
type CertRdnValueBlob DataBlob
type CertBlob DataBlob
type CrlBlob DataBlob
type CryptDataBlob DataBlob
type CryptHashBlob DataBlob
type CryptDigestBlob DataBlob
type CryptDerBlob DataBlob
type CryptAttrBlob DataBlob
type CryptBitBlob struct {
Size uint32
Data *byte
UnusedBits uint32
}
type CertContext struct {
@@ -1157,9 +1332,64 @@ type CertChainPolicyStatus struct {
ExtraPolicyStatus Pointer
}
type CryptDataBlob struct {
type CertPolicyInfo struct {
Identifier *byte
CountQualifiers uint32
Qualifiers *CertPolicyQualifierInfo
}
type CertPoliciesInfo struct {
Count uint32
PolicyInfos *CertPolicyInfo
}
type CertPolicyQualifierInfo struct {
// Not implemented
}
type CertStrongSignPara struct {
Size uint32
Data *byte
InfoChoice uint32
InfoOrSerializedInfoOrOID unsafe.Pointer
}
type CryptProtectPromptStruct struct {
Size uint32
PromptFlags uint32
App HWND
Prompt *uint16
}
type WinTrustData struct {
Size uint32
PolicyCallbackData uintptr
SIPClientData uintptr
UIChoice uint32
RevocationChecks uint32
UnionChoice uint32
FileOrCatalogOrBlobOrSgnrOrCert unsafe.Pointer
StateAction uint32
StateData Handle
URLReference *uint16
ProvFlags uint32
UIContext uint32
SignatureSettings *WinTrustSignatureSettings
}
type WinTrustFileInfo struct {
Size uint32
FilePath *uint16
File Handle
KnownSubject *GUID
}
type WinTrustSignatureSettings struct {
Size uint32
Index uint32
Flags uint32
SecondarySigs uint32
VerifiedSigIndex uint32
CryptoPolicy *CertStrongSignPara
}
const (

View File

@@ -51,6 +51,7 @@ var (
modshell32 = NewLazySystemDLL("shell32.dll")
moduser32 = NewLazySystemDLL("user32.dll")
moduserenv = NewLazySystemDLL("userenv.dll")
modwintrust = NewLazySystemDLL("wintrust.dll")
modws2_32 = NewLazySystemDLL("ws2_32.dll")
modwtsapi32 = NewLazySystemDLL("wtsapi32.dll")
@@ -145,12 +146,18 @@ var (
procCertDeleteCertificateFromStore = modcrypt32.NewProc("CertDeleteCertificateFromStore")
procCertDuplicateCertificateContext = modcrypt32.NewProc("CertDuplicateCertificateContext")
procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore")
procCertFindExtension = modcrypt32.NewProc("CertFindExtension")
procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain")
procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext")
procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain")
procCertGetNameStringW = modcrypt32.NewProc("CertGetNameStringW")
procCertOpenStore = modcrypt32.NewProc("CertOpenStore")
procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW")
procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy")
procCryptDecodeObject = modcrypt32.NewProc("CryptDecodeObject")
procCryptProtectData = modcrypt32.NewProc("CryptProtectData")
procCryptQueryObject = modcrypt32.NewProc("CryptQueryObject")
procCryptUnprotectData = modcrypt32.NewProc("CryptUnprotectData")
procPFXImportCertStore = modcrypt32.NewProc("PFXImportCertStore")
procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W")
procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W")
@@ -183,9 +190,12 @@ var (
procDuplicateHandle = modkernel32.NewProc("DuplicateHandle")
procExitProcess = modkernel32.NewProc("ExitProcess")
procFindClose = modkernel32.NewProc("FindClose")
procFindCloseChangeNotification = modkernel32.NewProc("FindCloseChangeNotification")
procFindFirstChangeNotificationW = modkernel32.NewProc("FindFirstChangeNotificationW")
procFindFirstFileW = modkernel32.NewProc("FindFirstFileW")
procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW")
procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW")
procFindNextChangeNotification = modkernel32.NewProc("FindNextChangeNotification")
procFindNextFileW = modkernel32.NewProc("FindNextFileW")
procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW")
procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW")
@@ -347,6 +357,7 @@ var (
procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock")
procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock")
procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW")
procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx")
procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW")
procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW")
procWSACleanup = modws2_32.NewProc("WSACleanup")
@@ -1199,6 +1210,12 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex
return
}
func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) {
r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions)))
ret = (*CertExtension)(unsafe.Pointer(r0))
return
}
func CertFreeCertificateChain(ctx *CertChainContext) {
syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0)
return
@@ -1220,6 +1237,12 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a
return
}
func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) {
r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size))
chars = uint32(r0)
return
}
func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) {
r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0)
handle = Handle(r0)
@@ -1246,6 +1269,38 @@ func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext
return
}
func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) {
r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) {
r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags))
store = Handle(r0)
@@ -1525,6 +1580,36 @@ func FindClose(handle Handle) (err error) {
return
}
func FindCloseChangeNotification(handle Handle) (err error) {
r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(path)
if err != nil {
return
}
return _FindFirstChangeNotification(_p0, watchSubtree, notifyFilter)
}
func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) {
var _p1 uint32
if watchSubtree {
_p1 = 1
}
r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
}
return
}
func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) {
r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0)
handle = Handle(r0)
@@ -1552,6 +1637,14 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er
return
}
func FindNextChangeNotification(handle Handle) (err error) {
r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func findNextFile1(handle Handle, data *win32finddata1) (err error) {
r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
if r1 == 0 {
@@ -2904,9 +2997,12 @@ func GetShellWindow() (shellWindow HWND) {
return
}
func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32) {
r0, _, _ := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0)
func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0)
tid = uint32(r0)
if tid == 0 {
err = errnoErr(e1)
}
return
}
@@ -2947,6 +3043,14 @@ func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) {
return
}
func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
return
}
func FreeAddrInfoW(addrinfo *AddrinfoW) {
syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0)
return

View File

@@ -313,7 +313,7 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
# github.com/google/go-containerregistry v0.4.0
# github.com/google/go-containerregistry v0.4.1-0.20210208222243-cbafe638a7a9
## explicit
github.com/google/go-containerregistry/pkg/authn
github.com/google/go-containerregistry/pkg/internal/redact
@@ -375,6 +375,7 @@ github.com/gorilla/websocket
## explicit
# github.com/hashicorp/go-version v1.2.0
## explicit
github.com/hashicorp/go-version
# github.com/hashicorp/golang-lru v0.5.3
## explicit
# github.com/inconshreveable/mousetrap v1.0.0
@@ -403,7 +404,7 @@ github.com/linuxkit/virtsock/pkg/vsock
# github.com/matttproud/golang_protobuf_extensions v1.0.1
## explicit
github.com/matttproud/golang_protobuf_extensions/pbutil
# github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2
# github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0
## explicit
# github.com/miekg/pkcs11 v1.0.3
## explicit
@@ -434,10 +435,6 @@ github.com/moul/gotty-client
## explicit
# github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e
## explicit
# github.com/onsi/ginkgo v1.12.0
## explicit
# github.com/onsi/gomega v1.9.0
## explicit
# github.com/opencontainers/go-digest v1.0.0
## explicit
github.com/opencontainers/go-digest
@@ -625,6 +622,8 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/terminal
# golang.org/x/mod v0.4.1
## explicit
# golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
## explicit
golang.org/x/net/context
@@ -646,7 +645,7 @@ golang.org/x/oauth2/jwt
## explicit
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
# golang.org/x/sys v0.0.0-20210113181707-4bcb84eeeb78
# golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
## explicit
golang.org/x/sys/cpu
golang.org/x/sys/internal/unsafeheader
@@ -659,7 +658,7 @@ golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
## explicit
# golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3
# golang.org/x/tools v0.1.0
## explicit
# gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e
## explicit
@@ -767,8 +766,6 @@ google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/types/known/anypb
google.golang.org/protobuf/types/known/durationpb
google.golang.org/protobuf/types/known/timestamppb
# gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f
## explicit
# gopkg.in/dancannon/gorethink.v3 v3.0.5
## explicit
# gopkg.in/fatih/pool.v2 v2.0.0
@@ -788,10 +785,14 @@ gopkg.in/yaml.v3
## explicit
# k8s.io/apiserver v0.18.8
## explicit
# k8s.io/code-generator v0.20.1
# k8s.io/code-generator v0.20.2
## explicit
# k8s.io/csi-translation-lib v0.18.8
## explicit
# k8s.io/gengo v0.0.0-20210203185629-de9496dff47b
## explicit
# k8s.io/klog/v2 v2.5.0
## explicit
# rsc.io/letsencrypt v0.0.3
## explicit
# sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e

View File

@@ -16,7 +16,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name "${NAME}" ../test.yml
linuxkit build -format kernel+initrd -name "${NAME}" ../test.yml
[ -f "${NAME}-kernel" ] || exit 1
[ -f "${NAME}-initrd.img" ] || exit 1
[ -f "${NAME}-cmdline" ] || exit 1

View File

@@ -16,7 +16,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format iso-bios -name "${NAME}" ../test.yml
linuxkit build -format iso-bios -name "${NAME}" ../test.yml
[ -f "${NAME}.iso" ] || exit 1
exit 0

View File

@@ -16,7 +16,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format iso-efi -name "${NAME}" ../test.yml
linuxkit build -format iso-efi -name "${NAME}" ../test.yml
[ -f "${NAME}"-efi.iso ] || exit 1
exit 0

View File

@@ -16,7 +16,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format gcp -name "${NAME}" ../test.yml
linuxkit build -format gcp -name "${NAME}" ../test.yml
[ -f "${NAME}.img.tar.gz" ] || exit 1
exit 0

View File

@@ -16,7 +16,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format aws -name "${NAME}" ../test.yml
linuxkit build -format aws -name "${NAME}" ../test.yml
[ -f "${NAME}.raw" ] || exit 1
exit 0

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format vhd -name "${NAME}" ../test.yml
linuxkit build -format vhd -name "${NAME}" ../test.yml
[ -f "${NAME}.vhd" ] || exit 1
exit 0

View File

@@ -16,7 +16,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format vmdk -name "${NAME}" ../test.yml
linuxkit build -format vmdk -name "${NAME}" ../test.yml
[ -f "${NAME}.vmdk" ] || exit 1
exit 0

View File

@@ -16,7 +16,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format raw-bios -name "${NAME}" ../test.yml
linuxkit build -format raw-bios -name "${NAME}" ../test.yml
[ -f "${NAME}-bios.img" ] || exit 1
exit 0

View File

@@ -16,7 +16,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format raw-efi -name "${NAME}" ../test.yml
linuxkit build -format raw-efi -name "${NAME}" ../test.yml
[ -f "${NAME}-efi.img" ] || exit 1
exit 0

View File

@@ -16,7 +16,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format qcow2-bios -name "${NAME}" ../test.yml
linuxkit build -format qcow2-bios -name "${NAME}" ../test.yml
[ -f "${NAME}.qcow2" ] || exit 1
exit 0

View File

@@ -16,7 +16,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format kernel+squashfs -name "${NAME}" ../test.yml
linuxkit build -format kernel+squashfs -name "${NAME}" ../test.yml
[ -f "${NAME}-kernel" ] || exit 1
[ -f "${NAME}-squashfs.img" ] || exit 1
[ -f "${NAME}-cmdline" ] || exit 1

View File

@@ -16,7 +16,7 @@ clean_up() {
trap clean_up EXIT
linuxkit build -docker -format kernel+iso -name "${NAME}" ../test.yml
linuxkit build -format kernel+iso -name "${NAME}" ../test.yml
[ -f "${NAME}-kernel" ] || exit 1
[ -f "${NAME}.iso" ] || exit 1
[ -f "${NAME}-cmdline" ] || exit 1

View File

@@ -17,8 +17,8 @@ clean_up() {
trap clean_up EXIT
# -disable-content-trust to speed up the test
linuxkit build -docker -disable-content-trust -format tar -name "${NAME}-1" ../test.yml
linuxkit build -docker -disable-content-trust -format tar -name "${NAME}-2" ../test.yml
linuxkit build -disable-content-trust -format tar -name "${NAME}-1" ../test.yml
linuxkit build -disable-content-trust -format tar -name "${NAME}-2" ../test.yml
diff -q "${NAME}-1.tar" "${NAME}-2.tar" || exit 1

View File

@@ -17,8 +17,8 @@ clean_up() {
trap clean_up EXIT
# -disable-content-trust to speed up the test
linuxkit build -docker -disable-content-trust -format kernel+initrd -name "${NAME}-1" ../test.yml
linuxkit build -docker -disable-content-trust -format kernel+initrd -name "${NAME}-2" ../test.yml
linuxkit build -disable-content-trust -format kernel+initrd -name "${NAME}-1" ../test.yml
linuxkit build -disable-content-trust -format kernel+initrd -name "${NAME}-2" ../test.yml
diff -q "${NAME}-1-cmdline" "${NAME}-2-cmdline" || exit 1
diff -q "${NAME}-1-kernel" "${NAME}-2-kernel" || exit 1

View File

@@ -19,7 +19,7 @@ trap clean_up EXIT
# Test code goes here
linuxkit build -docker -format kernel+initrd -name ${NAME} test.yml
linuxkit build -format kernel+initrd -name ${NAME} test.yml
RESULT="$(linuxkit run -disk file=${DISK},size=32M ${NAME})"
echo "${RESULT}"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
linuxkit build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
exit 0

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
linuxkit build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
exit 0

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
linuxkit build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
exit 0

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
linuxkit build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
exit 0

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
linuxkit build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
exit 0

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
linuxkit build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
exit 0

View File

@@ -17,6 +17,6 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
linuxkit build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
exit 0

View File

@@ -17,6 +17,6 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
linuxkit build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
exit 0

View File

@@ -17,6 +17,6 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
linuxkit build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
exit 0

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
linuxkit build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
exit 0

View File

@@ -15,7 +15,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name "${NAME}" test.yml
linuxkit build -format kernel+initrd -name "${NAME}" test.yml
[ -f "${NAME}-kernel" ] || exit 1
[ -f "${NAME}-initrd.img" ] || exit 1
[ -f "${NAME}-cmdline" ]|| exit 1

View File

@@ -15,7 +15,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+squashfs -name "${NAME}" test.yml
linuxkit build -format kernel+squashfs -name "${NAME}" test.yml
[ -f "${NAME}-kernel" ] || exit 1
[ -f "${NAME}-squashfs.img" ] || exit 1
[ -f "${NAME}-cmdline" ]|| exit 1

View File

@@ -15,7 +15,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format iso-bios -name "${NAME}" test.yml
linuxkit build -format iso-bios -name "${NAME}" test.yml
[ -f "${NAME}.iso" ] || exit 1
linuxkit run qemu -iso "${NAME}.iso" | grep -q "Welcome to LinuxKit"

View File

@@ -25,7 +25,7 @@ if command -v qemu-system-x86_64; then
fi
fi
linuxkit build -docker -format iso-efi -name "${NAME}" test.yml
linuxkit build -format iso-efi -name "${NAME}" test.yml
[ -f "${NAME}-efi.iso" ] || exit 1
linuxkit run qemu -iso -uefi "${NAME}-efi.iso" | grep -q "Welcome to LinuxKit"

View File

@@ -15,7 +15,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format qcow2-bios -name "${NAME}" test.yml
linuxkit build -format qcow2-bios -name "${NAME}" test.yml
[ -f "${NAME}.qcow2" ] || exit 1
linuxkit run qemu "${NAME}.qcow2" | grep -q "Welcome to LinuxKit"

View File

@@ -15,7 +15,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format raw-bios -name "${NAME}" test.yml
linuxkit build -format raw-bios -name "${NAME}" test.yml
[ -f "${NAME}-bios.img" ] || exit 1
linuxkit run qemu "${NAME}-bios.img" | grep -q "Welcome to LinuxKit"

View File

@@ -15,7 +15,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format aws -name "${NAME}" test.yml
linuxkit build -format aws -name "${NAME}" test.yml
[ -f "${NAME}.raw" ] || exit 1
linuxkit run qemu "${NAME}.raw" | grep -q "Welcome to LinuxKit"

View File

@@ -26,7 +26,7 @@ if [ -z "${QEMU}" ]; then
exit $RT_CANCEL
fi
linuxkit build -docker -format kernel+initrd -name "${NAME}" test.yml
linuxkit build -format kernel+initrd -name "${NAME}" test.yml
[ -f "${NAME}-kernel" ] || exit 1
[ -f "${NAME}-initrd.img" ] || exit 1
[ -f "${NAME}-cmdline" ]|| exit 1

View File

@@ -15,7 +15,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name "${NAME}" test.yml
linuxkit build -format kernel+initrd -name "${NAME}" test.yml
[ -f "${NAME}-kernel" ] || exit 1
[ -f "${NAME}-initrd.img" ] || exit 1
[ -f "${NAME}-cmdline" ]|| exit 1

View File

@@ -15,7 +15,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+squashfs -name "${NAME}" test.yml
linuxkit build -format kernel+squashfs -name "${NAME}" test.yml
[ -f "${NAME}-kernel" ] || exit 1
[ -f "${NAME}-squashfs.img" ] || exit 1
[ -f "${NAME}-cmdline" ]|| exit 1

View File

@@ -15,7 +15,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name "${NAME}" test.yml
linuxkit build -format kernel+initrd -name "${NAME}" test.yml
[ -f "${NAME}-kernel" ] || exit 1
[ -f "${NAME}-initrd.img" ] || exit 1
[ -f "${NAME}-cmdline" ] || exit 1

View File

@@ -29,7 +29,7 @@ trap clean_up EXIT
mkdir -p certs
printf '%s' "$GCLOUD_CREDENTIALS" > certs/svc_account.json
linuxkit build -docker -format gcp -name "${NAME}" test.yml
linuxkit build -format gcp -name "${NAME}" test.yml
[ -f "${NAME}.img.tar.gz" ] || exit 1
linuxkit push gcp -keys certs/svc_account.json -bucket linuxkit-gcp-test-bucket ${NAME}.img.tar.gz
# tee output of lk run to file as grep hides failures and doesn't

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker -format kernel+initrd -name "${NAME}" test.yml
linuxkit build -format kernel+initrd -name "${NAME}" test.yml
RESULT="$(linuxkit run ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker -format kernel+initrd -name "${NAME}" test.yml
linuxkit build -format kernel+initrd -name "${NAME}" test.yml
RESULT="$(linuxkit run ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker -format kernel+initrd -name "${NAME}" test.yml
linuxkit build -format kernel+initrd -name "${NAME}" test.yml
RESULT="$(linuxkit run ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -17,7 +17,7 @@ clean_up() {
trap clean_up EXIT
# Test code goes here
linuxkit build -docker -format kernel+initrd -name "${NAME}" test.yml
linuxkit build -format kernel+initrd -name "${NAME}" test.yml
RESULT="$(linuxkit run ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

View File

@@ -16,7 +16,7 @@ clean_up() {
}
trap clean_up EXIT
linuxkit build -docker -format kernel+initrd -name ${NAME} ../../common.yml test.yml
linuxkit build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"

Some files were not shown because too many files have changed in this diff Show More