diff --git a/inspect.go b/inspect.go
index 8433f5b4..97eaa1e9 100644
--- a/inspect.go
+++ b/inspect.go
@@ -2,13 +2,16 @@ package main
 
 import (
 	"fmt"
+	"time"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/codegangsta/cli"
+	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/api"
+	"github.com/docker/docker/image"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
-	engineTypes "github.com/docker/engine-api/types"
+	types "github.com/docker/engine-api/types"
 	containerTypes "github.com/docker/engine-api/types/container"
 	"golang.org/x/net/context"
 )
@@ -34,8 +37,7 @@ type manifestFetcher interface {
 }
 
 type imageInspect struct {
-	// I shouldn't need json tag here...
-	ID              string `json:"Id"`
+	V1ID            string `json:"V1Id"`
 	RepoTags        []string
 	RepoDigests     []string
 	Parent          string
@@ -185,7 +187,7 @@ func newManifestFetcher(endpoint registry.APIEndpoint, repoInfo *registry.Reposi
 	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
 }
 
-func getAuthConfig(c *cli.Context, ref reference.Named) (engineTypes.AuthConfig, error) {
+func getAuthConfig(c *cli.Context, ref reference.Named) (types.AuthConfig, error) {
 	// use docker/cliconfig
 	// if no /.docker -> docker not installed fallback to require username|password
 	//if cs, err := ...
@@ -203,7 +205,7 @@ func getAuthConfig(c *cli.Context, ref reference.Named) (engineTypes.AuthConfig,
 	//}
 	//}
 
-	return engineTypes.AuthConfig{}, nil
+	return types.AuthConfig{}, nil
 }
 
 func validateRepoName(name string) error {
@@ -215,3 +217,41 @@ func validateRepoName(name string) error {
 	}
 	return nil
 }
+
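+// makeImageInspect assembles the imageInspect result from a fetched image
+// configuration, recording the repository tag and manifest digest when they
+// are available.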
+func makeImageInspect(repoInfo *registry.RepositoryInfo, img *image.Image, tag string, dgst digest.Digest) *imageInspect {
+	var repoTags = make([]string, 0, 1)
+	if tagged, isTagged := repoInfo.Named.(reference.NamedTagged); isTagged || tag != "" {
+		if !isTagged {
+			newTagged, err := reference.WithTag(repoInfo, tag)
+			if err == nil {
+				tagged = newTagged
+			}
+		}
+		if tagged != nil {
+			repoTags = append(repoTags, tagged.String())
+		}
+	}
+
+	var repoDigests = make([]string, 0, 1)
+	if err := dgst.Validate(); err == nil {
+		repoDigests = append(repoDigests, dgst.String())
+	}
+
+	return &imageInspect{
+		V1ID:            img.V1Image.ID,
+		RepoTags:        repoTags,
+		RepoDigests:     repoDigests,
+		Parent:          img.Parent.String(),
+		Comment:         img.Comment,
+		Created:         img.Created.Format(time.RFC3339Nano),
+		Container:       img.Container,
+		ContainerConfig: &img.ContainerConfig,
+		DockerVersion:   img.DockerVersion,
+		Author:          img.Author,
+		Config:          img.Config,
+		Architecture:    img.Architecture,
+		Os:              img.OS,
+		Size:            img.Size,
+		Registry:        repoInfo.Index.Name,
+	}
+}
diff --git a/inspect_v2.go b/inspect_v2.go
index b55c9247..0e5382d7 100644
--- a/inspect_v2.go
+++ b/inspect_v2.go
@@ -1,11 +1,25 @@
 package main
 
 import (
+	"encoding/json"
+	"errors"
 	"fmt"
+	"runtime"
 
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/client"
+	dockerdistribution "github.com/docker/docker/distribution"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/image/v1"
+	versionPkg "github.com/docker/docker/pkg/version"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
+	"github.com/docker/engine-api/types"
 	"golang.org/x/net/context"
 )
 
@@ -17,6 +31,427 @@ type v2ManifestFetcher struct {
 }
 
 func (mf *v2ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) (*imageInspect, error) {
-	fmt.Println("ciaone")
-	return nil, nil
+	var (
+		err        error
+		imgInspect *imageInspect
+	)
+
+	//mf.repo, mf.confirmedV2, err = distribution.NewV2Repository(ctx, mf.repoInfo, mf.endpoint, mf.config.MetaHeaders, mf.config.AuthConfig, "pull")
+	mf.repo, mf.confirmedV2, err = dockerdistribution.NewV2Repository(ctx, mf.repoInfo, mf.endpoint, nil, &types.AuthConfig{}, "pull")
+	if err != nil {
+		logrus.Debugf("Error getting v2 registry: %v", err)
+		return nil, fallbackError{err: err, confirmedV2: mf.confirmedV2}
+	}
+
+	imgInspect, err = mf.fetchWithRepository(ctx, ref)
+	if err != nil {
+		switch t := err.(type) {
+		case errcode.Errors:
+			if len(t) == 1 {
+				err = t[0]
+			}
+		}
+		if registry.ContinueOnError(err) {
+			logrus.Debugf("Error trying v2 registry: %v", err)
+			err = fallbackError{err: err, confirmedV2: mf.confirmedV2}
+		}
+	}
+	return imgInspect, err
+}
+
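+// fetchWithRepository resolves ref to a manifest, by digest when one is
+// given and otherwise by tag (falling back to the default tag or the first
+// tag listed), and converts the result into an imageInspect.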
+func (mf *v2ManifestFetcher) fetchWithRepository(ctx context.Context, ref reference.Named) (*imageInspect, error) {
+	var (
+		manifest    distribution.Manifest
+		tagOrDigest string // Used for logging/progress only
+
+		tag string
+	)
+
+	manSvc, err := mf.repo.Manifests(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if digested, isDigested := ref.(reference.Canonical); isDigested {
+		manifest, err = manSvc.Get(ctx, digested.Digest())
+		if err != nil {
+			return nil, err
+		}
+		tagOrDigest = digested.Digest().String()
+	} else {
+		if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+			tagOrDigest = tagged.Tag()
+			tag = tagOrDigest
+		} else {
+			tagList, err := mf.repo.Tags(ctx).All(ctx)
+			if err != nil {
+				return nil, err
+			}
+			for _, t := range tagList {
+				if t == reference.DefaultTag {
+					tag = t
+				}
+			}
+			if tag == "" && len(tagList) > 0 {
+				tag = tagList[0]
+			}
+			if tag == "" {
+				return nil, fmt.Errorf("No tags available for remote repository %s", mf.repoInfo.FullName())
+			}
+		}
+		// NOTE: not using TagService.Get, since it uses HEAD requests
+		// against the manifests endpoint, which are not supported by
+		// all registry versions.
+		manifest, err = manSvc.Get(ctx, "", client.WithTag(tag))
+		if err != nil {
+			return nil, allowV1Fallback(err)
+		}
+	}
+
+	if manifest == nil {
+		return nil, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
+	}
+
+	// If manSvc.Get succeeded, we can be confident that the registry on
+	// the other side speaks the v2 protocol.
+	mf.confirmedV2 = true
+
+	var (
+		image          *image.Image
+		manifestDigest digest.Digest
+	)
+
+	switch v := manifest.(type) {
+	case *schema1.SignedManifest:
+		image, manifestDigest, err = mf.pullSchema1(ctx, ref, v)
+		if err != nil {
+			return nil, err
+		}
+	case *schema2.DeserializedManifest:
+		image, manifestDigest, err = mf.pullSchema2(ctx, ref, v)
+		if err != nil {
+			return nil, err
+		}
+	//case *manifestlist.DeserializedManifestList:
+	//image, manifestDigest, err = mf.pullManifestList(ctx, ref, v)
+	//if err != nil {
+	//return nil, err
+	//}
+	default:
+		return nil, errors.New("unsupported manifest format")
+	}
+
+	return makeImageInspect(mf.repoInfo, image, tag, manifestDigest), nil
+}
+
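+// pullSchema1 verifies a schema1 signed manifest and rebuilds the image
+// configuration from its v1-compatibility history entries.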
+func (mf *v2ManifestFetcher) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (img *image.Image, manifestDigest digest.Digest, err error) {
+	var verifiedManifest *schema1.Manifest
+	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
+	if err != nil {
+		return nil, "", err
+	}
+
+	// remove duplicate layers and check parent chain validity
+	err = fixManifestLayers(verifiedManifest)
+	if err != nil {
+		return nil, "", err
+	}
+
+	// Image history converted to the new format
+	var history []image.History
+
+	// Note that the order of this loop is in the direction of bottom-most
+	// to top-most, so that the downloads slice gets ordered correctly.
+	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
+		var throwAway struct {
+			ThrowAway bool `json:"throwaway,omitempty"`
+		}
+		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
+			return nil, "", err
+		}
+
+		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
+		if err != nil {
+			return nil, "", err
+		}
+		history = append(history, h)
+	}
+
+	rootFS := image.NewRootFS()
+	configRaw, err := makeRawConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), rootFS, history)
+	if err != nil {
+		return nil, "", err
+	}
+
+	config, err := json.Marshal(configRaw)
+	if err != nil {
+		return nil, "", err
+	}
+
+	img, err = image.NewFromJSON(config)
+	if err != nil {
+		return nil, "", err
+	}
+
+	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)
+
+	return img, manifestDigest, nil
+}
+
+// makeRawConfigFromV1Config restores a raw image config map from a legacy
+// v1-compatibility payload, re-serializing through image.V1Image for
+// configs written by Docker versions older than 1.8.3.
+func makeRawConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) (map[string]*json.RawMessage, error) {
+	var dver struct {
+		DockerVersion string `json:"docker_version"`
+	}
+
+	if err := json.Unmarshal(imageJSON, &dver); err != nil {
+		return nil, err
+	}
+
+	useFallback := versionPkg.Version(dver.DockerVersion).LessThan("1.8.3")
+
+	if useFallback {
+		var v1Image image.V1Image
+		err := json.Unmarshal(imageJSON, &v1Image)
+		if err != nil {
+			return nil, err
+		}
+		imageJSON, err = json.Marshal(v1Image)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var c map[string]*json.RawMessage
+	if err := json.Unmarshal(imageJSON, &c); err != nil {
+		return nil, err
+	}
+
+	c["rootfs"] = rawJSON(rootfs)
+	c["history"] = rawJSON(history)
+
+	return c, nil
+}
+
+// rawJSON marshals value and returns it as a *json.RawMessage, or nil if
+// marshalling fails.
+func rawJSON(value interface{}) *json.RawMessage {
+	jsonval, err := json.Marshal(value)
+	if err != nil {
+		return nil
+	}
+	return (*json.RawMessage)(&jsonval)
+}
+
+// verifySchema1Manifest checks a schema1 manifest against the requested
+// reference: the digest (when pulling by digest), the schema version, and
+// the layer/history consistency.
+func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
+	// If pull by digest, then verify the manifest digest. NOTE: It is
+	// important to do this first, before any other content validation. If the
+	// digest cannot be verified, don't even bother with those other things.
+	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
+		verifier, err := digest.NewDigestVerifier(digested.Digest())
+		if err != nil {
+			return nil, err
+		}
+		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
+			return nil, err
+		}
+		if !verifier.Verified() {
+			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
+			logrus.Error(err)
+			return nil, err
+		}
+	}
+	m = &signedManifest.Manifest
+
+	if m.SchemaVersion != 1 {
+		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
+	}
+	if len(m.FSLayers) != len(m.History) {
+		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
+	}
+	if len(m.FSLayers) == 0 {
+		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
+	}
+	return m, nil
+}
+
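+// fixManifestLayers removes repeated rows from a schema1 manifest's layer
+// history and validates the parent chain of the remaining entries.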
+func fixManifestLayers(m *schema1.Manifest) error {
+	imgs := make([]*image.V1Image, len(m.FSLayers))
+	for i := range m.FSLayers {
+		img := &image.V1Image{}
+
+		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
+			return err
+		}
+
+		imgs[i] = img
+		if err := v1.ValidateID(img.ID); err != nil {
+			return err
+		}
+	}
+
+	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
+		// Windows base layer can point to a base layer parent that is not in manifest.
+		return errors.New("Invalid parent ID in the base layer of the image.")
+	}
+
+	// check general duplicates to error instead of a deadlock
+	idmap := make(map[string]struct{})
+
+	var lastID string
+	for _, img := range imgs {
+		// skip IDs that appear after each other, we handle those later
+		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
+			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
+		}
+		lastID = img.ID
+		idmap[lastID] = struct{}{}
+	}
+
+	// backwards loop so that we keep the remaining indexes after removing items
+	for i := len(imgs) - 2; i >= 0; i-- {
+		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
+			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
+			m.History = append(m.History[:i], m.History[i+1:]...)
+		} else if imgs[i].Parent != imgs[i+1].ID {
+			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
+		}
+	}
+
+	return nil
+}
+
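+// pullSchema2 fetches and digest-verifies the image config referenced by a
+// schema2 manifest, decoding it into an image.Image.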
+func (mf *v2ManifestFetcher) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (img *image.Image, manifestDigest digest.Digest, err error) {
+	manifestDigest, err = schema2ManifestDigest(ref, mfst)
+	if err != nil {
+		return nil, "", err
+	}
+
+	target := mfst.Target()
+
+	configChan := make(chan []byte, 1)
+	errChan := make(chan error, 1)
+	var cancel func()
+	ctx, cancel = context.WithCancel(ctx)
+
+	// Pull the image config
+	go func() {
+		configJSON, err := mf.pullSchema2ImageConfig(ctx, target.Digest)
+		if err != nil {
+			errChan <- err
+			cancel()
+			return
+		}
+		configChan <- configJSON
+	}()
+
+	var (
+		configJSON         []byte      // raw serialized image config
+		unmarshalledConfig image.Image // deserialized image config
+	)
+	if runtime.GOOS == "windows" {
+		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
+		if err != nil {
+			return nil, "", err
+		}
+		if unmarshalledConfig.RootFS == nil {
+			return nil, "", errors.New("image config has no rootfs section")
+		}
+	}
+
+	if configJSON == nil {
+		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
+		if err != nil {
+			return nil, "", err
+		}
+	}
+
+	img, err = image.NewFromJSON(configJSON)
+	if err != nil {
+		return nil, "", err
+	}
+
+	return img, manifestDigest, nil
+}
+
+// pullSchema2ImageConfig fetches the image config blob for dgst and verifies
+// its content against that digest before returning it.
+func (mf *v2ManifestFetcher) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
+	blobs := mf.repo.Blobs(ctx)
+	configJSON, err = blobs.Get(ctx, dgst)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify image config digest
+	verifier, err := digest.NewDigestVerifier(dgst)
+	if err != nil {
+		return nil, err
+	}
+	if _, err := verifier.Write(configJSON); err != nil {
+		return nil, err
+	}
+	if !verifier.Verified() {
+		err := fmt.Errorf("image config verification failed for digest %s", dgst)
+		logrus.Error(err)
+		return nil, err
+	}
+
+	return configJSON, nil
+}
+
+// receiveConfig waits for the raw config bytes (or an error) from the
+// goroutine started in pullSchema2 and unmarshals them into an image.Image.
+func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
+	select {
+	case configJSON := <-configChan:
+		var unmarshalledConfig image.Image
+		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
+			return nil, image.Image{}, err
+		}
+		return configJSON, unmarshalledConfig, nil
+	case err := <-errChan:
+		return nil, image.Image{}, err
+		// Don't need a case for ctx.Done in the select because cancellation
+		// will trigger an error in mf.pullSchema2ImageConfig.
+	}
+}
+
+// allowV1Fallback checks if the error is a possible reason to fallback to v1
+// (even if confirmedV2 has been set already), and if so, wraps the error in
+// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
+// error unmodified.
+func allowV1Fallback(err error) error {
+	switch v := err.(type) {
+	case errcode.Errors:
+		if len(v) != 0 {
+			if v0, ok := v[0].(errcode.Error); ok && registry.ShouldV2Fallback(v0) {
+				return fallbackError{err: err, confirmedV2: false}
+			}
+		}
+	case errcode.Error:
+		if registry.ShouldV2Fallback(v) {
+			return fallbackError{err: err, confirmedV2: false}
+		}
+	}
+
+	return err
+}
+
+// schema2ManifestDigest computes the manifest digest, and, if pulling by
+// digest, ensures that it matches the requested digest.
+func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
+	_, canonical, err := mfst.Payload()
+	if err != nil {
+		return "", err
+	}
+
+	// If pull by digest, then verify the manifest digest.
+	if digested, isDigested := ref.(reference.Canonical); isDigested {
+		verifier, err := digest.NewDigestVerifier(digested.Digest())
+		if err != nil {
+			return "", err
+		}
+		if _, err := verifier.Write(canonical); err != nil {
+			return "", err
+		}
+		if !verifier.Verified() {
+			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
+			logrus.Error(err)
+			return "", err
+		}
+		return digested.Digest(), nil
+	}
+
+	return digest.FromBytes(canonical), nil
 }