diff --git a/vendor/github.com/containers/image/docker/daemon/client.go b/vendor/github.com/containers/image/docker/daemon/client.go index 82fab4b1..11f8f642 100644 --- a/vendor/github.com/containers/image/docker/daemon/client.go +++ b/vendor/github.com/containers/image/docker/daemon/client.go @@ -27,17 +27,24 @@ func newDockerClient(ctx *types.SystemContext) (*dockerclient.Client, error) { // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. // // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. + // + // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set + // TLSClientConfig to nil. This can be achieved by using the form `http://` proto, _, _, err := dockerclient.ParseHost(host) if err != nil { return nil, err } var httpClient *http.Client if proto != "unix" { - hc, err := tlsConfig(ctx) - if err != nil { - return nil, err + if proto == "http" { + httpClient = httpConfig() + } else { + hc, err := tlsConfig(ctx) + if err != nil { + return nil, err + } + httpClient = hc } - httpClient = hc } return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) @@ -67,3 +74,12 @@ func tlsConfig(ctx *types.SystemContext) (*http.Client, error) { CheckRedirect: dockerclient.CheckRedirect, }, nil } + +func httpConfig() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: nil, + }, + CheckRedirect: dockerclient.CheckRedirect, + } +} diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index cc99027a..0823b7ed 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -544,7 +544,7 @@ func (s *storageImageDestination) Commit() error { return errors.Errorf("error applying blob %q: content not found", blob.Digest) } // Build the new layer using the diff, regardless of where it came from. - layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, diff) + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, diff) if err != nil { return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) } diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index 1d8b4d3f..bf73054b 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -7,6 +7,7 @@ import ( "path/filepath" "time" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" @@ -56,6 +57,12 @@ type Container struct { // is set before using it. Created time.Time `json:"created,omitempty"` + // UIDMap and GIDMap are used for setting up a container's root + // filesystem for use inside of a user namespace where UID mapping is + // being used. + UIDMap []idtools.IDMap `json:"uidmap,omitempty"` + GIDMap []idtools.IDMap `json:"gidmap,omitempty"` + Flags map[string]interface{} `json:"flags,omitempty"` } @@ -70,7 +77,9 @@ type ContainerStore interface { // random one if an empty value is supplied) and optional names, // based on the specified image, using the specified layer as its // read-write layer. 
- Create(id string, names []string, image, layer, metadata string) (*Container, error) + // The maps in the container's options structure are recorded for the + // convenience of the caller, nothing more. + Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) // SetNames updates the list of names associated with the container // with the specified ID. @@ -117,6 +126,8 @@ func copyContainer(c *Container) *Container { BigDataSizes: copyStringInt64Map(c.BigDataSizes), BigDataDigests: copyStringDigestMap(c.BigDataDigests), Created: c.Created, + UIDMap: copyIDMap(c.UIDMap), + GIDMap: copyIDMap(c.GIDMap), Flags: copyStringInterfaceMap(c.Flags), } } @@ -252,7 +263,7 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro return r.Save() } -func (r *containerStore) Create(id string, names []string, image, layer, metadata string) (container *Container, err error) { +func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) { if id == "" { id = stringid.GenerateRandomID() _, idInUse := r.byid[id] @@ -282,6 +293,8 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat BigDataDigests: make(map[string]digest.Digest), Created: time.Now().UTC(), Flags: make(map[string]interface{}), + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), } r.containers = append(r.containers, container) r.byid[id] = container diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go index a0fb69eb..aef6becf 100644 --- a/vendor/github.com/containers/storage/containers_ffjson.go +++ b/vendor/github.com/containers/storage/containers_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. -// source: ./containers.go +// source: containers.go package storage @@ -7,6 +7,7 @@ import ( "bytes" "encoding/json" "fmt" + "github.com/containers/storage/pkg/idtools" "github.com/opencontainers/go-digest" fflib "github.com/pquerna/ffjson/fflib/v1" ) @@ -126,6 +127,46 @@ func (j *Container) MarshalJSONBuf(buf fflib.EncodingBuffer) error { } buf.WriteByte(',') } + if len(j.UIDMap) != 0 { + buf.WriteString(`"uidmap":`) + if j.UIDMap != nil { + buf.WriteString(`[`) + for i, v := range j.UIDMap { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.GIDMap) != 0 { + buf.WriteString(`"gidmap":`) + if j.GIDMap != nil { + buf.WriteString(`[`) + for i, v := range j.GIDMap { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } if len(j.Flags) != 0 { buf.WriteString(`"flags":`) /* Falling back. 
type=map[string]interface {} kind=map */ @@ -162,6 +203,10 @@ const ( ffjtContainerCreated + ffjtContainerUIDMap + + ffjtContainerGIDMap + ffjtContainerFlags ) @@ -183,6 +228,10 @@ var ffjKeyContainerBigDataDigests = []byte("big-data-digests") var ffjKeyContainerCreated = []byte("created") +var ffjKeyContainerUIDMap = []byte("uidmap") + +var ffjKeyContainerGIDMap = []byte("gidmap") + var ffjKeyContainerFlags = []byte("flags") // UnmarshalJSON umarshall json - template of ffjson @@ -280,6 +329,14 @@ mainparse: goto mainparse } + case 'g': + + if bytes.Equal(ffjKeyContainerGIDMap, kn) { + currentKey = ffjtContainerGIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + case 'i': if bytes.Equal(ffjKeyContainerID, kn) { @@ -317,6 +374,14 @@ mainparse: goto mainparse } + case 'u': + + if bytes.Equal(ffjKeyContainerUIDMap, kn) { + currentKey = ffjtContainerUIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + } if fflib.EqualFoldRight(ffjKeyContainerFlags, kn) { @@ -325,6 +390,18 @@ mainparse: goto mainparse } + if fflib.SimpleLetterEqualFold(ffjKeyContainerGIDMap, kn) { + currentKey = ffjtContainerGIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerUIDMap, kn) { + currentKey = ffjtContainerUIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + if fflib.SimpleLetterEqualFold(ffjKeyContainerCreated, kn) { currentKey = ffjtContainerCreated state = fflib.FFParse_want_colon @@ -423,6 +500,12 @@ mainparse: case ffjtContainerCreated: goto handle_Created + case ffjtContainerUIDMap: + goto handle_UIDMap + + case ffjtContainerGIDMap: + goto handle_GIDMap + case ffjtContainerFlags: goto handle_Flags @@ -931,6 +1014,142 @@ handle_Created: state = fflib.FFParse_after_value goto mainparse +handle_UIDMap: + + /* handler: j.UIDMap type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.UIDMap = nil + } else { + + j.UIDMap = []idtools.IDMap{} + + wantVal := true + + for { + + var tmpJUIDMap idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJUIDMap type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. 
type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJUIDMap) + if err != nil { + return fs.WrapErr(err) + } + } + + j.UIDMap = append(j.UIDMap, tmpJUIDMap) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GIDMap: + + /* handler: j.GIDMap type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.GIDMap = nil + } else { + + j.GIDMap = []idtools.IDMap{} + + wantVal := true + + for { + + var tmpJGIDMap idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJGIDMap type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJGIDMap) + if err != nil { + return fs.WrapErr(err) + } + } + + j.GIDMap = append(j.GIDMap, tmpJGIDMap) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + handle_Flags: /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index c1cfabee..bbbe9e4a 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -167,7 +167,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap } } - a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, a) return a, nil } @@ -250,7 +250,7 @@ func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { return fmt.Errorf("--storage-opt is not supported for aufs") } - if err := a.createDirsFor(id); err != nil { + if err := a.createDirsFor(id, parent); err != nil { return err } // Write the layers metadata @@ -281,21 +281,26 @@ func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { // createDirsFor creates two directories for the given id. // mnt and diff -func (a *Driver) createDirsFor(id string) error { +func (a *Driver) createDirsFor(id, parent string) error { paths := []string{ "mnt", "diff", } - rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) - if err != nil { - return err - } // Directory permission is 0755. 
// The path of directories are /mnt/ // and /diff/ for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { + rootPair := idtools.NewIDMappingsFromMaps(a.uidMaps, a.gidMaps).RootPair() + if parent != "" { + st, err := system.Stat(path.Join(a.rootPath(), p, parent)) + if err != nil { + return err + } + rootPair.UID = int(st.UID()) + rootPair.GID = int(st.GID()) + } + if err := idtools.MkdirAllAndChownNew(path.Join(a.rootPath(), p, id), os.FileMode(0755), rootPair); err != nil { return err } } @@ -463,17 +468,21 @@ func (a *Driver) isParent(id, parent string) bool { // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". -func (a *Driver) Diff(id, parent, mountLabel string) (io.ReadCloser, error) { +func (a *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { if !a.isParent(id, parent) { - return a.naiveDiff.Diff(id, parent, mountLabel) + return a.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) + } + + if idMappings == nil { + idMappings = &idtools.IDMappings{} } // AUFS doesn't need the parent layer to produce a diff. return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ Compression: archive.Uncompressed, ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, - UIDMaps: a.uidMaps, - GIDMaps: a.gidMaps, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), }) } @@ -492,19 +501,22 @@ func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil } -func (a *Driver) applyDiff(id string, diff io.Reader) error { +func (a *Driver) applyDiff(id string, idMappings *idtools.IDMappings, diff io.Reader) error { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - UIDMaps: a.uidMaps, - GIDMaps: a.gidMaps, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), }) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. -func (a *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) { +func (a *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { if !a.isParent(id, parent) { - return a.naiveDiff.DiffSize(id, parent, mountLabel) + return a.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) } // AUFS doesn't need the parent layer to calculate the diff size. return directory.Size(path.Join(a.rootPath(), "diff", id)) @@ -513,24 +525,24 @@ func (a *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. 
-func (a *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) { +func (a *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent, mountLabel string, diff io.Reader) (size int64, err error) { if !a.isParent(id, parent) { - return a.naiveDiff.ApplyDiff(id, parent, mountLabel, diff) + return a.naiveDiff.ApplyDiff(id, idMappings, parent, mountLabel, diff) } // AUFS doesn't need the parent id to apply the diff if it is the direct parent. - if err = a.applyDiff(id, diff); err != nil { + if err = a.applyDiff(id, idMappings, diff); err != nil { return } - return a.DiffSize(id, parent, mountLabel) + return directory.Size(path.Join(a.rootPath(), "diff", id)) } // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. -func (a *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) { +func (a *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { if !a.isParent(id, parent) { - return a.naiveDiff.Changes(id, parent, mountLabel) + return a.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel) } // AUFS doesn't have snapshots, so we need to get changes from all parent @@ -689,3 +701,9 @@ func useDirperm() bool { }) return enableDirperm } + +// UpdateLayerIDMap updates ID mappings in a layer from matching the ones +// specified by toContainer to those specified by toHost. +func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + return fmt.Errorf("aufs doesn't support changing ID mappings") +} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index abc856c8..4b9699fd 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -90,7 +90,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap } } - return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil + return graphdriver.NewNaiveDiffDriver(driver, graphdriver.NewNaiveLayerIDMapUpdater(driver)), nil } func parseOptions(opt []string) (btrfsOptions, bool, error) { diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go new file mode 100644 index 00000000..c12e73b3 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -0,0 +1,167 @@ +package graphdriver + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/reexec" +) + +const ( + chownByMapsCmd = "storage-chown-by-maps" +) + +func init() { + reexec.Register(chownByMapsCmd, chownByMapsMain) +} + +func chownByMapsMain() { + if len(os.Args) < 2 { + fmt.Fprintf(os.Stderr, "requires mapping configuration on stdin and directory path") + os.Exit(1) + } + // Read and decode our configuration. + discreteMaps := [4][]idtools.IDMap{} + config := bytes.Buffer{} + if _, err := config.ReadFrom(os.Stdin); err != nil { + fmt.Fprintf(os.Stderr, "error reading configuration: %v", err) + os.Exit(1) + } + if err := json.Unmarshal(config.Bytes(), &discreteMaps); err != nil { + fmt.Fprintf(os.Stderr, "error decoding configuration: %v", err) + os.Exit(1) + } + // Try to chroot. 
This may not be possible, and on some systems that + // means we just Chdir() to the directory, so from here on we should be + // using relative paths. + if err := chrootOrChdir(os.Args[1]); err != nil { + fmt.Fprintf(os.Stderr, "error chrooting to %q: %v", os.Args[1], err) + os.Exit(1) + } + // Build the mapping objects. + toContainer := idtools.NewIDMappingsFromMaps(discreteMaps[0], discreteMaps[1]) + if len(toContainer.UIDs()) == 0 && len(toContainer.GIDs()) == 0 { + toContainer = nil + } + toHost := idtools.NewIDMappingsFromMaps(discreteMaps[2], discreteMaps[3]) + if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 { + toHost = nil + } + chown := func(path string, info os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("error walking to %q: %v", path, err) + } + sysinfo := info.Sys() + if st, ok := sysinfo.(*syscall.Stat_t); ok { + // Map an on-disk UID/GID pair from host to container + // using the first map, then back to the host using the + // second map. Skip that first step if they're 0, to + // compensate for cases where a parent layer should + // have had a mapped value, but didn't. + uid, gid := int(st.Uid), int(st.Gid) + if toContainer != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedUid, mappedGid, err := toContainer.ToContainer(pair) + if err != nil { + if (uid != 0) || (gid != 0) { + return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err) + } + mappedUid, mappedGid = uid, gid + } + uid, gid = mappedUid, mappedGid + } + if toHost != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedPair, err := toHost.ToHost(pair) + if err != nil { + return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) + } + uid, gid = mappedPair.UID, mappedPair.GID + } + if uid != int(st.Uid) || gid != int(st.Gid) { + // Make the change. + if err := syscall.Lchown(path, uid, gid); err != nil { + return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err) + } + } + } + return nil + } + if err := filepath.Walk(".", chown); err != nil { + fmt.Fprintf(os.Stderr, "error during chown: %v", err) + os.Exit(1) + } + os.Exit(0) +} + +// ChownPathByMaps walks the filesystem tree, changing the ownership +// information using the toContainer and toHost mappings, using them to replace +// on-disk owner UIDs and GIDs which are "host" values in the first map with +// UIDs and GIDs for "host" values from the second map which correspond to the +// same "container" IDs. +func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error { + if toContainer == nil { + toContainer = &idtools.IDMappings{} + } + if toHost == nil { + toHost = &idtools.IDMappings{} + } + + config, err := json.Marshal([4][]idtools.IDMap{toContainer.UIDs(), toContainer.GIDs(), toHost.UIDs(), toHost.GIDs()}) + if err != nil { + return err + } + cmd := reexec.Command(chownByMapsCmd, path) + cmd.Stdin = bytes.NewReader(config) + output, err := cmd.CombinedOutput() + if len(output) > 0 && err != nil { + return fmt.Errorf("%v: %s", err, string(output)) + } + if err != nil { + return err + } + if len(output) > 0 { + return fmt.Errorf("%s", string(output)) + } + + return nil +} + +type naiveLayerIDMapUpdater struct { + ProtoDriver +} + +// NewNaiveLayerIDMapUpdater wraps the ProtoDriver in a LayerIDMapUpdater that +// uses ChownPathByMaps to update the ownerships in a layer's filesystem tree. 
+func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater {
+	return &naiveLayerIDMapUpdater{ProtoDriver: driver}
+}
+
+// UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership
+// information using the toContainer and toHost mappings, using them to replace
+// on-disk owner UIDs and GIDs which are "host" values in the first map with
+// UIDs and GIDs for "host" values from the second map which correspond to the
+// same "container" IDs.
+func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
+	driver := n.ProtoDriver
+	layerFs, err := driver.Get(id, mountLabel)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		driver.Put(id)
+	}()
+
+	return ChownPathByMaps(layerFs, toContainer, toHost)
+}
diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go
new file mode 100644
index 00000000..c8c4905b
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go
@@ -0,0 +1,21 @@
+// +build linux darwin freebsd solaris
+
+package graphdriver
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+)
+
+// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the
+// specified path followed by chdir() to the new root directory
+func chrootOrChdir(path string) error {
+	if err := syscall.Chroot(path); err != nil {
+		return fmt.Errorf("error chrooting to %q: %v", path, err)
+	}
+	if err := syscall.Chdir(string(os.PathSeparator)); err != nil {
+		return fmt.Errorf("error changing to %q: %v", path, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go
new file mode 100644
index 00000000..1df03178
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go
@@ -0,0 +1,15 @@
+package graphdriver
+
+import (
+	"fmt"
+	"syscall"
+)
+
+// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the
+// specified path followed by chdir() to the new root directory
+func chrootOrChdir(path string) error {
+	if err := syscall.Chdir(path); err != nil {
+		return fmt.Errorf("error changing to %q: %v", path, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
index d68fb66c..a4ec6ebf 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -54,7 +54,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 		locker: locker.New(),
 	}
 
-	return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
+	return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
 }
 
 func (d *Driver) String() string {
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 615d93be..1b4ad336 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -92,25 +92,39 @@ type ProtoDriver interface {
 type DiffDriver interface {
 	// Diff produces an archive of the changes between the specified
 	// layer and its parent layer which may be "".
- Diff(id, parent, mountLabel string) (io.ReadCloser, error) + Diff(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. - Changes(id, parent, mountLabel string) ([]archive.Change, error) + Changes(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. // The io.Reader must be an uncompressed stream. - ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) + ApplyDiff(id string, idMappings *idtools.IDMappings, parent string, mountLabel string, diff io.Reader) (size int64, err error) // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. - DiffSize(id, parent, mountLabel string) (size int64, err error) + DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (size int64, err error) +} + +// LayerIDMapUpdater is the interface that implements ID map changes for layers. +type LayerIDMapUpdater interface { + // UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership + // information using the toContainer and toHost mappings, using them to replace + // on-disk owner UIDs and GIDs which are "host" values in the first map with + // UIDs and GIDs for "host" values from the second map which correspond to the + // same "container" IDs. This method should only be called after a layer is + // first created and populated, and before it is mounted, as other changes made + // relative to a parent layer, but before this method is called, may be discarded + // by Diff(). + UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error } // Driver is the interface for layered/snapshot file system drivers. type Driver interface { ProtoDriver DiffDriver + LayerIDMapUpdater } // Capabilities defines a list of capabilities a driver may implement. diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index f74239cb..35869d90 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -24,29 +24,33 @@ var ( // Notably, the AUFS driver doesn't need to be wrapped like this. 
type NaiveDiffDriver struct { ProtoDriver - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap + LayerIDMapUpdater } // NewNaiveDiffDriver returns a fully functional driver that wraps the // given ProtoDriver and adds the capability of the following methods which // it may or may not support on its own: -// Diff(id, parent, mountLabel string) (io.ReadCloser, error) -// Changes(id, parent, mountLabel string) ([]archive.Change, error) -// ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) -// DiffSize(id, parent, mountLabel string) (size int64, err error) -func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { - return &NaiveDiffDriver{ProtoDriver: driver, - uidMaps: uidMaps, - gidMaps: gidMaps} +// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) +// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) +// ApplyDiff(id string, idMappings *idtools.IDMappings, parent, mountLabel string, diff io.Reader) (size int64, err error) +// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater} } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". -func (gdw *NaiveDiffDriver) Diff(id, parent, mountLabel string) (arch io.ReadCloser, err error) { +func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (arch io.ReadCloser, err error) { startTime := time.Now() driver := gdw.ProtoDriver + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + layerFs, err := driver.Get(id, mountLabel) if err != nil { return nil, err @@ -59,7 +63,11 @@ func (gdw *NaiveDiffDriver) Diff(id, parent, mountLabel string) (arch io.ReadClo }() if parent == "" { - archive, err := archive.Tar(layerFs, archive.Uncompressed) + archive, err := archive.TarWithOptions(layerFs, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) if err != nil { return nil, err } @@ -76,12 +84,12 @@ func (gdw *NaiveDiffDriver) Diff(id, parent, mountLabel string) (arch io.ReadClo } defer driver.Put(parent) - changes, err := archive.ChangesDirs(layerFs, parentFs) + changes, err := archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) if err != nil { return nil, err } - archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + archive, err := archive.ExportChanges(layerFs, changes, idMappings.UIDs(), idMappings.GIDs()) if err != nil { return nil, err } @@ -101,9 +109,16 @@ func (gdw *NaiveDiffDriver) Diff(id, parent, mountLabel string) (arch io.ReadClo // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. 
-func (gdw *NaiveDiffDriver) Changes(id, parent, mountLabel string) ([]archive.Change, error) { +func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { driver := gdw.ProtoDriver + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + layerFs, err := driver.Get(id, mountLabel) if err != nil { return nil, err @@ -120,15 +135,19 @@ func (gdw *NaiveDiffDriver) Changes(id, parent, mountLabel string) ([]archive.Ch defer driver.Put(parent) } - return archive.ChangesDirs(layerFs, parentFs) + return archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) } // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. -func (gdw *NaiveDiffDriver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) { +func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappings, parent, mountLabel string, diff io.Reader) (size int64, err error) { driver := gdw.ProtoDriver + if applyMappings == nil { + applyMappings = &idtools.IDMappings{} + } + // Mount the root filesystem so we can apply the diff/layer. layerFs, err := driver.Get(id, mountLabel) if err != nil { @@ -136,8 +155,11 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent, mountLabel string, diff io.Rea } defer driver.Put(id) - options := &archive.TarOptions{UIDMaps: gdw.uidMaps, - GIDMaps: gdw.gidMaps} + options := &archive.TarOptions{} + if applyMappings != nil { + options.UIDMaps = applyMappings.UIDs() + options.GIDMaps = applyMappings.GIDs() + } start := time.Now().UTC() logrus.Debug("Start untar layer") if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { @@ -151,10 +173,17 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent, mountLabel string, diff io.Rea // DiffSize calculates the changes between the specified layer // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. -func (gdw *NaiveDiffDriver) DiffSize(id, parent, mountLabel string) (size int64, err error) { +func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { driver := gdw.ProtoDriver - changes, err := gdw.Changes(id, parent, mountLabel) + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + + changes, err := gdw.Changes(id, idMappings, parent, parentMappings, mountLabel) if err != nil { return } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index de6ee6da..6b5f6912 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -169,7 +169,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap options: *opts, } - d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
@@ -267,16 +267,22 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID) flags := fmt.Sprintf("lowerdir=%s:%s", lower1Dir, lower2Dir) if len(flags) < unix.Getpagesize() { - if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil { + err := mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) + if err == nil { logrus.Debugf("overlay test mount with multiple lowers succeeded") return supportsDType, nil + } else { + logrus.Debugf("overlay test mount with multiple lowers failed %v", err) } } flags = fmt.Sprintf("lowerdir=%s", lower1Dir) if len(flags) < unix.Getpagesize() { - if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil { + err := mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) + if err == nil { logrus.Errorf("overlay test mount with multiple lowers failed, but succeeded with a single lower") return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") + } else { + logrus.Debugf("overlay test mount with a single lower failed %v", err) } } logrus.Errorf("'overlay' is not supported over %s at %q", backingFs, home) @@ -387,6 +393,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { return err } + if parent != "" { + st, err := system.Stat(d.dir(parent)) + if err != nil { + return err + } + rootUID = int(st.UID()) + rootGID = int(st.GID()) + } if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { return err } @@ -562,8 +576,32 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { return "", err } - newlowers := "" + // absLowers is the list of lowers as absolute paths, which works well with additional stores. + absLowers := []string{} + // relLowers is the list of lowers as paths relative to the driver's home directory. + relLowers := []string{} + + // Check if $link/../diff{1-*} exist. If they do, add them, in order, as the front of the lowers + // lists that we're building. "diff" itself is the upper, so it won't be in the lists. + link, err := ioutil.ReadFile(path.Join(dir, "link")) + if err != nil { + return "", err + } + diffN := 1 + _, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) + for err == nil { + absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) + relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN))) + diffN++ + _, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) + } + + // For each lower, resolve its path, and append it and any additional diffN + // directories to the lowers list. 
for _, l := range strings.Split(string(lowers), ":") { + if l == "" { + continue + } lower := "" newpath := path.Join(d.home, l) if _, err := os.Stat(newpath); err != nil { @@ -580,15 +618,23 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { } else { lower = newpath } - if newlowers == "" { - newlowers = lower - } else { - newlowers = newlowers + ":" + lower + absLowers = append(absLowers, lower) + relLowers = append(relLowers, l) + diffN = 1 + _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + for err == nil { + absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + relLowers = append(relLowers, dumbJoin(l, "..", nameWithSuffix("diff", diffN))) + diffN++ + _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) } } - if len(lowers) == 0 { - newlowers = path.Join(dir, "empty") - lowers = []byte(newlowers) + + // If the lowers list is still empty, use an empty lower so that we can still force an + // SELinux context for the mount. + if len(absLowers) == 0 { + absLowers = append(absLowers, path.Join(dir, "empty")) + relLowers = append(relLowers, path.Join(id, "empty")) } mergedDir := path.Join(dir, "merged") @@ -606,7 +652,7 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { }() workDir := path.Join(dir, "work") - opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, diffDir, workDir) + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir) mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir @@ -619,7 +665,7 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { // smaller at the expense of requiring a fork exec to chroot. 
if len(mountData) > pageSize { //FIXME: We need to figure out to get this to work with additional stores - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), path.Join(id, "diff"), path.Join(id, "work")) mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize { return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) @@ -697,9 +743,13 @@ func (d *Driver) isParent(id, parent string) bool { } // ApplyDiff applies the new layer into a root -func (d *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) { +func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent string, mountLabel string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { - return d.naiveDiff.ApplyDiff(id, parent, mountLabel, diff) + return d.naiveDiff.ApplyDiff(id, idMappings, parent, mountLabel, diff) + } + + if idMappings == nil { + idMappings = &idtools.IDMappings{} } applyDir := d.getDiffPath(id) @@ -707,8 +757,8 @@ func (d *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size logrus.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ - UIDMaps: d.uidMaps, - GIDMaps: d.gidMaps, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), WhiteoutFormat: archive.OverlayWhiteoutFormat, }); err != nil { return 0, err @@ -726,18 +776,22 @@ func (d *Driver) getDiffPath(id string) string { // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. -func (d *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) { +func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { - return d.naiveDiff.DiffSize(id, parent, mountLabel) + return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) } return directory.Size(d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
-func (d *Driver) Diff(id, parent, mountLabel string) (io.ReadCloser, error) {
+func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) {
 	if useNaiveDiff(d.home) || !d.isParent(id, parent) {
-		return d.naiveDiff.Diff(id, parent, mountLabel)
+		return d.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel)
+	}
+
+	if idMappings == nil {
+		idMappings = &idtools.IDMappings{}
 	}
 
 	lowerDirs, err := d.getLowerDirs(id)
@@ -749,8 +803,8 @@ func (d *Driver) Diff(id, parent, mountLabel string) (io.ReadCloser, error) {
 	logrus.Debugf("Tar with options on %s", diffPath)
 	return archive.TarWithOptions(diffPath, &archive.TarOptions{
 		Compression: archive.Uncompressed,
-		UIDMaps: d.uidMaps,
-		GIDMaps: d.gidMaps,
+		UIDMaps: idMappings.UIDs(),
+		GIDMaps: idMappings.GIDs(),
 		WhiteoutFormat: archive.OverlayWhiteoutFormat,
 		WhiteoutData: lowerDirs,
 	})
@@ -758,9 +812,9 @@
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
-func (d *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) {
+func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) {
 	if useNaiveDiff(d.home) || !d.isParent(id, parent) {
-		return d.naiveDiff.Changes(id, parent, mountLabel)
+		return d.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel)
 	}
 	// Overlay doesn't have snapshots, so we need to get changes from all parent
 	// layers.
@@ -777,3 +831,73 @@
 func (d *Driver) AdditionalImageStores() []string {
 	return d.options.imageStores
 }
+
+// UpdateLayerIDMap updates ID mappings in a layer from matching the ones specified
+// by toContainer to those specified by toHost.
+func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
+	var err error
+	dir := d.dir(id)
+	diffDir := filepath.Join(dir, "diff")
+
+	rootUID, rootGID := 0, 0
+	if toHost != nil {
+		rootUID, rootGID, err = idtools.GetRootUIDGID(toHost.UIDs(), toHost.GIDs())
+		if err != nil {
+			return err
+		}
+	}
+
+	// Mount the new layer and handle ownership changes and possible copy_ups in it.
+	layerFs, err := d.Get(id, mountLabel)
+	if err != nil {
+		return err
+	}
+	err = graphdriver.ChownPathByMaps(layerFs, toContainer, toHost)
+	if err != nil {
+		if err2 := d.Put(id); err2 != nil {
+			logrus.Errorf("%v; error unmounting %v: %v", err, id, err2)
+		}
+		return err
+	}
+	if err = d.Put(id); err != nil {
+		return err
+	}
+
+	// Rotate the diff directories.
+	i := 0
+	_, err = os.Stat(nameWithSuffix(diffDir, i))
+	for err == nil {
+		i++
+		_, err = os.Stat(nameWithSuffix(diffDir, i))
+	}
+	for i > 0 {
+		err = os.Rename(nameWithSuffix(diffDir, i-1), nameWithSuffix(diffDir, i))
+		if err != nil {
+			return err
+		}
+		i--
+	}
+
+	// Re-create the directory that we're going to use as the upper layer.
+	if err := idtools.MkdirAs(diffDir, 0755, rootUID, rootGID); err != nil {
+		return err
+	}
+	return nil
+}
+
+// dumbJoin is more or less a dumber version of filepath.Join, but one which
+// won't Clean() the path, allowing us to append ".." as a component and trust
+// pathname resolution to do some non-obvious work.
+func dumbJoin(names ...string) string { + if len(names) == 0 { + return string(os.PathSeparator) + } + return strings.Join(names, string(os.PathSeparator)) +} + +func nameWithSuffix(name string, number int) string { + if number == 0 { + return name + } + return fmt.Sprintf("%s%d", name, number) +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index ae62207d..cee55f8d 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -43,7 +43,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap continue } } - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil + return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil } // Driver holds information about the driver, home directory of the driver. @@ -91,6 +91,14 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { return err } + if parent != "" { + st, err := system.Stat(d.dir(parent)) + if err != nil { + return err + } + rootIDs.UID = int(st.UID()) + rootIDs.GID = int(st.GID()) + } if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil { return err } diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index e9e9f5c6..b750715b 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -472,7 +472,7 @@ func (d *Driver) Cleanup() error { // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". // The layer should be mounted when calling this function -func (d *Driver) Diff(id, parent, mountLabel string) (_ io.ReadCloser, err error) { +func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ io.ReadCloser, err error) { panicIfUsedByLcow() rID, err := d.resolveID(id) if err != nil { @@ -509,7 +509,7 @@ func (d *Driver) Diff(id, parent, mountLabel string) (_ io.ReadCloser, err error // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. // The layer should not be mounted when calling this function. -func (d *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) { +func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { panicIfUsedByLcow() rID, err := d.resolveID(id) if err != nil { @@ -565,7 +565,7 @@ func (d *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error // layer with the specified id and parent, returning the size of the // new layer in bytes. 
 // The layer should not be mounted when calling this function
-func (d *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (int64, error) {
+func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent, mountLabel string, diff io.Reader) (int64, error) {
 	panicIfUsedByLcow()
 	var layerChain []string
 	if parent != "" {
@@ -600,14 +600,14 @@
 // DiffSize calculates the changes between the specified layer
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
-func (d *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) {
+func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
 	panicIfUsedByLcow()
 	rPId, err := d.resolveID(parent)
 	if err != nil {
 		return
 	}
 
-	changes, err := d.Changes(id, rPId, mountLabel)
+	changes, err := d.Changes(id, idMappings, rPId, parentMappings, mountLabel)
 	if err != nil {
 		return
 	}
@@ -940,6 +940,12 @@ func (d *Driver) AdditionalImageStores() []string {
 	return nil
 }
 
+// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
+// matching those in toContainer to matching those in toHost.
+func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
+	return fmt.Errorf("windows doesn't support changing ID mappings")
+}
+
 type storageOptions struct {
 	size uint64
 }
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index 8c8e7d67..886ebc8b 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -119,7 +119,7 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
 		gidMaps: gidMaps,
 		ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
 	}
-	return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
+	return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
 }
 
 func parseOptions(opt []string) (zfsOptions, error) {
diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go
index bed6f8cd..562415c0 100644
--- a/vendor/github.com/containers/storage/errors.go
+++ b/vendor/github.com/containers/storage/errors.go
@@ -53,4 +53,6 @@ var (
 	ErrInvalidBigDataName = errors.New("not a valid name for a big data item")
 	// ErrDigestUnknown indicates that we were unable to compute the digest of a specified item.
 	ErrDigestUnknown = errors.New("could not compute digest of item")
+	// ErrLayerNotMounted is returned when the requested information can only be computed for a mounted layer, and the layer is not mounted.
+	ErrLayerNotMounted = errors.New("layer is not mounted")
 )
diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go
index f91ee6d4..f6a8b065 100644
--- a/vendor/github.com/containers/storage/images_ffjson.go
+++ b/vendor/github.com/containers/storage/images_ffjson.go
@@ -1,5 +1,5 @@
 // Code generated by ffjson . DO NOT EDIT.
-// source: ./images.go +// source: images.go package storage diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 91117a4f..9a060e68 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -8,12 +8,16 @@ import ( "io/ioutil" "os" "path/filepath" + "reflect" + "sort" "time" drivers "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/truncindex" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -93,6 +97,11 @@ type Layer struct { // Flags is arbitrary data about the layer. Flags map[string]interface{} `json:"flags,omitempty"` + + // UIDMap and GIDMap are used for setting up a layer's contents + // for use inside of a user namespace where UID mapping is being used. + UIDMap []idtools.IDMap `json:"uidmap,omitempty"` + GIDMap []idtools.IDMap `json:"gidmap,omitempty"` } type layerMountPoint struct { @@ -178,13 +187,13 @@ type LayerStore interface { // underlying drivers can accept a "size" option. At this time, most // underlying drivers do not themselves distinguish between writeable // and read-only layers. - Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (*Layer, error) + Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error) // CreateWithFlags combines the functions of Create and SetFlag. - CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error) + CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) // Put combines the functions of CreateWithFlags and ApplyDiff. - Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) + Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) // SetNames replaces the list of names associated with a layer with the // supplied values. @@ -204,6 +213,10 @@ type LayerStore interface { // Unmount unmounts a layer when it is no longer in use. Unmount(id string) error + // ParentOwners returns the UIDs and GIDs of parents of the layer's mountpoint + // for which the layer's UID and GID maps don't contain corresponding entries. + ParentOwners(id string) (uids, gids []int, err error) + // ApplyDiff reads a tarstream which was created by a previous call to Diff and // applies its changes to a specified layer. 
ApplyDiff(to string, diff io.Reader) (int64, error) @@ -221,6 +234,8 @@ type layerStore struct { bymount map[string]*Layer bycompressedsum map[digest.Digest][]string byuncompressedsum map[digest.Digest][]string + uidMap []idtools.IDMap + gidMap []idtools.IDMap } func copyLayer(l *Layer) *Layer { @@ -239,6 +254,8 @@ func copyLayer(l *Layer) *Layer { UncompressedSize: l.UncompressedSize, CompressionType: l.CompressionType, Flags: copyStringInterfaceMap(l.Flags), + UIDMap: copyIDMap(l.UIDMap), + GIDMap: copyIDMap(l.GIDMap), } } @@ -382,7 +399,7 @@ func (r *layerStore) Save() error { return ioutils.AtomicWriteFile(mpath, jmdata, 0600) } -func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) { +func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap, gidMap []idtools.IDMap) (LayerStore, error) { if err := os.MkdirAll(rundir, 0700); err != nil { return nil, err } @@ -403,6 +420,8 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (Layer byid: make(map[string]*Layer), bymount: make(map[string]*Layer), byname: make(map[string]*Layer), + uidMap: copyIDMap(uidMap), + gidMap: copyIDMap(gidMap), } if err := rlstore.Load(); err != nil { return nil, err @@ -489,7 +508,7 @@ func (r *layerStore) Status() ([][2]string, error) { return r.driver.Status(), nil } -func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) { +func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) { if !r.IsReadWrite() { return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) } @@ -500,11 +519,6 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o if err := os.MkdirAll(r.layerdir, 0700); err != nil { return nil, -1, err } - if parent != "" { - if parentLayer, ok := r.lookup(parent); ok { - parent = parentLayer.ID - } - } if id == "" { id = stringid.GenerateRandomID() _, idInUse := r.byid[id] @@ -522,6 +536,15 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o return nil, -1, ErrDuplicateName } } + parent := "" + var parentMappings *idtools.IDMappings + if parentLayer != nil { + parent = parentLayer.ID + parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap) + } else { + parentMappings = &idtools.IDMappings{} + } + idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap) opts := drivers.CreateOpts{ MountLabel: mountLabel, StorageOpt: options, @@ -531,6 +554,15 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o } else { err = r.driver.Create(id, parent, &opts) } + if !reflect.DeepEqual(parentMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(parentMappings.GIDs(), idMappings.GIDs()) { + err = r.driver.UpdateLayerIDMap(id, parentMappings, idMappings, mountLabel) + if err != nil { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. 
+ r.driver.Remove(id) + return nil, -1, err + } + } if err == nil { layer = &Layer{ ID: id, @@ -539,6 +571,8 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o MountLabel: mountLabel, Created: time.Now().UTC(), Flags: make(map[string]interface{}), + UIDMap: copyIDMap(moreOptions.UIDMap), + GIDMap: copyIDMap(moreOptions.GIDMap), } r.layers = append(r.layers, layer) r.idindex.Add(id) @@ -580,13 +614,13 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o return copyLayer(layer), size, err } -func (r *layerStore) CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error) { - layer, _, err = r.Put(id, parent, names, mountLabel, options, writeable, flags, nil) +func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) { + layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil) return layer, err } -func (r *layerStore) Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (layer *Layer, err error) { - return r.CreateWithFlags(id, parent, names, mountLabel, options, writeable, nil) +func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (layer *Layer, err error) { + return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil) } func (r *layerStore) Mount(id, mountLabel string) (string, error) { @@ -645,6 +679,67 @@ func (r *layerStore) Unmount(id string) error { return err } +func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { + layer, ok := r.lookup(id) + if !ok { + return nil, nil, ErrLayerUnknown + } + if len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { + // We're not using any mappings, so there aren't any unmapped IDs on parent directories. + return nil, nil, nil + } + if layer.MountPoint == "" { + // We don't know which directories to examine. 
+ return nil, nil, ErrLayerNotMounted + } + rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap) + if err != nil { + return nil, nil, errors.Wrapf(err, "error reading root ID values for layer %q", layer.ID) + } + m := idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) + fsuids := make(map[int]struct{}) + fsgids := make(map[int]struct{}) + for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + st, err := system.Stat(dir) + if err != nil { + return nil, nil, errors.Wrapf(err, "error reading ownership of directory %q", dir) + } + lst, err := system.Lstat(dir) + if err != nil { + return nil, nil, errors.Wrapf(err, "error reading ownership of directory-in-case-it's-a-symlink %q", dir) + } + fsuid := int(st.UID()) + fsgid := int(st.GID()) + if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil { + fsuids[fsuid] = struct{}{} + } + if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil { + fsgids[fsgid] = struct{}{} + } + fsuid = int(lst.UID()) + fsgid = int(lst.GID()) + if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil { + fsuids[fsuid] = struct{}{} + } + if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil { + fsgids[fsgid] = struct{}{} + } + } + for uid := range fsuids { + uids = append(uids, uid) + } + for gid := range fsgids { + gids = append(gids, gid) + } + if len(uids) > 1 { + sort.Ints(uids) + } + if len(gids) > 1 { + sort.Ints(gids) + } + return uids, gids, nil +} + func (r *layerStore) removeName(layer *Layer, name string) { layer.Names = stringSliceWithoutValue(layer.Names, name) } @@ -771,12 +866,11 @@ func (r *layerStore) Wipe() error { return nil } -func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, toLayer *Layer, err error) { +func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, fromLayer, toLayer *Layer, err error) { var ok bool - var fromLayer *Layer toLayer, ok = r.lookup(to) if !ok { - return "", "", nil, ErrLayerUnknown + return "", "", nil, nil, ErrLayerUnknown } to = toLayer.ID if from == "" { @@ -793,15 +887,22 @@ func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID st } } } - return from, to, toLayer, nil + return from, to, fromLayer, toLayer, nil +} + +func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings { + if layer == nil { + return &idtools.IDMappings{} + } + return idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) } func (r *layerStore) Changes(from, to string) ([]archive.Change, error) { - from, to, toLayer, err := r.findParentAndLayer(from, to) + from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to) if err != nil { return nil, ErrLayerUnknown } - return r.driver.Changes(to, from, toLayer.MountLabel) + return r.driver.Changes(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) } type simpleGetCloser struct { @@ -836,7 +937,7 @@ func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { var metadata storage.Unpacker - from, to, toLayer, err := r.findParentAndLayer(from, to) + from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to) if err != nil { return nil, ErrLayerUnknown } @@ -874,7 +975,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, } 
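The creation path above now takes the parent as a resolved *Layer and carries per-layer UID/GID maps in a LayerOptions value, and ParentOwners then reports host IDs on directories above a layer's mountpoint that those maps cannot represent. The following sketch is illustrative only, not part of the patch: it assumes code running inside the storage package with an initialized layerStore named rlstore, an already-resolved parent *Layer named baseLayer, and hypothetical map values.

// Illustrative sketch (assumed names: rlstore, baseLayer): create a mapped
// read-write layer with the new Put signature, mount it, and check for
// parent-directory owners that the layer's maps cannot express.
mappedOptions := &LayerOptions{
	UIDMap: []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
	GIDMap: []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
}
layer, _, err := rlstore.Put("", baseLayer, []string{"mapped-rw"}, "", nil, mappedOptions, true, nil, nil)
if err != nil {
	return err
}
if _, err := rlstore.Mount(layer.ID, ""); err != nil {
	return err
}
// ParentOwners walks the directories above layer.MountPoint and returns any
// UIDs/GIDs that have no corresponding entry in the layer's ID maps.
uids, gids, err := rlstore.ParentOwners(layer.ID)
if err != nil {
	return err
}
if len(uids) > 0 || len(gids) > 0 {
	return errors.Errorf("mountpoint parents owned by unmapped ids: uids=%v gids=%v", uids, gids)
}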
if from != toLayer.Parent { - diff, err := r.driver.Diff(to, from, toLayer.MountLabel) + diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) if err != nil { return nil, err } @@ -886,7 +987,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, if !os.IsNotExist(err) { return nil, err } - diff, err := r.driver.Diff(to, from, toLayer.MountLabel) + diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) if err != nil { return nil, err } @@ -925,12 +1026,12 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, } func (r *layerStore) DiffSize(from, to string) (size int64, err error) { - var toLayer *Layer - from, to, toLayer, err = r.findParentAndLayer(from, to) + var fromLayer, toLayer *Layer + from, to, fromLayer, toLayer, err = r.findParentAndLayer(from, to) if err != nil { return -1, ErrLayerUnknown } - return r.driver.DiffSize(to, from, toLayer.MountLabel) + return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) } func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) { @@ -970,7 +1071,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error if err != nil { return -1, err } - size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, layer.MountLabel, payload) + size, err = r.driver.ApplyDiff(layer.ID, r.layerMappings(layer), layer.Parent, layer.MountLabel, payload) if err != nil { return -1, err } diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go index 608fe186..125b5d8c 100644 --- a/vendor/github.com/containers/storage/layers_ffjson.go +++ b/vendor/github.com/containers/storage/layers_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. -// source: ./layers.go +// source: layers.go package storage @@ -8,6 +8,7 @@ import ( "encoding/json" "fmt" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" "github.com/opencontainers/go-digest" fflib "github.com/pquerna/ffjson/fflib/v1" ) @@ -323,6 +324,46 @@ func (j *Layer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { } buf.WriteByte(',') } + if len(j.UIDMap) != 0 { + buf.WriteString(`"uidmap":`) + if j.UIDMap != nil { + buf.WriteString(`[`) + for i, v := range j.UIDMap { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.GIDMap) != 0 { + buf.WriteString(`"gidmap":`) + if j.GIDMap != nil { + buf.WriteString(`[`) + for i, v := range j.GIDMap { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. 
type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } buf.Rewind(1) buf.WriteByte('}') return nil @@ -355,6 +396,10 @@ const ( ffjtLayerCompressionType ffjtLayerFlags + + ffjtLayerUIDMap + + ffjtLayerGIDMap ) var ffjKeyLayerID = []byte("id") @@ -381,6 +426,10 @@ var ffjKeyLayerCompressionType = []byte("compression") var ffjKeyLayerFlags = []byte("flags") +var ffjKeyLayerUIDMap = []byte("uidmap") + +var ffjKeyLayerGIDMap = []byte("gidmap") + // UnmarshalJSON umarshall json - template of ffjson func (j *Layer) UnmarshalJSON(input []byte) error { fs := fflib.NewFFLexer(input) @@ -486,6 +535,14 @@ mainparse: goto mainparse } + case 'g': + + if bytes.Equal(ffjKeyLayerGIDMap, kn) { + currentKey = ffjtLayerGIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + case 'i': if bytes.Equal(ffjKeyLayerID, kn) { @@ -523,6 +580,26 @@ mainparse: goto mainparse } + case 'u': + + if bytes.Equal(ffjKeyLayerUIDMap, kn) { + currentKey = ffjtLayerUIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerGIDMap, kn) { + currentKey = ffjtLayerGIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerUIDMap, kn) { + currentKey = ffjtLayerUIDMap + state = fflib.FFParse_want_colon + goto mainparse } if fflib.EqualFoldRight(ffjKeyLayerFlags, kn) { @@ -650,6 +727,12 @@ mainparse: case ffjtLayerFlags: goto handle_Flags + case ffjtLayerUIDMap: + goto handle_UIDMap + + case ffjtLayerGIDMap: + goto handle_GIDMap + case ffjtLayernosuchkey: err = fs.SkipField(tok) if err != nil { @@ -1108,6 +1191,142 @@ handle_Flags: state = fflib.FFParse_after_value goto mainparse +handle_UIDMap: + + /* handler: j.UIDMap type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.UIDMap = nil + } else { + + j.UIDMap = []idtools.IDMap{} + + wantVal := true + + for { + + var tmpJUIDMap idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJUIDMap type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. 
type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJUIDMap) + if err != nil { + return fs.WrapErr(err) + } + } + + j.UIDMap = append(j.UIDMap, tmpJUIDMap) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GIDMap: + + /* handler: j.GIDMap type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.GIDMap = nil + } else { + + j.GIDMap = []idtools.IDMap{} + + wantVal := true + + for { + + var tmpJGIDMap idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJGIDMap type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJGIDMap) + if err != nil { + return fs.WrapErr(err) + } + } + + j.GIDMap = append(j.GIDMap, tmpJGIDMap) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + wantedvalue: return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) wrongtokenerror: diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index e74cddd7..1002296f 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -59,18 +59,21 @@ type ( } ) -// Archiver allows the reuse of most utility functions of this package -// with a pluggable Untar function. Also, to facilitate the passing of -// specific id mappings for untar, an archiver can be created with maps -// which will then be passed to Untar operations +// Archiver allows the reuse of most utility functions of this package with a +// pluggable Untar function. To facilitate the passing of specific id mappings +// for untar, an archiver can be created with maps which will then be passed to +// Untar operations. If ChownOpts is set, its values are mapped using +// UntarIDMappings before being used to create files and directories on disk. 
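A minimal usage sketch of the split mappings described in the comment above, assuming code in the archive package: read-side owners are mapped back to container IDs via TarIDMappings when the archive is created, write-side owners are produced via UntarIDMappings when it is extracted, and ChownOpts, if set, overrides the read-side result with a fixed owner. The mapping values and paths below are illustrative assumptions, not part of the patch.

// Illustrative sketch: copy a tree between two differently mapped roots.
srcMappings := idtools.NewIDMappingsFromMaps(
	[]idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
	[]idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
)
dstMappings := idtools.NewIDMappingsFromMaps(
	[]idtools.IDMap{{ContainerID: 0, HostID: 200000, Size: 65536}},
	[]idtools.IDMap{{ContainerID: 0, HostID: 200000, Size: 65536}},
)
archiver := &Archiver{
	Untar:           Untar,
	TarIDMappings:   srcMappings,
	UntarIDMappings: dstMappings,
	// ChownOpts: &idtools.IDPair{UID: 0, GID: 0}, // optionally force a single owner instead
}
// Ownership of files under the (hypothetical) source path is translated from
// the source mapping to the destination mapping while copying.
if err := archiver.CopyWithTar("/some/src", "/some/dst"); err != nil {
	return err
}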
type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMappings *idtools.IDMappings + Untar func(io.Reader, string, *TarOptions) error + TarIDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + UntarIDMappings *idtools.IDMappings } // NewDefaultArchiver returns a new Archiver without any IDMappings func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}} + return &Archiver{Untar: Untar, TarIDMappings: &idtools.IDMappings{}, UntarIDMappings: &idtools.IDMappings{}} } // breakoutError is used to differentiate errors related to breaking out @@ -942,7 +945,7 @@ loop: } trBuf.Reset(tr) - if err := remapIDs(idMappings, hdr); err != nil { + if err := remapIDs(nil, idMappings, options.ChownOpts, hdr); err != nil { return err } @@ -1023,14 +1026,28 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { logrus.Debugf("TarUntar(%s %s)", src, dst) - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + tarMappings := archiver.TarIDMappings + if tarMappings == nil { + tarMappings = &idtools.IDMappings{} + } + options := &TarOptions{ + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + Compression: Uncompressed, + } + archive, err := TarWithOptions(src, options) if err != nil { return err } defer archive.Close() - options := &TarOptions{ - UIDMaps: archiver.IDMappings.UIDs(), - GIDMaps: archiver.IDMappings.GIDs(), + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } + options = &TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, } return archiver.Untar(archive, dst, options) } @@ -1042,9 +1059,14 @@ func (archiver *Archiver) UntarPath(src, dst string) error { return err } defer archive.Close() + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } options := &TarOptions{ - UIDMaps: archiver.IDMappings.UIDs(), - GIDMaps: archiver.IDMappings.GIDs(), + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, } return archiver.Untar(archive, dst, options) } @@ -1065,7 +1087,10 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // if this archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner - rootIDs := archiver.IDMappings.RootPair() + rootIDs := archiver.UntarIDMappings.RootPair() + if archiver.ChownOpts != nil { + rootIDs = *archiver.ChownOpts + } // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { @@ -1116,7 +1141,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - if err := remapIDs(archiver.IDMappings, hdr); err != nil { + if err := remapIDs(archiver.TarIDMappings, archiver.UntarIDMappings, archiver.ChownOpts, hdr); err != nil { return err } @@ -1143,10 +1168,29 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { return err } -func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { - ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) +func 
remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, hdr *tar.Header) (err error) { + var uid, gid int + if chownOpts != nil { + uid, gid = chownOpts.UID, chownOpts.GID + } else { + if readIDMappings != nil && !readIDMappings.Empty() { + uid, gid, err = readIDMappings.ToContainer(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + if err != nil { + return err + } + } else { + uid, gid = hdr.Uid, hdr.Gid + } + } + ids := idtools.IDPair{UID: uid, GID: gid} + if writeIDMappings != nil && !writeIDMappings.Empty() { + ids, err = writeIDMappings.ToHost(ids) + if err != nil { + return err + } + } hdr.Uid, hdr.Gid = ids.UID, ids.GID - return err + return nil } // cmdStream executes a command, and returns its stdout as a stream. diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go new file mode 100644 index 00000000..6f0f4524 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go @@ -0,0 +1,2230 @@ +// Code generated by ffjson . DO NOT EDIT. +// source: pkg/archive/archive.go + +package archive + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "github.com/containers/storage/pkg/idtools" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +// MarshalJSON marshal bytes to json - template +func (j *Archiver) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *Archiver) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"Untar":`) + /* Falling back. type=func(io.Reader, string, *archive.TarOptions) error kind=func */ + err = buf.Encode(j.Untar) + if err != nil { + return err + } + if j.TarIDMappings != nil { + /* Struct fall back. type=idtools.IDMappings kind=struct */ + buf.WriteString(`,"TarIDMappings":`) + err = buf.Encode(j.TarIDMappings) + if err != nil { + return err + } + } else { + buf.WriteString(`,"TarIDMappings":null`) + } + if j.ChownOpts != nil { + /* Struct fall back. type=idtools.IDPair kind=struct */ + buf.WriteString(`,"ChownOpts":`) + err = buf.Encode(j.ChownOpts) + if err != nil { + return err + } + } else { + buf.WriteString(`,"ChownOpts":null`) + } + if j.UntarIDMappings != nil { + /* Struct fall back. 
type=idtools.IDMappings kind=struct */ + buf.WriteString(`,"UntarIDMappings":`) + err = buf.Encode(j.UntarIDMappings) + if err != nil { + return err + } + } else { + buf.WriteString(`,"UntarIDMappings":null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffjtArchiverbase = iota + ffjtArchivernosuchkey + + ffjtArchiverUntar + + ffjtArchiverTarIDMappings + + ffjtArchiverChownOpts + + ffjtArchiverUntarIDMappings +) + +var ffjKeyArchiverUntar = []byte("Untar") + +var ffjKeyArchiverTarIDMappings = []byte("TarIDMappings") + +var ffjKeyArchiverChownOpts = []byte("ChownOpts") + +var ffjKeyArchiverUntarIDMappings = []byte("UntarIDMappings") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *Archiver) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *Archiver) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtArchiverbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtArchivernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'C': + + if bytes.Equal(ffjKeyArchiverChownOpts, kn) { + currentKey = ffjtArchiverChownOpts + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'T': + + if bytes.Equal(ffjKeyArchiverTarIDMappings, kn) { + currentKey = ffjtArchiverTarIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'U': + + if bytes.Equal(ffjKeyArchiverUntar, kn) { + currentKey = ffjtArchiverUntar + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyArchiverUntarIDMappings, kn) { + currentKey = ffjtArchiverUntarIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyArchiverUntarIDMappings, kn) { + currentKey = ffjtArchiverUntarIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyArchiverChownOpts, kn) { + currentKey = ffjtArchiverChownOpts + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyArchiverTarIDMappings, kn) { + currentKey = ffjtArchiverTarIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyArchiverUntar, kn) { + currentKey = ffjtArchiverUntar + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtArchivernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtArchiverUntar: + goto handle_Untar + + case ffjtArchiverTarIDMappings: + goto handle_TarIDMappings + + case ffjtArchiverChownOpts: + goto handle_ChownOpts + + case ffjtArchiverUntarIDMappings: + goto handle_UntarIDMappings + + case ffjtArchivernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Untar: + + /* handler: j.Untar type=func(io.Reader, string, *archive.TarOptions) error kind=func quoted=false*/ + + { + /* Falling back. type=func(io.Reader, string, *archive.TarOptions) error kind=func */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.Untar) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TarIDMappings: + + /* handler: j.TarIDMappings type=idtools.IDMappings kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMappings kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.TarIDMappings) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ChownOpts: + + /* handler: j.ChownOpts type=idtools.IDPair kind=struct quoted=false*/ + + { + /* Falling back. 
type=idtools.IDPair kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.ChownOpts) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UntarIDMappings: + + /* handler: j.UntarIDMappings type=idtools.IDMappings kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMappings kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.UntarIDMappings) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *TarOptions) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *TarOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"IncludeFiles":`) + if j.IncludeFiles != nil { + buf.WriteString(`[`) + for i, v := range j.IncludeFiles { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"ExcludePatterns":`) + if j.ExcludePatterns != nil { + buf.WriteString(`[`) + for i, v := range j.ExcludePatterns { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"Compression":`) + fflib.FormatBits2(buf, uint64(j.Compression), 10, j.Compression < 0) + if j.NoLchown { + buf.WriteString(`,"NoLchown":true`) + } else { + buf.WriteString(`,"NoLchown":false`) + } + buf.WriteString(`,"UIDMaps":`) + if j.UIDMaps != nil { + buf.WriteString(`[`) + for i, v := range j.UIDMaps { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"GIDMaps":`) + if j.GIDMaps != nil { + buf.WriteString(`[`) + for i, v := range j.GIDMaps { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + if j.ChownOpts != nil { + /* Struct fall back. 
type=idtools.IDPair kind=struct */ + buf.WriteString(`,"ChownOpts":`) + err = buf.Encode(j.ChownOpts) + if err != nil { + return err + } + } else { + buf.WriteString(`,"ChownOpts":null`) + } + if j.IncludeSourceDir { + buf.WriteString(`,"IncludeSourceDir":true`) + } else { + buf.WriteString(`,"IncludeSourceDir":false`) + } + buf.WriteString(`,"WhiteoutFormat":`) + fflib.FormatBits2(buf, uint64(j.WhiteoutFormat), 10, j.WhiteoutFormat < 0) + buf.WriteString(`,"WhiteoutData":`) + /* Interface types must use runtime reflection. type=interface {} kind=interface */ + err = buf.Encode(j.WhiteoutData) + if err != nil { + return err + } + if j.NoOverwriteDirNonDir { + buf.WriteString(`,"NoOverwriteDirNonDir":true`) + } else { + buf.WriteString(`,"NoOverwriteDirNonDir":false`) + } + if j.RebaseNames == nil { + buf.WriteString(`,"RebaseNames":null`) + } else { + buf.WriteString(`,"RebaseNames":{ `) + for key, value := range j.RebaseNames { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.WriteJsonString(buf, string(value)) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + if j.InUserNS { + buf.WriteString(`,"InUserNS":true`) + } else { + buf.WriteString(`,"InUserNS":false`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffjtTarOptionsbase = iota + ffjtTarOptionsnosuchkey + + ffjtTarOptionsIncludeFiles + + ffjtTarOptionsExcludePatterns + + ffjtTarOptionsCompression + + ffjtTarOptionsNoLchown + + ffjtTarOptionsUIDMaps + + ffjtTarOptionsGIDMaps + + ffjtTarOptionsChownOpts + + ffjtTarOptionsIncludeSourceDir + + ffjtTarOptionsWhiteoutFormat + + ffjtTarOptionsWhiteoutData + + ffjtTarOptionsNoOverwriteDirNonDir + + ffjtTarOptionsRebaseNames + + ffjtTarOptionsInUserNS +) + +var ffjKeyTarOptionsIncludeFiles = []byte("IncludeFiles") + +var ffjKeyTarOptionsExcludePatterns = []byte("ExcludePatterns") + +var ffjKeyTarOptionsCompression = []byte("Compression") + +var ffjKeyTarOptionsNoLchown = []byte("NoLchown") + +var ffjKeyTarOptionsUIDMaps = []byte("UIDMaps") + +var ffjKeyTarOptionsGIDMaps = []byte("GIDMaps") + +var ffjKeyTarOptionsChownOpts = []byte("ChownOpts") + +var ffjKeyTarOptionsIncludeSourceDir = []byte("IncludeSourceDir") + +var ffjKeyTarOptionsWhiteoutFormat = []byte("WhiteoutFormat") + +var ffjKeyTarOptionsWhiteoutData = []byte("WhiteoutData") + +var ffjKeyTarOptionsNoOverwriteDirNonDir = []byte("NoOverwriteDirNonDir") + +var ffjKeyTarOptionsRebaseNames = []byte("RebaseNames") + +var ffjKeyTarOptionsInUserNS = []byte("InUserNS") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *TarOptions) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *TarOptions) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtTarOptionsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = 
fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffjtTarOptionsnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'C': + + if bytes.Equal(ffjKeyTarOptionsCompression, kn) { + currentKey = ffjtTarOptionsCompression + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsChownOpts, kn) { + currentKey = ffjtTarOptionsChownOpts + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'E': + + if bytes.Equal(ffjKeyTarOptionsExcludePatterns, kn) { + currentKey = ffjtTarOptionsExcludePatterns + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'G': + + if bytes.Equal(ffjKeyTarOptionsGIDMaps, kn) { + currentKey = ffjtTarOptionsGIDMaps + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'I': + + if bytes.Equal(ffjKeyTarOptionsIncludeFiles, kn) { + currentKey = ffjtTarOptionsIncludeFiles + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsIncludeSourceDir, kn) { + currentKey = ffjtTarOptionsIncludeSourceDir + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsInUserNS, kn) { + currentKey = ffjtTarOptionsInUserNS + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'N': + + if bytes.Equal(ffjKeyTarOptionsNoLchown, kn) { + currentKey = ffjtTarOptionsNoLchown + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsNoOverwriteDirNonDir, kn) { + currentKey = ffjtTarOptionsNoOverwriteDirNonDir + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'R': + + if bytes.Equal(ffjKeyTarOptionsRebaseNames, kn) { + currentKey = ffjtTarOptionsRebaseNames + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'U': + + if bytes.Equal(ffjKeyTarOptionsUIDMaps, kn) { + currentKey = ffjtTarOptionsUIDMaps + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'W': + + if bytes.Equal(ffjKeyTarOptionsWhiteoutFormat, kn) { + currentKey = ffjtTarOptionsWhiteoutFormat + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsWhiteoutData, kn) { + currentKey = ffjtTarOptionsWhiteoutData + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsInUserNS, kn) { + currentKey = ffjtTarOptionsInUserNS + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsRebaseNames, kn) { + currentKey = ffjtTarOptionsRebaseNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyTarOptionsNoOverwriteDirNonDir, kn) { + currentKey = ffjtTarOptionsNoOverwriteDirNonDir + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyTarOptionsWhiteoutData, kn) { + currentKey = ffjtTarOptionsWhiteoutData + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyTarOptionsWhiteoutFormat, kn) { + currentKey = ffjtTarOptionsWhiteoutFormat + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsIncludeSourceDir, kn) { + currentKey = ffjtTarOptionsIncludeSourceDir + state = fflib.FFParse_want_colon + 
goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsChownOpts, kn) { + currentKey = ffjtTarOptionsChownOpts + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsGIDMaps, kn) { + currentKey = ffjtTarOptionsGIDMaps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsUIDMaps, kn) { + currentKey = ffjtTarOptionsUIDMaps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyTarOptionsNoLchown, kn) { + currentKey = ffjtTarOptionsNoLchown + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsCompression, kn) { + currentKey = ffjtTarOptionsCompression + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsExcludePatterns, kn) { + currentKey = ffjtTarOptionsExcludePatterns + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsIncludeFiles, kn) { + currentKey = ffjtTarOptionsIncludeFiles + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtTarOptionsnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtTarOptionsIncludeFiles: + goto handle_IncludeFiles + + case ffjtTarOptionsExcludePatterns: + goto handle_ExcludePatterns + + case ffjtTarOptionsCompression: + goto handle_Compression + + case ffjtTarOptionsNoLchown: + goto handle_NoLchown + + case ffjtTarOptionsUIDMaps: + goto handle_UIDMaps + + case ffjtTarOptionsGIDMaps: + goto handle_GIDMaps + + case ffjtTarOptionsChownOpts: + goto handle_ChownOpts + + case ffjtTarOptionsIncludeSourceDir: + goto handle_IncludeSourceDir + + case ffjtTarOptionsWhiteoutFormat: + goto handle_WhiteoutFormat + + case ffjtTarOptionsWhiteoutData: + goto handle_WhiteoutData + + case ffjtTarOptionsNoOverwriteDirNonDir: + goto handle_NoOverwriteDirNonDir + + case ffjtTarOptionsRebaseNames: + goto handle_RebaseNames + + case ffjtTarOptionsInUserNS: + goto handle_InUserNS + + case ffjtTarOptionsnosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_IncludeFiles: + + /* handler: j.IncludeFiles type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.IncludeFiles = nil + } else { + + j.IncludeFiles = []string{} + + wantVal := true + + for { + + var tmpJIncludeFiles string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJIncludeFiles type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJIncludeFiles = string(string(outBuf)) + + } + } + + j.IncludeFiles = append(j.IncludeFiles, tmpJIncludeFiles) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ExcludePatterns: + + /* handler: j.ExcludePatterns type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.ExcludePatterns = nil + } else { + + j.ExcludePatterns = []string{} + + wantVal := true + + for { + + var tmpJExcludePatterns string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJExcludePatterns type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJExcludePatterns = string(string(outBuf)) + + } + } + + j.ExcludePatterns = append(j.ExcludePatterns, tmpJExcludePatterns) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Compression: + + /* handler: j.Compression type=archive.Compression kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.Compression = Compression(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NoLchown: + + /* handler: j.NoLchown type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.NoLchown = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.NoLchown = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UIDMaps: + + /* handler: j.UIDMaps type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.UIDMaps = nil + } else { + + j.UIDMaps = 
[]idtools.IDMap{} + + wantVal := true + + for { + + var tmpJUIDMaps idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJUIDMaps type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJUIDMaps) + if err != nil { + return fs.WrapErr(err) + } + } + + j.UIDMaps = append(j.UIDMaps, tmpJUIDMaps) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GIDMaps: + + /* handler: j.GIDMaps type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.GIDMaps = nil + } else { + + j.GIDMaps = []idtools.IDMap{} + + wantVal := true + + for { + + var tmpJGIDMaps idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJGIDMaps type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJGIDMaps) + if err != nil { + return fs.WrapErr(err) + } + } + + j.GIDMaps = append(j.GIDMaps, tmpJGIDMaps) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ChownOpts: + + /* handler: j.ChownOpts type=idtools.IDPair kind=struct quoted=false*/ + + { + /* Falling back. 
type=idtools.IDPair kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.ChownOpts) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IncludeSourceDir: + + /* handler: j.IncludeSourceDir type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.IncludeSourceDir = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.IncludeSourceDir = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_WhiteoutFormat: + + /* handler: j.WhiteoutFormat type=archive.WhiteoutFormat kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for WhiteoutFormat", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.WhiteoutFormat = WhiteoutFormat(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_WhiteoutData: + + /* handler: j.WhiteoutData type=interface {} kind=interface quoted=false*/ + + { + /* Falling back. type=interface {} kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.WhiteoutData) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NoOverwriteDirNonDir: + + /* handler: j.NoOverwriteDirNonDir type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.NoOverwriteDirNonDir = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.NoOverwriteDirNonDir = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RebaseNames: + + /* handler: j.RebaseNames type=map[string]string kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.RebaseNames = nil + } else { + + j.RebaseNames = make(map[string]string, 0) + + wantVal := true + + for { + + var k string + + var tmpJRebaseNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJRebaseNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJRebaseNames = string(string(outBuf)) + + } + } + + j.RebaseNames[k] = tmpJRebaseNames + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InUserNS: + + /* handler: j.InUserNS type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.InUserNS = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.InUserNS = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *TempArchive) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *TempArchive) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"Size":`) + fflib.FormatBits2(buf, uint64(j.Size), 10, j.Size < 0) + buf.WriteByte('}') + return nil +} + +const ( + ffjtTempArchivebase = iota + ffjtTempArchivenosuchkey + + ffjtTempArchiveSize +) + +var ffjKeyTempArchiveSize = []byte("Size") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *TempArchive) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *TempArchive) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtTempArchivebase + _ = currentKey + tok := fflib.FFTok_init + 
wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffjtTempArchivenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'S': + + if bytes.Equal(ffjKeyTempArchiveSize, kn) { + currentKey = ffjtTempArchiveSize + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyTempArchiveSize, kn) { + currentKey = ffjtTempArchiveSize + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtTempArchivenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtTempArchiveSize: + goto handle_Size + + case ffjtTempArchivenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Size: + + /* handler: j.Size type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.Size = int64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *tarAppender) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *tarAppender) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") 
+ return nil + } + var err error + var obj []byte + _ = obj + _ = err + if j.TarWriter != nil { + /* Struct fall back. type=tar.Writer kind=struct */ + buf.WriteString(`{"TarWriter":`) + err = buf.Encode(j.TarWriter) + if err != nil { + return err + } + } else { + buf.WriteString(`{"TarWriter":null`) + } + if j.Buffer != nil { + /* Struct fall back. type=bufio.Writer kind=struct */ + buf.WriteString(`,"Buffer":`) + err = buf.Encode(j.Buffer) + if err != nil { + return err + } + } else { + buf.WriteString(`,"Buffer":null`) + } + /* Falling back. type=map[uint64]string kind=map */ + buf.WriteString(`,"SeenFiles":`) + err = buf.Encode(j.SeenFiles) + if err != nil { + return err + } + if j.IDMappings != nil { + /* Struct fall back. type=idtools.IDMappings kind=struct */ + buf.WriteString(`,"IDMappings":`) + err = buf.Encode(j.IDMappings) + if err != nil { + return err + } + } else { + buf.WriteString(`,"IDMappings":null`) + } + if j.ChownOpts != nil { + /* Struct fall back. type=idtools.IDPair kind=struct */ + buf.WriteString(`,"ChownOpts":`) + err = buf.Encode(j.ChownOpts) + if err != nil { + return err + } + } else { + buf.WriteString(`,"ChownOpts":null`) + } + buf.WriteString(`,"WhiteoutConverter":`) + /* Interface types must use runtime reflection. type=archive.tarWhiteoutConverter kind=interface */ + err = buf.Encode(j.WhiteoutConverter) + if err != nil { + return err + } + buf.WriteByte('}') + return nil +} + +const ( + ffjttarAppenderbase = iota + ffjttarAppendernosuchkey + + ffjttarAppenderTarWriter + + ffjttarAppenderBuffer + + ffjttarAppenderSeenFiles + + ffjttarAppenderIDMappings + + ffjttarAppenderChownOpts + + ffjttarAppenderWhiteoutConverter +) + +var ffjKeytarAppenderTarWriter = []byte("TarWriter") + +var ffjKeytarAppenderBuffer = []byte("Buffer") + +var ffjKeytarAppenderSeenFiles = []byte("SeenFiles") + +var ffjKeytarAppenderIDMappings = []byte("IDMappings") + +var ffjKeytarAppenderChownOpts = []byte("ChownOpts") + +var ffjKeytarAppenderWhiteoutConverter = []byte("WhiteoutConverter") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *tarAppender) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *tarAppender) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjttarAppenderbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjttarAppendernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'B': + + if bytes.Equal(ffjKeytarAppenderBuffer, kn) { + currentKey = ffjttarAppenderBuffer + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'C': + + if bytes.Equal(ffjKeytarAppenderChownOpts, kn) { + currentKey = ffjttarAppenderChownOpts + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'I': + + if bytes.Equal(ffjKeytarAppenderIDMappings, kn) { + currentKey = ffjttarAppenderIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'S': + + if bytes.Equal(ffjKeytarAppenderSeenFiles, kn) { + currentKey = ffjttarAppenderSeenFiles + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'T': + + if bytes.Equal(ffjKeytarAppenderTarWriter, kn) { + currentKey = ffjttarAppenderTarWriter + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'W': + + if bytes.Equal(ffjKeytarAppenderWhiteoutConverter, kn) { + currentKey = ffjttarAppenderWhiteoutConverter + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffjKeytarAppenderWhiteoutConverter, kn) { + currentKey = ffjttarAppenderWhiteoutConverter + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeytarAppenderChownOpts, kn) { + currentKey = ffjttarAppenderChownOpts + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeytarAppenderIDMappings, kn) { + currentKey = ffjttarAppenderIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeytarAppenderSeenFiles, kn) { + currentKey = ffjttarAppenderSeenFiles + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeytarAppenderBuffer, kn) { + currentKey = ffjttarAppenderBuffer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeytarAppenderTarWriter, kn) { + currentKey = ffjttarAppenderTarWriter + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjttarAppendernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjttarAppenderTarWriter: + goto handle_TarWriter + + case ffjttarAppenderBuffer: + goto handle_Buffer + + case ffjttarAppenderSeenFiles: + goto handle_SeenFiles + + case ffjttarAppenderIDMappings: + goto handle_IDMappings + + case ffjttarAppenderChownOpts: + goto handle_ChownOpts + + case ffjttarAppenderWhiteoutConverter: + goto handle_WhiteoutConverter + + case ffjttarAppendernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_TarWriter: + + /* handler: j.TarWriter type=tar.Writer kind=struct quoted=false*/ + + { + /* Falling back. 
type=tar.Writer kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.TarWriter) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Buffer: + + /* handler: j.Buffer type=bufio.Writer kind=struct quoted=false*/ + + { + /* Falling back. type=bufio.Writer kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.Buffer) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_SeenFiles: + + /* handler: j.SeenFiles type=map[uint64]string kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.SeenFiles = nil + } else { + + j.SeenFiles = make(map[uint64]string, 0) + + wantVal := true + + for { + + var k uint64 + + var tmpJSeenFiles string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + k = uint64(tval) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJSeenFiles type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJSeenFiles = string(string(outBuf)) + + } + } + + j.SeenFiles[k] = tmpJSeenFiles + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IDMappings: + + /* handler: j.IDMappings type=idtools.IDMappings kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMappings kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.IDMappings) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ChownOpts: + + /* handler: j.ChownOpts type=idtools.IDPair kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDPair kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.ChownOpts) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_WhiteoutConverter: + + /* handler: j.WhiteoutConverter type=archive.tarWhiteoutConverter kind=interface quoted=false*/ + + { + /* Falling back. 
type=archive.tarWhiteoutConverter kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.WhiteoutConverter) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index 8ab6b83f..d3d6c8f7 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -257,6 +257,7 @@ func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whit // FileInfo describes the information of a file. type FileInfo struct { parent *FileInfo + idMappings *idtools.IDMappings name string stat *system.StatT children map[string]*FileInfo @@ -329,7 +330,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { // be visible when actually comparing the stat fields. The only time this // breaks down is if some code intentionally hides a change by setting // back mtime - if statDifferent(oldStat, newStat) || + if statDifferent(oldStat, oldInfo, newStat, info) || !bytes.Equal(oldChild.capability, newChild.capability) { change := Change{ Path: newChild.path(), @@ -379,18 +380,19 @@ func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { return changes } -func newRootFileInfo() *FileInfo { +func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { // As this runs on the daemon side, file paths are OS specific. root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), + name: string(os.PathSeparator), + idMappings: idMappings, + children: make(map[string]*FileInfo), } return root } // ChangesDirs compares two directories and generates an array of Change objects describing the changes. // If oldDir is "", then all files in newDir will be Add-Changes. -func ChangesDirs(newDir, oldDir string) ([]Change, error) { +func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) { var ( oldRoot, newRoot *FileInfo ) @@ -402,7 +404,7 @@ func ChangesDirs(newDir, oldDir string) ([]Change, error) { defer os.Remove(emptyDir) oldDir = emptyDir } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir, oldMappings, newMappings) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go index 8a82e857..b1230936 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -9,6 +9,7 @@ import ( "syscall" "unsafe" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "golang.org/x/sys/unix" ) @@ -22,10 +23,12 @@ import ( // directly. 
Eliminating stat calls in this way can save up to seconds on large // images. type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo + idmap1 *idtools.IDMappings + idmap2 *idtools.IDMappings } // collectFileInfoForChanges returns a complete representation of the trees @@ -34,12 +37,12 @@ type walker struct { // and dir2 will be pruned from the results. This method is *only* to be used // to generating a list of changes between the two directories, as it does not // reflect the full contents. -func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { +func collectFileInfoForChanges(dir1, dir2 string, idmap1, idmap2 *idtools.IDMappings) (*FileInfo, *FileInfo, error) { w := &walker{ dir1: dir1, dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), + root1: newRootFileInfo(idmap1), + root2: newRootFileInfo(idmap2), } i1, err := os.Lstat(w.dir1) @@ -69,9 +72,10 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) } info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: root.idMappings, } cpath := filepath.Join(dir, path) stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go index e1d1e7a9..bbbd8c9d 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -9,21 +9,22 @@ import ( "runtime" "strings" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" ) -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { +func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtools.IDMappings) (*FileInfo, *FileInfo, error) { var ( oldRoot, newRoot *FileInfo err1, err2 error errs = make(chan error, 2) ) go func() { - oldRoot, err1 = collectFileInfo(oldDir) + oldRoot, err1 = collectFileInfo(oldDir, oldIDMap) errs <- err1 }() go func() { - newRoot, err2 = collectFileInfo(newDir) + newRoot, err2 = collectFileInfo(newDir, newIDMap) errs <- err2 }() @@ -37,8 +38,8 @@ func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, err return oldRoot, newRoot, nil } -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() +func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { + root := newRootFileInfo(idMappings) err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { if err != nil { @@ -73,9 +74,10 @@ func collectFileInfo(sourceDir string) (*FileInfo, error) { } info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: idMappings, } s, err := system.Lstat(path) diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go index d669c01b..031ec341 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go +++ 
b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go @@ -6,15 +6,26 @@ import ( "os" "syscall" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "golang.org/x/sys/unix" ) -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { // Don't look at size for dirs, its not a good measure of change + oldUid, oldGid := oldStat.UID(), oldStat.GID() + uid, gid := newStat.UID(), newStat.GID() + if cuid, cgid, err := newInfo.idMappings.ToContainer(idtools.IDPair{UID: int(uid), GID: int(gid)}); err == nil { + uid = uint32(cuid) + gid = uint32(cgid) + if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUid), GID: int(oldGid)}); err == nil { + oldUid = uint32(oldcuid) + oldGid = uint32(oldcgid) + } + } + ownerChanged := uid != oldUid || gid != oldGid if oldStat.Mode() != newStat.Mode() || - oldStat.UID() != newStat.UID() || - oldStat.GID() != newStat.GID() || + ownerChanged || oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, its not a good measure of change (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go index 5ad3d7e3..966400e5 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go @@ -6,7 +6,7 @@ import ( "github.com/containers/storage/pkg/system" ) -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { // Don't look at size for dirs, its not a good measure of change if oldStat.Mtim() != newStat.Mtim() || diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index f93f4cb1..eb1f2b4c 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -192,7 +192,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, srcData = tmpFile } - if err := remapIDs(idMappings, srcHdr); err != nil { + if err := remapIDs(nil, idMappings, options.ChownOpts, srcHdr); err != nil { return 0, err } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index 2735f140..b9fa228e 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -16,7 +16,18 @@ func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { if idMappings == nil { idMappings = &idtools.IDMappings{} } - return &archive.Archiver{Untar: Untar, IDMappings: idMappings} + return &archive.Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings} +} + +// NewArchiverWithChown returns a new Archiver which uses chrootarchive.Untar and the provided ID mapping configuration on both ends +func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *archive.Archiver { + if tarIDMappings == nil { + tarIDMappings = &idtools.IDMappings{} + } + if untarIDMappings == nil { + untarIDMappings = &idtools.IDMappings{} + } + 
return &archive.Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings} } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index c7de67ec..407dda87 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strconv" "strings" "sync" "time" @@ -136,9 +137,8 @@ type StoreOptions struct { GraphDriverName string `json:"driver,omitempty"` // GraphDriverOptions are driver-specific options. GraphDriverOptions []string `json:"driver-options,omitempty"` - // UIDMap and GIDMap are used mainly for deciding on the ownership of - // files in layers as they're stored on disk, which is often necessary - // when user namespaces are being used. + // UIDMap and GIDMap are used for setting up a container's root filesystem + // for use inside of a user namespace where UID mapping is being used. UIDMap []idtools.IDMap `json:"uidmap,omitempty"` GIDMap []idtools.IDMap `json:"gidmap,omitempty"` } @@ -152,6 +152,8 @@ type Store interface { GraphRoot() string GraphDriverName() string GraphOptions() []string + UIDMap() []idtools.IDMap + GIDMap() []idtools.IDMap // GraphDriver obtains and returns a handle to the graph Driver object used // by the Store. @@ -161,7 +163,7 @@ type Store interface { // optionally having the specified ID (one will be assigned if none is // specified), with the specified layer (or no layer) as its parent, // and with optional names. (The writeable flag is ignored.) - CreateLayer(id, parent string, names []string, mountLabel string, writeable bool) (*Layer, error) + CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) // PutLayer combines the functions of CreateLayer and ApplyDiff, // marking the layer for automatic removal if applying the diff fails @@ -174,7 +176,7 @@ type Store interface { // if reexec.Init { // return // } - PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff io.Reader) (*Layer, int64, error) + PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) // CreateImage creates a new image, optionally with the specified ID // (one will be assigned if none is specified), with optional names, @@ -303,6 +305,11 @@ type Store interface { // if we don't have a value on hand. LayerSize(id string) (int64, error) + // LayerParentOwners returns the UIDs and GIDs of owners of parents of + // the layer's mountpoint for which the layer's UID and GID maps (if + // any are defined) don't contain corresponding IDs. + LayerParentOwners(id string) ([]int, []int, error) + // Layers returns a list of the currently known layers. Layers() ([]Layer, error) @@ -413,6 +420,11 @@ type Store interface { // directory. FromContainerRunDirectory(id, file string) ([]byte, error) + // ContainerParentOwners returns the UIDs and GIDs of owners of parents + // of the container's layer's mountpoint for which the layer's UID and + // GID maps (if any are defined) don't contain corresponding IDs. + ContainerParentOwners(id string) ([]int, []int, error) + // Lookup returns the ID of a layer, image, or container with the specified // name or ID. 
Lookup(name string) (string, error) @@ -429,6 +441,33 @@ type Store interface { Version() ([][2]string, error) } +// IDMappingOptions are used for specifying how ID mapping should be set up for +// a layer or container. +type IDMappingOptions struct { + // UIDMap and GIDMap are used for setting up a layer's root filesystem + // for use inside of a user namespace where ID mapping is being used. + // If HostUIDMapping/HostGIDMapping is true, no mapping of the + // respective type will be used. Otherwise, if UIDMap and/or GIDMap + // contain at least one mapping, one or both will be used. By default, + // if neither of those conditions apply, if the layer has a parent + // layer, the parent layer's mapping will be used, and if it does not + // have a parent layer, the mapping which was passed to the Store + // object when it was initialized will be used. + HostUIDMapping bool + HostGIDMapping bool + UIDMap []idtools.IDMap + GIDMap []idtools.IDMap +} + +// LayerOptions is used for passing options to a Store's CreateLayer() and PutLayer() methods. +type LayerOptions struct { + // IDMappingOptions specifies the type of ID mapping which should be + // used for this layer. If nothing is specified, the layer will + // inherit settings from its parent layer or, if it has no parent + // layer, the Store object. + IDMappingOptions +} + // ImageOptions is used for passing options to a Store's CreateImage() method. type ImageOptions struct { // CreationDate, if not zero, will override the default behavior of marking the image as having been @@ -440,6 +479,11 @@ type ImageOptions struct { // ContainerOptions is used for passing options to a Store's CreateContainer() method. type ContainerOptions struct { + // IDMappingOptions specifies the type of ID mapping which should be + // used for this container's layer. If nothing is specified, the + // container's layer will inherit settings from the image's top layer + // or, if it is not being created based on an image, the Store object. 
+ IDMappingOptions } type store struct { @@ -558,6 +602,14 @@ func (s *store) GraphOptions() []string { return s.graphOptions } +func (s *store) UIDMap() []idtools.IDMap { + return copyIDMap(s.uidMap) +} + +func (s *store) GIDMap() []idtools.IDMap { + return copyIDMap(s.gidMap) +} + func (s *store) load() error { driver, err := s.GraphDriver() if err != nil { @@ -662,7 +714,7 @@ func (s *store) LayerStore() (LayerStore, error) { if err := os.MkdirAll(glpath, 0700); err != nil { return nil, err } - rls, err := newLayerStore(rlpath, glpath, driver) + rls, err := newLayerStore(rlpath, glpath, driver, s.uidMap, s.gidMap) if err != nil { return nil, err } @@ -742,7 +794,8 @@ func (s *store) ContainerStore() (ContainerStore, error) { return nil, ErrLoadError } -func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff io.Reader) (*Layer, int64, error) { +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) { + var parentLayer *Layer rlstore, err := s.LayerStore() if err != nil { return nil, -1, err @@ -768,9 +821,27 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w if id == "" { id = stringid.GenerateRandomID() } + if options == nil { + options = &LayerOptions{} + } + if options.HostUIDMapping { + options.UIDMap = nil + } + if options.HostGIDMapping { + options.GIDMap = nil + } + uidMap := options.UIDMap + gidMap := options.GIDMap if parent != "" { var ilayer *Layer for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) { + if lstore != rlstore { + lstore.Lock() + defer lstore.Unlock() + if modified, err := lstore.Modified(); modified || err != nil { + lstore.Load() + } + } if l, err := lstore.Get(parent); err == nil && l != nil { ilayer = l parent = ilayer.ID @@ -780,6 +851,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w if ilayer == nil { return nil, -1, ErrLayerUnknown } + parentLayer = ilayer containers, err := rcstore.Containers() if err != nil { return nil, -1, err @@ -789,12 +861,33 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w return nil, -1, ErrParentIsContainer } } + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = ilayer.UIDMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = ilayer.GIDMap + } + } else { + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = s.uidMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = s.gidMap + } } - return rlstore.Put(id, parent, names, mountLabel, nil, writeable, nil, diff) + layerOptions := &LayerOptions{ + IDMappingOptions: IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(uidMap), + GIDMap: copyIDMap(gidMap), + }, + } + return rlstore.Put(id, parentLayer, names, mountLabel, nil, layerOptions, writeable, nil, diff) } -func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool) (*Layer, error) { - layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, nil) +func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) { + layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, options, nil) return layer, err } @@ -849,6 +942,15 @@ func (s *store) CreateImage(id string, names []string, layer, 
metadata string, o } func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { + if options == nil { + options = &ContainerOptions{} + } + if options.HostUIDMapping { + options.UIDMap = nil + } + if options.HostGIDMapping { + options.GIDMap = nil + } rlstore, err := s.LayerStore() if err != nil { return nil, err @@ -862,8 +964,10 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat id = stringid.GenerateRandomID() } - imageTopLayer := "" + var imageTopLayer *Layer imageID := "" + uidMap := options.UIDMap + gidMap := options.GIDMap if image != "" { istore, err := s.ImageStore() if err != nil { @@ -888,10 +992,53 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat if cimage == nil { return nil, ErrImageUnknown } - imageTopLayer = cimage.TopLayer imageID = cimage.ID + + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + var ilayer *Layer + for _, store := range append([]ROLayerStore{rlstore}, lstores...) { + if store != rlstore { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + } + ilayer, err = store.Get(cimage.TopLayer) + if err == nil { + break + } + } + if ilayer == nil { + return nil, ErrLayerUnknown + } + imageTopLayer = ilayer + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = ilayer.UIDMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = ilayer.GIDMap + } + } else { + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = s.uidMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = s.gidMap + } } - clayer, err := rlstore.Create(layer, imageTopLayer, nil, "", nil, true) + layerOptions := &LayerOptions{ + IDMappingOptions: IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(uidMap), + GIDMap: copyIDMap(gidMap), + }, + } + clayer, err := rlstore.Create(layer, imageTopLayer, nil, "", nil, layerOptions, true) if err != nil { return nil, err } @@ -905,7 +1052,15 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } - container, err := rcstore.Create(id, names, imageID, layer, metadata) + options = &ContainerOptions{ + IDMappingOptions: IDMappingOptions{ + HostUIDMapping: len(clayer.UIDMap) == 0, + HostGIDMapping: len(clayer.GIDMap) == 0, + UIDMap: copyIDMap(clayer.UIDMap), + GIDMap: copyIDMap(clayer.GIDMap), + }, + } + container, err := rcstore.Create(id, names, imageID, layer, metadata, options) if err != nil || container == nil { rlstore.Delete(layer) } @@ -1944,6 +2099,51 @@ func (s *store) LayerSize(id string) (int64, error) { return -1, ErrLayerUnknown } +func (s *store) LayerParentOwners(id string) ([]int, []int, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, nil, err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + if rlstore.Exists(id) { + return rlstore.ParentOwners(id) + } + return nil, nil, ErrLayerUnknown +} + +func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, nil, err + } + rcstore, err := s.ContainerStore() + if err != nil { + return nil, nil, err + } + rlstore.Lock() + defer 
rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + container, err := rcstore.Get(id) + if err != nil { + return nil, nil, err + } + if rlstore.Exists(container.LayerID) { + return rlstore.ParentOwners(container.LayerID) + } + return nil, nil, ErrLayerUnknown +} + func (s *store) Layers() ([]Layer, error) { var layers []Layer lstore, err := s.LayerStore() @@ -2399,6 +2599,18 @@ type OptionsConfig struct { // OverrideKernelCheck OverrideKernelCheck string `toml:"override_kernel_check"` + + // RemapUIDs is a list of default UID mappings to use for layers. + RemapUIDs string `toml:"remap-uids"` + // RemapGIDs is a list of default GID mappings to use for layers. + RemapGIDs string `toml:"remap-gids"` + + // RemapUser is the name of one or more entries in /etc/subuid which + // should be used to set up default UID mappings. + RemapUser string `toml:"remap-user"` + // RemapGroup is the name of one or more entries in /etc/subgid which + // should be used to set up default GID mappings. + RemapGroup string `toml:"remap-group"` } // TOML-friendly explicit tables used for conversions. @@ -2448,6 +2660,71 @@ func init() { if config.Storage.Options.OverrideKernelCheck != "" { DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.override_kernel_check=%s", config.Storage.Driver, config.Storage.Options.OverrideKernelCheck)) } + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" { + config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser + } + if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" { + config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup + } + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" { + mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup) + if err != nil { + fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) + return + } + DefaultStoreOptions.UIDMap = mappings.UIDs() + DefaultStoreOptions.GIDMap = mappings.GIDs() + } + nonDigitsToWhitespace := func(r rune) rune { + if strings.IndexRune("0123456789", r) == -1 { + return ' ' + } else { + return r + } + } + parseTriple := func(spec []string) (container, host, size uint32, err error) { + cid, err := strconv.ParseUint(spec[0], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err) + } + hid, err := strconv.ParseUint(spec[1], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err) + } + sz, err := strconv.ParseUint(spec[2], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err) + } + return uint32(cid), uint32(hid), uint32(sz), nil + } + parseIDMap := func(idMapSpec, mapSetting string) (idmap []idtools.IDMap) { + if len(idMapSpec) > 0 { + idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec)) + if len(idSpec)%3 != 0 { + fmt.Printf("Error initializing ID mappings: %s setting is malformed.\n", mapSetting) + return nil + } + for i := range idSpec { + if i%3 != 0 { + continue + } + cid, hid, size, err := parseTriple(idSpec[i : i+3]) + if err != nil { + fmt.Printf("Error 
initializing ID mappings: %s setting is malformed.\n", mapSetting) + return nil + } + mapping := idtools.IDMap{ + ContainerID: int(cid), + HostID: int(hid), + Size: int(size), + } + idmap = append(idmap, mapping) + } + } + return idmap + } + DefaultStoreOptions.UIDMap = append(DefaultStoreOptions.UIDMap, parseIDMap(config.Storage.Options.RemapUIDs, "remap-uids")...) + DefaultStoreOptions.GIDMap = append(DefaultStoreOptions.GIDMap, parseIDMap(config.Storage.Options.RemapGIDs, "remap-gids")...) if os.Getenv("STORAGE_DRIVER") != "" { DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") } diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c new file mode 100644 index 00000000..f84d61ee --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c @@ -0,0 +1,228 @@ +#include "osxkeychain_darwin.h" +#include +#include +#include +#include + +char *get_error(OSStatus status) { + char *buf = malloc(128); + CFStringRef str = SecCopyErrorMessageString(status, NULL); + int success = CFStringGetCString(str, buf, 128, kCFStringEncodingUTF8); + if (!success) { + strncpy(buf, "Unknown error", 128); + } + return buf; +} + +char *keychain_add(struct Server *server, char *label, char *username, char *secret) { + SecKeychainItemRef item; + + OSStatus status = SecKeychainAddInternetPassword( + NULL, + strlen(server->host), server->host, + 0, NULL, + strlen(username), username, + strlen(server->path), server->path, + server->port, + server->proto, + kSecAuthenticationTypeDefault, + strlen(secret), secret, + &item + ); + + if (status) { + return get_error(status); + } + + SecKeychainAttribute attribute; + SecKeychainAttributeList attrs; + attribute.tag = kSecLabelItemAttr; + attribute.data = label; + attribute.length = strlen(label); + attrs.count = 1; + attrs.attr = &attribute; + + status = SecKeychainItemModifyContent(item, &attrs, 0, NULL); + + if (status) { + return get_error(status); + } + + return NULL; +} + +char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret) { + char *tmp; + SecKeychainItemRef item; + + OSStatus status = SecKeychainFindInternetPassword( + NULL, + strlen(server->host), server->host, + 0, NULL, + 0, NULL, + strlen(server->path), server->path, + server->port, + server->proto, + kSecAuthenticationTypeDefault, + secret_l, (void **)&tmp, + &item); + + if (status) { + return get_error(status); + } + + *secret = strdup(tmp); + SecKeychainItemFreeContent(NULL, tmp); + + SecKeychainAttributeList list; + SecKeychainAttribute attr; + + list.count = 1; + list.attr = &attr; + attr.tag = kSecAccountItemAttr; + + status = SecKeychainItemCopyContent(item, NULL, &list, NULL, NULL); + if (status) { + return get_error(status); + } + + *username = strdup(attr.data); + *username_l = attr.length; + SecKeychainItemFreeContent(&list, NULL); + + return NULL; +} + +char *keychain_delete(struct Server *server) { + SecKeychainItemRef item; + + OSStatus status = SecKeychainFindInternetPassword( + NULL, + strlen(server->host), server->host, + 0, NULL, + 0, NULL, + strlen(server->path), server->path, + server->port, + server->proto, + kSecAuthenticationTypeDefault, + 0, NULL, + &item); + + if (status) { + return get_error(status); + } + + status = SecKeychainItemDelete(item); + if (status) { + return get_error(status); + } + return NULL; +} + +char * 
CFStringToCharArr(CFStringRef aString) { + if (aString == NULL) { + return NULL; + } + CFIndex length = CFStringGetLength(aString); + CFIndex maxSize = + CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1; + char *buffer = (char *)malloc(maxSize); + if (CFStringGetCString(aString, buffer, maxSize, + kCFStringEncodingUTF8)) { + return buffer; + } + return NULL; +} + +char *keychain_list(char *credsLabel, char *** paths, char *** accts, unsigned int *list_l) { + CFStringRef credsLabelCF = CFStringCreateWithCString(NULL, credsLabel, kCFStringEncodingUTF8); + CFMutableDictionaryRef query = CFDictionaryCreateMutable (NULL, 1, NULL, NULL); + CFDictionaryAddValue(query, kSecClass, kSecClassInternetPassword); + CFDictionaryAddValue(query, kSecReturnAttributes, kCFBooleanTrue); + CFDictionaryAddValue(query, kSecMatchLimit, kSecMatchLimitAll); + CFDictionaryAddValue(query, kSecAttrLabel, credsLabelCF); + //Use this query dictionary + CFTypeRef result= NULL; + OSStatus status = SecItemCopyMatching( + query, + &result); + + CFRelease(credsLabelCF); + + //Ran a search and store the results in result + if (status) { + return get_error(status); + } + CFIndex numKeys = CFArrayGetCount(result); + *paths = (char **) malloc((int)sizeof(char *)*numKeys); + *accts = (char **) malloc((int)sizeof(char *)*numKeys); + //result is of type CFArray + for(CFIndex i=0; i +*/ +import "C" +import ( + "errors" + "net/url" + "strconv" + "strings" + "unsafe" + + "github.com/docker/docker-credential-helpers/credentials" +) + +// errCredentialsNotFound is the specific error message returned by OS X +// when the credentials are not in the keychain. +const errCredentialsNotFound = "The specified item could not be found in the keychain." + +// Osxkeychain handles secrets using the OS X Keychain as store. +type Osxkeychain struct{} + +// Add adds new credentials to the keychain. +func (h Osxkeychain) Add(creds *credentials.Credentials) error { + h.Delete(creds.ServerURL) + + s, err := splitServer(creds.ServerURL) + if err != nil { + return err + } + defer freeServer(s) + + label := C.CString(credentials.CredsLabel) + defer C.free(unsafe.Pointer(label)) + username := C.CString(creds.Username) + defer C.free(unsafe.Pointer(username)) + secret := C.CString(creds.Secret) + defer C.free(unsafe.Pointer(secret)) + + errMsg := C.keychain_add(s, label, username, secret) + if errMsg != nil { + defer C.free(unsafe.Pointer(errMsg)) + return errors.New(C.GoString(errMsg)) + } + + return nil +} + +// Delete removes credentials from the keychain. +func (h Osxkeychain) Delete(serverURL string) error { + s, err := splitServer(serverURL) + if err != nil { + return err + } + defer freeServer(s) + + errMsg := C.keychain_delete(s) + if errMsg != nil { + defer C.free(unsafe.Pointer(errMsg)) + return errors.New(C.GoString(errMsg)) + } + + return nil +} + +// Get returns the username and secret to use for a given registry server URL. 
+func (h Osxkeychain) Get(serverURL string) (string, string, error) { + s, err := splitServer(serverURL) + if err != nil { + return "", "", err + } + defer freeServer(s) + + var usernameLen C.uint + var username *C.char + var secretLen C.uint + var secret *C.char + defer C.free(unsafe.Pointer(username)) + defer C.free(unsafe.Pointer(secret)) + + errMsg := C.keychain_get(s, &usernameLen, &username, &secretLen, &secret) + if errMsg != nil { + defer C.free(unsafe.Pointer(errMsg)) + goMsg := C.GoString(errMsg) + if goMsg == errCredentialsNotFound { + return "", "", credentials.NewErrCredentialsNotFound() + } + + return "", "", errors.New(goMsg) + } + + user := C.GoStringN(username, C.int(usernameLen)) + pass := C.GoStringN(secret, C.int(secretLen)) + return user, pass, nil +} + +// List returns the stored URLs and corresponding usernames. +func (h Osxkeychain) List() (map[string]string, error) { + credsLabelC := C.CString(credentials.CredsLabel) + defer C.free(unsafe.Pointer(credsLabelC)) + + var pathsC **C.char + defer C.free(unsafe.Pointer(pathsC)) + var acctsC **C.char + defer C.free(unsafe.Pointer(acctsC)) + var listLenC C.uint + errMsg := C.keychain_list(credsLabelC, &pathsC, &acctsC, &listLenC) + if errMsg != nil { + defer C.free(unsafe.Pointer(errMsg)) + goMsg := C.GoString(errMsg) + return nil, errors.New(goMsg) + } + + defer C.freeListData(&pathsC, listLenC) + defer C.freeListData(&acctsC, listLenC) + + var listLen int + listLen = int(listLenC) + pathTmp := (*[1 << 30]*C.char)(unsafe.Pointer(pathsC))[:listLen:listLen] + acctTmp := (*[1 << 30]*C.char)(unsafe.Pointer(acctsC))[:listLen:listLen] + //taking the array of c strings into go while ignoring all the stuff irrelevant to credentials-helper + resp := make(map[string]string) + for i := 0; i < listLen; i++ { + if C.GoString(pathTmp[i]) == "0" { + continue + } + resp[C.GoString(pathTmp[i])] = C.GoString(acctTmp[i]) + } + return resp, nil +} + +func splitServer(serverURL string) (*C.struct_Server, error) { + u, err := parseURL(serverURL) + if err != nil { + return nil, err + } + + proto := C.kSecProtocolTypeHTTPS + if u.Scheme == "http" { + proto = C.kSecProtocolTypeHTTP + } + var port int + p := getPort(u) + if p != "" { + port, err = strconv.Atoi(p) + if err != nil { + return nil, err + } + } + + return &C.struct_Server{ + proto: C.SecProtocolType(proto), + host: C.CString(getHostname(u)), + port: C.uint(port), + path: C.CString(u.Path), + }, nil +} + +func freeServer(s *C.struct_Server) { + C.free(unsafe.Pointer(s.host)) + C.free(unsafe.Pointer(s.path)) +} + +// parseURL parses and validates a given serverURL to an url.URL, and +// returns an error if validation failed. Querystring parameters are +// omitted in the resulting URL, because they are not used in the helper. +// +// If serverURL does not have a valid scheme, `//` is used as scheme +// before parsing. This prevents the hostname being used as path, +// and the credentials being stored without host. +func parseURL(serverURL string) (*url.URL, error) { + // Check if serverURL has a scheme, otherwise add `//` as scheme. 
+ if !strings.Contains(serverURL, "://") && !strings.HasPrefix(serverURL, "//") { + serverURL = "//" + serverURL + } + + u, err := url.Parse(serverURL) + if err != nil { + return nil, err + } + + if u.Scheme != "" && u.Scheme != "https" && u.Scheme != "http" { + return nil, errors.New("unsupported scheme: " + u.Scheme) + } + if getHostname(u) == "" { + return nil, errors.New("no hostname in URL") + } + + u.RawQuery = "" + return u, nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h new file mode 100644 index 00000000..c54e7d72 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h @@ -0,0 +1,14 @@ +#include + +struct Server { + SecProtocolType proto; + char *host; + char *path; + unsigned int port; +}; + +char *keychain_add(struct Server *server, char *label, char *username, char *secret); +char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret); +char *keychain_delete(struct Server *server); +char *keychain_list(char *credsLabel, char *** data, char *** accts, unsigned int *list_l); +void freeListData(char *** data, unsigned int length); \ No newline at end of file diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go new file mode 100644 index 00000000..0b7297d2 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go @@ -0,0 +1,13 @@ +//+build go1.8 + +package osxkeychain + +import "net/url" + +func getHostname(u *url.URL) string { + return u.Hostname() +} + +func getPort(u *url.URL) string { + return u.Port() +} diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go new file mode 100644 index 00000000..bdf9b7b0 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go @@ -0,0 +1,41 @@ +//+build !go1.8 + +package osxkeychain + +import ( + "net/url" + "strings" +) + +func getHostname(u *url.URL) string { + return stripPort(u.Host) +} + +func getPort(u *url.URL) string { + return portOnly(u.Host) +} + +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c new file mode 100644 index 00000000..35dea92d --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c @@ -0,0 +1,162 @@ +#include +#include +#include "secretservice_linux.h" + +const SecretSchema *docker_get_schema(void) +{ + static const SecretSchema docker_schema = { + "io.docker.Credentials", SECRET_SCHEMA_NONE, + { + { "label", 
SECRET_SCHEMA_ATTRIBUTE_STRING }, + { "server", SECRET_SCHEMA_ATTRIBUTE_STRING }, + { "username", SECRET_SCHEMA_ATTRIBUTE_STRING }, + { "docker_cli", SECRET_SCHEMA_ATTRIBUTE_STRING }, + { "NULL", 0 }, + } + }; + return &docker_schema; +} + +GError *add(char *label, char *server, char *username, char *secret) { + GError *err = NULL; + + secret_password_store_sync (DOCKER_SCHEMA, SECRET_COLLECTION_DEFAULT, + server, secret, NULL, &err, + "label", label, + "server", server, + "username", username, + "docker_cli", "1", + NULL); + return err; +} + +GError *delete(char *server) { + GError *err = NULL; + + secret_password_clear_sync(DOCKER_SCHEMA, NULL, &err, + "server", server, + "docker_cli", "1", + NULL); + if (err != NULL) + return err; + return NULL; +} + +char *get_attribute(const char *attribute, SecretItem *item) { + GHashTable *attributes; + GHashTableIter iter; + gchar *value, *key; + + attributes = secret_item_get_attributes(item); + g_hash_table_iter_init(&iter, attributes); + while (g_hash_table_iter_next(&iter, (void **)&key, (void **)&value)) { + if (strncmp(key, attribute, strlen(key)) == 0) + return (char *)value; + } + g_hash_table_unref(attributes); + return NULL; +} + +GError *get(char *server, char **username, char **secret) { + GError *err = NULL; + GHashTable *attributes; + SecretService *service; + GList *items, *l; + SecretSearchFlags flags = SECRET_SEARCH_LOAD_SECRETS | SECRET_SEARCH_ALL | SECRET_SEARCH_UNLOCK; + SecretValue *secretValue; + gsize length; + gchar *value; + + attributes = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free); + g_hash_table_insert(attributes, g_strdup("server"), g_strdup(server)); + g_hash_table_insert(attributes, g_strdup("docker_cli"), g_strdup("1")); + + service = secret_service_get_sync(SECRET_SERVICE_NONE, NULL, &err); + if (err == NULL) { + items = secret_service_search_sync(service, DOCKER_SCHEMA, attributes, flags, NULL, &err); + if (err == NULL) { + for (l = items; l != NULL; l = g_list_next(l)) { + value = secret_item_get_schema_name(l->data); + if (strncmp(value, "io.docker.Credentials", strlen(value)) != 0) { + g_free(value); + continue; + } + g_free(value); + secretValue = secret_item_get_secret(l->data); + if (secret != NULL) { + *secret = strdup(secret_value_get(secretValue, &length)); + secret_value_unref(secretValue); + } + *username = get_attribute("username", l->data); + } + g_list_free_full(items, g_object_unref); + } + g_object_unref(service); + } + g_hash_table_unref(attributes); + if (err != NULL) { + return err; + } + return NULL; +} + +GError *list(char *ref_label, char *** paths, char *** accts, unsigned int *list_l) { + GList *items; + GError *err = NULL; + SecretService *service; + SecretSearchFlags flags = SECRET_SEARCH_LOAD_SECRETS | SECRET_SEARCH_ALL | SECRET_SEARCH_UNLOCK; + GHashTable *attributes = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free); + + // List credentials with the right label only + g_hash_table_insert(attributes, g_strdup("label"), g_strdup(ref_label)); + + service = secret_service_get_sync(SECRET_SERVICE_NONE, NULL, &err); + if (err != NULL) { + return err; + } + + items = secret_service_search_sync(service, NULL, attributes, flags, NULL, &err); + int numKeys = g_list_length(items); + if (err != NULL) { + return err; + } + + char **tmp_paths = (char **) calloc(1,(int)sizeof(char *)*numKeys); + char **tmp_accts = (char **) calloc(1,(int)sizeof(char *)*numKeys); + + // items now contains our keys from the gnome keyring + // we will now put it in our two lists to return 
it to go + GList *current; + int listNumber = 0; + for(current = items; current!=NULL; current = current->next) { + char *pathTmp = secret_item_get_label(current->data); + // you cannot have a key without a label in the gnome keyring + char *acctTmp = get_attribute("username",current->data); + if (acctTmp==NULL) { + acctTmp = "account not defined"; + } + + tmp_paths[listNumber] = (char *) calloc(1, sizeof(char)*(strlen(pathTmp)+1)); + tmp_accts[listNumber] = (char *) calloc(1, sizeof(char)*(strlen(acctTmp)+1)); + + memcpy(tmp_paths[listNumber], pathTmp, sizeof(char)*(strlen(pathTmp)+1)); + memcpy(tmp_accts[listNumber], acctTmp, sizeof(char)*(strlen(acctTmp)+1)); + + listNumber = listNumber + 1; + } + + *paths = (char **) realloc(tmp_paths, (int)sizeof(char *)*listNumber); + *accts = (char **) realloc(tmp_accts, (int)sizeof(char *)*listNumber); + + *list_l = listNumber; + + return NULL; +} + +void freeListData(char *** data, unsigned int length) { + int i; + for(i=0; i +*/ +import "C" +import ( + "errors" + "unsafe" + + "github.com/docker/docker-credential-helpers/credentials" +) + +// Secretservice handles secrets using Linux secret-service as a store. +type Secretservice struct{} + +// Add adds new credentials to the keychain. +func (h Secretservice) Add(creds *credentials.Credentials) error { + if creds == nil { + return errors.New("missing credentials") + } + credsLabel := C.CString(credentials.CredsLabel) + defer C.free(unsafe.Pointer(credsLabel)) + server := C.CString(creds.ServerURL) + defer C.free(unsafe.Pointer(server)) + username := C.CString(creds.Username) + defer C.free(unsafe.Pointer(username)) + secret := C.CString(creds.Secret) + defer C.free(unsafe.Pointer(secret)) + + if err := C.add(credsLabel, server, username, secret); err != nil { + defer C.g_error_free(err) + errMsg := (*C.char)(unsafe.Pointer(err.message)) + return errors.New(C.GoString(errMsg)) + } + return nil +} + +// Delete removes credentials from the store. +func (h Secretservice) Delete(serverURL string) error { + if serverURL == "" { + return errors.New("missing server url") + } + server := C.CString(serverURL) + defer C.free(unsafe.Pointer(server)) + + if err := C.delete(server); err != nil { + defer C.g_error_free(err) + errMsg := (*C.char)(unsafe.Pointer(err.message)) + return errors.New(C.GoString(errMsg)) + } + return nil +} + +// Get returns the username and secret to use for a given registry server URL. 
+func (h Secretservice) Get(serverURL string) (string, string, error) { + if serverURL == "" { + return "", "", errors.New("missing server url") + } + var username *C.char + defer C.free(unsafe.Pointer(username)) + var secret *C.char + defer C.free(unsafe.Pointer(secret)) + server := C.CString(serverURL) + defer C.free(unsafe.Pointer(server)) + + err := C.get(server, &username, &secret) + if err != nil { + defer C.g_error_free(err) + errMsg := (*C.char)(unsafe.Pointer(err.message)) + return "", "", errors.New(C.GoString(errMsg)) + } + user := C.GoString(username) + pass := C.GoString(secret) + if pass == "" { + return "", "", credentials.NewErrCredentialsNotFound() + } + return user, pass, nil +} + +// List returns the stored URLs and corresponding usernames for a given credentials label +func (h Secretservice) List() (map[string]string, error) { + credsLabelC := C.CString(credentials.CredsLabel) + defer C.free(unsafe.Pointer(credsLabelC)) + + var pathsC **C.char + defer C.free(unsafe.Pointer(pathsC)) + var acctsC **C.char + defer C.free(unsafe.Pointer(acctsC)) + var listLenC C.uint + err := C.list(credsLabelC, &pathsC, &acctsC, &listLenC) + if err != nil { + defer C.free(unsafe.Pointer(err)) + return nil, errors.New("Error from list function in secretservice_linux.c likely due to error in secretservice library") + } + defer C.freeListData(&pathsC, listLenC) + defer C.freeListData(&acctsC, listLenC) + + resp := make(map[string]string) + + listLen := int(listLenC) + if listLen == 0 { + return resp, nil + } + // The maximum capacity of the following two slices is limited to (2^29)-1 to remain compatible + // with 32-bit platforms. The size of a `*C.char` (a pointer) is 4 Byte on a 32-bit system + // and (2^29)*4 == math.MaxInt32 + 1. -- See issue golang/go#13656 + pathTmp := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(pathsC))[:listLen:listLen] + acctTmp := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(acctsC))[:listLen:listLen] + for i := 0; i < listLen; i++ { + resp[C.GoString(pathTmp[i])] = C.GoString(acctTmp[i]) + } + + return resp, nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h new file mode 100644 index 00000000..a28179db --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h @@ -0,0 +1,13 @@ +#define SECRET_WITH_UNSTABLE 1 +#define SECRET_API_SUBJECT_TO_CHANGE 1 +#include + +const SecretSchema *docker_get_schema(void) G_GNUC_CONST; + +#define DOCKER_SCHEMA docker_get_schema() + +GError *add(char *label, char *server, char *username, char *secret); +GError *delete(char *server); +GError *get(char *server, char **username, char **secret); +GError *list(char *label, char *** paths, char *** accts, unsigned int *list_l); +void freeListData(char *** data, unsigned int length); diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto new file mode 100644 index 00000000..7eaf2291 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto @@ -0,0 +1,139 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto new file mode 100644 index 00000000..f71baa09 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto @@ -0,0 +1,166 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. 
Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. 
+ optional string content = 15; + } + repeated File file = 15; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto new file mode 100644 index 00000000..cc8a0021 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto @@ -0,0 +1,831 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. 
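A rough sketch of the plugin contract described above: read a serialized CodeGeneratorRequest from stdin and write a CodeGeneratorResponse to stdout. The gogo import paths and the plugin name are assumptions; a real generator would walk the request's proto_file descriptors instead of emitting placeholder text.

// protoc-gen-sketch: a hypothetical, minimal protoc plugin.
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/gogo/protobuf/proto"
	plugin "github.com/gogo/protobuf/protoc-gen-gogo/plugin"
)

func main() {
	// protoc hands the plugin a serialized CodeGeneratorRequest on stdin.
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(data, req); err != nil {
		log.Fatal(err)
	}

	// Emit one trivial output file per file the plugin was asked to generate.
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".txt"),
			Content: proto.String("// generated from " + name + "\n"),
		})
	}

	// The response goes back to protoc on stdout, also as a serialized proto.
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := os.Stdout.Write(out); err != nil {
		log.Fatal(err)
	}
}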
+ repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). 
+ optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. 
+// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. 
+ // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + //reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. 
+ // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + //reserved 8; // javalite_serializable + //reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). By default these types are + // represented as JavaScript strings. This avoids loss of precision that can + // happen when a large value is converted to a floating point JavaScript + // numbers. Specifying JS_NUMBER for the jstype causes the generated + // JavaScript code to use the JavaScript "number" type instead of strings. + // This option is an enum to permit additional types to be added, + // e.g. goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. 
Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + //reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + //reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default=false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. 
+ // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed=true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ repeated int32 span = 2 [packed=true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto new file mode 100644 index 00000000..8bbaa8b6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. 
+// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto new file mode 100644 index 00000000..6057c852 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
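The normalization rules from the Duration comment above (Example 2, Timestamp + Duration) in runnable form; the Timestamp and Duration structs here are simplified stand-ins that only mirror the seconds/nanos fields of the generated types.

package main

import "fmt"

// Simplified stand-ins for google.protobuf.Timestamp and google.protobuf.Duration.
type Timestamp struct {
	Seconds int64
	Nanos   int32
}

type Duration struct {
	Seconds int64
	Nanos   int32
}

// addDuration adds seconds and nanos separately, then renormalizes
// nanos into [0, 1e9), exactly as the pseudo code above describes.
func addDuration(start Timestamp, d Duration) Timestamp {
	end := Timestamp{
		Seconds: start.Seconds + d.Seconds,
		Nanos:   start.Nanos + d.Nanos,
	}
	if end.Nanos < 0 {
		end.Seconds--
		end.Nanos += 1000000000
	} else if end.Nanos >= 1000000000 {
		end.Seconds++
		end.Nanos -= 1000000000
	}
	return end
}

func main() {
	start := Timestamp{Seconds: 10, Nanos: 900000000}
	d := Duration{Seconds: 2, Nanos: 200000000}
	fmt.Println(addDuration(start, d)) // {13 100000000}
}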
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto new file mode 100644 index 00000000..994af79f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto @@ -0,0 +1,246 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "FieldMaskProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). 
+// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a `paths` +// string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. 
+// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +message FieldMask { + // The set of field mask paths. + repeated string paths = 1; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto new file mode 100644 index 00000000..4f78641f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
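The `# JSON Encoding of Field Masks` rules described in the `FieldMask` comment above (paths joined by commas, snake_case field names converted to lowerCamelCase) can be sketched in a few lines of Go. `toJSONMask` and `lowerCamel` are hypothetical helpers for illustration only, not part of gogo/protobuf or of any generated code.

```go
package main

import (
	"fmt"
	"strings"
)

// lowerCamel converts a snake_case field name to lowerCamelCase,
// following the JSON mapping described in the FieldMask comment.
func lowerCamel(field string) string {
	parts := strings.Split(field, "_")
	for i := 1; i < len(parts); i++ {
		if parts[i] != "" {
			parts[i] = strings.ToUpper(parts[i][:1]) + parts[i][1:]
		}
	}
	return strings.Join(parts, "")
}

// toJSONMask joins field-mask paths with commas, converting each
// path segment to lowerCamelCase.
func toJSONMask(paths []string) string {
	out := make([]string, len(paths))
	for i, p := range paths {
		segs := strings.Split(p, ".")
		for j, s := range segs {
			segs[j] = lowerCamel(s)
		}
		out[i] = strings.Join(segs, ".")
	}
	return strings.Join(out, ",")
}

func main() {
	// Mirrors the Profile example in the comment above.
	fmt.Println(toJSONMask([]string{"user.display_name", "photo"}))
	// Output: user.displayName,photo
}
```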
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto new file mode 100644 index 00000000..4ba0b97b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto @@ -0,0 +1,133 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
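As a small aside on the `Struct`/`Value` well-known types introduced above: their JSON mapping is an ordinary JSON object, so a Go `map[string]interface{}` with dynamically typed entries produces the same wire form. This is a sketch of the correspondence only; it does not use the generated `Struct`/`Value` types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	fields := map[string]interface{}{
		"name":    "example",                // string_value
		"count":   3.0,                      // number_value (JSON numbers map to double)
		"enabled": true,                     // bool_value
		"tags":    []interface{}{"a", "b"},  // list_value
		"extra":   nil,                      // null_value
	}
	b, err := json.Marshal(fields)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // a single JSON object, like the Struct JSON mapping
}
```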
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. 
+// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto new file mode 100644 index 00000000..c5632e5c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
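To complement the C, Java, and Python examples in the `Timestamp` comment above, here is a hedged Go sketch that splits a `time.Time` into the `seconds`/`nanos` pair the message carries and prints the RFC 3339 string used by the JSON mapping. The local `timestamp` struct is a stand-in for the generated message type, not the real one.

```go
package main

import (
	"fmt"
	"time"
)

// timestamp is a stand-in for the generated Timestamp message.
type timestamp struct {
	Seconds int64
	Nanos   int32
}

// fromTime splits a time.Time into whole seconds since the Unix epoch
// and the non-negative nanosecond remainder, as the message requires.
func fromTime(t time.Time) timestamp {
	return timestamp{Seconds: t.Unix(), Nanos: int32(t.Nanosecond())}
}

func main() {
	now := time.Now()
	ts := fromTime(now)
	fmt.Printf("seconds=%d nanos=%d\n", ts.Seconds, ts.Nanos)
	// JSON mapping: RFC 3339 in UTC with a trailing "Z".
	fmt.Println(now.UTC().Format(time.RFC3339Nano))
}
```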
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md new file mode 100644 index 00000000..57570137 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md @@ -0,0 +1,44 @@ +## nsenter + +The `nsenter` package registers a special init constructor that is called before +the Go runtime has a chance to boot. This provides us the ability to `setns` on +existing namespaces and avoid the issues that the Go runtime has with multiple +threads. This constructor will be called if this package is registered, +imported, in your go application. + +The `nsenter` package will `import "C"` and it uses [cgo](https://golang.org/cmd/cgo/) +package. 
In cgo, if the import of "C" is immediately preceded by a comment, that comment,
+called the preamble, is used as a header when compiling the C parts of the package.
+So every time package `nsenter` is imported, the C function `nsexec()` is called.
+Package `nsenter` is currently only imported in `main_unix.go`, so that C code
+runs every time we call `cmd.Start` on Linux.
+
+Because `nsexec()` must be run before the Go runtime in order to use the
+Linux kernel namespaces, you must `import` this library into a package if
+you plan to use `libcontainer` directly. Otherwise Go will not execute
+the `nsexec()` constructor, which means that the re-exec will not cause
+the namespaces to be joined. You can import it like this:
+
+```go
+import _ "github.com/opencontainers/runc/libcontainer/nsenter"
+```
+
+`nsexec()` will first get the file descriptor number for the init pipe
+from the environment variable `_LIBCONTAINER_INITPIPE` (which was opened
+by the parent and kept open across the fork-exec of the `nsexec()` init
+process). The init pipe is used to read bootstrap data (namespace paths,
+clone flags, uid and gid mappings, and the console path) from the parent
+process. `nsexec()` will then call `setns(2)` to join the namespaces
+provided in the bootstrap data (if available), `clone(2)` a child process
+with the provided clone flags, update the user and group ID mappings, do
+some further miscellaneous setup steps, and then send the PID of the
+child process to the parent of the `nsexec()` "caller". Finally,
+the parent `nsexec()` will exit and the child `nsexec()` process will
+return to allow the Go runtime to take over.
+
+NOTE: We do both `setns(2)` and `clone(2)` even if we don't have any
+CLONE_NEW* clone flags because we must fork a new process in order to
+enter the PID namespace.
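For illustration, a parent process could wire up the init pipe described above roughly like this: create a socketpair, hand one end to the re-exec'ed child through `ExtraFiles` (where it becomes fd 3), and point `_LIBCONTAINER_INITPIPE` at it so the `nsexec()` constructor can find it. This is a sketch of the mechanism, not runc's actual bootstrap code; the `"init"` argument is illustrative.

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"
)

func main() {
	// Bidirectional pipe: the parent writes bootstrap data and reads the
	// PID message back; the child's nsexec() reads and writes the same fd.
	fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	parentEnd := os.NewFile(uintptr(fds[0]), "init-pipe-parent")
	childEnd := os.NewFile(uintptr(fds[1]), "init-pipe-child")
	defer parentEnd.Close()

	cmd := exec.Command("/proc/self/exe", "init")        // re-exec ourselves
	cmd.ExtraFiles = []*os.File{childEnd}                 // becomes fd 3 in the child
	cmd.Env = append(os.Environ(), "_LIBCONTAINER_INITPIPE=3")
	if err := cmd.Start(); err != nil {                   // nsexec() runs in the child before Go's main
		panic(err)
	}
	childEnd.Close() // the child now holds its own copy

	// The parent would next write the netlink bootstrap data to parentEnd
	// and read back the {"pid": ..., "pid_first": ...} message.
	fmt.Println("started", cmd.Process.Pid)
	_ = cmd.Wait()
}
```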
+ + + diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h new file mode 100644 index 00000000..9e9bdca0 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h @@ -0,0 +1,32 @@ +#ifndef NSENTER_NAMESPACE_H +#define NSENTER_NAMESPACE_H + +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif +#include + +/* All of these are taken from include/uapi/linux/sched.h */ +#ifndef CLONE_NEWNS +# define CLONE_NEWNS 0x00020000 /* New mount namespace group */ +#endif +#ifndef CLONE_NEWCGROUP +# define CLONE_NEWCGROUP 0x02000000 /* New cgroup namespace */ +#endif +#ifndef CLONE_NEWUTS +# define CLONE_NEWUTS 0x04000000 /* New utsname namespace */ +#endif +#ifndef CLONE_NEWIPC +# define CLONE_NEWIPC 0x08000000 /* New ipc namespace */ +#endif +#ifndef CLONE_NEWUSER +# define CLONE_NEWUSER 0x10000000 /* New user namespace */ +#endif +#ifndef CLONE_NEWPID +# define CLONE_NEWPID 0x20000000 /* New pid namespace */ +#endif +#ifndef CLONE_NEWNET +# define CLONE_NEWNET 0x40000000 /* New network namespace */ +#endif + +#endif /* NSENTER_NAMESPACE_H */ diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go new file mode 100644 index 00000000..07f4d63e --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go @@ -0,0 +1,12 @@ +// +build linux,!gccgo + +package nsenter + +/* +#cgo CFLAGS: -Wall +extern void nsexec(); +void __attribute__((constructor)) init(void) { + nsexec(); +} +*/ +import "C" diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go new file mode 100644 index 00000000..63c7a3ec --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go @@ -0,0 +1,25 @@ +// +build linux,gccgo + +package nsenter + +/* +#cgo CFLAGS: -Wall +extern void nsexec(); +void __attribute__((constructor)) init(void) { + nsexec(); +} +*/ +import "C" + +// AlwaysFalse is here to stay false +// (and be exported so the compiler doesn't optimize out its reference) +var AlwaysFalse bool + +func init() { + if AlwaysFalse { + // by referencing this C init() in a noop test, it will ensure the compiler + // links in the C function. + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134 + C.init() + } +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go new file mode 100644 index 00000000..ac701ca3 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux !cgo + +package nsenter + +import "C" diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c new file mode 100644 index 00000000..2c69cee5 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c @@ -0,0 +1,963 @@ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +/* Get all of the CLONE_NEW* flags. 
*/ +#include "namespace.h" + +/* Synchronisation values. */ +enum sync_t { + SYNC_USERMAP_PLS = 0x40, /* Request parent to map our users. */ + SYNC_USERMAP_ACK = 0x41, /* Mapping finished by the parent. */ + SYNC_RECVPID_PLS = 0x42, /* Tell parent we're sending the PID. */ + SYNC_RECVPID_ACK = 0x43, /* PID was correctly received by parent. */ + SYNC_GRANDCHILD = 0x44, /* The grandchild is ready to run. */ + SYNC_CHILD_READY = 0x45, /* The child or grandchild is ready to return. */ + + /* XXX: This doesn't help with segfaults and other such issues. */ + SYNC_ERR = 0xFF, /* Fatal error, no turning back. The error code follows. */ +}; + +/* longjmp() arguments. */ +#define JUMP_PARENT 0x00 +#define JUMP_CHILD 0xA0 +#define JUMP_INIT 0xA1 + +/* JSON buffer. */ +#define JSON_MAX 4096 + +/* Assume the stack grows down, so arguments should be above it. */ +struct clone_t { + /* + * Reserve some space for clone() to locate arguments + * and retcode in this place + */ + char stack[4096] __attribute__ ((aligned(16))); + char stack_ptr[0]; + + /* There's two children. This is used to execute the different code. */ + jmp_buf *env; + int jmpval; +}; + +struct nlconfig_t { + char *data; + + /* Process settings. */ + uint32_t cloneflags; + char *oom_score_adj; + size_t oom_score_adj_len; + + /* User namespace settings. */ + char *uidmap; + size_t uidmap_len; + char *gidmap; + size_t gidmap_len; + char *namespaces; + size_t namespaces_len; + uint8_t is_setgroup; + + /* Rootless container settings. */ + uint8_t is_rootless; + char *uidmappath; + size_t uidmappath_len; + char *gidmappath; + size_t gidmappath_len; +}; + +/* + * List of netlink message types sent to us as part of bootstrapping the init. + * These constants are defined in libcontainer/message_linux.go. + */ +#define INIT_MSG 62000 +#define CLONE_FLAGS_ATTR 27281 +#define NS_PATHS_ATTR 27282 +#define UIDMAP_ATTR 27283 +#define GIDMAP_ATTR 27284 +#define SETGROUP_ATTR 27285 +#define OOM_SCORE_ADJ_ATTR 27286 +#define ROOTLESS_ATTR 27287 +#define UIDMAPPATH_ATTR 27288 +#define GIDMAPPATH_ATTR 27289 + +/* + * Use the raw syscall for versions of glibc which don't include a function for + * it, namely (glibc 2.12). + */ +#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14 +# define _GNU_SOURCE +# include "syscall.h" +# if !defined(SYS_setns) && defined(__NR_setns) +# define SYS_setns __NR_setns +# endif + +#ifndef SYS_setns +# error "setns(2) syscall not supported by glibc version" +#endif + +int setns(int fd, int nstype) +{ + return syscall(SYS_setns, fd, nstype); +} +#endif + +/* XXX: This is ugly. */ +static int syncfd = -1; + +/* TODO(cyphar): Fix this so it correctly deals with syncT. */ +#define bail(fmt, ...) \ + do { \ + int ret = __COUNTER__ + 1; \ + fprintf(stderr, "nsenter: " fmt ": %m\n", ##__VA_ARGS__); \ + if (syncfd >= 0) { \ + enum sync_t s = SYNC_ERR; \ + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) \ + fprintf(stderr, "nsenter: failed: write(s)"); \ + if (write(syncfd, &ret, sizeof(ret)) != sizeof(ret)) \ + fprintf(stderr, "nsenter: failed: write(ret)"); \ + } \ + exit(ret); \ + } while(0) + +static int write_file(char *data, size_t data_len, char *pathfmt, ...) 
+{ + int fd, len, ret = 0; + char path[PATH_MAX]; + + va_list ap; + va_start(ap, pathfmt); + len = vsnprintf(path, PATH_MAX, pathfmt, ap); + va_end(ap); + if (len < 0) + return -1; + + fd = open(path, O_RDWR); + if (fd < 0) { + return -1; + } + + len = write(fd, data, data_len); + if (len != data_len) { + ret = -1; + goto out; + } + + out: + close(fd); + return ret; +} + +enum policy_t { + SETGROUPS_DEFAULT = 0, + SETGROUPS_ALLOW, + SETGROUPS_DENY, +}; + +/* This *must* be called before we touch gid_map. */ +static void update_setgroups(int pid, enum policy_t setgroup) +{ + char *policy; + + switch (setgroup) { + case SETGROUPS_ALLOW: + policy = "allow"; + break; + case SETGROUPS_DENY: + policy = "deny"; + break; + case SETGROUPS_DEFAULT: + default: + /* Nothing to do. */ + return; + } + + if (write_file(policy, strlen(policy), "/proc/%d/setgroups", pid) < 0) { + /* + * If the kernel is too old to support /proc/pid/setgroups, + * open(2) or write(2) will return ENOENT. This is fine. + */ + if (errno != ENOENT) + bail("failed to write '%s' to /proc/%d/setgroups", policy, pid); + } +} + +static int try_mapping_tool(const char *app, int pid, char *map, size_t map_len) +{ + int child; + + /* + * If @app is NULL, execve will segfault. Just check it here and bail (if + * we're in this path, the caller is already getting desparate and there + * isn't a backup to this failing). This usually would be a configuration + * or programming issue. + */ + if (!app) + bail("mapping tool not present"); + + child = fork(); + if (child < 0) + bail("failed to fork"); + + if (!child) { +#define MAX_ARGV 20 + char *argv[MAX_ARGV]; + char *envp[] = { NULL }; + char pid_fmt[16]; + int argc = 0; + char *next; + + snprintf(pid_fmt, 16, "%d", pid); + + argv[argc++] = (char *)app; + argv[argc++] = pid_fmt; + /* + * Convert the map string into a list of argument that + * newuidmap/newgidmap can understand. + */ + + while (argc < MAX_ARGV) { + if (*map == '\0') { + argv[argc++] = NULL; + break; + } + argv[argc++] = map; + next = strpbrk(map, "\n "); + if (next == NULL) + break; + *next++ = '\0'; + map = next + strspn(next, "\n "); + } + + execve(app, argv, envp); + bail("failed to execv"); + } else { + int status; + + while (true) { + if (waitpid(child, &status, 0) < 0) { + if (errno == EINTR) + continue; + bail("failed to waitpid"); + } + if (WIFEXITED(status) || WIFSIGNALED(status)) + return WEXITSTATUS(status); + } + } + + return -1; +} + +static void update_uidmap(const char *path, int pid, char *map, size_t map_len) +{ + if (map == NULL || map_len <= 0) + return; + + if (write_file(map, map_len, "/proc/%d/uid_map", pid) < 0) { + if (errno != EPERM) + bail("failed to update /proc/%d/uid_map", pid); + if (try_mapping_tool(path, pid, map, map_len)) + bail("failed to use newuid map on %d", pid); + } +} + +static void update_gidmap(const char *path, int pid, char *map, size_t map_len) +{ + if (map == NULL || map_len <= 0) + return; + + if (write_file(map, map_len, "/proc/%d/gid_map", pid) < 0) { + if (errno != EPERM) + bail("failed to update /proc/%d/gid_map", pid); + if (try_mapping_tool(path, pid, map, map_len)) + bail("failed to use newgid map on %d", pid); + } +} + +static void update_oom_score_adj(char *data, size_t len) +{ + if (data == NULL || len <= 0) + return; + + if (write_file(data, len, "/proc/self/oom_score_adj") < 0) + bail("failed to update /proc/self/oom_score_adj"); +} + +/* A dummy function that just jumps to the given jumpval. 
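For reference, the bookkeeping that `update_setgroups()`, `update_uidmap()` and `update_gidmap()` perform above can be sketched from Go as well: deny `setgroups(2)` first (required since Linux 3.19 before an unprivileged process may write `gid_map`), then write the `containerID hostID size` mapping lines. `writeIDMaps` is an illustrative helper, not runc's implementation, and the call in `main` only succeeds for a process whose maps have not been written yet.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// writeIDMaps denies setgroups(2) for the target process and then writes
// its uid_map and gid_map. Each map is one or more "containerID hostID size"
// lines, matching the format the kernel expects.
func writeIDMaps(pid int, uidMap, gidMap string) error {
	// Ignore ENOENT: kernels older than 3.19 have no /proc/<pid>/setgroups.
	if err := ioutil.WriteFile(fmt.Sprintf("/proc/%d/setgroups", pid), []byte("deny"), 0); err != nil && !os.IsNotExist(err) {
		return err
	}
	if err := ioutil.WriteFile(fmt.Sprintf("/proc/%d/uid_map", pid), []byte(uidMap), 0); err != nil {
		return err
	}
	return ioutil.WriteFile(fmt.Sprintf("/proc/%d/gid_map", pid), []byte(gidMap), 0)
}

func main() {
	// Map container root (0) to host UID/GID 1000, a single-ID range.
	// This only works for a freshly created user namespace.
	if err := writeIDMaps(os.Getpid(), "0 1000 1\n", "0 1000 1\n"); err != nil {
		fmt.Fprintln(os.Stderr, "id map:", err)
	}
}
```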
*/ +static int child_func(void *arg) __attribute__ ((noinline)); +static int child_func(void *arg) +{ + struct clone_t *ca = (struct clone_t *)arg; + longjmp(*ca->env, ca->jmpval); +} + +static int clone_parent(jmp_buf *env, int jmpval) __attribute__ ((noinline)); +static int clone_parent(jmp_buf *env, int jmpval) +{ + struct clone_t ca = { + .env = env, + .jmpval = jmpval, + }; + + return clone(child_func, ca.stack_ptr, CLONE_PARENT | SIGCHLD, &ca); +} + +/* + * Gets the init pipe fd from the environment, which is used to read the + * bootstrap data and tell the parent what the new pid is after we finish + * setting up the environment. + */ +static int initpipe(void) +{ + int pipenum; + char *initpipe, *endptr; + + initpipe = getenv("_LIBCONTAINER_INITPIPE"); + if (initpipe == NULL || *initpipe == '\0') + return -1; + + pipenum = strtol(initpipe, &endptr, 10); + if (*endptr != '\0') + bail("unable to parse _LIBCONTAINER_INITPIPE"); + + return pipenum; +} + +/* Returns the clone(2) flag for a namespace, given the name of a namespace. */ +static int nsflag(char *name) +{ + if (!strcmp(name, "cgroup")) + return CLONE_NEWCGROUP; + else if (!strcmp(name, "ipc")) + return CLONE_NEWIPC; + else if (!strcmp(name, "mnt")) + return CLONE_NEWNS; + else if (!strcmp(name, "net")) + return CLONE_NEWNET; + else if (!strcmp(name, "pid")) + return CLONE_NEWPID; + else if (!strcmp(name, "user")) + return CLONE_NEWUSER; + else if (!strcmp(name, "uts")) + return CLONE_NEWUTS; + + /* If we don't recognise a name, fallback to 0. */ + return 0; +} + +static uint32_t readint32(char *buf) +{ + return *(uint32_t *) buf; +} + +static uint8_t readint8(char *buf) +{ + return *(uint8_t *) buf; +} + +static void nl_parse(int fd, struct nlconfig_t *config) +{ + size_t len, size; + struct nlmsghdr hdr; + char *data, *current; + + /* Retrieve the netlink header. */ + len = read(fd, &hdr, NLMSG_HDRLEN); + if (len != NLMSG_HDRLEN) + bail("invalid netlink header length %zu", len); + + if (hdr.nlmsg_type == NLMSG_ERROR) + bail("failed to read netlink message"); + + if (hdr.nlmsg_type != INIT_MSG) + bail("unexpected msg type %d", hdr.nlmsg_type); + + /* Retrieve data. */ + size = NLMSG_PAYLOAD(&hdr, 0); + current = data = malloc(size); + if (!data) + bail("failed to allocate %zu bytes of memory for nl_payload", size); + + len = read(fd, data, size); + if (len != size) + bail("failed to read netlink payload, %zu != %zu", len, size); + + /* Parse the netlink payload. */ + config->data = data; + while (current < data + size) { + struct nlattr *nlattr = (struct nlattr *)current; + size_t payload_len = nlattr->nla_len - NLA_HDRLEN; + + /* Advance to payload. */ + current += NLA_HDRLEN; + + /* Handle payload. 
*/ + switch (nlattr->nla_type) { + case CLONE_FLAGS_ATTR: + config->cloneflags = readint32(current); + break; + case ROOTLESS_ATTR: + config->is_rootless = readint8(current); + break; + case OOM_SCORE_ADJ_ATTR: + config->oom_score_adj = current; + config->oom_score_adj_len = payload_len; + break; + case NS_PATHS_ATTR: + config->namespaces = current; + config->namespaces_len = payload_len; + break; + case UIDMAP_ATTR: + config->uidmap = current; + config->uidmap_len = payload_len; + break; + case GIDMAP_ATTR: + config->gidmap = current; + config->gidmap_len = payload_len; + break; + case UIDMAPPATH_ATTR: + config->uidmappath = current; + config->uidmappath_len = payload_len; + break; + case GIDMAPPATH_ATTR: + config->gidmappath = current; + config->gidmappath_len = payload_len; + break; + case SETGROUP_ATTR: + config->is_setgroup = readint8(current); + break; + default: + bail("unknown netlink message type %d", nlattr->nla_type); + } + + current += NLA_ALIGN(payload_len); + } +} + +void nl_free(struct nlconfig_t *config) +{ + free(config->data); +} + +void join_namespaces(char *nslist) +{ + int num = 0, i; + char *saveptr = NULL; + char *namespace = strtok_r(nslist, ",", &saveptr); + struct namespace_t { + int fd; + int ns; + char type[PATH_MAX]; + char path[PATH_MAX]; + } *namespaces = NULL; + + if (!namespace || !strlen(namespace) || !strlen(nslist)) + bail("ns paths are empty"); + + /* + * We have to open the file descriptors first, since after + * we join the mnt namespace we might no longer be able to + * access the paths. + */ + do { + int fd; + char *path; + struct namespace_t *ns; + + /* Resize the namespace array. */ + namespaces = realloc(namespaces, ++num * sizeof(struct namespace_t)); + if (!namespaces) + bail("failed to reallocate namespace array"); + ns = &namespaces[num - 1]; + + /* Split 'ns:path'. */ + path = strstr(namespace, ":"); + if (!path) + bail("failed to parse %s", namespace); + *path++ = '\0'; + + fd = open(path, O_RDONLY); + if (fd < 0) + bail("failed to open %s", path); + + ns->fd = fd; + ns->ns = nsflag(namespace); + strncpy(ns->path, path, PATH_MAX); + } while ((namespace = strtok_r(NULL, ",", &saveptr)) != NULL); + + /* + * The ordering in which we join namespaces is important. We should + * always join the user namespace *first*. This is all guaranteed + * from the container_linux.go side of this, so we're just going to + * follow the order given to us. + */ + + for (i = 0; i < num; i++) { + struct namespace_t ns = namespaces[i]; + + if (setns(ns.fd, ns.ns) < 0) + bail("failed to setns to %s", ns.path); + + close(ns.fd); + } + + free(namespaces); +} + +void nsexec(void) +{ + int pipenum; + jmp_buf env; + int sync_child_pipe[2], sync_grandchild_pipe[2]; + struct nlconfig_t config = { 0 }; + + /* + * If we don't have an init pipe, just return to the go routine. + * We'll only get an init pipe for start or exec. + */ + pipenum = initpipe(); + if (pipenum == -1) + return; + + /* Parse all of the netlink configuration. */ + nl_parse(pipenum, &config); + + /* Set oom_score_adj. This has to be done before !dumpable because + * /proc/self/oom_score_adj is not writeable unless you're an privileged + * user (if !dumpable is set). All children inherit their parent's + * oom_score_adj value on fork(2) so this will always be propagated + * properly. 
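The attributes that `nl_parse()` walks above are plain netlink-style TLVs: a 16-byte `nlmsghdr` with type `INIT_MSG`, followed by 4-byte-aligned `nlattr` records. A minimal Go sketch of that wire layout (assuming a little-endian host, since netlink uses host byte order) might look like this; it is not the encoder libcontainer actually uses.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	initMsg        = 62000 // INIT_MSG
	cloneFlagsAttr = 27281 // CLONE_FLAGS_ATTR
	nlmsgHdrLen    = 16    // NLMSG_HDRLEN
	nlaHdrLen      = 4     // NLA_HDRLEN
)

// appendAttr writes one nlattr: nla_len (header + payload), nla_type,
// the payload, then zero padding up to a 4-byte boundary (NLA_ALIGN).
func appendAttr(buf *bytes.Buffer, typ uint16, payload []byte) {
	binary.Write(buf, binary.LittleEndian, uint16(nlaHdrLen+len(payload)))
	binary.Write(buf, binary.LittleEndian, typ)
	buf.Write(payload)
	for buf.Len()%4 != 0 {
		buf.WriteByte(0)
	}
}

// buildBootstrapMsg packs a header plus a single CLONE_FLAGS_ATTR attribute
// in the layout nl_parse() reads: nlmsg_len, nlmsg_type, flags, seq, pid.
func buildBootstrapMsg(cloneFlags uint32) []byte {
	attrs := new(bytes.Buffer)
	flags := make([]byte, 4)
	binary.LittleEndian.PutUint32(flags, cloneFlags)
	appendAttr(attrs, cloneFlagsAttr, flags)

	msg := new(bytes.Buffer)
	binary.Write(msg, binary.LittleEndian, uint32(nlmsgHdrLen+attrs.Len())) // nlmsg_len
	binary.Write(msg, binary.LittleEndian, uint16(initMsg))                 // nlmsg_type
	binary.Write(msg, binary.LittleEndian, uint16(0))                       // nlmsg_flags
	binary.Write(msg, binary.LittleEndian, uint32(0))                       // nlmsg_seq
	binary.Write(msg, binary.LittleEndian, uint32(0))                       // nlmsg_pid
	msg.Write(attrs.Bytes())
	return msg.Bytes()
}

func main() {
	fmt.Printf("% x\n", buildBootstrapMsg(0x10000000 /* CLONE_NEWUSER */))
}
```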
+ */ + update_oom_score_adj(config.oom_score_adj, config.oom_score_adj_len); + + /* + * Make the process non-dumpable, to avoid various race conditions that + * could cause processes in namespaces we're joining to access host + * resources (or potentially execute code). + * + * However, if the number of namespaces we are joining is 0, we are not + * going to be switching to a different security context. Thus setting + * ourselves to be non-dumpable only breaks things (like rootless + * containers), which is the recommendation from the kernel folks. + */ + if (config.namespaces) { + if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0) + bail("failed to set process as non-dumpable"); + } + + /* Pipe so we can tell the child when we've finished setting up. */ + if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sync_child_pipe) < 0) + bail("failed to setup sync pipe between parent and child"); + + /* + * We need a new socketpair to sync with grandchild so we don't have + * race condition with child. + */ + if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sync_grandchild_pipe) < 0) + bail("failed to setup sync pipe between parent and grandchild"); + + /* TODO: Currently we aren't dealing with child deaths properly. */ + + /* + * Okay, so this is quite annoying. + * + * In order for this unsharing code to be more extensible we need to split + * up unshare(CLONE_NEWUSER) and clone() in various ways. The ideal case + * would be if we did clone(CLONE_NEWUSER) and the other namespaces + * separately, but because of SELinux issues we cannot really do that. But + * we cannot just dump the namespace flags into clone(...) because several + * usecases (such as rootless containers) require more granularity around + * the namespace setup. In addition, some older kernels had issues where + * CLONE_NEWUSER wasn't handled before other namespaces (but we cannot + * handle this while also dealing with SELinux so we choose SELinux support + * over broken kernel support). + * + * However, if we unshare(2) the user namespace *before* we clone(2), then + * all hell breaks loose. + * + * The parent no longer has permissions to do many things (unshare(2) drops + * all capabilities in your old namespace), and the container cannot be set + * up to have more than one {uid,gid} mapping. This is obviously less than + * ideal. In order to fix this, we have to first clone(2) and then unshare. + * + * Unfortunately, it's not as simple as that. We have to fork to enter the + * PID namespace (the PID namespace only applies to children). Since we'll + * have to double-fork, this clone_parent() call won't be able to get the + * PID of the _actual_ init process (without doing more synchronisation than + * I can deal with at the moment). So we'll just get the parent to send it + * for us, the only job of this process is to update + * /proc/pid/{setgroups,uid_map,gid_map}. + * + * And as a result of the above, we also need to setns(2) in the first child + * because if we join a PID namespace in the topmost parent then our child + * will be in that namespace (and it will not be able to give us a PID value + * that makes sense without resorting to sending things with cmsg). + * + * This also deals with an older issue caused by dumping cloneflags into + * clone(2): On old kernels, CLONE_PARENT didn't work with CLONE_NEWPID, so + * we have to unshare(2) before clone(2) in order to do this. This was fixed + * in upstream commit 1f7f4dde5c945f41a7abc2285be43d918029ecc5, and was + * introduced by 40a0d32d1eaffe6aac7324ca92604b6b3977eb0e. 
As far as we're + * aware, the last mainline kernel which had this bug was Linux 3.12. + * However, we cannot comment on which kernels the broken patch was + * backported to. + * + * -- Aleksa "what has my life come to?" Sarai + */ + + switch (setjmp(env)) { + /* + * Stage 0: We're in the parent. Our job is just to create a new child + * (stage 1: JUMP_CHILD) process and write its uid_map and + * gid_map. That process will go on to create a new process, then + * it will send us its PID which we will send to the bootstrap + * process. + */ + case JUMP_PARENT:{ + int len; + pid_t child, first_child = -1; + char buf[JSON_MAX]; + bool ready = false; + + /* For debugging. */ + prctl(PR_SET_NAME, (unsigned long)"runc:[0:PARENT]", 0, 0, 0); + + /* Start the process of getting a container. */ + child = clone_parent(&env, JUMP_CHILD); + if (child < 0) + bail("unable to fork: child_func"); + + /* + * State machine for synchronisation with the children. + * + * Father only return when both child and grandchild are + * ready, so we can receive all possible error codes + * generated by children. + */ + while (!ready) { + enum sync_t s; + int ret; + + syncfd = sync_child_pipe[1]; + close(sync_child_pipe[0]); + + if (read(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with child: next state"); + + switch (s) { + case SYNC_ERR: + /* We have to mirror the error code of the child. */ + if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret)) + bail("failed to sync with child: read(error code)"); + + exit(ret); + case SYNC_USERMAP_PLS: + /* + * Enable setgroups(2) if we've been asked to. But we also + * have to explicitly disable setgroups(2) if we're + * creating a rootless container (this is required since + * Linux 3.19). + */ + if (config.is_rootless && config.is_setgroup) { + kill(child, SIGKILL); + bail("cannot allow setgroup in an unprivileged user namespace setup"); + } + + if (config.is_setgroup) + update_setgroups(child, SETGROUPS_ALLOW); + if (config.is_rootless) + update_setgroups(child, SETGROUPS_DENY); + + /* Set up mappings. */ + update_uidmap(config.uidmappath, child, config.uidmap, config.uidmap_len); + update_gidmap(config.gidmappath, child, config.gidmap, config.gidmap_len); + + s = SYNC_USERMAP_ACK; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(child, SIGKILL); + bail("failed to sync with child: write(SYNC_USERMAP_ACK)"); + } + break; + case SYNC_RECVPID_PLS:{ + first_child = child; + + /* Get the init_func pid. */ + if (read(syncfd, &child, sizeof(child)) != sizeof(child)) { + kill(first_child, SIGKILL); + bail("failed to sync with child: read(childpid)"); + } + + /* Send ACK. */ + s = SYNC_RECVPID_ACK; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(first_child, SIGKILL); + kill(child, SIGKILL); + bail("failed to sync with child: write(SYNC_RECVPID_ACK)"); + } + } + break; + case SYNC_CHILD_READY: + ready = true; + break; + default: + bail("unexpected sync value: %u", s); + } + } + + /* Now sync with grandchild. */ + + ready = false; + while (!ready) { + enum sync_t s; + int ret; + + syncfd = sync_grandchild_pipe[1]; + close(sync_grandchild_pipe[0]); + + s = SYNC_GRANDCHILD; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(child, SIGKILL); + bail("failed to sync with child: write(SYNC_GRANDCHILD)"); + } + + if (read(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with child: next state"); + + switch (s) { + case SYNC_ERR: + /* We have to mirror the error code of the child. 
*/ + if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret)) + bail("failed to sync with child: read(error code)"); + + exit(ret); + case SYNC_CHILD_READY: + ready = true; + break; + default: + bail("unexpected sync value: %u", s); + } + } + + /* + * Send the init_func pid and the pid of the first child back to our parent. + * + * We need to send both back because we can't reap the first child we created (CLONE_PARENT). + * It becomes the responsibility of our parent to reap the first child. + */ + len = snprintf(buf, JSON_MAX, "{\"pid\": %d, \"pid_first\": %d}\n", child, first_child); + if (len < 0) { + kill(child, SIGKILL); + bail("unable to generate JSON for child pid"); + } + if (write(pipenum, buf, len) != len) { + kill(child, SIGKILL); + bail("unable to send child pid to bootstrapper"); + } + + exit(0); + } + + /* + * Stage 1: We're in the first child process. Our job is to join any + * provided namespaces in the netlink payload and unshare all + * of the requested namespaces. If we've been asked to + * CLONE_NEWUSER, we will ask our parent (stage 0) to set up + * our user mappings for us. Then, we create a new child + * (stage 2: JUMP_INIT) for PID namespace. We then send the + * child's PID to our parent (stage 0). + */ + case JUMP_CHILD:{ + pid_t child; + enum sync_t s; + + /* We're in a child and thus need to tell the parent if we die. */ + syncfd = sync_child_pipe[0]; + close(sync_child_pipe[1]); + + /* For debugging. */ + prctl(PR_SET_NAME, (unsigned long)"runc:[1:CHILD]", 0, 0, 0); + + /* + * We need to setns first. We cannot do this earlier (in stage 0) + * because of the fact that we forked to get here (the PID of + * [stage 2: JUMP_INIT]) would be meaningless). We could send it + * using cmsg(3) but that's just annoying. + */ + if (config.namespaces) + join_namespaces(config.namespaces); + + /* + * Unshare all of the namespaces. Now, it should be noted that this + * ordering might break in the future (especially with rootless + * containers). But for now, it's not possible to split this into + * CLONE_NEWUSER + [the rest] because of some RHEL SELinux issues. + * + * Note that we don't merge this with clone() because there were + * some old kernel versions where clone(CLONE_PARENT | CLONE_NEWPID) + * was broken, so we'll just do it the long way anyway. + */ + if (unshare(config.cloneflags) < 0) + bail("failed to unshare namespaces"); + + /* + * Deal with user namespaces first. They are quite special, as they + * affect our ability to unshare other namespaces and are used as + * context for privilege checks. + */ + if (config.cloneflags & CLONE_NEWUSER) { + /* + * We don't have the privileges to do any mapping here (see the + * clone_parent rant). So signal our parent to hook us up. + */ + + /* Switching is only necessary if we joined namespaces. */ + if (config.namespaces) { + if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) < 0) + bail("failed to set process as dumpable"); + } + s = SYNC_USERMAP_PLS; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with parent: write(SYNC_USERMAP_PLS)"); + + /* ... wait for mapping ... */ + + if (read(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with parent: read(SYNC_USERMAP_ACK)"); + if (s != SYNC_USERMAP_ACK) + bail("failed to sync with parent: SYNC_USERMAP_ACK: got %u", s); + /* Switching is only necessary if we joined namespaces. 
*/ + if (config.namespaces) { + if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0) + bail("failed to set process as dumpable"); + } + } + + /* + * TODO: What about non-namespace clone flags that we're dropping here? + * + * We fork again because of PID namespace, setns(2) or unshare(2) don't + * change the PID namespace of the calling process, because doing so + * would change the caller's idea of its own PID (as reported by getpid()), + * which would break many applications and libraries, so we must fork + * to actually enter the new PID namespace. + */ + child = clone_parent(&env, JUMP_INIT); + if (child < 0) + bail("unable to fork: init_func"); + + /* Send the child to our parent, which knows what it's doing. */ + s = SYNC_RECVPID_PLS; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(child, SIGKILL); + bail("failed to sync with parent: write(SYNC_RECVPID_PLS)"); + } + if (write(syncfd, &child, sizeof(child)) != sizeof(child)) { + kill(child, SIGKILL); + bail("failed to sync with parent: write(childpid)"); + } + + /* ... wait for parent to get the pid ... */ + + if (read(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(child, SIGKILL); + bail("failed to sync with parent: read(SYNC_RECVPID_ACK)"); + } + if (s != SYNC_RECVPID_ACK) { + kill(child, SIGKILL); + bail("failed to sync with parent: SYNC_RECVPID_ACK: got %u", s); + } + + s = SYNC_CHILD_READY; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(child, SIGKILL); + bail("failed to sync with parent: write(SYNC_CHILD_READY)"); + } + + /* Our work is done. [Stage 2: JUMP_INIT] is doing the rest of the work. */ + exit(0); + } + + /* + * Stage 2: We're the final child process, and the only process that will + * actually return to the Go runtime. Our job is to just do the + * final cleanup steps and then return to the Go runtime to allow + * init_linux.go to run. + */ + case JUMP_INIT:{ + /* + * We're inside the child now, having jumped from the + * start_child() code after forking in the parent. + */ + enum sync_t s; + + /* We're in a child and thus need to tell the parent if we die. */ + syncfd = sync_grandchild_pipe[0]; + close(sync_grandchild_pipe[1]); + close(sync_child_pipe[0]); + close(sync_child_pipe[1]); + + /* For debugging. */ + prctl(PR_SET_NAME, (unsigned long)"runc:[2:INIT]", 0, 0, 0); + + if (read(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with parent: read(SYNC_GRANDCHILD)"); + if (s != SYNC_GRANDCHILD) + bail("failed to sync with parent: SYNC_GRANDCHILD: got %u", s); + + if (setsid() < 0) + bail("setsid failed"); + + if (setuid(0) < 0) + bail("setuid failed"); + + if (setgid(0) < 0) + bail("setgid failed"); + + if (!config.is_rootless && config.is_setgroup) { + if (setgroups(0, NULL) < 0) + bail("setgroups failed"); + } + + s = SYNC_CHILD_READY; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with patent: write(SYNC_CHILD_READY)"); + + /* Close sync pipes. */ + close(sync_grandchild_pipe[0]); + + /* Free netlink data. */ + nl_free(&config); + + /* Finish executing, let the Go runtime take over. */ + return; + } + default: + bail("unexpected jump value"); + } + + /* Should never be reached. 
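The stage-0 parent above reports the PIDs back over the bootstrap pipe as a single JSON line, `{"pid": ..., "pid_first": ...}`. A reader on the Go side of that pipe could decode it as follows; this is a sketch, with the field names taken from the `snprintf` format string in the code above.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// pidMsg mirrors the JSON object written by the stage-0 parent.
type pidMsg struct {
	Pid      int `json:"pid"`
	PidFirst int `json:"pid_first"`
}

func main() {
	// In practice this line is read from the init pipe written by nsexec().
	line := `{"pid": 12345, "pid_first": 12344}`
	var msg pidMsg
	if err := json.NewDecoder(strings.NewReader(line)).Decode(&msg); err != nil {
		panic(err)
	}
	fmt.Printf("init pid=%d, intermediate child pid=%d\n", msg.Pid, msg.PidFirst)
}
```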
*/ + bail("should never be reached"); +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go 
b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/go4.org/README.md b/vendor/go4.org/README.md index 327863d4..677ec17c 100644 --- a/vendor/go4.org/README.md +++ b/vendor/go4.org/README.md @@ -5,8 +5,8 @@ [go4.org](http://go4.org) is a collection of packages for Go programmers. -They started out living in [Camlistore](https://camlistore.org)'s repo -and elsewhere but they have nothing to do with Camlistore, so we're +They started out living in [Perkeep](https://perkeep.org)'s repo +and elsewhere but they have nothing to do with Perkeep, so we're moving them here. ## Details @@ -49,40 +49,9 @@ moving them here. place to help run the tests on different OS/arches. For now we have Travis at least. -## Contributing +## Contact -To add code to go4, send a pull request or push a change to Gerrithub. - -We assume you already have your $GOPATH set and the go4 code cloned at -$GOPATH/src/go4.org. For example: - -* `git clone https://review.gerrithub.io/camlistore/go4 $GOPATH/src/go4.org` - -### To push a code review to Gerrithub directly: - -* Sign in to [http://gerrithub.io](http://gerrithub.io "Gerrithub") with your Github account. - -* Install the git hook that adds the magic "Change-Id" line to your commit messages: - - `curl "https://camlistore.googlesource.com/camlistore/+/master/misc/commit-msg.githook?format=TEXT" | base64 -d > $GOPATH/src/go4.org/.git/hooks/commit-msg` - -* make changes - -* commit (the unit of code review is a single commit identified by the Change-ID, **NOT** a series of commits on a branch) - -* `git push ssh://$YOUR_GITHUB_USERNAME@review.gerrithub.io:29418/camlistore/go4 HEAD:refs/for/master` - -### Using Github Pull Requests - -* send a pull request with a single commit - -* create a Gerrithub code review at https://review.gerrithub.io/plugins/github-plugin/static/pullrequests.html, selecting the pull request you just created. - -### Problems contributing? - -* Please file an issue or contact the [Camlistore mailing list](https://groups.google.com/forum/#!forum/camlistore) for any problems with the above. - -See [https://review.gerrithub.io/Documentation/user-upload.html](https://review.gerrithub.io/Documentation/user-upload.html) for more generic documentation. - -(TODO: more docs on Gerrit, integrate git-codereview, etc.) +For any question, or communication when a Github issue is not appropriate, +please contact the [Perkeep mailing +list](https://groups.google.com/forum/#!forum/perkeep). diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h new file mode 100644 index 00000000..b3f74162 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.h @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s new file mode 100644 index 00000000..ee7b4bd5 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead. + +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s new file mode 100644 index 00000000..cd793a5b --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s @@ -0,0 +1,65 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +// func cswap(inout *[4][5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + SUBQ $1, SI + NOTQ SI + MOVQ SI, X15 + PSHUFD $0x44, X15, X15 + + MOVOU 0(DI), X0 + MOVOU 16(DI), X2 + MOVOU 32(DI), X4 + MOVOU 48(DI), X6 + MOVOU 64(DI), X8 + MOVOU 80(DI), X1 + MOVOU 96(DI), X3 + MOVOU 112(DI), X5 + MOVOU 128(DI), X7 + MOVOU 144(DI), X9 + + MOVO X1, X10 + MOVO X3, X11 + MOVO X5, X12 + MOVO X7, X13 + MOVO X9, X14 + + PXOR X0, X10 + PXOR X2, X11 + PXOR X4, X12 + PXOR X6, X13 + PXOR X8, X14 + PAND X15, X10 + PAND X15, X11 + PAND X15, X12 + PAND X15, X13 + PAND X15, X14 + PXOR X10, X0 + PXOR X10, X1 + PXOR X11, X2 + PXOR X11, X3 + PXOR X12, X4 + PXOR X12, X5 + PXOR X13, X6 + PXOR X13, X7 + PXOR X14, X8 + PXOR X14, X9 + + MOVOU X0, 0(DI) + MOVOU X2, 16(DI) + MOVOU X4, 32(DI) + MOVOU X6, 48(DI) + MOVOU X8, 64(DI) + MOVOU X1, 80(DI) + MOVOU X3, 96(DI) + MOVOU X5, 112(DI) + MOVOU X7, 128(DI) + MOVOU X9, 144(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 00000000..cb8fbc57 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,834 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// We have an implementation in amd64 assembly so this code is only run on +// non-amd64 platforms. The amd64 assembly does not support gccgo. +// +build !amd64 gccgo appengine + +package curve25519 + +import ( + "encoding/binary" +) + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. 
+type fieldElement [10]int32 + +func feZero(fe *fieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func feOne(fe *fieldElement) { + feZero(fe) + fe[0] = 1 +} + +func feAdd(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func feSub(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func feCopy(dst, src *fieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func feCSwap(f, g *fieldElement, b int32) { + b = -b + for i := range f { + t := b & (f[i] ^ g[i]) + f[i] ^= t + g[i] ^= t + } +} + +// load3 reads a 24-bit, little-endian value from in. +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +// load4 reads a 32-bit, little-endian value from in. +func load4(in []byte) int64 { + return int64(binary.LittleEndian.Uint32(in)) +} + +func feFromBytes(dst *fieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := load3(src[29:]) << 2 + + var carry [10]int64 + carry[9] = (h9 + 1<<24) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + 1<<24) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + 1<<24) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + 1<<24) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + 1<<24) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + 1<<25) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + 1<<25) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + 1<<25) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + 1<<25) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + 1<<25) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + dst[0] = int32(h0) + dst[1] = int32(h1) + dst[2] = int32(h2) + dst[3] = int32(h3) + dst[4] = int32(h4) + dst[5] = int32(h5) + dst[6] = int32(h6) + dst[7] = int32(h7) + dst[8] = int32(h8) + dst[9] = int32(h9) +} + +// feToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20. 
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
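(Aside: the *19 and *38 precomputations in feMul below come from the reduction identity 2^255 ≡ 19 (mod 2^255 - 19): any partial product whose combined limb exponent reaches 255 is folded back in multiplied by 19, and the extra factor of 2 in 38 = 2*19 absorbs the doubling required when both source limbs are odd-indexed 25-bit limbs of the alternating radix.)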
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
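(Aside: feInvert relies on Fermat's little theorem: for the prime p = 2^255 - 19 and z ≠ 0, z^(p-2) ≡ z^(-1) (mod p). The fixed ladder of feSquare/feMul calls below evaluates z^(2^255 - 21) with a constant sequence of operations, so the inversion costs the same for every input.)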
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go new file mode 100644 index 00000000..da9b10d9 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/doc.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of scalar multiplication on +// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html +package curve25519 // import "golang.org/x/crypto/curve25519" + +// basePoint is the x coordinate of the generator of the curve. +var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +// ScalarMult sets dst to the product in*base where dst and base are the x +// coordinates of group points and all values are in little-endian form. +func ScalarMult(dst, in, base *[32]byte) { + scalarMult(dst, in, base) +} + +// ScalarBaseMult sets dst to the product in*base where dst and base are the x +// coordinates of group points, base is the standard generator and all values +// are in little-endian form. 
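These two exported functions are the package's entire public surface. A hedged sketch of a Diffie-Hellman-style exchange built on them (variable names are illustrative, error handling is elided):

    package main

    import (
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/curve25519"
    )

    func main() {
        // Each party picks a random 32-byte private scalar; the package
        // clamps it internally, so raw random bytes are fine.
        var alicePriv, alicePub [32]byte
        rand.Read(alicePriv[:])
        curve25519.ScalarBaseMult(&alicePub, &alicePriv) // public = priv * basepoint

        var bobPriv, bobPub [32]byte
        rand.Read(bobPriv[:])
        curve25519.ScalarBaseMult(&bobPub, &bobPriv)

        // Each side multiplies its own scalar by the peer's public point;
        // both arrive at the same shared secret.
        var aliceShared, bobShared [32]byte
        curve25519.ScalarMult(&aliceShared, &alicePriv, &bobPub)
        curve25519.ScalarMult(&bobShared, &bobPriv, &alicePub)

        fmt.Println(aliceShared == bobShared) // true
    }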
+func ScalarBaseMult(dst, in *[32]byte) { + ScalarMult(dst, in, &basePoint) +} diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s new file mode 100644 index 00000000..39081610 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s new file mode 100644 index 00000000..9e9040b2 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s @@ -0,0 +1,1377 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$296-8 + MOVQ inout+0(FP),DI + + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,0(SP) + MOVQ DX,8(SP) + MOVQ CX,16(SP) + MOVQ R8,24(SP) + MOVQ R9,32(SP) + MOVQ AX,40(SP) + MOVQ R10,48(SP) + MOVQ R11,56(SP) + MOVQ R12,64(SP) + MOVQ R13,72(SP) + MOVQ 40(SP),AX + MULQ 40(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 48(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 48(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(SP) + MOVQ R8,88(SP) + MOVQ R9,96(SP) + MOVQ AX,104(SP) + MOVQ R10,112(SP) + MOVQ 0(SP),AX + MULQ 0(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 8(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 
24(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 32(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(SP) + MOVQ R8,128(SP) + MOVQ R9,136(SP) + MOVQ AX,144(SP) + MOVQ R10,152(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 80(SP),SI + SUBQ 88(SP),DX + SUBQ 96(SP),CX + SUBQ 104(SP),R8 + SUBQ 112(SP),R9 + MOVQ SI,160(SP) + MOVQ DX,168(SP) + MOVQ CX,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,200(SP) + MOVQ DX,208(SP) + MOVQ CX,216(SP) + MOVQ R8,224(SP) + MOVQ R9,232(SP) + MOVQ AX,240(SP) + MOVQ R10,248(SP) + MOVQ R11,256(SP) + MOVQ R12,264(SP) + MOVQ R13,272(SP) + MOVQ 224(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,280(SP) + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 232(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,288(SP) + MULQ 48(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 40(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 200(SP),AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 200(SP),AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 200(SP),AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 208(SP),AX + MULQ 40(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 208(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),AX + MULQ 40(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 216(SP),AX + MULQ 48(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 216(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 224(SP),AX + MULQ 40(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 224(SP),AX + MULQ 48(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 280(SP),AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 232(SP),AX + MULQ 40(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 288(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 288(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + 
SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(SP) + MOVQ R8,48(SP) + MOVQ R9,56(SP) + MOVQ AX,64(SP) + MOVQ R10,72(SP) + MOVQ 264(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,200(SP) + MULQ 16(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,208(SP) + MULQ 8(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 0(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 240(SP),AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 240(SP),AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 248(SP),AX + MULQ 0(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 248(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 248(SP),AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 248(SP),AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 248(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 0(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 256(SP),AX + MULQ 8(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 256(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 0(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 8(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 200(SP),AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 0(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),AX + MULQ 16(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 24(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 40(SP),SI + ADDQ 48(SP),R8 + ADDQ 56(SP),R9 + ADDQ 64(SP),AX + ADDQ 72(SP),R10 + SUBQ 40(SP),DX + SUBQ 48(SP),CX + SUBQ 56(SP),R11 + SUBQ 64(SP),R12 + SUBQ 72(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ 
DX,R15 + MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + 
MOVQ DX,R13 + MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 144(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 152(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 88(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(SP),AX + MULQ 96(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(SP),AX + MULQ 104(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(SP),AX + MULQ 112(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 128(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 136(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 136(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(SP),AX + MULQ 80(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 144(SP),AX + MULQ 88(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 104(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 112(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + 
ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 160(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 168(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 176(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 184(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 192(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 80(SP),SI + ADDQ 88(SP),CX + ADDQ 96(SP),R8 + ADDQ 104(SP),R9 + ADDQ 112(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 176(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 168(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 176(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 184(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 192(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 176(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 184(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 176(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 160(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 168(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 184(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 192(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 176(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 184(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 192(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go new file mode 100644 index 
00000000..5822bd53 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package curve25519 + +// These functions are implemented in the .s files. The names of the functions +// in the rest of the file are also taken from the SUPERCOP sources to help +// people following along. + +//go:noescape + +func cswap(inout *[5]uint64, v uint64) + +//go:noescape + +func ladderstep(inout *[5][5]uint64) + +//go:noescape + +func freeze(inout *[5]uint64) + +//go:noescape + +func mul(dest, a, b *[5]uint64) + +//go:noescape + +func square(out, in *[5]uint64) + +// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. +func mladder(xr, zr *[5]uint64, s *[32]byte) { + var work [5][5]uint64 + + work[0] = *xr + setint(&work[1], 1) + setint(&work[2], 0) + work[3] = *xr + setint(&work[4], 1) + + j := uint(6) + var prevbit byte + + for i := 31; i >= 0; i-- { + for j < 8 { + bit := ((*s)[i] >> j) & 1 + swap := bit ^ prevbit + prevbit = bit + cswap(&work[1], uint64(swap)) + ladderstep(&work) + j-- + } + j = 7 + } + + *xr = work[1] + *zr = work[2] +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + copy(e[:], (*in)[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var t, z [5]uint64 + unpack(&t, base) + mladder(&t, &z, &e) + invert(&z, &z) + mul(&t, &t, &z) + pack(out, &t) +} + +func setint(r *[5]uint64, v uint64) { + r[0] = v + r[1] = 0 + r[2] = 0 + r[3] = 0 + r[4] = 0 +} + +// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian +// order. +func unpack(r *[5]uint64, x *[32]byte) { + r[0] = uint64(x[0]) | + uint64(x[1])<<8 | + uint64(x[2])<<16 | + uint64(x[3])<<24 | + uint64(x[4])<<32 | + uint64(x[5])<<40 | + uint64(x[6]&7)<<48 + + r[1] = uint64(x[6])>>3 | + uint64(x[7])<<5 | + uint64(x[8])<<13 | + uint64(x[9])<<21 | + uint64(x[10])<<29 | + uint64(x[11])<<37 | + uint64(x[12]&63)<<45 + + r[2] = uint64(x[12])>>6 | + uint64(x[13])<<2 | + uint64(x[14])<<10 | + uint64(x[15])<<18 | + uint64(x[16])<<26 | + uint64(x[17])<<34 | + uint64(x[18])<<42 | + uint64(x[19]&1)<<50 + + r[3] = uint64(x[19])>>1 | + uint64(x[20])<<7 | + uint64(x[21])<<15 | + uint64(x[22])<<23 | + uint64(x[23])<<31 | + uint64(x[24])<<39 | + uint64(x[25]&15)<<47 + + r[4] = uint64(x[25])>>4 | + uint64(x[26])<<4 | + uint64(x[27])<<12 | + uint64(x[28])<<20 | + uint64(x[29])<<28 | + uint64(x[30])<<36 | + uint64(x[31]&127)<<44 +} + +// pack sets out = x where out is the usual, little-endian form of the 5, +// 51-bit limbs in x. 
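(Aside: in this representation a field element x is r[0] + r[1]*2^51 + r[2]*2^102 + r[3]*2^153 + r[4]*2^204, with each limb below 2^51 once freeze has reduced it to the canonical residue mod 2^255 - 19; pack below simply re-serializes those 5*51 = 255 bits into 32 little-endian bytes.)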
+func pack(out *[32]byte, x *[5]uint64) { + t := *x + freeze(&t) + + out[0] = byte(t[0]) + out[1] = byte(t[0] >> 8) + out[2] = byte(t[0] >> 16) + out[3] = byte(t[0] >> 24) + out[4] = byte(t[0] >> 32) + out[5] = byte(t[0] >> 40) + out[6] = byte(t[0] >> 48) + + out[6] ^= byte(t[1]<<3) & 0xf8 + out[7] = byte(t[1] >> 5) + out[8] = byte(t[1] >> 13) + out[9] = byte(t[1] >> 21) + out[10] = byte(t[1] >> 29) + out[11] = byte(t[1] >> 37) + out[12] = byte(t[1] >> 45) + + out[12] ^= byte(t[2]<<6) & 0xc0 + out[13] = byte(t[2] >> 2) + out[14] = byte(t[2] >> 10) + out[15] = byte(t[2] >> 18) + out[16] = byte(t[2] >> 26) + out[17] = byte(t[2] >> 34) + out[18] = byte(t[2] >> 42) + out[19] = byte(t[2] >> 50) + + out[19] ^= byte(t[3]<<1) & 0xfe + out[20] = byte(t[3] >> 7) + out[21] = byte(t[3] >> 15) + out[22] = byte(t[3] >> 23) + out[23] = byte(t[3] >> 31) + out[24] = byte(t[3] >> 39) + out[25] = byte(t[3] >> 47) + + out[25] ^= byte(t[4]<<4) & 0xf0 + out[26] = byte(t[4] >> 4) + out[27] = byte(t[4] >> 12) + out[28] = byte(t[4] >> 20) + out[29] = byte(t[4] >> 28) + out[30] = byte(t[4] >> 36) + out[31] = byte(t[4] >> 44) +} + +// invert calculates r = x^-1 mod p using Fermat's little theorem. +func invert(r *[5]uint64, x *[5]uint64) { + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 + + square(&z2, x) /* 2 */ + square(&t, &z2) /* 4 */ + square(&t, &t) /* 8 */ + mul(&z9, &t, x) /* 9 */ + mul(&z11, &z9, &z2) /* 11 */ + square(&t, &z11) /* 22 */ + mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ + + square(&t, &z2_5_0) /* 2^6 - 2^1 */ + for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ + + square(&t, &z2_10_0) /* 2^11 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ + + square(&t, &z2_20_0) /* 2^21 - 2^1 */ + for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ + square(&t, &t) + } + mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ + + square(&t, &t) /* 2^41 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ + square(&t, &t) + } + mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ + + square(&t, &z2_50_0) /* 2^51 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ + square(&t, &t) + } + mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ + + square(&t, &z2_100_0) /* 2^101 - 2^1 */ + for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ + square(&t, &t) + } + mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ + + square(&t, &t) /* 2^201 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ + square(&t, &t) + } + mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ + + square(&t, &t) /* 2^251 - 2^1 */ + square(&t, &t) /* 2^252 - 2^2 */ + square(&t, &t) /* 2^253 - 2^3 */ + + square(&t, &t) /* 2^254 - 2^4 */ + + square(&t, &t) /* 2^255 - 2^5 */ + mul(r, &t, &z11) /* 2^255 - 21 */ +} diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s new file mode 100644 index 00000000..5ce80a2e --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s @@ -0,0 +1,169 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R9:R8 + ANDQ SI,R8 + SHLQ $13,R11:R10 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BP:BX + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s new file mode 100644 index 00000000..12f73734 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,R8:CX + ANDQ SI,CX + SHLQ $13,R10:R9 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R12:R11 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R14:R13 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,BX:R15 + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/sys/unix/flock.go b/vendor/golang.org/x/sys/unix/fcntl.go similarity index 74% rename from vendor/golang.org/x/sys/unix/flock.go rename to vendor/golang.org/x/sys/unix/fcntl.go index 2994ce75..0c58c7e1 100644 --- a/vendor/golang.org/x/sys/unix/flock.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -12,6 +12,12 @@ import "unsafe" // systems by flock_linux_32bit.go to be SYS_FCNTL64. var fcntl64Syscall uintptr = SYS_FCNTL +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + valptr, _, err := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) + return int(valptr), err +} + // FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. 
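The newly exported FcntlInt wraps the plain integer-argument form of fcntl(2). A hedged sketch of using it to set O_NONBLOCK on an open descriptor (Linux constants assumed, the path is only illustrative):

    package main

    import (
        "log"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        f, err := os.Open("/dev/null")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // Read the current file status flags, then OR in O_NONBLOCK.
        flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
        if err != nil {
            log.Fatal(err)
        }
        if _, err := unix.FcntlInt(f.Fd(), unix.F_SETFL, flags|unix.O_NONBLOCK); err != nil {
            log.Fatal(err)
        }
    }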
func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk))) diff --git a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go similarity index 100% rename from vendor/golang.org/x/sys/unix/flock_linux_32bit.go rename to vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index d3903ede..53fb8518 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -311,47 +311,6 @@ func Getsockname(fd int) (sa Sockaddr, err error) { //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) -func GetsockoptByte(fd, level, opt int) (value byte, err error) { - var n byte - vallen := _Socklen(1) - err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) - return n, err -} - -func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { - vallen := _Socklen(4) - err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) - return value, err -} - -func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { - var value IPMreq - vallen := _Socklen(SizeofIPMreq) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - -func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { - var value IPv6Mreq - vallen := _Socklen(SizeofIPv6Mreq) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - -func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { - var value IPv6MTUInfo - vallen := _Socklen(SizeofIPv6MTUInfo) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - -func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { - var value ICMPv6Filter - vallen := _Socklen(SizeofICMPv6Filter) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. 
func GetsockoptString(fd, level, opt int) (string, error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 76cf81f5..a24ba5fb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -782,19 +782,6 @@ func Getsockname(fd int) (sa Sockaddr, err error) { return anyToSockaddr(&rsa) } -func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { - vallen := _Socklen(4) - err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) - return value, err -} - -func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { - var value IPMreq - vallen := _Socklen(SizeofIPMreq) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { var value IPMreqn vallen := _Socklen(SizeofIPMreqn) @@ -802,27 +789,6 @@ func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { return &value, err } -func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { - var value IPv6Mreq - vallen := _Socklen(SizeofIPv6Mreq) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - -func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { - var value IPv6MTUInfo - vallen := _Socklen(SizeofIPv6MTUInfo) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - -func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { - var value ICMPv6Filter - vallen := _Socklen(SizeofICMPv6Filter) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - func GetsockoptUcred(fd, level, opt int) (*Ucred, error) { var value Ucred vallen := _Socklen(SizeofUcred) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index f4d2a345..b7629529 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -312,6 +312,12 @@ func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { //sys fcntl(fd int, cmd int, arg int) (val int, err error) +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + valptr, _, err := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + return int(valptr), err +} + // FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. 
func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0) @@ -676,6 +682,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_connect //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = libsendfile.sendfile //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_sendto //sys socket(domain int, typ int, proto int) (fd int, err error) = libsocket.__xnet_socket //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) = libsocket.__xnet_socketpair diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 9d4e7a67..91c32ddf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -21,8 +21,3 @@ func (iov *Iovec) SetLen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } - -func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { - // TODO(aram): implement this, see issue 5847. - panic("unimplemented") -} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 8c66ae51..262dc520 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -199,6 +199,13 @@ func Getpeername(fd int) (sa Sockaddr, err error) { return anyToSockaddr(&rsa) } +func GetsockoptByte(fd, level, opt int) (value byte, err error) { + var n byte + vallen := _Socklen(1) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return n, err +} + func GetsockoptInt(fd, level, opt int) (value int, err error) { var n int32 vallen := _Socklen(4) @@ -206,6 +213,40 @@ func GetsockoptInt(fd, level, opt int) (value int, err error) { return int(n), err } +func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + return value, err +} + +func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { + var value IPMreq + vallen := _Socklen(SizeofIPMreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { + var value IPv6Mreq + vallen := _Socklen(SizeofIPv6Mreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { + var value IPv6MTUInfo + vallen := _Socklen(SizeofIPv6MTUInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { + var value ICMPv6Filter + vallen := _Socklen(SizeofICMPv6Filter) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + func GetsockoptLinger(fd, level, opt int) (*Linger, error) { var linger Linger vallen := _Socklen(SizeofLinger) diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go 
b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 1612b660..3eef63f5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -159,6 +159,7 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index c994ab61..40c870be 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -159,6 +159,7 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index a8f9efed..43c4add5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -151,6 +151,7 @@ const ( CFLUSH = 0xf CLOCAL = 0x8000 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 04e4f331..f47536dc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -147,6 +147,7 @@ const ( CFLUSH = 0xf CLOCAL = 0x8000 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index c80ff981..c96ca653 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -147,6 +147,7 @@ const ( CFLUSH = 0xf CLOCAL = 0x8000 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 4c320495..4c027352 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -147,6 +147,7 @@ const ( CFLUSH = 0xf CLOCAL = 0x8000 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 5e88619c..39789630 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -128,6 +128,7 @@ import ( //go:cgo_import_dynamic libc___xnet_connect __xnet_connect "libsocket.so" //go:cgo_import_dynamic libc_mmap mmap "libc.so" //go:cgo_import_dynamic libc_munmap munmap "libc.so" +//go:cgo_import_dynamic libc_sendfile sendfile "libsendfile.so" //go:cgo_import_dynamic libc___xnet_sendto __xnet_sendto "libsocket.so" //go:cgo_import_dynamic libc___xnet_socket __xnet_socket "libsocket.so" //go:cgo_import_dynamic libc___xnet_socketpair __xnet_socketpair "libsocket.so" @@ -255,6 +256,7 @@ import ( //go:linkname proc__xnet_connect libc___xnet_connect //go:linkname procmmap libc_mmap //go:linkname procmunmap libc_munmap +//go:linkname procsendfile libc_sendfile //go:linkname proc__xnet_sendto libc___xnet_sendto //go:linkname proc__xnet_socket libc___xnet_socket //go:linkname proc__xnet_socketpair libc___xnet_socketpair @@ -383,6 +385,7 @@ var ( proc__xnet_connect, procmmap, procmunmap, + procsendfile, proc__xnet_sendto, 
proc__xnet_socket, proc__xnet_socketpair, @@ -1589,6 +1592,15 @@ func munmap(addr uintptr, length uintptr) (err error) { return } +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = e1 + } + return +} + func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { var _p0 *byte if len(buf) > 0 { diff --git a/vendor/k8s.io/client-go/README.md b/vendor/k8s.io/client-go/README.md index 82f41d73..4e5e4220 100644 --- a/vendor/k8s.io/client-go/README.md +++ b/vendor/k8s.io/client-go/README.md @@ -2,7 +2,7 @@ Go clients for talking to a [kubernetes](http://kubernetes.io/) cluster. -We currently recommend using the v6.0.0 tag. See [INSTALL.md](/INSTALL.md) for +We currently recommend using the v7.0.0 tag. See [INSTALL.md](/INSTALL.md) for detailed installation instructions. `go get k8s.io/client-go/...` works, but will give you head and doesn't handle the dependencies well. @@ -91,16 +91,17 @@ We will backport bugfixes--but not new features--into older versions of #### Compatibility matrix -| | Kubernetes 1.4 | Kubernetes 1.5 | Kubernetes 1.6 | Kubernetes 1.7 | Kubernetes 1.8 | Kubernetes 1.9 | -|---------------------|----------------|----------------|----------------|----------------|----------------|----------------| -| client-go 1.4 | ✓ | - | - | - | - | - | -| client-go 1.5 | + | - | - | - | - | - | -| client-go 2.0 | +- | ✓ | +- | +- | +- | +- | -| client-go 3.0 | +- | +- | ✓ | - | +- | +- | -| client-go 4.0 | +- | +- | +- | ✓ | +- | +- | -| client-go 5.0 | +- | +- | +- | +- | ✓ | +- | -| client-go 6.0 | +- | +- | +- | +- | +- | ✓ | -| client-go HEAD | +- | +- | +- | +- | +- | + | +| | Kubernetes 1.4 | Kubernetes 1.5 | Kubernetes 1.6 | Kubernetes 1.7 | Kubernetes 1.8 | Kubernetes 1.9 | Kubernetes 1.10 | +|---------------------|----------------|----------------|----------------|----------------|----------------|----------------|-----------------| +| client-go 1.4 | ✓ | - | - | - | - | - | - | +| client-go 1.5 | + | - | - | - | - | - | - | +| client-go 2.0 | +- | ✓ | +- | +- | +- | +- | +- | +| client-go 3.0 | +- | +- | ✓ | - | +- | +- | +- | +| client-go 4.0 | +- | +- | +- | ✓ | +- | +- | +- | +| client-go 5.0 | +- | +- | +- | +- | ✓ | +- | +- | +| client-go 6.0 | +- | +- | +- | +- | +- | ✓ | +- | +| client-go 7.0 | +- | +- | +- | +- | +- | +- | ✓ | +| client-go HEAD | +- | +- | +- | +- | +- | + | + | Key: @@ -125,9 +126,10 @@ between client-go versions. | client-go 1.5 | Kubernetes main repo, 1.5 branch | = - | | client-go 2.0 | Kubernetes main repo, 1.5 branch | = - | | client-go 3.0 | Kubernetes main repo, 1.6 branch | = - | -| client-go 4.0 | Kubernetes main repo, 1.7 branch | ✓ | +| client-go 4.0 | Kubernetes main repo, 1.7 branch | = - | | client-go 5.0 | Kubernetes main repo, 1.8 branch | ✓ | | client-go 6.0 | Kubernetes main repo, 1.9 branch | ✓ | +| client-go 7.0 | Kubernetes main repo, 1.10 branch | ✓ | | client-go HEAD | Kubernetes main repo, master branch | ✓ | Key: @@ -152,7 +154,7 @@ existing users won't be broken. ### Kubernetes tags -As of October 2017, client-go is still a mirror of +As of April 2018, client-go is still a mirror of [k8s.io/kubernetes/staging/src/client-go](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/client-go), the code development is still done in the staging area. 
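Editor's note (not part of the patch): since the README hunk above bumps the recommended tag to v7.0.0 (the Kubernetes 1.10 line), a minimal out-of-cluster usage sketch for that era is included here for orientation. The kubeconfig path and namespace are placeholders; the calls shown (clientcmd.BuildConfigFromFlags, kubernetes.NewForConfig, and the context-free List(metav1.ListOptions{})) reflect the client-go 7.0-era API, not necessarily later releases.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig path; adjust for your environment.
	config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// In client-go 7.0 (Kubernetes 1.10), List takes metav1.ListOptions directly.
	pods, err := clientset.CoreV1().Pods("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d pods in the default namespace\n", len(pods.Items))
}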
Since Kubernetes 1.8 release, when syncing the code from the staging area, we also sync the Kubernetes