Mirror of https://github.com/containers/skopeo.git (synced 2025-06-28 15:47:34 +00:00)
bump golang.org/x/net to v0.8.0

Resolves: CVE-2022-41723
Ref: https://cve.mitre.org/cgi-bin/cvename.cgi?name=2022-41723

Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
parent d15ecce269
commit 20447df139
go.mod (8 changed lines)

@@ -115,13 +115,13 @@ require (
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	golang.org/x/crypto v0.6.0 // indirect
-	golang.org/x/mod v0.7.0 // indirect
-	golang.org/x/net v0.6.0 // indirect
+	golang.org/x/mod v0.8.0 // indirect
+	golang.org/x/net v0.8.0 // indirect
 	golang.org/x/oauth2 v0.5.0 // indirect
 	golang.org/x/sync v0.1.0 // indirect
 	golang.org/x/sys v0.6.0 // indirect
-	golang.org/x/text v0.7.0 // indirect
-	golang.org/x/tools v0.4.0 // indirect
+	golang.org/x/text v0.8.0 // indirect
+	golang.org/x/tools v0.6.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
 	google.golang.org/grpc v1.51.0 // indirect
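The fix is delivered simply by requiring golang.org/x/net v0.8.0 (x/mod, x/text and x/tools move along with it). As an illustration only — not part of this commit — a binary that depends on x/net, such as skopeo, could confirm the bump at run time with the standard runtime/debug package and golang.org/x/mod/semver; the module path and the v0.8.0 floor are the only assumptions, both taken from this diff.

package main

import (
	"fmt"
	"runtime/debug"

	"golang.org/x/mod/semver"
)

func main() {
	// Build info is embedded in module-aware builds.
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info (binary built without module support)")
		return
	}
	for _, dep := range info.Deps {
		if dep.Path == "golang.org/x/net" {
			if semver.Compare(dep.Version, "v0.8.0") >= 0 {
				fmt.Println("golang.org/x/net", dep.Version, ">= v0.8.0: CVE-2022-41723 fix present")
			} else {
				fmt.Println("golang.org/x/net", dep.Version, "is older than v0.8.0")
			}
			return
		}
	}
	fmt.Println("golang.org/x/net not found in build info")
}

Run inside a module-built binary that actually imports golang.org/x/net; run standalone it simply reports that the dependency is absent.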
go.sum (16 changed lines)

@@ -1009,8 +1009,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
-golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1056,8 +1056,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
 golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1179,8 +1179,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1237,8 +1237,8 @@ golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4X
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
-golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
vendor/golang.org/x/net/http2/frame.go (11 changed lines, generated, vendored)

@@ -662,6 +662,15 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
 // It is the caller's responsibility not to violate the maximum frame size
 // and to not call other Write methods concurrently.
 func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
+	if err := f.startWriteDataPadded(streamID, endStream, data, pad); err != nil {
+		return err
+	}
+	return f.endWrite()
+}
+
+// startWriteDataPadded is WriteDataPadded, but only writes the frame to the Framer's internal buffer.
+// The caller should call endWrite to flush the frame to the underlying writer.
+func (f *Framer) startWriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
 	if !validStreamID(streamID) && !f.AllowIllegalWrites {
 		return errStreamID
 	}
@@ -691,7 +700,7 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by
 	}
 	f.wbuf = append(f.wbuf, data...)
 	f.wbuf = append(f.wbuf, pad...)
-	return f.endWrite()
+	return nil
 }

 // A SettingsFrame conveys configuration parameters that affect how
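WriteDataPadded keeps its exported signature; only the internal staging into the Framer's buffer is split out. As a quick check of the unchanged public behaviour, here is a small, self-contained sketch (not part of the commit) that uses the exported golang.org/x/net/http2 Framer API to write a padded DATA frame into a buffer and read it back; the stream ID, payload and padding are arbitrary example values.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	// The same Framer writes to and reads from the buffer, keeping the
	// example self-contained.
	fr := http2.NewFramer(&buf, &buf)

	// Write a padded DATA frame on stream 1 (end of stream, 4 bytes of padding).
	if err := fr.WriteDataPadded(1, true, []byte("hello"), []byte{0, 0, 0, 0}); err != nil {
		log.Fatal(err)
	}

	// Read it back; the frame parser strips the padding from Data().
	f, err := fr.ReadFrame()
	if err != nil {
		log.Fatal(err)
	}
	df := f.(*http2.DataFrame)
	fmt.Printf("stream=%d data=%q\n", df.Header().StreamID, df.Data())
}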
vendor/golang.org/x/net/http2/hpack/hpack.go (75 changed lines, generated, vendored)

@@ -359,6 +359,7 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {

 	var hf HeaderField
 	wantStr := d.emitEnabled || it.indexed()
+	var undecodedName undecodedString
 	if nameIdx > 0 {
 		ihf, ok := d.at(nameIdx)
 		if !ok {
@@ -366,15 +367,27 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
 		}
 		hf.Name = ihf.Name
 	} else {
-		hf.Name, buf, err = d.readString(buf, wantStr)
+		undecodedName, buf, err = d.readString(buf)
 		if err != nil {
 			return err
 		}
 	}
-	hf.Value, buf, err = d.readString(buf, wantStr)
+	undecodedValue, buf, err := d.readString(buf)
 	if err != nil {
 		return err
 	}
+	if wantStr {
+		if nameIdx <= 0 {
+			hf.Name, err = d.decodeString(undecodedName)
+			if err != nil {
+				return err
+			}
+		}
+		hf.Value, err = d.decodeString(undecodedValue)
+		if err != nil {
+			return err
+		}
+	}
 	d.buf = buf
 	if it.indexed() {
 		d.dynTab.add(hf)
@@ -459,46 +472,52 @@ func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
 	return 0, origP, errNeedMore
 }

-// readString decodes an hpack string from p.
+// readString reads an hpack string from p.
 //
-// wantStr is whether s will be used. If false, decompression and
-// []byte->string garbage are skipped if s will be ignored
-// anyway. This does mean that huffman decoding errors for non-indexed
-// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
-// is returning an error anyway, and because they're not indexed, the error
-// won't affect the decoding state.
-func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+// It returns a reference to the encoded string data to permit deferring decode costs
+// until after the caller verifies all data is present.
+func (d *Decoder) readString(p []byte) (u undecodedString, remain []byte, err error) {
 	if len(p) == 0 {
-		return "", p, errNeedMore
+		return u, p, errNeedMore
 	}
 	isHuff := p[0]&128 != 0
 	strLen, p, err := readVarInt(7, p)
 	if err != nil {
-		return "", p, err
+		return u, p, err
 	}
 	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
-		return "", nil, ErrStringLength
+		// Returning an error here means Huffman decoding errors
+		// for non-indexed strings past the maximum string length
+		// are ignored, but the server is returning an error anyway
+		// and because the string is not indexed the error will not
+		// affect the decoding state.
+		return u, nil, ErrStringLength
 	}
 	if uint64(len(p)) < strLen {
-		return "", p, errNeedMore
-	}
-	if !isHuff {
-		if wantStr {
-			s = string(p[:strLen])
-		}
-		return s, p[strLen:], nil
+		return u, p, errNeedMore
 	}
+	u.isHuff = isHuff
+	u.b = p[:strLen]
+	return u, p[strLen:], nil
+}

-	if wantStr {
+type undecodedString struct {
+	isHuff bool
+	b      []byte
+}
+
+func (d *Decoder) decodeString(u undecodedString) (string, error) {
+	if !u.isHuff {
+		return string(u.b), nil
+	}
 	buf := bufPool.Get().(*bytes.Buffer)
 	buf.Reset() // don't trust others
-	defer bufPool.Put(buf)
-	if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
-		buf.Reset()
-		return "", nil, err
-	}
+	var s string
+	err := huffmanDecode(buf, d.maxStrLen, u.b)
+	if err == nil {
 		s = buf.String()
-		buf.Reset() // be nice to GC
 	}
-	return s, p[strLen:], nil
+	buf.Reset() // be nice to GC
+	bufPool.Put(buf)
+	return s, err
 }
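The readString/decodeString split touches only unexported internals; the exported hpack API is unchanged. The following standalone sketch (not part of the commit) round-trips header fields through the exported Encoder and Decoder to show the observable behaviour; the header names and the 4096-byte dynamic table size are arbitrary example values.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode two header fields into an HPACK block.
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block)
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: "user-agent", Value: "example/1.0"},
	} {
		if err := enc.WriteField(hf); err != nil {
			log.Fatal(err)
		}
	}

	// Decode the block; the callback receives each decoded field.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(block.Bytes()); err != nil {
		log.Fatal(err)
	}
	if err := dec.Close(); err != nil {
		log.Fatal(err)
	}
}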
vendor/golang.org/x/net/http2/server.go (18 changed lines, generated, vendored)

@@ -843,8 +843,13 @@ type frameWriteResult struct {
 // and then reports when it's done.
 // At most one goroutine can be running writeFrameAsync at a time per
 // serverConn.
-func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
-	err := wr.write.writeFrame(sc)
+func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
+	var err error
+	if wd == nil {
+		err = wr.write.writeFrame(sc)
+	} else {
+		err = sc.framer.endWrite()
+	}
 	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
 }

@@ -1251,9 +1256,16 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
 		sc.writingFrameAsync = false
 		err := wr.write.writeFrame(sc)
 		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
+	} else if wd, ok := wr.write.(*writeData); ok {
+		// Encode the frame in the serve goroutine, to ensure we don't have
+		// any lingering asynchronous references to data passed to Write.
+		// See https://go.dev/issue/58446.
+		sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
+		sc.writingFrameAsync = true
+		go sc.writeFrameAsync(wr, wd)
 	} else {
 		sc.writingFrameAsync = true
-		go sc.writeFrameAsync(wr)
+		go sc.writeFrameAsync(wr, nil)
 	}
 }

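The server change moves DATA-frame encoding into the serve goroutine and leaves only the flush to the asynchronous writer, so the bytes a handler passes to Write are fully copied into the Framer's buffer before the handler regains control (go.dev/issue/58446). The sketch below is a hypothetical, simplified illustration of that stage-then-flush pattern; stagedWriter, stage and flush are invented names, not part of x/net.

package main

import (
	"bytes"
	"fmt"
)

// stagedWriter illustrates the pattern: stage() copies the caller's data
// synchronously, flush() may run later on another goroutine without
// holding any reference to the caller's slice.
type stagedWriter struct {
	buf bytes.Buffer
	out *bytes.Buffer
}

// stage copies data into the internal buffer while the caller still owns it.
func (w *stagedWriter) stage(data []byte) {
	w.buf.Write(data)
}

// flush moves the staged bytes to the destination; safe to run asynchronously.
func (w *stagedWriter) flush() {
	w.out.Write(w.buf.Bytes())
	w.buf.Reset()
}

func main() {
	dst := &bytes.Buffer{}
	w := &stagedWriter{out: dst}

	data := []byte("response body")
	w.stage(data) // synchronous copy: the caller may now reuse data
	data[0] = 'X' // mutating the caller's slice no longer affects the staged frame

	done := make(chan struct{})
	go func() { w.flush(); close(done) }() // asynchronous flush
	<-done

	fmt.Printf("%q\n", dst.String()) // "response body"
}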
vendor/golang.org/x/text/unicode/norm/forminfo.go (2 changed lines, generated, vendored)

@@ -13,7 +13,7 @@ import "encoding/binary"
 // a rune to a uint16. The values take two forms. For v >= 0x8000:
 //   bits
 //   15:    1 (inverse of NFD_QC bit of qcInfo)
-//   13..7: qcInfo (see below). isYesD is always true (no decompostion).
+//   13..7: qcInfo (see below). isYesD is always true (no decomposition).
 //    6..0: ccc (compressed CCC value).
 // For v < 0x8000, the respective rune has a decomposition and v is an index
 // into a byte array of UTF-8 decomposition sequences and additional info and
vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go (24 changed lines, generated, vendored)

@@ -27,7 +27,6 @@ import (
 	"go/token"
 	"go/types"
 	"io"
-	"io/ioutil"
 	"os/exec"

 	"golang.org/x/tools/internal/gcimporter"
@@ -85,6 +84,19 @@ func NewReader(r io.Reader) (io.Reader, error) {
 	}
 }

+// readAll works the same way as io.ReadAll, but avoids allocations and copies
+// by preallocating a byte slice of the necessary size if the size is known up
+// front. This is always possible when the input is an archive. In that case,
+// NewReader will return the known size using an io.LimitedReader.
+func readAll(r io.Reader) ([]byte, error) {
+	if lr, ok := r.(*io.LimitedReader); ok {
+		data := make([]byte, lr.N)
+		_, err := io.ReadFull(lr, data)
+		return data, err
+	}
+	return io.ReadAll(r)
+}
+
 // Read reads export data from in, decodes it, and returns type
 // information for the package.
 //
@@ -102,7 +114,7 @@ func NewReader(r io.Reader) (io.Reader, error) {
 //
 // On return, the state of the reader is undefined.
 func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
-	data, err := ioutil.ReadAll(in)
+	data, err := readAll(in)
 	if err != nil {
 		return nil, fmt.Errorf("reading export data for %q: %v", path, err)
 	}
@@ -111,12 +123,6 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
 		return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
 	}

-	// The App Engine Go runtime v1.6 uses the old export data format.
-	// TODO(adonovan): delete once v1.7 has been around for a while.
-	if bytes.HasPrefix(data, []byte("package ")) {
-		return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
-	}
-
 	// The indexed export format starts with an 'i'; the older
 	// binary export format starts with a 'c', 'd', or 'v'
 	// (from "version"). Select appropriate importer.
@@ -165,7 +171,7 @@ func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
 //
 // Experimental: This API is experimental and may change in the future.
 func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) {
-	data, err := ioutil.ReadAll(in)
+	data, err := readAll(in)
 	if err != nil {
 		return nil, fmt.Errorf("reading export bundle: %v", err)
 	}
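The new readAll helper relies only on the standard library: when the reader is an *io.LimitedReader, the remaining byte count N is known, so the buffer can be allocated once and filled with io.ReadFull. A minimal standalone demonstration of the same idea (not taken from the commit; readAllSized is an invented name):

package main

import (
	"fmt"
	"io"
	"log"
	"strings"
)

// readAllSized preallocates when the input is an *io.LimitedReader,
// mirroring the helper added to gcexportdata; otherwise it falls back
// to io.ReadAll.
func readAllSized(r io.Reader) ([]byte, error) {
	if lr, ok := r.(*io.LimitedReader); ok {
		data := make([]byte, lr.N)
		_, err := io.ReadFull(lr, data)
		return data, err
	}
	return io.ReadAll(r)
}

func main() {
	src := strings.NewReader("export data payload")
	lr := &io.LimitedReader{R: src, N: int64(src.Len())}
	data, err := readAllSized(lr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes: %q\n", len(data), data)
}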
vendor/golang.org/x/tools/go/packages/packages.go (39 changed lines, generated, vendored)
@ -15,6 +15,7 @@ import (
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
@ -877,12 +878,19 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
// never has to create a types.Package for an indirect dependency,
|
||||
// which would then require that such created packages be explicitly
|
||||
// inserted back into the Import graph as a final step after export data loading.
|
||||
// (Hence this return is after the Types assignment.)
|
||||
// The Diamond test exercises this case.
|
||||
if !lpkg.needtypes && !lpkg.needsrc {
|
||||
return
|
||||
}
|
||||
if !lpkg.needsrc {
|
||||
ld.loadFromExportData(lpkg)
|
||||
if err := ld.loadFromExportData(lpkg); err != nil {
|
||||
lpkg.Errors = append(lpkg.Errors, Error{
|
||||
Pos: "-",
|
||||
Msg: err.Error(),
|
||||
Kind: UnknownError, // e.g. can't find/open/parse export data
|
||||
})
|
||||
}
|
||||
return // not a source package, don't get syntax trees
|
||||
}
|
||||
|
||||
@ -950,6 +958,8 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
// - golang.org/issue/52078 (flag to set release tags)
|
||||
// - golang.org/issue/50825 (gopls legacy version support)
|
||||
// - golang.org/issue/55883 (go/packages confusing error)
|
||||
//
|
||||
// Should we assert a hard minimum of (currently) go1.16 here?
|
||||
var runtimeVersion int
|
||||
if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion {
|
||||
defer func() {
|
||||
@ -967,7 +977,8 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
// The config requested loading sources and types, but sources are missing.
|
||||
// Add an error to the package and fall back to loading from export data.
|
||||
appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
|
||||
ld.loadFromExportData(lpkg)
|
||||
_ = ld.loadFromExportData(lpkg) // ignore any secondary errors
|
||||
|
||||
return // can't get syntax trees for this package
|
||||
}
|
||||
|
||||
@ -1191,9 +1202,10 @@ func sameFile(x, y string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// loadFromExportData returns type information for the specified
|
||||
// loadFromExportData ensures that type information is present for the specified
|
||||
// package, loading it from an export data file on the first request.
|
||||
func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) {
|
||||
// On success it sets lpkg.Types to a new Package.
|
||||
func (ld *loader) loadFromExportData(lpkg *loaderPackage) error {
|
||||
if lpkg.PkgPath == "" {
|
||||
log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
|
||||
}
|
||||
@ -1204,8 +1216,8 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
|
||||
// must be sequential. (Finer-grained locking would require
|
||||
// changes to the gcexportdata API.)
|
||||
//
|
||||
// The exportMu lock guards the Package.Pkg field and the
|
||||
// types.Package it points to, for each Package in the graph.
|
||||
// The exportMu lock guards the lpkg.Types field and the
|
||||
// types.Package it points to, for each loaderPackage in the graph.
|
||||
//
|
||||
// Not all accesses to Package.Pkg need to be protected by exportMu:
|
||||
// graph ordering ensures that direct dependencies of source
|
||||
@ -1214,18 +1226,18 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
|
||||
defer ld.exportMu.Unlock()
|
||||
|
||||
if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
|
||||
return tpkg, nil // cache hit
|
||||
return nil // cache hit
|
||||
}
|
||||
|
||||
lpkg.IllTyped = true // fail safe
|
||||
|
||||
if lpkg.ExportFile == "" {
|
||||
// Errors while building export data will have been printed to stderr.
|
||||
return nil, fmt.Errorf("no export data file")
|
||||
return fmt.Errorf("no export data file")
|
||||
}
|
||||
f, err := os.Open(lpkg.ExportFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
@ -1237,7 +1249,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
|
||||
// queries.)
|
||||
r, err := gcexportdata.NewReader(f)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
|
||||
return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
|
||||
}
|
||||
|
||||
// Build the view.
|
||||
@ -1281,7 +1293,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
|
||||
// (May modify incomplete packages in view but not create new ones.)
|
||||
tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
|
||||
return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
|
||||
}
|
||||
if _, ok := view["go.shape"]; ok {
|
||||
// Account for the pseudopackage "go.shape" that gets
|
||||
@ -1294,8 +1306,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
|
||||
|
||||
lpkg.Types = tpkg
|
||||
lpkg.IllTyped = false
|
||||
|
||||
return tpkg, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// impliedLoadMode returns loadMode with its dependencies.
|
||||
@ -1311,3 +1322,5 @@ func impliedLoadMode(loadMode LoadMode) LoadMode {
|
||||
func usesExportData(cfg *Config) bool {
|
||||
return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
|
||||
}
|
||||
|
||||
var _ interface{} = io.Discard // assert build toolchain is go1.16 or later
|
||||
|
vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go (919 changed lines, generated, vendored)
@ -2,9 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
|
||||
// but it also contains the original source-based importer code for Go1.6.
|
||||
// Once we stop supporting 1.6, we can remove that code.
|
||||
// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go.
|
||||
|
||||
// Package gcimporter provides various functions for reading
|
||||
// gc-generated object files that can be used to implement the
|
||||
@ -14,10 +12,8 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter"
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
@ -25,11 +21,8 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/scanner"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -154,37 +147,6 @@ func FindPkg(path, srcDir string) (filename, id string) {
|
||||
return
|
||||
}
|
||||
|
||||
// ImportData imports a package by reading the gc-generated export data,
|
||||
// adds the corresponding package object to the packages map indexed by id,
|
||||
// and returns the object.
|
||||
//
|
||||
// The packages map must contains all packages already imported. The data
|
||||
// reader position must be the beginning of the export data section. The
|
||||
// filename is only used in error messages.
|
||||
//
|
||||
// If packages[id] contains the completely imported package, that package
|
||||
// can be used directly, and there is no need to call this function (but
|
||||
// there is also no harm but for extra time used).
|
||||
func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) {
|
||||
// support for parser error handling
|
||||
defer func() {
|
||||
switch r := recover().(type) {
|
||||
case nil:
|
||||
// nothing to do
|
||||
case importError:
|
||||
err = r
|
||||
default:
|
||||
panic(r) // internal error
|
||||
}
|
||||
}()
|
||||
|
||||
var p parser
|
||||
p.init(filename, id, data, packages)
|
||||
pkg = p.parseExport()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Import imports a gc-generated package given its import path and srcDir, adds
|
||||
// the corresponding package object to the packages map, and returns the object.
|
||||
// The packages map must contain all packages already imported.
|
||||
@ -245,15 +207,6 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
|
||||
}
|
||||
|
||||
switch hdr {
|
||||
case "$$\n":
|
||||
// Work-around if we don't have a filename; happens only if lookup != nil.
|
||||
// Either way, the filename is only needed for importer error messages, so
|
||||
// this is fine.
|
||||
if filename == "" {
|
||||
filename = path
|
||||
}
|
||||
return ImportData(packages, filename, id, buf)
|
||||
|
||||
case "$$B\n":
|
||||
var data []byte
|
||||
data, err = ioutil.ReadAll(buf)
|
||||
@ -298,319 +251,6 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
|
||||
return
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Parser
|
||||
|
||||
// TODO(gri) Imported objects don't have position information.
|
||||
// Ideally use the debug table line info; alternatively
|
||||
// create some fake position (or the position of the
|
||||
// import). That way error messages referring to imported
|
||||
// objects can print meaningful information.
|
||||
|
||||
// parser parses the exports inside a gc compiler-produced
|
||||
// object/archive file and populates its scope with the results.
|
||||
type parser struct {
|
||||
scanner scanner.Scanner
|
||||
tok rune // current token
|
||||
lit string // literal string; only valid for Ident, Int, String tokens
|
||||
id string // package id of imported package
|
||||
sharedPkgs map[string]*types.Package // package id -> package object (across importer)
|
||||
localPkgs map[string]*types.Package // package id -> package object (just this package)
|
||||
}
|
||||
|
||||
func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) {
|
||||
p.scanner.Init(src)
|
||||
p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
|
||||
p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
|
||||
p.scanner.Whitespace = 1<<'\t' | 1<<' '
|
||||
p.scanner.Filename = filename // for good error messages
|
||||
p.next()
|
||||
p.id = id
|
||||
p.sharedPkgs = packages
|
||||
if debug {
|
||||
// check consistency of packages map
|
||||
for _, pkg := range packages {
|
||||
if pkg.Name() == "" {
|
||||
fmt.Printf("no package name for %s\n", pkg.Path())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parser) next() {
|
||||
p.tok = p.scanner.Scan()
|
||||
switch p.tok {
|
||||
case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·':
|
||||
p.lit = p.scanner.TokenText()
|
||||
default:
|
||||
p.lit = ""
|
||||
}
|
||||
if debug {
|
||||
fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
|
||||
}
|
||||
}
|
||||
|
||||
func declTypeName(pkg *types.Package, name string) *types.TypeName {
|
||||
scope := pkg.Scope()
|
||||
if obj := scope.Lookup(name); obj != nil {
|
||||
return obj.(*types.TypeName)
|
||||
}
|
||||
obj := types.NewTypeName(token.NoPos, pkg, name, nil)
|
||||
// a named type may be referred to before the underlying type
|
||||
// is known - set it up
|
||||
types.NewNamed(obj, nil, nil)
|
||||
scope.Insert(obj)
|
||||
return obj
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Error handling
|
||||
|
||||
// Internal errors are boxed as importErrors.
|
||||
type importError struct {
|
||||
pos scanner.Position
|
||||
err error
|
||||
}
|
||||
|
||||
func (e importError) Error() string {
|
||||
return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
|
||||
}
|
||||
|
||||
func (p *parser) error(err interface{}) {
|
||||
if s, ok := err.(string); ok {
|
||||
err = errors.New(s)
|
||||
}
|
||||
// panic with a runtime.Error if err is not an error
|
||||
panic(importError{p.scanner.Pos(), err.(error)})
|
||||
}
|
||||
|
||||
func (p *parser) errorf(format string, args ...interface{}) {
|
||||
p.error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (p *parser) expect(tok rune) string {
|
||||
lit := p.lit
|
||||
if p.tok != tok {
|
||||
p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
|
||||
}
|
||||
p.next()
|
||||
return lit
|
||||
}
|
||||
|
||||
func (p *parser) expectSpecial(tok string) {
|
||||
sep := 'x' // not white space
|
||||
i := 0
|
||||
for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' {
|
||||
sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
|
||||
p.next()
|
||||
i++
|
||||
}
|
||||
if i < len(tok) {
|
||||
p.errorf("expected %q, got %q", tok, tok[0:i])
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parser) expectKeyword(keyword string) {
|
||||
lit := p.expect(scanner.Ident)
|
||||
if lit != keyword {
|
||||
p.errorf("expected keyword %s, got %q", keyword, lit)
|
||||
}
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Qualified and unqualified names
|
||||
|
||||
// parsePackageID parses a PackageId:
|
||||
//
|
||||
// PackageId = string_lit .
|
||||
func (p *parser) parsePackageID() string {
|
||||
id, err := strconv.Unquote(p.expect(scanner.String))
|
||||
if err != nil {
|
||||
p.error(err)
|
||||
}
|
||||
// id == "" stands for the imported package id
|
||||
// (only known at time of package installation)
|
||||
if id == "" {
|
||||
id = p.id
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// parsePackageName parse a PackageName:
|
||||
//
|
||||
// PackageName = ident .
|
||||
func (p *parser) parsePackageName() string {
|
||||
return p.expect(scanner.Ident)
|
||||
}
|
||||
|
||||
// parseDotIdent parses a dotIdentifier:
|
||||
//
|
||||
// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
|
||||
func (p *parser) parseDotIdent() string {
|
||||
ident := ""
|
||||
if p.tok != scanner.Int {
|
||||
sep := 'x' // not white space
|
||||
for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
|
||||
ident += p.lit
|
||||
sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
|
||||
p.next()
|
||||
}
|
||||
}
|
||||
if ident == "" {
|
||||
p.expect(scanner.Ident) // use expect() for error handling
|
||||
}
|
||||
return ident
|
||||
}
|
||||
|
||||
// parseQualifiedName parses a QualifiedName:
|
||||
//
|
||||
// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) .
|
||||
func (p *parser) parseQualifiedName() (id, name string) {
|
||||
p.expect('@')
|
||||
id = p.parsePackageID()
|
||||
p.expect('.')
|
||||
// Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
|
||||
if p.tok == '?' {
|
||||
p.next()
|
||||
} else {
|
||||
name = p.parseDotIdent()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getPkg returns the package for a given id. If the package is
|
||||
// not found, create the package and add it to the p.localPkgs
|
||||
// and p.sharedPkgs maps. name is the (expected) name of the
|
||||
// package. If name == "", the package name is expected to be
|
||||
// set later via an import clause in the export data.
|
||||
//
|
||||
// id identifies a package, usually by a canonical package path like
|
||||
// "encoding/json" but possibly by a non-canonical import path like
|
||||
// "./json".
|
||||
func (p *parser) getPkg(id, name string) *types.Package {
|
||||
// package unsafe is not in the packages maps - handle explicitly
|
||||
if id == "unsafe" {
|
||||
return types.Unsafe
|
||||
}
|
||||
|
||||
pkg := p.localPkgs[id]
|
||||
if pkg == nil {
|
||||
// first import of id from this package
|
||||
pkg = p.sharedPkgs[id]
|
||||
if pkg == nil {
|
||||
// first import of id by this importer;
|
||||
// add (possibly unnamed) pkg to shared packages
|
||||
pkg = types.NewPackage(id, name)
|
||||
p.sharedPkgs[id] = pkg
|
||||
}
|
||||
// add (possibly unnamed) pkg to local packages
|
||||
if p.localPkgs == nil {
|
||||
p.localPkgs = make(map[string]*types.Package)
|
||||
}
|
||||
p.localPkgs[id] = pkg
|
||||
} else if name != "" {
|
||||
// package exists already and we have an expected package name;
|
||||
// make sure names match or set package name if necessary
|
||||
if pname := pkg.Name(); pname == "" {
|
||||
pkg.SetName(name)
|
||||
} else if pname != name {
|
||||
p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name)
|
||||
}
|
||||
}
|
||||
return pkg
|
||||
}
|
||||
|
||||
// parseExportedName is like parseQualifiedName, but
|
||||
// the package id is resolved to an imported *types.Package.
|
||||
func (p *parser) parseExportedName() (pkg *types.Package, name string) {
|
||||
id, name := p.parseQualifiedName()
|
||||
pkg = p.getPkg(id, "")
|
||||
return
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Types
|
||||
|
||||
// parseBasicType parses a BasicType:
|
||||
//
|
||||
// BasicType = identifier .
|
||||
func (p *parser) parseBasicType() types.Type {
|
||||
id := p.expect(scanner.Ident)
|
||||
obj := types.Universe.Lookup(id)
|
||||
if obj, ok := obj.(*types.TypeName); ok {
|
||||
return obj.Type()
|
||||
}
|
||||
p.errorf("not a basic type: %s", id)
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseArrayType parses an ArrayType:
|
||||
//
|
||||
// ArrayType = "[" int_lit "]" Type .
|
||||
func (p *parser) parseArrayType(parent *types.Package) types.Type {
|
||||
// "[" already consumed and lookahead known not to be "]"
|
||||
lit := p.expect(scanner.Int)
|
||||
p.expect(']')
|
||||
elem := p.parseType(parent)
|
||||
n, err := strconv.ParseInt(lit, 10, 64)
|
||||
if err != nil {
|
||||
p.error(err)
|
||||
}
|
||||
return types.NewArray(elem, n)
|
||||
}
|
||||
|
||||
// parseMapType parses a MapType:
|
||||
//
|
||||
// MapType = "map" "[" Type "]" Type .
|
||||
func (p *parser) parseMapType(parent *types.Package) types.Type {
|
||||
p.expectKeyword("map")
|
||||
p.expect('[')
|
||||
key := p.parseType(parent)
|
||||
p.expect(']')
|
||||
elem := p.parseType(parent)
|
||||
return types.NewMap(key, elem)
|
||||
}
|
||||
|
||||
// parseName parses a Name:
|
||||
//
|
||||
// Name = identifier | "?" | QualifiedName .
|
||||
//
|
||||
// For unqualified and anonymous names, the returned package is the parent
|
||||
// package unless parent == nil, in which case the returned package is the
|
||||
// package being imported. (The parent package is not nil if the name
|
||||
// is an unqualified struct field or interface method name belonging to a
|
||||
// type declared in another package.)
|
||||
//
|
||||
// For qualified names, the returned package is nil (and not created if
|
||||
// it doesn't exist yet) unless materializePkg is set (which creates an
|
||||
// unnamed package with valid package path). In the latter case, a
|
||||
// subsequent import clause is expected to provide a name for the package.
|
||||
func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) {
|
||||
pkg = parent
|
||||
if pkg == nil {
|
||||
pkg = p.sharedPkgs[p.id]
|
||||
}
|
||||
switch p.tok {
|
||||
case scanner.Ident:
|
||||
name = p.lit
|
||||
p.next()
|
||||
case '?':
|
||||
// anonymous
|
||||
p.next()
|
||||
case '@':
|
||||
// exported name prefixed with package path
|
||||
pkg = nil
|
||||
var id string
|
||||
id, name = p.parseQualifiedName()
|
||||
if materializePkg {
|
||||
pkg = p.getPkg(id, "")
|
||||
}
|
||||
default:
|
||||
p.error("name expected")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func deref(typ types.Type) types.Type {
|
||||
if p, _ := typ.(*types.Pointer); p != nil {
|
||||
return p.Elem()
|
||||
@ -618,563 +258,6 @@ func deref(typ types.Type) types.Type {
|
||||
return typ
|
||||
}
|
||||
|
||||
// parseField parses a Field:
|
||||
//
|
||||
// Field = Name Type [ string_lit ] .
|
||||
func (p *parser) parseField(parent *types.Package) (*types.Var, string) {
|
||||
pkg, name := p.parseName(parent, true)
|
||||
|
||||
if name == "_" {
|
||||
// Blank fields should be package-qualified because they
|
||||
// are unexported identifiers, but gc does not qualify them.
|
||||
// Assuming that the ident belongs to the current package
|
||||
// causes types to change during re-exporting, leading
|
||||
// to spurious "can't assign A to B" errors from go/types.
|
||||
// As a workaround, pretend all blank fields belong
|
||||
// to the same unique dummy package.
|
||||
const blankpkg = "<_>"
|
||||
pkg = p.getPkg(blankpkg, blankpkg)
|
||||
}
|
||||
|
||||
typ := p.parseType(parent)
|
||||
anonymous := false
|
||||
if name == "" {
|
||||
// anonymous field - typ must be T or *T and T must be a type name
|
||||
switch typ := deref(typ).(type) {
|
||||
case *types.Basic: // basic types are named types
|
||||
pkg = nil // objects defined in Universe scope have no package
|
||||
name = typ.Name()
|
||||
case *types.Named:
|
||||
name = typ.Obj().Name()
|
||||
default:
|
||||
p.errorf("anonymous field expected")
|
||||
}
|
||||
anonymous = true
|
||||
}
|
||||
tag := ""
|
||||
if p.tok == scanner.String {
|
||||
s := p.expect(scanner.String)
|
||||
var err error
|
||||
tag, err = strconv.Unquote(s)
|
||||
if err != nil {
|
||||
p.errorf("invalid struct tag %s: %s", s, err)
|
||||
}
|
||||
}
|
||||
return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag
|
||||
}
|
||||
|
||||
// parseStructType parses a StructType:
|
||||
//
|
||||
// StructType = "struct" "{" [ FieldList ] "}" .
|
||||
// FieldList = Field { ";" Field } .
|
||||
func (p *parser) parseStructType(parent *types.Package) types.Type {
|
||||
var fields []*types.Var
|
||||
var tags []string
|
||||
|
||||
p.expectKeyword("struct")
|
||||
p.expect('{')
|
||||
for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
|
||||
if i > 0 {
|
||||
p.expect(';')
|
||||
}
|
||||
fld, tag := p.parseField(parent)
|
||||
if tag != "" && tags == nil {
|
||||
tags = make([]string, i)
|
||||
}
|
||||
if tags != nil {
|
||||
tags = append(tags, tag)
|
||||
}
|
||||
fields = append(fields, fld)
|
||||
}
|
||||
p.expect('}')
|
||||
|
||||
return types.NewStruct(fields, tags)
|
||||
}
|
||||
|
||||
// parseParameter parses a Parameter:
|
||||
//
|
||||
// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
|
||||
func (p *parser) parseParameter() (par *types.Var, isVariadic bool) {
|
||||
_, name := p.parseName(nil, false)
|
||||
// remove gc-specific parameter numbering
|
||||
if i := strings.Index(name, "·"); i >= 0 {
|
||||
name = name[:i]
|
||||
}
|
||||
if p.tok == '.' {
|
||||
p.expectSpecial("...")
|
||||
isVariadic = true
|
||||
}
|
||||
typ := p.parseType(nil)
|
||||
if isVariadic {
|
||||
typ = types.NewSlice(typ)
|
||||
}
|
||||
// ignore argument tag (e.g. "noescape")
|
||||
if p.tok == scanner.String {
|
||||
p.next()
|
||||
}
|
||||
// TODO(gri) should we provide a package?
|
||||
par = types.NewVar(token.NoPos, nil, name, typ)
|
||||
return
|
||||
}
|
||||
|
||||
// parseParameters parses a Parameters:
|
||||
//
|
||||
// Parameters = "(" [ ParameterList ] ")" .
|
||||
// ParameterList = { Parameter "," } Parameter .
|
||||
func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) {
|
||||
p.expect('(')
|
||||
for p.tok != ')' && p.tok != scanner.EOF {
|
||||
if len(list) > 0 {
|
||||
p.expect(',')
|
||||
}
|
||||
par, variadic := p.parseParameter()
|
||||
list = append(list, par)
|
||||
if variadic {
|
||||
if isVariadic {
|
||||
p.error("... not on final argument")
|
||||
}
|
||||
isVariadic = true
|
||||
}
|
||||
}
|
||||
p.expect(')')
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// parseSignature parses a Signature:
|
||||
//
|
||||
// Signature = Parameters [ Result ] .
|
||||
// Result = Type | Parameters .
|
||||
func (p *parser) parseSignature(recv *types.Var) *types.Signature {
|
||||
params, isVariadic := p.parseParameters()
|
||||
|
||||
// optional result type
|
||||
var results []*types.Var
|
||||
if p.tok == '(' {
|
||||
var variadic bool
|
||||
results, variadic = p.parseParameters()
|
||||
if variadic {
|
||||
p.error("... not permitted on result type")
|
||||
}
|
||||
}
|
||||
|
||||
return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic)
|
||||
}
|
||||
|
||||
// parseInterfaceType parses an InterfaceType:
|
||||
//
|
||||
// InterfaceType = "interface" "{" [ MethodList ] "}" .
|
||||
// MethodList = Method { ";" Method } .
|
||||
// Method = Name Signature .
|
||||
//
|
||||
// The methods of embedded interfaces are always "inlined"
|
||||
// by the compiler and thus embedded interfaces are never
|
||||
// visible in the export data.
|
||||
func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
|
||||
var methods []*types.Func
|
||||
|
||||
p.expectKeyword("interface")
|
||||
p.expect('{')
|
||||
for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
|
||||
if i > 0 {
|
||||
p.expect(';')
|
||||
}
|
||||
pkg, name := p.parseName(parent, true)
|
||||
sig := p.parseSignature(nil)
|
||||
methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig))
|
||||
}
|
||||
p.expect('}')
|
||||
|
||||
// Complete requires the type's embedded interfaces to be fully defined,
|
||||
// but we do not define any
|
||||
return newInterface(methods, nil).Complete()
|
||||
}
|
||||
|
||||
// parseChanType parses a ChanType:
|
||||
//
|
||||
// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
|
||||
func (p *parser) parseChanType(parent *types.Package) types.Type {
|
||||
dir := types.SendRecv
|
||||
if p.tok == scanner.Ident {
|
||||
p.expectKeyword("chan")
|
||||
if p.tok == '<' {
|
||||
p.expectSpecial("<-")
|
||||
dir = types.SendOnly
|
||||
}
|
||||
} else {
|
||||
p.expectSpecial("<-")
|
||||
p.expectKeyword("chan")
|
||||
dir = types.RecvOnly
|
||||
}
|
||||
elem := p.parseType(parent)
|
||||
return types.NewChan(dir, elem)
|
||||
}
|
||||
|
||||
// parseType parses a Type:
|
||||
//
|
||||
// Type =
|
||||
// BasicType | TypeName | ArrayType | SliceType | StructType |
|
||||
// PointerType | FuncType | InterfaceType | MapType | ChanType |
|
||||
// "(" Type ")" .
|
||||
//
|
||||
// BasicType = ident .
|
||||
// TypeName = ExportedName .
|
||||
// SliceType = "[" "]" Type .
|
||||
// PointerType = "*" Type .
|
||||
// FuncType = "func" Signature .
|
||||
func (p *parser) parseType(parent *types.Package) types.Type {
|
||||
switch p.tok {
|
||||
case scanner.Ident:
|
||||
switch p.lit {
|
||||
default:
|
||||
return p.parseBasicType()
|
||||
case "struct":
|
||||
return p.parseStructType(parent)
|
||||
case "func":
|
||||
// FuncType
|
||||
p.next()
|
||||
return p.parseSignature(nil)
|
||||
case "interface":
|
||||
return p.parseInterfaceType(parent)
|
||||
case "map":
|
||||
return p.parseMapType(parent)
|
||||
case "chan":
|
||||
return p.parseChanType(parent)
|
||||
}
|
||||
case '@':
|
||||
// TypeName
|
||||
pkg, name := p.parseExportedName()
|
||||
return declTypeName(pkg, name).Type()
|
||||
case '[':
|
||||
p.next() // look ahead
|
||||
if p.tok == ']' {
|
||||
// SliceType
|
||||
p.next()
|
||||
return types.NewSlice(p.parseType(parent))
|
||||
}
|
||||
return p.parseArrayType(parent)
|
||||
case '*':
|
||||
// PointerType
|
||||
p.next()
|
||||
return types.NewPointer(p.parseType(parent))
|
||||
case '<':
|
||||
return p.parseChanType(parent)
|
||||
case '(':
|
||||
// "(" Type ")"
|
||||
p.next()
|
||||
typ := p.parseType(parent)
|
||||
p.expect(')')
|
||||
return typ
|
||||
}
|
||||
p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Declarations
|
||||
|
||||
// parseImportDecl parses an ImportDecl:
|
||||
//
|
||||
// ImportDecl = "import" PackageName PackageId .
|
||||
func (p *parser) parseImportDecl() {
|
||||
p.expectKeyword("import")
|
||||
name := p.parsePackageName()
|
||||
p.getPkg(p.parsePackageID(), name)
|
||||
}
|
||||
|
||||
// parseInt parses an int_lit:
|
||||
//
|
||||
// int_lit = [ "+" | "-" ] { "0" ... "9" } .
|
||||
func (p *parser) parseInt() string {
|
||||
s := ""
|
||||
switch p.tok {
|
||||
case '-':
|
||||
s = "-"
|
||||
p.next()
|
||||
case '+':
|
||||
p.next()
|
||||
}
|
||||
return s + p.expect(scanner.Int)
|
||||
}
|
||||
|
||||
// parseNumber parses a number:
|
||||
//
|
||||
// number = int_lit [ "p" int_lit ] .
|
||||
func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) {
|
||||
// mantissa
|
||||
mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0)
|
||||
if mant == nil {
|
||||
panic("invalid mantissa")
|
||||
}
|
||||
|
||||
if p.lit == "p" {
|
||||
// exponent (base 2)
|
||||
p.next()
|
||||
exp, err := strconv.ParseInt(p.parseInt(), 10, 0)
|
||||
if err != nil {
|
||||
p.error(err)
|
||||
}
|
||||
if exp < 0 {
|
||||
denom := constant.MakeInt64(1)
|
||||
denom = constant.Shift(denom, token.SHL, uint(-exp))
|
||||
typ = types.Typ[types.UntypedFloat]
|
||||
val = constant.BinaryOp(mant, token.QUO, denom)
|
||||
return
|
||||
}
|
||||
if exp > 0 {
|
||||
mant = constant.Shift(mant, token.SHL, uint(exp))
|
||||
}
|
||||
typ = types.Typ[types.UntypedFloat]
|
||||
val = mant
|
||||
return
|
||||
}
|
||||
|
||||
typ = types.Typ[types.UntypedInt]
|
||||
val = mant
|
||||
return
|
||||
}
|
||||
|
||||
// parseConstDecl parses a ConstDecl:
|
||||
//
|
||||
// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
|
||||
// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit .
|
||||
// bool_lit = "true" | "false" .
|
||||
// complex_lit = "(" float_lit "+" float_lit "i" ")" .
|
||||
// rune_lit = "(" int_lit "+" int_lit ")" .
|
||||
// string_lit = `"` { unicode_char } `"` .
|
||||
func (p *parser) parseConstDecl() {
|
||||
p.expectKeyword("const")
|
||||
pkg, name := p.parseExportedName()
|
||||
|
||||
var typ0 types.Type
|
||||
if p.tok != '=' {
|
||||
// constant types are never structured - no need for parent type
|
||||
typ0 = p.parseType(nil)
|
||||
}
|
||||
|
||||
p.expect('=')
|
||||
var typ types.Type
|
||||
var val constant.Value
|
||||
switch p.tok {
|
||||
case scanner.Ident:
|
||||
// bool_lit
|
||||
if p.lit != "true" && p.lit != "false" {
|
||||
p.error("expected true or false")
|
||||
}
|
||||
typ = types.Typ[types.UntypedBool]
|
||||
val = constant.MakeBool(p.lit == "true")
|
||||
p.next()
|
||||
|
||||
case '-', scanner.Int:
|
||||
// int_lit
|
||||
typ, val = p.parseNumber()
|
||||
|
||||
case '(':
|
||||
// complex_lit or rune_lit
|
||||
p.next()
|
||||
if p.tok == scanner.Char {
|
||||
p.next()
|
||||
p.expect('+')
|
||||
typ = types.Typ[types.UntypedRune]
|
||||
_, val = p.parseNumber()
|
||||
p.expect(')')
|
||||
break
|
||||
}
|
||||
_, re := p.parseNumber()
|
||||
p.expect('+')
|
||||
_, im := p.parseNumber()
|
||||
p.expectKeyword("i")
|
||||
p.expect(')')
|
||||
typ = types.Typ[types.UntypedComplex]
|
||||
val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
|
||||
|
||||
case scanner.Char:
|
||||
// rune_lit
|
||||
typ = types.Typ[types.UntypedRune]
|
||||
val = constant.MakeFromLiteral(p.lit, token.CHAR, 0)
|
||||
p.next()
|
||||
|
||||
case scanner.String:
|
||||
// string_lit
|
||||
typ = types.Typ[types.UntypedString]
|
||||
val = constant.MakeFromLiteral(p.lit, token.STRING, 0)
|
||||
p.next()
|
||||
|
||||
default:
|
||||
p.errorf("expected literal got %s", scanner.TokenString(p.tok))
|
||||
}
|
||||
|
||||
if typ0 == nil {
|
||||
typ0 = typ
|
||||
}
|
||||
|
||||
pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val))
|
||||
}
|
||||
|
||||
// parseTypeDecl parses a TypeDecl:
|
||||
//
|
||||
// TypeDecl = "type" ExportedName Type .
|
||||
func (p *parser) parseTypeDecl() {
|
||||
p.expectKeyword("type")
|
||||
pkg, name := p.parseExportedName()
|
||||
obj := declTypeName(pkg, name)
|
||||
|
||||
// The type object may have been imported before and thus already
|
||||
// have a type associated with it. We still need to parse the type
|
||||
// structure, but throw it away if the object already has a type.
|
||||
// This ensures that all imports refer to the same type object for
|
||||
// a given type declaration.
|
||||
typ := p.parseType(pkg)
|
||||
|
||||
if name := obj.Type().(*types.Named); name.Underlying() == nil {
|
||||
name.SetUnderlying(typ)
|
||||
}
|
||||
}
|
||||
|
||||
// parseVarDecl parses a VarDecl:
|
||||
//
|
||||
// VarDecl = "var" ExportedName Type .
|
||||
func (p *parser) parseVarDecl() {
|
||||
p.expectKeyword("var")
|
||||
pkg, name := p.parseExportedName()
|
||||
typ := p.parseType(pkg)
|
||||
pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ))
|
||||
}
|
||||
|
||||
// parseFunc parses a Func:
|
||||
//
|
||||
// Func = Signature [ Body ] .
|
||||
// Body = "{" ... "}" .
|
||||
func (p *parser) parseFunc(recv *types.Var) *types.Signature {
|
||||
sig := p.parseSignature(recv)
|
||||
if p.tok == '{' {
|
||||
p.next()
|
||||
for i := 1; i > 0; p.next() {
|
||||
switch p.tok {
|
||||
case '{':
|
||||
i++
|
||||
case '}':
|
||||
i--
|
||||
}
|
||||
}
|
||||
}
|
||||
return sig
|
||||
}
|
||||
|
||||
// parseMethodDecl parses a MethodDecl:
|
||||
//
|
||||
// MethodDecl = "func" Receiver Name Func .
|
||||
// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
|
||||
func (p *parser) parseMethodDecl() {
|
||||
// "func" already consumed
|
||||
p.expect('(')
|
||||
recv, _ := p.parseParameter() // receiver
|
||||
p.expect(')')
|
||||
|
||||
// determine receiver base type object
|
||||
base := deref(recv.Type()).(*types.Named)
|
||||
|
||||
// parse method name, signature, and possibly inlined body
|
||||
_, name := p.parseName(nil, false)
|
||||
sig := p.parseFunc(recv)
|
||||
|
||||
// methods always belong to the same package as the base type object
|
||||
pkg := base.Obj().Pkg()
|
||||
|
||||
// add method to type unless type was imported before
|
||||
// and method exists already
|
||||
// TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small.
|
||||
base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig))
|
||||
}
|
||||
|
||||
// parseFuncDecl parses a FuncDecl:
|
||||
//
|
||||
// FuncDecl = "func" ExportedName Func .
|
||||
func (p *parser) parseFuncDecl() {
|
||||
// "func" already consumed
|
||||
pkg, name := p.parseExportedName()
|
||||
typ := p.parseFunc(nil)
|
||||
pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ))
|
||||
}
|
||||
|
||||
// parseDecl parses a Decl:
|
||||
//
|
||||
// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
|
||||
func (p *parser) parseDecl() {
|
||||
if p.tok == scanner.Ident {
|
||||
switch p.lit {
|
||||
case "import":
|
||||
p.parseImportDecl()
|
||||
case "const":
|
||||
p.parseConstDecl()
|
||||
case "type":
|
||||
p.parseTypeDecl()
|
||||
case "var":
|
||||
p.parseVarDecl()
|
||||
case "func":
|
||||
p.next() // look ahead
|
||||
if p.tok == '(' {
|
||||
p.parseMethodDecl()
|
||||
} else {
|
||||
p.parseFuncDecl()
|
||||
}
|
||||
}
|
||||
}
|
||||
p.expect('\n')
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Export
|
||||
|
||||
// parseExport parses an Export:
|
||||
//
|
||||
// Export = "PackageClause { Decl } "$$" .
|
||||
// PackageClause = "package" PackageName [ "safe" ] "\n" .
|
||||
func (p *parser) parseExport() *types.Package {
|
||||
p.expectKeyword("package")
|
||||
name := p.parsePackageName()
|
||||
if p.tok == scanner.Ident && p.lit == "safe" {
|
||||
// package was compiled with -u option - ignore
|
||||
p.next()
|
||||
}
|
||||
p.expect('\n')
|
||||
|
||||
pkg := p.getPkg(p.id, name)
|
||||
|
||||
for p.tok != '$' && p.tok != scanner.EOF {
|
||||
p.parseDecl()
|
||||
}
|
||||
|
||||
if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
|
||||
// don't call next()/expect() since reading past the
|
||||
// export data may cause scanner errors (e.g. NUL chars)
|
||||
p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
|
||||
}
|
||||
|
||||
if n := p.scanner.ErrorCount; n != 0 {
|
||||
p.errorf("expected no scanner errors, got %d", n)
|
||||
}
|
||||
|
||||
// Record all locally referenced packages as imports.
|
||||
var imports []*types.Package
|
||||
for id, pkg2 := range p.localPkgs {
|
||||
if pkg2.Name() == "" {
|
||||
p.errorf("%s package has no name", id)
|
||||
}
|
||||
if id == p.id {
|
||||
continue // avoid self-edge
|
||||
}
|
||||
imports = append(imports, pkg2)
|
||||
}
|
||||
sort.Sort(byPath(imports))
|
||||
pkg.SetImports(imports)
|
||||
|
||||
// package was imported completely and without errors
|
||||
pkg.MarkComplete()
|
||||
|
||||
return pkg
|
||||
}
|
||||
|
||||
type byPath []*types.Package
|
||||
|
||||
func (a byPath) Len() int { return len(a) }
|
||||
|
vendor/golang.org/x/tools/internal/gcimporter/iexport.go (126 changed lines, generated, vendored)
@ -22,6 +22,7 @@ import (
"strconv"
"strings"

"golang.org/x/tools/internal/tokeninternal"
"golang.org/x/tools/internal/typeparams"
)

@ -138,6 +139,17 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, ver
p.doDecl(p.declTodo.popHead())
}

// Produce index of offset of each file record in files.
var files intWriter
var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i
if p.shallow {
fileOffset = make([]uint64, len(p.fileInfos))
for i, info := range p.fileInfos {
fileOffset[i] = uint64(files.Len())
p.encodeFile(&files, info.file, info.needed)
}
}

// Append indices to data0 section.
dataLen := uint64(p.data0.Len())
w := p.newWriter()
@ -163,16 +175,75 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, ver
}
hdr.uint64(uint64(p.version))
hdr.uint64(uint64(p.strings.Len()))
if p.shallow {
hdr.uint64(uint64(files.Len()))
hdr.uint64(uint64(len(fileOffset)))
for _, offset := range fileOffset {
hdr.uint64(offset)
}
}
hdr.uint64(dataLen)

// Flush output.
io.Copy(out, &hdr)
io.Copy(out, &p.strings)
if p.shallow {
io.Copy(out, &files)
}
io.Copy(out, &p.data0)

return nil
}

// encodeFile writes to w a representation of the file sufficient to
// faithfully restore position information about all needed offsets.
// Mutates the needed array.
func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
_ = needed[0] // precondition: needed is non-empty

w.uint64(p.stringOff(file.Name()))

size := uint64(file.Size())
w.uint64(size)

// Sort the set of needed offsets. Duplicates are harmless.
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })

lines := tokeninternal.GetLines(file) // byte offset of each line start
w.uint64(uint64(len(lines)))

// Rather than record the entire array of line start offsets,
// we save only a sparse list of (index, offset) pairs for
// the start of each line that contains a needed position.
var sparse [][2]int // (index, offset) pairs
outer:
for i, lineStart := range lines {
lineEnd := size
if i < len(lines)-1 {
lineEnd = uint64(lines[i+1])
}
// Does this line contains a needed offset?
if needed[0] < lineEnd {
sparse = append(sparse, [2]int{i, lineStart})
for needed[0] < lineEnd {
needed = needed[1:]
if len(needed) == 0 {
break outer
}
}
}
}

// Delta-encode the columns.
w.uint64(uint64(len(sparse)))
var prev [2]int
for _, pair := range sparse {
w.uint64(uint64(pair[0] - prev[0]))
w.uint64(uint64(pair[1] - prev[1]))
prev = pair
}
}

// writeIndex writes out an object index. mainIndex indicates whether
// we're writing out the main index, which is also read by
// non-compiler tools and includes a complete package description
@ -255,6 +326,12 @@ type iexporter struct {
strings intWriter
stringIndex map[string]uint64

// In shallow mode, object positions are encoded as (file, offset).
// Each file is recorded as a line-number table.
// Only the lines of needed positions are saved faithfully.
fileInfo map[*token.File]uint64 // value is index in fileInfos
fileInfos []*filePositions

data0 intWriter
declIndex map[types.Object]uint64
tparamNames map[types.Object]string // typeparam->exported name
@ -263,6 +340,11 @@ type iexporter struct {
indent int // for tracing support
}

type filePositions struct {
file *token.File
needed []uint64 // unordered list of needed file offsets
}

func (p *iexporter) trace(format string, args ...interface{}) {
if !trace {
// Call sites should also be guarded, but having this check here allows
@ -286,6 +368,25 @@ func (p *iexporter) stringOff(s string) uint64 {
return off
}

// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
index, ok := p.fileInfo[file]
if !ok {
index = uint64(len(p.fileInfo))
p.fileInfos = append(p.fileInfos, &filePositions{file: file})
if p.fileInfo == nil {
p.fileInfo = make(map[*token.File]uint64)
}
p.fileInfo[file] = index
}
// Record each needed offset.
info := p.fileInfos[index]
offset := uint64(file.Offset(pos))
info.needed = append(info.needed, offset)

return index, offset
}

// pushDecl adds n to the declaration work queue, if not already present.
func (p *iexporter) pushDecl(obj types.Object) {
// Package unsafe is known to the compiler and predeclared.
@ -346,8 +447,14 @@ func (p *iexporter) doDecl(obj types.Object) {
case *types.Func:
sig, _ := obj.Type().(*types.Signature)
if sig.Recv() != nil {
// We shouldn't see methods in the package scope,
// but the type checker may repair "func () F() {}"
// to "func (Invalid) F()" and then treat it like "func F()",
// so allow that. See golang/go#57729.
if sig.Recv().Type() != types.Typ[types.Invalid] {
panic(internalErrorf("unexpected method: %v", sig))
}
}

// Function.
if typeparams.ForSignature(sig).Len() == 0 {
@ -458,13 +565,30 @@ func (w *exportWriter) tag(tag byte) {
}

func (w *exportWriter) pos(pos token.Pos) {
if w.p.version >= iexportVersionPosCol {
if w.p.shallow {
w.posV2(pos)
} else if w.p.version >= iexportVersionPosCol {
w.posV1(pos)
} else {
w.posV0(pos)
}
}

// posV2 encoding (used only in shallow mode) records positions as
// (file, offset), where file is the index in the token.File table
// (which records the file name and newline offsets) and offset is a
// byte offset. It effectively ignores //line directives.
func (w *exportWriter) posV2(pos token.Pos) {
if pos == token.NoPos {
w.uint64(0)
return
}
file := w.p.fset.File(pos) // fset must be non-nil
index, offset := w.p.fileIndexAndOffset(file, pos)
w.uint64(1 + index)
w.uint64(offset)
}

func (w *exportWriter) posV1(pos token.Pos) {
if w.p.fset == nil {
w.int64(0)
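
The encodeFile hunk above stores only a sparse, delta-encoded slice of the line-start table: one (index, offset) pair per line that actually contains a needed position. A minimal standalone sketch of that encoding step, using made-up sample data (sparseLineTable and its inputs are illustrative, not part of the vendored code):

package main

import (
	"fmt"
	"sort"
)

// sparseLineTable keeps one (lineIndex, lineStart) pair for each line that
// contains at least one needed offset, mirroring the loop in encodeFile.
func sparseLineTable(lines []int, needed []uint64, size uint64) [][2]int {
	sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
	var sparse [][2]int
outer:
	for i, lineStart := range lines {
		lineEnd := size
		if i < len(lines)-1 {
			lineEnd = uint64(lines[i+1])
		}
		if len(needed) > 0 && needed[0] < lineEnd {
			sparse = append(sparse, [2]int{i, lineStart})
			for needed[0] < lineEnd {
				needed = needed[1:]
				if len(needed) == 0 {
					break outer
				}
			}
		}
	}
	return sparse
}

func main() {
	lines := []int{0, 10, 25, 40, 60} // byte offset of each line start
	needed := []uint64{12, 14, 45}    // positions that must survive round-tripping
	pairs := sparseLineTable(lines, needed, 80)

	// Delta-encode the pairs, as the exporter does before emitting uint64s.
	var prev [2]int
	for _, pair := range pairs {
		fmt.Println(pair[0]-prev[0], pair[1]-prev[1]) // prints "1 10", then "2 30"
		prev = pair
	}
}
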
82
vendor/golang.org/x/tools/internal/gcimporter/iimport.go
generated
vendored
@ -137,12 +137,23 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
}

sLen := int64(r.uint64())
var fLen int64
var fileOffset []uint64
if insert != nil {
// Shallow mode uses a different position encoding.
fLen = int64(r.uint64())
fileOffset = make([]uint64, r.uint64())
for i := range fileOffset {
fileOffset[i] = r.uint64()
}
}
dLen := int64(r.uint64())

whence, _ := r.Seek(0, io.SeekCurrent)
stringData := data[whence : whence+sLen]
declData := data[whence+sLen : whence+sLen+dLen]
r.Seek(sLen+dLen, io.SeekCurrent)
fileData := data[whence+sLen : whence+sLen+fLen]
declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen]
r.Seek(sLen+fLen+dLen, io.SeekCurrent)

p := iimporter{
version: int(version),
@ -151,6 +162,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data

stringData: stringData,
stringCache: make(map[uint64]string),
fileOffset: fileOffset,
fileData: fileData,
fileCache: make([]*token.File, len(fileOffset)),
pkgCache: make(map[uint64]*types.Package),

declData: declData,
@ -280,6 +294,9 @@ type iimporter struct {

stringData []byte
stringCache map[uint64]string
fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i
fileData []byte
fileCache []*token.File // memoized decoding of file encoded as i
pkgCache map[uint64]*types.Package

declData []byte
@ -352,6 +369,55 @@ func (p *iimporter) stringAt(off uint64) string {
return s
}

func (p *iimporter) fileAt(index uint64) *token.File {
file := p.fileCache[index]
if file == nil {
off := p.fileOffset[index]
file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath})
p.fileCache[index] = file
}
return file
}

func (p *iimporter) decodeFile(rd intReader) *token.File {
filename := p.stringAt(rd.uint64())
size := int(rd.uint64())
file := p.fake.fset.AddFile(filename, -1, size)

// SetLines requires a nondecreasing sequence.
// Because it is common for clients to derive the interval
// [start, start+len(name)] from a start position, and we
// want to ensure that the end offset is on the same line,
// we fill in the gaps of the sparse encoding with values
// that strictly increase by the largest possible amount.
// This allows us to avoid having to record the actual end
// offset of each needed line.

lines := make([]int, int(rd.uint64()))
var index, offset int
for i, n := 0, int(rd.uint64()); i < n; i++ {
index += int(rd.uint64())
offset += int(rd.uint64())
lines[index] = offset

// Ensure monotonicity between points.
for j := index - 1; j > 0 && lines[j] == 0; j-- {
lines[j] = lines[j+1] - 1
}
}

// Ensure monotonicity after last point.
for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- {
size--
lines[j] = size
}

if !file.SetLines(lines) {
errorf("SetLines failed: %d", lines) // can't happen
}
return file
}

func (p *iimporter) pkgAt(off uint64) *types.Package {
if pkg, ok := p.pkgCache[off]; ok {
return pkg
@ -645,6 +711,9 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) {
}

func (r *importReader) pos() token.Pos {
if r.p.insert != nil { // shallow mode
return r.posv2()
}
if r.p.version >= iexportVersionPosCol {
r.posv1()
} else {
@ -681,6 +750,15 @@ func (r *importReader) posv1() {
}
}

func (r *importReader) posv2() token.Pos {
file := r.uint64()
if file == 0 {
return token.NoPos
}
tf := r.p.fileAt(file - 1)
return tf.Pos(int(r.uint64()))
}

func (r *importReader) typ() types.Type {
return r.p.typAt(r.uint64(), nil)
}
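
decodeFile above must hand token.File.SetLines a strictly increasing table even though only a few line starts were recorded, so it back-fills the gaps from each known point and from the file size. A small runnable sketch of that gap-filling with toy inputs (expandSparse is illustrative, not the vendored function):

package main

import "fmt"

// expandSparse rebuilds a full line-start table from sparse (index, offset)
// points, filling unknown entries so the sequence stays strictly increasing,
// in the same spirit as the two monotonicity loops in decodeFile.
func expandSparse(numLines int, points [][2]int, size int) []int {
	lines := make([]int, numLines)
	for _, pt := range points {
		index, offset := pt[0], pt[1]
		lines[index] = offset
		// Walk backwards from a known point, stepping down by one byte per line.
		for j := index - 1; j > 0 && lines[j] == 0; j-- {
			lines[j] = lines[j+1] - 1
		}
	}
	// Fill the tail after the last known point, working back from the file size.
	for j := numLines - 1; j > 0 && lines[j] == 0; j-- {
		size--
		lines[j] = size
	}
	return lines
}

func main() {
	// Five lines in the file, but only lines 1 and 3 carried needed positions.
	fmt.Println(expandSparse(5, [][2]int{{1, 10}, {3, 40}}, 80)) // [0 10 39 40 79]
}
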
37
vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
generated
vendored
@ -559,18 +559,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {

named.SetTypeParams(r.typeParamNames())

rhs := r.typ()
pk := r.p
pk.laterFor(named, func() {
// First be sure that the rhs is initialized, if it needs to be initialized.
delete(pk.laterFors, named) // prevent cycles
if i, ok := pk.laterFors[rhs]; ok {
f := pk.laterFns[i]
pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
f() // initialize RHS
}
underlying := rhs.Underlying()

setUnderlying := func(underlying types.Type) {
// If the underlying type is an interface, we need to
// duplicate its methods so we can replace the receiver
// parameter's type (#49906).
@ -595,7 +584,31 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
}

named.SetUnderlying(underlying)
}

// Since go.dev/cl/455279, we can assume rhs.Underlying() will
// always be non-nil. However, to temporarily support users of
// older snapshot releases, we continue to fallback to the old
// behavior for now.
//
// TODO(mdempsky): Remove fallback code and simplify after
// allowing time for snapshot users to upgrade.
rhs := r.typ()
if underlying := rhs.Underlying(); underlying != nil {
setUnderlying(underlying)
} else {
pk := r.p
pk.laterFor(named, func() {
// First be sure that the rhs is initialized, if it needs to be initialized.
delete(pk.laterFors, named) // prevent cycles
if i, ok := pk.laterFors[rhs]; ok {
f := pk.laterFns[i]
pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
f() // initialize RHS
}
setUnderlying(rhs.Underlying())
})
}

for i, n := 0, r.Len(); i < n; i++ {
named.AddMethod(r.method())
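
The rewritten hunk above applies setUnderlying(rhs.Underlying()) immediately when the underlying type is already available and keeps the deferred laterFor callback only as a fallback. The deferral is possible because go/types lets a *types.Named be created first and receive its underlying type later; a tiny sketch of that public API (the package path and type name here are invented):

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/p", "p")
	obj := types.NewTypeName(token.NoPos, pkg, "T", nil)

	// Create the named type with a nil underlying type, as importers do for
	// declarations whose right-hand side has not been resolved yet.
	named := types.NewNamed(obj, nil, nil)

	// Later, once the RHS is known, fix the underlying type in place.
	named.SetUnderlying(types.Typ[types.Int])

	fmt.Println(named, "has underlying", named.Underlying())
}
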
16
vendor/golang.org/x/tools/internal/gocommand/version.go
generated
vendored
@ -58,22 +58,24 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
}

// GoVersionString reports the go version string as shown in `go version` command output.
// When `go version` outputs in non-standard form, this returns an empty string.
func GoVersionString(ctx context.Context, inv Invocation, r *Runner) (string, error) {
// GoVersionOutput returns the complete output of the go version command.
func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) {
inv.Verb = "version"
goVersion, err := r.Run(ctx, inv)
if err != nil {
return "", err
}
return parseGoVersionOutput(goVersion.Bytes()), nil
return goVersion.String(), nil
}

func parseGoVersionOutput(data []byte) string {
// ParseGoVersionOutput extracts the Go version string
// from the output of the "go version" command.
// Given an unrecognized form, it returns an empty string.
func ParseGoVersionOutput(data string) string {
re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
m := re.FindSubmatch(data)
m := re.FindStringSubmatch(data)
if len(m) != 2 {
return "" // unrecognized version
}
return string(m[1])
return m[1]
}
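
With this change the regular-expression parsing moves into the exported ParseGoVersionOutput, which now works on a string instead of a []byte. A short usage sketch with sample output strings (the parse helper below merely restates the same regexp for illustration):

package main

import (
	"fmt"
	"regexp"
)

// parse extracts the toolchain token from `go version` output, returning ""
// for unrecognized forms, matching the behaviour described above.
func parse(output string) string {
	re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
	m := re.FindStringSubmatch(output)
	if len(m) != 2 {
		return ""
	}
	return m[1]
}

func main() {
	fmt.Println(parse("go version go1.20.1 linux/amd64")) // go1.20.1
	fmt.Println(parse("unexpected output") == "")         // true
}
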
2
vendor/golang.org/x/tools/internal/pkgbits/decoder.go
generated
vendored
@ -373,7 +373,7 @@ func (r *Decoder) Int64() int64 {
return r.rawVarint()
}

// Int64 decodes and returns a uint64 value from the element bitstream.
// Uint64 decodes and returns a uint64 value from the element bitstream.
func (r *Decoder) Uint64() uint64 {
r.Sync(SyncUint64)
return r.rawUvarint()
2
vendor/golang.org/x/tools/internal/pkgbits/encoder.go
generated
vendored
@ -293,7 +293,7 @@ func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
// Int encodes and writes an int value into the element bitstream.
func (w *Encoder) Int(x int) { w.Int64(int64(x)) }

// Len encodes and writes a uint value into the element bitstream.
// Uint encodes and writes a uint value into the element bitstream.
func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }

// Reloc encodes and writes a relocation for the given (section,
59
vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// package tokeninternal provides access to some internal features of the token
// package.
package tokeninternal

import (
"go/token"
"sync"
"unsafe"
)

// GetLines returns the table of line-start offsets from a token.File.
func GetLines(file *token.File) []int {
// token.File has a Lines method on Go 1.21 and later.
if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
return file.Lines()
}

// This declaration must match that of token.File.
// This creates a risk of dependency skew.
// For now we check that the size of the two
// declarations is the same, on the (fragile) assumption
// that future changes would add fields.
type tokenFile119 struct {
_ string
_ int
_ int
mu sync.Mutex // we're not complete monsters
lines []int
_ []struct{}
}
type tokenFile118 struct {
_ *token.FileSet // deleted in go1.19
tokenFile119
}

type uP = unsafe.Pointer
switch unsafe.Sizeof(*file) {
case unsafe.Sizeof(tokenFile118{}):
var ptr *tokenFile118
*(*uP)(uP(&ptr)) = uP(file)
ptr.mu.Lock()
defer ptr.mu.Unlock()
return ptr.lines

case unsafe.Sizeof(tokenFile119{}):
var ptr *tokenFile119
*(*uP)(uP(&ptr)) = uP(file)
ptr.mu.Lock()
defer ptr.mu.Unlock()
return ptr.lines

default:
panic("unexpected token.File size")
}
}
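
The new GetLines helper falls back to mirroring token.File's private layout and guards against dependency skew by comparing struct sizes before the unsafe cast. A small sketch of just that size check, assuming the Go 1.19 field layout shown above (mirrorFile is illustrative and performs no unsafe access):

package main

import (
	"fmt"
	"go/token"
	"sync"
	"unsafe"
)

// mirrorFile copies the field layout that tokenFile119 above assumes for
// token.File. If the sizes ever diverge, the vendored helper panics rather
// than reading the wrong memory; this sketch only reports the comparison.
type mirrorFile struct {
	_     string
	_     int
	_     int
	mu    sync.Mutex
	lines []int
	_     []struct{}
}

func main() {
	same := unsafe.Sizeof(token.File{}) == unsafe.Sizeof(mirrorFile{})
	fmt.Println("token.File matches the assumed layout:", same)
}
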
9
vendor/modules.txt
vendored
@ -601,11 +601,11 @@ golang.org/x/crypto/sha3
golang.org/x/exp/constraints
golang.org/x/exp/maps
golang.org/x/exp/slices
# golang.org/x/mod v0.7.0
# golang.org/x/mod v0.8.0
## explicit; go 1.17
golang.org/x/mod/semver
golang.org/x/mod/sumdb/note
# golang.org/x/net v0.6.0
# golang.org/x/net v0.8.0
## explicit; go 1.17
golang.org/x/net/context
golang.org/x/net/http/httpguts
@ -635,7 +635,7 @@ golang.org/x/sys/windows
# golang.org/x/term v0.6.0
## explicit; go 1.17
golang.org/x/term
# golang.org/x/text v0.7.0
# golang.org/x/text v0.8.0
## explicit; go 1.17
golang.org/x/text/internal/language
golang.org/x/text/internal/language/compact
@ -645,7 +645,7 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/tools v0.4.0
# golang.org/x/tools v0.6.0
## explicit; go 1.18
golang.org/x/tools/cmd/stringer
golang.org/x/tools/go/gcexportdata
@ -659,6 +659,7 @@ golang.org/x/tools/internal/gcimporter
golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/packagesinternal
golang.org/x/tools/internal/pkgbits
golang.org/x/tools/internal/tokeninternal
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
# google.golang.org/appengine v1.6.7