Mirror of https://github.com/containers/skopeo.git (synced 2025-08-02 07:17:46 +00:00)

Update module github.com/containers/common to v0.54.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

parent 7f5b970fb1
commit bfa04ea246

go.mod (9 lines changed)
@@ -3,7 +3,7 @@ module github.com/containers/skopeo
 go 1.18
 
 require (
-	github.com/containers/common v0.53.0
+	github.com/containers/common v0.54.0
 	github.com/containers/image/v5 v5.26.0
 	github.com/containers/ocicrypt v1.1.7
 	github.com/containers/storage v1.47.0
@@ -23,14 +23,14 @@ require (
 	dario.cat/mergo v1.0.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/Microsoft/hcsshim v0.10.0-rc.7 // indirect
+	github.com/Microsoft/hcsshim v0.10.0-rc.8 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
 	github.com/containerd/containerd v1.7.2 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/coreos/go-oidc/v3 v3.6.0 // indirect
@@ -60,7 +60,6 @@ require (
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/go-containerregistry v0.15.2 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
-	github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -122,7 +121,7 @@ require (
 	golang.org/x/sync v0.3.0 // indirect
 	golang.org/x/sys v0.9.0 // indirect
 	golang.org/x/text v0.10.0 // indirect
-	golang.org/x/tools v0.8.0 // indirect
+	golang.org/x/tools v0.9.3 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 	google.golang.org/grpc v1.55.0 // indirect
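For context (not part of the diff): a Renovate bump like this one is normally reproduced with the standard Go toolchain; the exact vendoring step is repository-specific, so the `make vendor` alternative below is an assumption about skopeo's Makefile rather than something shown in this commit.

go get github.com/containers/common@v0.54.0
go mod tidy
go mod vendor   # or the repository's own wrapper target, e.g. `make vendor`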
go.sum (23 lines changed)
@ -3,15 +3,14 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
|
||||
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.8/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
|
||||
@ -28,10 +27,12 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo=
|
||||
github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
|
||||
github.com/containers/common v0.53.0 h1:Ax814cLeX5VXSnkKUdxz762g+27fJj1st4UvKoXmkKs=
|
||||
github.com/containers/common v0.53.0/go.mod h1:pABPxJwlTE8oYk9/2BW0e0mumkuhJHIPsABHTGRXN3w=
|
||||
github.com/containers/common v0.54.0 h1:jJ2QVuliTa/40QxyDe1ZS1U/7BsDea7qdBeZE0VPu3E=
|
||||
github.com/containers/common v0.54.0/go.mod h1:xbA3bUfth8p2xmqSg01oxHNDRJA71SAVUCqhyEISKic=
|
||||
github.com/containers/image/v5 v5.26.0 h1:P9H4+N/7fTTClnFthIWgJU+0LBkhGlW2tCWR+UNG0Vs=
|
||||
github.com/containers/image/v5 v5.26.0/go.mod h1:QSW67adLL/B4eYsFPG6TjH5Ye4LiLazPAGWk5oQnUdQ=
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
|
||||
@ -176,8 +177,7 @@ github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdY
|
||||
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
|
||||
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c h1:lvddKcYTQ545ADhBujtIJmqQrZBDsGo7XIMbAQe/sNY=
|
||||
github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
||||
github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@ -261,8 +261,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU=
|
||||
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
|
||||
github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
|
||||
github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
@ -468,7 +468,6 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@ -498,8 +497,8 @@ golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgw
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
||||
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
||||
golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
|
||||
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
vendor/github.com/Microsoft/hcsshim/Makefile (generated, vendored; 18 lines changed)
@ -94,23 +94,9 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho
|
||||
tar -zcf $@ -C rootfs .
|
||||
rm -rf rootfs
|
||||
|
||||
-include deps/cmd/gcs.gomake
|
||||
-include deps/cmd/gcstools.gomake
|
||||
-include deps/cmd/hooks/wait-paths.gomake
|
||||
-include deps/cmd/tar2ext4.gomake
|
||||
-include deps/internal/tools/snp-report.gomake
|
||||
|
||||
# Implicit rule for includes that define Go targets.
|
||||
%.gomake: $(SRCROOT)/Makefile
|
||||
bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
|
||||
@mkdir -p $(dir $@)
|
||||
@/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new
|
||||
@/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new
|
||||
@/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new
|
||||
@/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new
|
||||
@/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new
|
||||
@/bin/echo -e '\tmv $$@.new $$@' >> $@.new
|
||||
@/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new
|
||||
mv $@.new $@
|
||||
GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
|
||||
|
||||
bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
|
||||
@mkdir -p bin
|
||||
|
vendor/github.com/Microsoft/hcsshim/internal/log/format.go (generated, vendored, new file; 85 lines)
@ -0,0 +1,85 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/log"
|
||||
)
|
||||
|
||||
const TimeFormat = log.RFC3339NanoFixed
|
||||
|
||||
func FormatTime(t time.Time) string {
|
||||
return t.Format(TimeFormat)
|
||||
}
|
||||
|
||||
// DurationFormat formats a [time.Duration] log entry.
|
||||
//
|
||||
// A nil value signals an error with the formatting.
|
||||
type DurationFormat func(time.Duration) interface{}
|
||||
|
||||
func DurationFormatString(d time.Duration) interface{} { return d.String() }
|
||||
func DurationFormatSeconds(d time.Duration) interface{} { return d.Seconds() }
|
||||
func DurationFormatMilliseconds(d time.Duration) interface{} { return d.Milliseconds() }
|
||||
|
||||
// FormatIO formats net.Conn and other types that have an `Addr()` or `Name()`.
|
||||
//
|
||||
// See FormatEnabled for more information.
|
||||
func FormatIO(ctx context.Context, v interface{}) string {
|
||||
m := make(map[string]string)
|
||||
m["type"] = reflect.TypeOf(v).String()
|
||||
|
||||
switch t := v.(type) {
|
||||
case net.Conn:
|
||||
m["localAddress"] = formatAddr(t.LocalAddr())
|
||||
m["remoteAddress"] = formatAddr(t.RemoteAddr())
|
||||
case interface{ Addr() net.Addr }:
|
||||
m["address"] = formatAddr(t.Addr())
|
||||
default:
|
||||
return Format(ctx, t)
|
||||
}
|
||||
|
||||
return Format(ctx, m)
|
||||
}
|
||||
|
||||
func formatAddr(a net.Addr) string {
|
||||
return a.Network() + "://" + a.String()
|
||||
}
|
||||
|
||||
// Format formats an object into a JSON string, without any indentation or
|
||||
// HTML escapes.
|
||||
// Context is used to output a log warning if the conversion fails.
|
||||
//
|
||||
// This is intended primarily for `trace.StringAttribute()`
|
||||
func Format(ctx context.Context, v interface{}) string {
|
||||
b, err := encode(v)
|
||||
if err != nil {
|
||||
G(ctx).WithError(err).Warning("could not format value")
|
||||
return ""
|
||||
}
|
||||
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func encode(v interface{}) ([]byte, error) {
|
||||
return encodeBuffer(&bytes.Buffer{}, v)
|
||||
}
|
||||
|
||||
func encodeBuffer(buf *bytes.Buffer, v interface{}) ([]byte, error) {
|
||||
enc := json.NewEncoder(buf)
|
||||
enc.SetEscapeHTML(false)
|
||||
enc.SetIndent("", "")
|
||||
|
||||
if err := enc.Encode(v); err != nil {
|
||||
err = fmt.Errorf("could not marshall %T to JSON for logging: %w", v, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// encoder.Encode appends a newline to the end
|
||||
return bytes.TrimSpace(buf.Bytes()), nil
|
||||
}
|
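Illustration only (not part of the commit): a minimal sketch of how the helpers above behave. The package is internal to hcsshim, so this only compiles inside that module, and the values are made up.

package main

import (
	"context"
	"fmt"
	"net"

	"github.com/Microsoft/hcsshim/internal/log"
)

func main() {
	ctx := context.Background()

	// Format produces compact JSON: no indentation, HTML escaping disabled.
	fmt.Println(log.Format(ctx, map[string]string{"cmd": "<run>", "id": "42"}))
	// -> {"cmd":"<run>","id":"42"}

	// FormatIO reports a connection's type plus local/remote addresses.
	if c, err := net.Dial("tcp", "example.com:80"); err == nil {
		fmt.Println(log.FormatIO(ctx, c))
		c.Close()
	}
}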
vendor/github.com/Microsoft/hcsshim/internal/log/hook.go (generated, vendored; 145 lines changed)
@ -1,23 +1,58 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/logfields"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// Hook serves to intercept and format `logrus.Entry`s before they are passed
|
||||
// to the ETW hook.
|
||||
const nullString = "null"
|
||||
|
||||
// Hook intercepts and formats a [logrus.Entry] before it is logged.
|
||||
//
|
||||
// The containerd shim discards the (formatted) logrus output, and outputs only via ETW.
|
||||
// The Linux GCS outputs logrus entries over stdout, which is consumed by the shim and
|
||||
// then re-output via the ETW hook.
|
||||
type Hook struct{}
|
||||
// The shim either outputs the logs through an ETW hook, discarding the (formatted) output
|
||||
// or logs output to a pipe for logging binaries to consume.
|
||||
// The Linux GCS outputs logrus entries over stdout, which is then consumed and re-output
|
||||
// by the shim.
|
||||
type Hook struct {
|
||||
// EncodeAsJSON formats structs, maps, arrays, slices, and [bytes.Buffer] as JSON.
|
||||
// Variables of [bytes.Buffer] will be converted to []byte.
|
||||
//
|
||||
// Default is false.
|
||||
EncodeAsJSON bool
|
||||
|
||||
// FormatTime specifies the format for [time.Time] variables.
|
||||
// An empty string disables formatting.
|
||||
// When disabled, the fallback will be the JSON encoding, if enabled.
|
||||
//
|
||||
// Default is [github.com/containerd/containerd/log.RFC3339NanoFixed].
|
||||
TimeFormat string
|
||||
|
||||
// DurationFormat converts a [time.Duration] field to an appropriate encoding.
|
||||
// nil disables formatting.
|
||||
// When disabled, the fallback will be the JSON encoding, if enabled.
|
||||
//
|
||||
// Default is [DurationFormatString], which appends a duration unit after the value.
|
||||
DurationFormat DurationFormat
|
||||
|
||||
// AddSpanContext adds [logfields.TraceID] and [logfields.SpanID] fields to
|
||||
// the entry from the span context stored in [logrus.Entry.Context], if it exists.
|
||||
AddSpanContext bool
|
||||
}
|
||||
|
||||
var _ logrus.Hook = &Hook{}
|
||||
|
||||
func NewHook() *Hook {
|
||||
return &Hook{}
|
||||
return &Hook{
|
||||
TimeFormat: log.RFC3339NanoFixed,
|
||||
DurationFormat: DurationFormatString,
|
||||
AddSpanContext: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Hook) Levels() []logrus.Level {
|
||||
@ -25,14 +60,108 @@ func (h *Hook) Levels() []logrus.Level {
|
||||
}
|
||||
|
||||
func (h *Hook) Fire(e *logrus.Entry) (err error) {
|
||||
// JSON encode, if necessary, then add span information
|
||||
h.encode(e)
|
||||
h.addSpanContext(e)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// encode loops through all the fields in the [logrus.Entry] and encodes them according to
|
||||
// the settings in [Hook].
|
||||
// If [Hook.TimeFormat] is non-empty, it will be passed to [time.Time.Format] for
|
||||
// fields of type [time.Time].
|
||||
//
|
||||
// If [Hook.EncodeAsJSON] is true, then fields that are not numeric, boolean, strings, or
|
||||
// errors will be encoded via a [json.Marshal] (with HTML escaping disabled).
|
||||
// Channel- and function-typed fields, as well as unsafe pointers, are left alone and not encoded.
|
||||
//
|
||||
// If [Hook.TimeFormat] and [Hook.DurationFormat] are empty and [Hook.EncodeAsJSON] is false,
|
||||
// then this is a no-op.
|
||||
func (h *Hook) encode(e *logrus.Entry) {
|
||||
d := e.Data
|
||||
|
||||
formatTime := h.TimeFormat != ""
|
||||
formatDuration := h.DurationFormat != nil
|
||||
if !(h.EncodeAsJSON || formatTime || formatDuration) {
|
||||
return
|
||||
}
|
||||
|
||||
for k, v := range d {
|
||||
// encode types with dedicated formatting options first
|
||||
|
||||
if vv, ok := v.(time.Time); formatTime && ok {
|
||||
d[k] = vv.Format(h.TimeFormat)
|
||||
continue
|
||||
}
|
||||
|
||||
if vv, ok := v.(time.Duration); formatDuration && ok {
|
||||
d[k] = h.DurationFormat(vv)
|
||||
continue
|
||||
}
|
||||
|
||||
// general case JSON encoding
|
||||
|
||||
if !h.EncodeAsJSON {
|
||||
continue
|
||||
}
|
||||
|
||||
switch vv := v.(type) {
|
||||
// built in types
|
||||
// "json" marshals errors as "{}", so leave alone here
|
||||
case bool, string, error, uintptr,
|
||||
int8, int16, int32, int64, int,
|
||||
uint8, uint32, uint64, uint,
|
||||
float32, float64:
|
||||
continue
|
||||
|
||||
// Rather than setting d[k] = vv.String(), JSON encode []byte value, since it
|
||||
// may be a binary payload and not representable as a string.
|
||||
// `case bytes.Buffer,*bytes.Buffer:` resolves `vv` to `interface{}`,
|
||||
// so cannot use `vv.Bytes`.
|
||||
// Could move to below the `reflect.Indirect()` call below, but
|
||||
// that would require additional typematching and dereferencing.
|
||||
// Easier to keep these duplicate branches here.
|
||||
case bytes.Buffer:
|
||||
v = vv.Bytes()
|
||||
case *bytes.Buffer:
|
||||
v = vv.Bytes()
|
||||
}
|
||||
|
||||
// dereference pointer or interface variables
|
||||
rv := reflect.Indirect(reflect.ValueOf(v))
|
||||
// check if `v` is a null pointer
|
||||
if !rv.IsValid() {
|
||||
d[k] = nullString
|
||||
continue
|
||||
}
|
||||
|
||||
switch rv.Kind() {
|
||||
case reflect.Map, reflect.Struct, reflect.Array, reflect.Slice:
|
||||
default:
|
||||
// Bool, [U]?Int*, Float*, Complex*, Uintptr, String: encoded as normal
|
||||
// Chan, Func: not supported by json
|
||||
// Interface, Pointer: dereferenced above
|
||||
// UnsafePointer: not supported by json, not safe to de-reference; leave alone
|
||||
continue
|
||||
}
|
||||
|
||||
b, err := encode(v)
|
||||
if err != nil {
|
||||
// Errors are written to stderr (ie, to `panic.log`) and stops the remaining
|
||||
// hooks (ie, exporting to ETW) from firing. So add encoding errors to
|
||||
// the entry data to be written out, but keep on processing.
|
||||
d[k+"-"+logrus.ErrorKey] = err.Error()
|
||||
// keep the original `v` as the value,
|
||||
continue
|
||||
}
|
||||
d[k] = string(b)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Hook) addSpanContext(e *logrus.Entry) {
|
||||
ctx := e.Context
|
||||
if ctx == nil {
|
||||
if !h.AddSpanContext || ctx == nil {
|
||||
return
|
||||
}
|
||||
span := trace.FromContext(ctx)
|
||||
|
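Illustration only: a sketch of wiring the hook above into a logrus logger. The NewHook defaults are taken from the diff; everything else is assumed application code and, since the package is internal, would only build inside the hcsshim module.

package main

import (
	"time"

	"github.com/Microsoft/hcsshim/internal/log"
	"github.com/sirupsen/logrus"
)

func main() {
	// NewHook now defaults to RFC3339NanoFixed timestamps, DurationFormatString,
	// and span-context fields, so most callers can attach it as-is.
	logrus.AddHook(log.NewHook())

	logrus.WithFields(logrus.Fields{
		"elapsed": 1500 * time.Millisecond, // rewritten to "1.5s" by DurationFormatString
		"started": time.Now(),              // rewritten using Hook.TimeFormat
	}).Info("operation finished")
}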
vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go (generated, vendored; 22 lines changed)
@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
|
||||
@ -56,11 +55,11 @@ func ScrubProcessParameters(s string) (string, error) {
|
||||
}
|
||||
pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement}
|
||||
|
||||
buf := bytes.NewBuffer(b[:0])
|
||||
if err := encode(buf, pp); err != nil {
|
||||
b, err := encodeBuffer(bytes.NewBuffer(b[:0]), pp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(buf.String()), nil
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
// ScrubBridgeCreate scrubs requests sent over the bridge of type
|
||||
@ -150,21 +149,12 @@ func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
if err := encode(buf, m); err != nil {
|
||||
b, err := encode(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bytes.TrimSpace(buf.Bytes()), nil
|
||||
}
|
||||
|
||||
func encode(buf *bytes.Buffer, v interface{}) error {
|
||||
enc := json.NewEncoder(buf)
|
||||
enc.SetEscapeHTML(false)
|
||||
if err := enc.Encode(v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func isRequestBase(m genMap) bool {
|
||||
|
vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go (generated, vendored, new file; 69 lines)
@ -0,0 +1,69 @@
|
||||
package oc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// todo: break import cycle with "internal/hcs/errors.go" and reference errors defined there
|
||||
// todo: add errors defined in "internal/guest/gcserror" (Hresult does not implement error)
|
||||
|
||||
func toStatusCode(err error) codes.Code {
|
||||
// checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status,
|
||||
// wraps an error defined in "github.com/containerd/containerd/errdefs", or is a
|
||||
// context timeout or cancelled error
|
||||
if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
|
||||
return s.Code()
|
||||
}
|
||||
|
||||
switch {
|
||||
// case isAny(err):
|
||||
// return codes.Cancelled
|
||||
case isAny(err, os.ErrInvalid):
|
||||
return codes.InvalidArgument
|
||||
case isAny(err, os.ErrDeadlineExceeded):
|
||||
return codes.DeadlineExceeded
|
||||
case isAny(err, os.ErrNotExist):
|
||||
return codes.NotFound
|
||||
case isAny(err, os.ErrExist):
|
||||
return codes.AlreadyExists
|
||||
case isAny(err, os.ErrPermission):
|
||||
return codes.PermissionDenied
|
||||
// case isAny(err):
|
||||
// return codes.ResourceExhausted
|
||||
case isAny(err, os.ErrClosed, net.ErrClosed, io.ErrClosedPipe, io.ErrShortBuffer):
|
||||
return codes.FailedPrecondition
|
||||
// case isAny(err):
|
||||
// return codes.Aborted
|
||||
// case isAny(err):
|
||||
// return codes.OutOfRange
|
||||
// case isAny(err):
|
||||
// return codes.Unimplemented
|
||||
case isAny(err, io.ErrNoProgress):
|
||||
return codes.Internal
|
||||
// case isAny(err):
|
||||
// return codes.Unavailable
|
||||
case isAny(err, io.ErrShortWrite, io.ErrUnexpectedEOF):
|
||||
return codes.DataLoss
|
||||
// case isAny(err):
|
||||
// return codes.Unauthenticated
|
||||
default:
|
||||
return codes.Unknown
|
||||
}
|
||||
}
|
||||
|
||||
// isAny returns true if errors.Is is true for any of the provided errors, errs.
|
||||
func isAny(err error, errs ...error) bool {
|
||||
for _, e := range errs {
|
||||
if errors.Is(err, e) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
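Illustration only: toStatusCode and isAny are unexported, so this sketch is written as if it lived in the same package; the expected codes follow directly from the switch above.

package oc

import (
	"fmt"
	"os"
	"testing"

	"google.golang.org/grpc/codes"
)

func TestToStatusCodeSketch(t *testing.T) {
	// errors.Is unwraps, so a wrapped os.ErrNotExist maps to NotFound.
	err := fmt.Errorf("open config: %w", os.ErrNotExist)
	if got := toStatusCode(err); got != codes.NotFound {
		t.Fatalf("got %v, want %v", got, codes.NotFound)
	}

	// Anything that matches no branch falls through to Unknown.
	if got := toStatusCode(fmt.Errorf("boom")); got != codes.Unknown {
		t.Fatalf("got %v, want %v", got, codes.Unknown)
	}
}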
vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go (generated, vendored; 77 lines changed)
@ -3,19 +3,26 @@ package oc
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/log"
|
||||
"github.com/Microsoft/hcsshim/internal/logfields"
|
||||
)
|
||||
|
||||
var _ = (trace.Exporter)(&LogrusExporter{})
|
||||
const spanMessage = "Span"
|
||||
|
||||
var _errorCodeKey = logrus.ErrorKey + "Code"
|
||||
|
||||
// LogrusExporter is an OpenCensus `trace.Exporter` that exports
|
||||
// `trace.SpanData` to logrus output.
|
||||
type LogrusExporter struct {
|
||||
}
|
||||
type LogrusExporter struct{}
|
||||
|
||||
var _ trace.Exporter = &LogrusExporter{}
|
||||
|
||||
// ExportSpan exports `s` based on the following rules:
|
||||
//
|
||||
// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`,
|
||||
// `s.ParentSpanID` for correlation
|
||||
// 1. All output will contain `s.Attributes`, `s.SpanKind`, `s.TraceID`,
|
||||
// `s.SpanID`, and `s.ParentSpanID` for correlation
|
||||
//
|
||||
// 2. Any calls to .Annotate will not be supported.
|
||||
//
|
||||
@ -23,21 +30,57 @@ type LogrusExporter struct {
|
||||
// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel`
|
||||
// providing `s.Status.Message` as the error value.
|
||||
func (le *LogrusExporter) ExportSpan(s *trace.SpanData) {
|
||||
// Combine all span annotations with traceID, spanID, parentSpanID
|
||||
baseEntry := logrus.WithFields(logrus.Fields(s.Attributes))
|
||||
baseEntry.Data["traceID"] = s.TraceID.String()
|
||||
baseEntry.Data["spanID"] = s.SpanID.String()
|
||||
baseEntry.Data["parentSpanID"] = s.ParentSpanID.String()
|
||||
baseEntry.Data["startTime"] = s.StartTime
|
||||
baseEntry.Data["endTime"] = s.EndTime
|
||||
baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String()
|
||||
baseEntry.Data["name"] = s.Name
|
||||
baseEntry.Time = s.StartTime
|
||||
if s.DroppedAnnotationCount > 0 {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"name": s.Name,
|
||||
logfields.TraceID: s.TraceID.String(),
|
||||
logfields.SpanID: s.SpanID.String(),
|
||||
"dropped": s.DroppedAttributeCount,
|
||||
"maxAttributes": len(s.Attributes),
|
||||
}).Warning("span had dropped attributes")
|
||||
}
|
||||
|
||||
entry := log.L.Dup()
|
||||
// Combine all span annotations with span data (eg, trace ID, span ID, parent span ID,
|
||||
// error, status code)
|
||||
// (OC) Span attributes are guaranteed to be strings, bools, or int64s, so we can
|
||||
// skip overhead in entry.WithFields() and add them directly to entry.Data.
|
||||
// Preallocate ahead of time, since we should add, at most, 10 additional entries
|
||||
data := make(logrus.Fields, len(entry.Data)+len(s.Attributes)+10)
|
||||
|
||||
// Default log entry may have preexisting/application-wide data
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
for k, v := range s.Attributes {
|
||||
data[k] = v
|
||||
}
|
||||
|
||||
data[logfields.Name] = s.Name
|
||||
data[logfields.TraceID] = s.TraceID.String()
|
||||
data[logfields.SpanID] = s.SpanID.String()
|
||||
data[logfields.ParentSpanID] = s.ParentSpanID.String()
|
||||
data[logfields.StartTime] = s.StartTime
|
||||
data[logfields.EndTime] = s.EndTime
|
||||
data[logfields.Duration] = s.EndTime.Sub(s.StartTime)
|
||||
if sk := spanKindToString(s.SpanKind); sk != "" {
|
||||
data["spanKind"] = sk
|
||||
}
|
||||
|
||||
level := logrus.InfoLevel
|
||||
if s.Status.Code != 0 {
|
||||
level = logrus.ErrorLevel
|
||||
baseEntry.Data[logrus.ErrorKey] = s.Status.Message
|
||||
|
||||
// don't overwrite an existing "error" or "errorCode" attributes
|
||||
if _, ok := data[logrus.ErrorKey]; !ok {
|
||||
data[logrus.ErrorKey] = s.Status.Message
|
||||
}
|
||||
if _, ok := data[_errorCodeKey]; !ok {
|
||||
data[_errorCodeKey] = codes.Code(s.Status.Code).String()
|
||||
}
|
||||
}
|
||||
baseEntry.Log(level, "Span")
|
||||
|
||||
entry.Data = data
|
||||
entry.Time = s.StartTime
|
||||
entry.Log(level, spanMessage)
|
||||
}
|
||||
|
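Illustration only: how the exporter above is typically registered with OpenCensus; the span name is made up, and the internal import path again only resolves inside the hcsshim module.

package main

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
	"go.opencensus.io/trace"
)

func main() {
	trace.RegisterExporter(&oc.LogrusExporter{})
	trace.ApplyConfig(trace.Config{DefaultSampler: oc.DefaultSampler})

	_, span := trace.StartSpan(context.Background(), "pull-layer")
	// ... work ...
	oc.SetSpanStatus(span, nil) // a non-nil error now maps to a gRPC-style code
	span.End()                  // ExportSpan fires and emits the "Span" log entry
}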
vendor/github.com/Microsoft/hcsshim/internal/oc/span.go (generated, vendored; 14 lines changed)
@ -14,8 +14,7 @@ var DefaultSampler = trace.AlwaysSample()
|
||||
func SetSpanStatus(span *trace.Span, err error) {
|
||||
status := trace.Status{}
|
||||
if err != nil {
|
||||
// TODO: JTERRY75 - Handle errors in a non-generic way
|
||||
status.Code = trace.StatusCodeUnknown
|
||||
status.Code = int32(toStatusCode(err))
|
||||
status.Message = err.Error()
|
||||
}
|
||||
span.SetStatus(status)
|
||||
@ -46,3 +45,14 @@ func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) {
|
||||
|
||||
var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer)
|
||||
var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient)
|
||||
|
||||
func spanKindToString(sk int) string {
|
||||
switch sk {
|
||||
case trace.SpanKindClient:
|
||||
return "client"
|
||||
case trace.SpanKindServer:
|
||||
return "server"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go (generated, vendored; 14 lines changed)
@ -23,20 +23,14 @@ type (
|
||||
)
|
||||
|
||||
type explicitAccess struct {
|
||||
//nolint:structcheck
|
||||
accessPermissions accessMask
|
||||
//nolint:structcheck
|
||||
accessMode accessMode
|
||||
//nolint:structcheck
|
||||
inheritance inheritMode
|
||||
//nolint:structcheck
|
||||
trustee trustee
|
||||
accessMode accessMode
|
||||
inheritance inheritMode
|
||||
trustee trustee
|
||||
}
|
||||
|
||||
type trustee struct {
|
||||
//nolint:unused,structcheck
|
||||
multipleTrustee *trustee
|
||||
//nolint:unused,structcheck
|
||||
multipleTrustee *trustee
|
||||
multipleTrusteeOperation int32
|
||||
trusteeForm trusteeForm
|
||||
trusteeType trusteeType
|
||||
|
vendor/github.com/containerd/containerd/LICENSE (generated, vendored, new file; 191 lines)
@ -0,0 +1,191 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright The containerd Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
vendor/github.com/containerd/containerd/NOTICE (generated, vendored, new file; 16 lines)
@ -0,0 +1,16 @@
|
||||
Docker
|
||||
Copyright 2012-2015 Docker, Inc.
|
||||
|
||||
This product includes software developed at Docker, Inc. (https://www.docker.com).
|
||||
|
||||
The following is courtesy of our legal counsel:
|
||||
|
||||
|
||||
Use and transfer of Docker may be subject to certain restrictions by the
|
||||
United States and other governments.
|
||||
It is your responsibility to ensure that your use and/or transfer does not
|
||||
violate applicable laws.
|
||||
|
||||
For more information, please see https://www.bis.doc.gov
|
||||
|
||||
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
|
vendor/github.com/containerd/containerd/errdefs/errors.go (generated, vendored, new file; 92 lines)
@ -0,0 +1,92 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package errdefs defines the common errors used throughout containerd
|
||||
// packages.
|
||||
//
|
||||
// Use with fmt.Errorf to add context to an error.
|
||||
//
|
||||
// To detect an error class, use the IsXXX functions to tell whether an error
|
||||
// is of a certain type.
|
||||
//
|
||||
// The functions ToGRPC and FromGRPC can be used to map server-side and
|
||||
// client-side errors to the correct types.
|
||||
package errdefs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Definitions of common error types used throughout containerd. All containerd
|
||||
// errors returned by most packages will map into one of these error classes.
|
||||
// Packages should return errors of these types when they want to instruct a
|
||||
// client to take a particular action.
|
||||
//
|
||||
// For the most part, we just try to provide local grpc errors. Most conditions
|
||||
// map very well to those defined by grpc.
|
||||
var (
|
||||
ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
|
||||
ErrInvalidArgument = errors.New("invalid argument")
|
||||
ErrNotFound = errors.New("not found")
|
||||
ErrAlreadyExists = errors.New("already exists")
|
||||
ErrFailedPrecondition = errors.New("failed precondition")
|
||||
ErrUnavailable = errors.New("unavailable")
|
||||
ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
|
||||
)
|
||||
|
||||
// IsInvalidArgument returns true if the error is due to an invalid argument
|
||||
func IsInvalidArgument(err error) bool {
|
||||
return errors.Is(err, ErrInvalidArgument)
|
||||
}
|
||||
|
||||
// IsNotFound returns true if the error is due to a missing object
|
||||
func IsNotFound(err error) bool {
|
||||
return errors.Is(err, ErrNotFound)
|
||||
}
|
||||
|
||||
// IsAlreadyExists returns true if the error is due to an already existing
|
||||
// metadata item
|
||||
func IsAlreadyExists(err error) bool {
|
||||
return errors.Is(err, ErrAlreadyExists)
|
||||
}
|
||||
|
||||
// IsFailedPrecondition returns true if an operation could not proceed due to the
|
||||
// lack of a particular condition
|
||||
func IsFailedPrecondition(err error) bool {
|
||||
return errors.Is(err, ErrFailedPrecondition)
|
||||
}
|
||||
|
||||
// IsUnavailable returns true if the error is due to a resource being unavailable
|
||||
func IsUnavailable(err error) bool {
|
||||
return errors.Is(err, ErrUnavailable)
|
||||
}
|
||||
|
||||
// IsNotImplemented returns true if the error is due to not being implemented
|
||||
func IsNotImplemented(err error) bool {
|
||||
return errors.Is(err, ErrNotImplemented)
|
||||
}
|
||||
|
||||
// IsCanceled returns true if the error is due to `context.Canceled`.
|
||||
func IsCanceled(err error) bool {
|
||||
return errors.Is(err, context.Canceled)
|
||||
}
|
||||
|
||||
// IsDeadlineExceeded returns true if the error is due to
|
||||
// `context.DeadlineExceeded`.
|
||||
func IsDeadlineExceeded(err error) bool {
|
||||
return errors.Is(err, context.DeadlineExceeded)
|
||||
}
|
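Illustration only, following the usage pattern the package comment describes (wrap with fmt.Errorf, detect with the IsXXX helpers); the image name is made up.

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func findImage(name string) error {
	// Wrap the class error to add context while keeping errors.Is detection working.
	return fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound)
}

func main() {
	if err := findImage("alpine"); errdefs.IsNotFound(err) {
		fmt.Println("expected:", err) // expected: image "alpine": not found
	}
}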
vendor/github.com/containerd/containerd/errdefs/grpc.go (generated, vendored, new file; 147 lines)
@ -0,0 +1,147 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package errdefs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ToGRPC will attempt to map the backend containerd error into a grpc error,
|
||||
// using the original error message as a description.
|
||||
//
|
||||
// Further information may be extracted from certain errors depending on their
|
||||
// type.
|
||||
//
|
||||
// If the error is unmapped, the original error will be returned to be handled
|
||||
// by the regular grpc error handling stack.
|
||||
func ToGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if isGRPCError(err) {
|
||||
// error has already been mapped to grpc
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case IsInvalidArgument(err):
|
||||
return status.Errorf(codes.InvalidArgument, err.Error())
|
||||
case IsNotFound(err):
|
||||
return status.Errorf(codes.NotFound, err.Error())
|
||||
case IsAlreadyExists(err):
|
||||
return status.Errorf(codes.AlreadyExists, err.Error())
|
||||
case IsFailedPrecondition(err):
|
||||
return status.Errorf(codes.FailedPrecondition, err.Error())
|
||||
case IsUnavailable(err):
|
||||
return status.Errorf(codes.Unavailable, err.Error())
|
||||
case IsNotImplemented(err):
|
||||
return status.Errorf(codes.Unimplemented, err.Error())
|
||||
case IsCanceled(err):
|
||||
return status.Errorf(codes.Canceled, err.Error())
|
||||
case IsDeadlineExceeded(err):
|
||||
return status.Errorf(codes.DeadlineExceeded, err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
|
||||
// and combining it with the target error string.
|
||||
//
|
||||
// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
|
||||
func ToGRPCf(err error, format string, args ...interface{}) error {
|
||||
return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
|
||||
}
|
||||
|
||||
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
|
||||
func FromGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var cls error // divide these into error classes, becomes the cause
|
||||
|
||||
switch code(err) {
|
||||
case codes.InvalidArgument:
|
||||
cls = ErrInvalidArgument
|
||||
case codes.AlreadyExists:
|
||||
cls = ErrAlreadyExists
|
||||
case codes.NotFound:
|
||||
cls = ErrNotFound
|
||||
case codes.Unavailable:
|
||||
cls = ErrUnavailable
|
||||
case codes.FailedPrecondition:
|
||||
cls = ErrFailedPrecondition
|
||||
case codes.Unimplemented:
|
||||
cls = ErrNotImplemented
|
||||
case codes.Canceled:
|
||||
cls = context.Canceled
|
||||
case codes.DeadlineExceeded:
|
||||
cls = context.DeadlineExceeded
|
||||
default:
|
||||
cls = ErrUnknown
|
||||
}
|
||||
|
||||
msg := rebaseMessage(cls, err)
|
||||
if msg != "" {
|
||||
err = fmt.Errorf("%s: %w", msg, cls)
|
||||
} else {
|
||||
err = cls
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// rebaseMessage removes the repeats for an error at the end of an error
|
||||
// string. This will happen when taking an error over grpc then remapping it.
|
||||
//
|
||||
// Effectively, we just remove the string of cls from the end of err if it
|
||||
// appears there.
|
||||
func rebaseMessage(cls error, err error) string {
|
||||
desc := errDesc(err)
|
||||
clss := cls.Error()
|
||||
if desc == clss {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(desc, ": "+clss)
|
||||
}
|
||||
|
||||
func isGRPCError(err error) bool {
|
||||
_, ok := status.FromError(err)
|
||||
return ok
|
||||
}
|
||||
|
||||
func code(err error) codes.Code {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Code()
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
||||
func errDesc(err error) string {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Message()
|
||||
}
|
||||
return err.Error()
|
||||
}
|
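Illustration only: the round trip that ToGRPC and FromGRPC implement, using the error classes defined in errors.go; the container name is made up.

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func main() {
	// Server side: classify the failure, then translate it for the gRPC boundary.
	err := fmt.Errorf("container %q: %w", "web", errdefs.ErrNotFound)
	grpcErr := errdefs.ToGRPC(err) // becomes a codes.NotFound status error

	// Client side: map the status code back onto an error class.
	back := errdefs.FromGRPC(grpcErr)
	fmt.Println(errdefs.IsNotFound(back)) // true
	fmt.Println(back)                     // container "web": not found
}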
vendor/github.com/containerd/containerd/log/context.go (generated, vendored, new file; 72 lines)
@ -0,0 +1,72 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
// G is an alias for GetLogger.
|
||||
//
|
||||
// We may want to define this locally to a package to get package tagged log
|
||||
// messages.
|
||||
G = GetLogger
|
||||
|
||||
// L is an alias for the standard logger.
|
||||
L = logrus.NewEntry(logrus.StandardLogger())
|
||||
)
|
||||
|
||||
type (
|
||||
loggerKey struct{}
|
||||
|
||||
// Fields type to pass to `WithFields`, alias from `logrus`.
|
||||
Fields = logrus.Fields
|
||||
)
|
||||
|
||||
const (
|
||||
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
|
||||
// ensure the formatted time is always the same number of characters.
|
||||
RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
|
||||
// TextFormat represents the text logging format
|
||||
TextFormat = "text"
|
||||
|
||||
// JSONFormat represents the JSON logging format
|
||||
JSONFormat = "json"
|
||||
)
|
||||
|
||||
// WithLogger returns a new context with the provided logger. Use in
|
||||
// combination with logger.WithField(s) for great effect.
|
||||
func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
|
||||
e := logger.WithContext(ctx)
|
||||
return context.WithValue(ctx, loggerKey{}, e)
|
||||
}
|
||||
|
||||
// GetLogger retrieves the current logger from the context. If no logger is
|
||||
// available, the default logger is returned.
|
||||
func GetLogger(ctx context.Context) *logrus.Entry {
|
||||
logger := ctx.Value(loggerKey{})
|
||||
|
||||
if logger == nil {
|
||||
return L.WithContext(ctx)
|
||||
}
|
||||
|
||||
return logger.(*logrus.Entry)
|
||||
}
|
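Illustration only: the context-scoped logger pattern this package provides. G/GetLogger fall back to the package-level entry L when nothing is stored on the context; the field values are made up.

package main

import (
	"context"

	"github.com/containerd/containerd/log"
)

func main() {
	// Attach a request-scoped logger to the context...
	ctx := log.WithLogger(context.Background(), log.L.WithField("module", "copy"))

	// ...and retrieve it anywhere downstream.
	log.G(ctx).WithField("image", "alpine").Info("starting")

	// Without WithLogger, G(ctx) simply returns log.L bound to ctx.
	log.G(context.Background()).Info("default logger")
}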
vendor/github.com/containers/common/pkg/completion/completion.go (generated, vendored; 20 lines changed)
@@ -23,19 +23,19 @@ func CompleteCommandFlags(cmd *cobra.Command, flags FlagCompletions) {
 /* Autocomplete Functions for cobra ValidArgsFunction */

 // AutocompleteNone - Block the default shell completion (no paths)
-func AutocompleteNone(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+func AutocompleteNone(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
 	return nil, cobra.ShellCompDirectiveNoFileComp
 }

 // AutocompleteDefault - Use the default shell completion,
 // allows path completion.
-func AutocompleteDefault(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+func AutocompleteDefault(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
 	return nil, cobra.ShellCompDirectiveDefault
 }

 // AutocompleteCapabilities - Autocomplete linux capabilities options.
 // Used by --cap-add and --cap-drop.
-func AutocompleteCapabilities(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+func AutocompleteCapabilities(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) {
 	caps := capabilities.AllCapabilities()

 	// convertCase will convert a string to lowercase only if the user input is lowercase
@@ -83,17 +83,17 @@ func autocompleteSubIDName(filename string) ([]string, cobra.ShellCompDirective)
 }

 // AutocompleteSubgidName - Autocomplete subgidname based on the names in the /etc/subgid file.
-func AutocompleteSubgidName(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+func AutocompleteSubgidName(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
 	return autocompleteSubIDName("/etc/subgid")
 }

 // AutocompleteSubuidName - Autocomplete subuidname based on the names in the /etc/subuid file.
-func AutocompleteSubuidName(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+func AutocompleteSubuidName(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
 	return autocompleteSubIDName("/etc/subuid")
 }

 // AutocompleteArch - Autocomplete platform supported by container engines
-func AutocompletePlatform(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+func AutocompletePlatform(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
 	completions := []string{
 		"linux/386",
 		"linux/amd64",
@@ -115,7 +115,7 @@ func AutocompletePlatform(cmd *cobra.Command, args []string, toComplete string)
 }

 // AutocompleteArch - Autocomplete architectures supported by container engines
-func AutocompleteArch(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+func AutocompleteArch(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
 	completions := []string{
 		"386",
 		"amd64",
@@ -135,19 +135,19 @@ func AutocompleteArch(cmd *cobra.Command, args []string, toComplete string) ([]s
 }

 // AutocompleteOS - Autocomplete OS supported by container engines
-func AutocompleteOS(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+func AutocompleteOS(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
 	completions := []string{"linux", "windows"}
 	return completions, cobra.ShellCompDirectiveNoFileComp
 }

 // AutocompleteJSONFormat - Autocomplete format flag option.
 // -> "json"
-func AutocompleteJSONFormat(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+func AutocompleteJSONFormat(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
 	return []string{"json"}, cobra.ShellCompDirectiveNoFileComp
 }

 // AutocompleteOneArg - Autocomplete one random arg
-func AutocompleteOneArg(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+func AutocompleteOneArg(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) {
 	if len(args) == 1 {
 		return nil, cobra.ShellCompDirectiveDefault
 	}
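These helpers plug directly into cobra's completion hooks. A rough sketch of wiring them up (the command and flag here are invented for illustration, not copied from skopeo):

package main

import (
	"github.com/containers/common/pkg/completion"
	"github.com/spf13/cobra"
)

func newExampleCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use: "example IMAGE",
		// Positional arguments: at most one, completed with the default shell completion.
		ValidArgsFunction: completion.AutocompleteOneArg,
	}
	cmd.Flags().String("override-arch", "", "use ARCH instead of the architecture of the machine")
	// Flag values: offer the architectures known to container engines.
	_ = cmd.RegisterFlagCompletionFunc("override-arch", completion.AutocompleteArch)
	return cmd
}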
29 vendor/github.com/containers/common/pkg/retry/retry.go generated vendored
@@ -17,8 +17,9 @@ import (

 // Options defines the option to retry.
 type Options struct {
-	MaxRetry int           // The number of times to possibly retry.
-	Delay    time.Duration // The delay to use between retries, if set.
+	MaxRetry         int           // The number of times to possibly retry.
+	Delay            time.Duration // The delay to use between retries, if set.
+	IsErrorRetryable func(error) bool
 }

 // RetryOptions is deprecated, use Options.
@@ -31,6 +32,12 @@ func RetryIfNecessary(ctx context.Context, operation func() error, options *Opti

 // IfNecessary retries the operation in exponential backoff with the retry Options.
 func IfNecessary(ctx context.Context, operation func() error, options *Options) error {
+	var isRetryable func(error) bool
+	if options.IsErrorRetryable != nil {
+		isRetryable = options.IsErrorRetryable
+	} else {
+		isRetryable = IsErrorRetryable
+	}
 	err := operation()
 	for attempt := 0; err != nil && isRetryable(err) && attempt < options.MaxRetry; attempt++ {
 		delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
@@ -49,7 +56,11 @@ func IfNecessary(ctx context.Context, operation func() error, options *Options)
 	return err
 }

-func isRetryable(err error) bool {
+// IsErrorRetryable makes a HEURISTIC determination whether it is worth retrying upon encountering an error.
+// That heuristic is NOT STABLE and it CAN CHANGE AT ANY TIME.
+// Callers that have a hard requirement for specific treatment of a class of errors should make their own check
+// instead of relying on this function maintaining its past behavior.
+func IsErrorRetryable(err error) bool {
 	switch err {
 	case nil:
 		return false
@@ -72,18 +83,18 @@ func isRetryable(err error) bool {
 		}
 		return true
 	case *net.OpError:
-		return isRetryable(e.Err)
+		return IsErrorRetryable(e.Err)
 	case *url.Error: // This includes errors returned by the net/http client.
 		if e.Err == io.EOF { // Happens when a server accepts a HTTP connection and sends EOF
 			return true
 		}
-		return isRetryable(e.Err)
+		return IsErrorRetryable(e.Err)
 	case syscall.Errno:
 		return isErrnoRetryable(e)
 	case errcode.Errors:
 		// if this error is a group of errors, process them all in turn
 		for i := range e {
-			if !isRetryable(e[i]) {
+			if !IsErrorRetryable(e[i]) {
 				return false
 			}
 		}
@@ -91,7 +102,7 @@ func isRetryable(err error) bool {
 	case *multierror.Error:
 		// if this error is a group of errors, process them all in turn
 		for i := range e.Errors {
-			if !isRetryable(e.Errors[i]) {
+			if !IsErrorRetryable(e.Errors[i]) {
 				return false
 			}
 		}
@@ -102,11 +113,11 @@ func isRetryable(err error) bool {
 	}
 	if unwrappable, ok := e.(unwrapper); ok {
 		err = unwrappable.Unwrap()
-		return isRetryable(err)
+		return IsErrorRetryable(err)
 	}
 	case unwrapper: // Test this last, because various error types might implement .Unwrap()
 		err = e.Unwrap()
-		return isRetryable(err)
+		return IsErrorRetryable(err)
 	}

 	return false
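The new IsErrorRetryable field makes the retry policy pluggable instead of hard-coding the package heuristic. A minimal sketch of a caller overriding it (the operation and sentinel error below are invented for illustration):

package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/containers/common/pkg/retry"
)

// errTransient is an illustrative sentinel error; a real caller would match its own error types.
var errTransient = errors.New("registry temporarily unavailable")

func pullImage() error { return nil } // stub standing in for a network operation

func main() {
	opts := retry.Options{
		MaxRetry: 3,
		Delay:    2 * time.Second,
		// Override the package heuristic: only retry our own transient error.
		IsErrorRetryable: func(err error) bool {
			return errors.Is(err, errTransient)
		},
	}
	if err := retry.IfNecessary(context.Background(), pullImage, &opts); err != nil {
		log.Fatal(err)
	}
}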
11 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go generated vendored
@@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
 	// (from "version"). Select appropriate importer.
 	if len(data) > 0 {
 		switch data[0] {
-		case 'i':
+		case 'v', 'c', 'd': // binary, till go1.10
+			return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+
+		case 'i': // indexed, till go1.19
 			_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
 			return pkg, err

-		case 'v', 'c', 'd':
-			_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
-			return pkg, err
-
-		case 'u':
+		case 'u': // unified, from go1.20
 			_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
 			return pkg, err

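For context, gcexportdata is what go/packages uses to load type information from compiler export files; the hunk above only changes the dispatch on the leading format byte. A hedged sketch of the surrounding read path (the import path "fmt" and the lookup behaviour are illustrative; in module-mode setups Find may return nothing):

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate export data for a package (illustrative; may be empty outside GOPATH setups).
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("no export data found")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader skips the archive header; Read then dispatches on the format byte
	// ('v'/'c'/'d', 'i', 'u') exactly as in the switch shown above.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "loaded with", pkg.Scope().Len(), "top-level objects")
}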
23 vendor/golang.org/x/tools/go/packages/golist.go generated vendored
@@ -625,7 +625,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
 		}

 		if pkg.PkgPath == "unsafe" {
-			pkg.GoFiles = nil // ignore fake unsafe.go file
+			pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929)
+		} else if len(pkg.CompiledGoFiles) == 0 {
+			// Work around for pre-go.1.11 versions of go list.
+			// TODO(matloob): they should be handled by the fallback.
+			// Can we delete this?
+			pkg.CompiledGoFiles = pkg.GoFiles
 		}

 		// Assume go list emits only absolute paths for Dir.
@@ -663,13 +668,6 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
 			response.Roots = append(response.Roots, pkg.ID)
 		}

-		// Work around for pre-go.1.11 versions of go list.
-		// TODO(matloob): they should be handled by the fallback.
-		// Can we delete this?
-		if len(pkg.CompiledGoFiles) == 0 {
-			pkg.CompiledGoFiles = pkg.GoFiles
-		}
-
 		// Temporary work-around for golang/go#39986. Parse filenames out of
 		// error messages. This happens if there are unrecoverable syntax
 		// errors in the source, so we can't match on a specific error message.
@@ -891,6 +889,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string {
 		// probably because you'd just get the TestMain.
 		fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)),
 	}
+
+	// golang/go#60456: with go1.21 and later, go list serves pgo variants, which
+	// can be costly to compute and may result in redundant processing for the
+	// caller. Disable these variants. If someone wants to add e.g. a NeedPGO
+	// mode flag, that should be a separate proposal.
+	if goVersion >= 21 {
+		fullargs = append(fullargs, "-pgo=off")
+	}
+
 	fullargs = append(fullargs, cfg.BuildFlags...)
 	fullargs = append(fullargs, "--")
 	fullargs = append(fullargs, words...)
3 vendor/golang.org/x/tools/go/packages/packages.go generated vendored
@@ -308,6 +308,9 @@ type Package struct {
 	TypeErrors []types.Error

 	// GoFiles lists the absolute file paths of the package's Go source files.
+	// It may include files that should not be compiled, for example because
+	// they contain non-matching build tags, are documentary pseudo-files such as
+	// unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing.
 	GoFiles []string

 	// CompiledGoFiles lists the absolute file paths of the package's source
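The expanded comment spells out why GoFiles and CompiledGoFiles can differ. A small sketch of a go/packages caller that requests both lists (the pattern "fmt" is just an example):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Ask for both the raw source files and the files the compiler would actually build.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles,
	}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	for _, pkg := range pkgs {
		fmt.Printf("%s: %d GoFiles, %d CompiledGoFiles\n",
			pkg.PkgPath, len(pkg.GoFiles), len(pkg.CompiledGoFiles))
	}
}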
762 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go generated vendored
@@ -1,762 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package objectpath defines a naming scheme for types.Objects
|
||||
// (that is, named entities in Go programs) relative to their enclosing
|
||||
// package.
|
||||
//
|
||||
// Type-checker objects are canonical, so they are usually identified by
|
||||
// their address in memory (a pointer), but a pointer has meaning only
|
||||
// within one address space. By contrast, objectpath names allow the
|
||||
// identity of an object to be sent from one program to another,
|
||||
// establishing a correspondence between types.Object variables that are
|
||||
// distinct but logically equivalent.
|
||||
//
|
||||
// A single object may have multiple paths. In this example,
|
||||
//
|
||||
// type A struct{ X int }
|
||||
// type B A
|
||||
//
|
||||
// the field X has two paths due to its membership of both A and B.
|
||||
// The For(obj) function always returns one of these paths, arbitrarily
|
||||
// but consistently.
|
||||
package objectpath
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/internal/typeparams"
|
||||
|
||||
_ "unsafe" // for go:linkname
|
||||
)
|
||||
|
||||
// A Path is an opaque name that identifies a types.Object
|
||||
// relative to its package. Conceptually, the name consists of a
|
||||
// sequence of destructuring operations applied to the package scope
|
||||
// to obtain the original object.
|
||||
// The name does not include the package itself.
|
||||
type Path string
|
||||
|
||||
// Encoding
|
||||
//
|
||||
// An object path is a textual and (with training) human-readable encoding
|
||||
// of a sequence of destructuring operators, starting from a types.Package.
|
||||
// The sequences represent a path through the package/object/type graph.
|
||||
// We classify these operators by their type:
|
||||
//
|
||||
// PO package->object Package.Scope.Lookup
|
||||
// OT object->type Object.Type
|
||||
// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
|
||||
// TO type->object Type.{At,Field,Method,Obj} [AFMO]
|
||||
//
|
||||
// All valid paths start with a package and end at an object
|
||||
// and thus may be defined by the regular language:
|
||||
//
|
||||
// objectpath = PO (OT TT* TO)*
|
||||
//
|
||||
// The concrete encoding follows directly:
|
||||
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
|
||||
// - The only OT operator is Object.Type,
|
||||
// which we encode as '.' because dot cannot appear in an identifier.
|
||||
// - The TT operators are encoded as [EKPRUTC];
|
||||
// one of these (TypeParam) requires an integer operand,
|
||||
// which is encoded as a string of decimal digits.
|
||||
// - The TO operators are encoded as [AFMO];
|
||||
// three of these (At,Field,Method) require an integer operand,
|
||||
// which is encoded as a string of decimal digits.
|
||||
// These indices are stable across different representations
|
||||
// of the same package, even source and export data.
|
||||
// The indices used are implementation specific and may not correspond to
|
||||
// the argument to the go/types function.
|
||||
//
|
||||
// In the example below,
|
||||
//
|
||||
// package p
|
||||
//
|
||||
// type T interface {
|
||||
// f() (a string, b struct{ X int })
|
||||
// }
|
||||
//
|
||||
// field X has the path "T.UM0.RA1.F0",
|
||||
// representing the following sequence of operations:
|
||||
//
|
||||
// p.Lookup("T") T
|
||||
// .Type().Underlying().Method(0). f
|
||||
// .Type().Results().At(1) b
|
||||
// .Type().Field(0) X
|
||||
//
|
||||
// The encoding is not maximally compact---every R or P is
|
||||
// followed by an A, for example---but this simplifies the
|
||||
// encoder and decoder.
|
||||
const (
|
||||
// object->type operators
|
||||
opType = '.' // .Type() (Object)
|
||||
|
||||
// type->type operators
|
||||
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
|
||||
opKey = 'K' // .Key() (Map)
|
||||
opParams = 'P' // .Params() (Signature)
|
||||
opResults = 'R' // .Results() (Signature)
|
||||
opUnderlying = 'U' // .Underlying() (Named)
|
||||
opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
|
||||
opConstraint = 'C' // .Constraint() (TypeParam)
|
||||
|
||||
// type->object operators
|
||||
opAt = 'A' // .At(i) (Tuple)
|
||||
opField = 'F' // .Field(i) (Struct)
|
||||
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
|
||||
opObj = 'O' // .Obj() (Named, TypeParam)
|
||||
)
|
||||
|
||||
// For returns the path to an object relative to its package,
|
||||
// or an error if the object is not accessible from the package's Scope.
|
||||
//
|
||||
// The For function guarantees to return a path only for the following objects:
|
||||
// - package-level types
|
||||
// - exported package-level non-types
|
||||
// - methods
|
||||
// - parameter and result variables
|
||||
// - struct fields
|
||||
// These objects are sufficient to define the API of their package.
|
||||
// The objects described by a package's export data are drawn from this set.
|
||||
//
|
||||
// For does not return a path for predeclared names, imported package
|
||||
// names, local names, and unexported package-level names (except
|
||||
// types).
|
||||
//
|
||||
// Example: given this definition,
|
||||
//
|
||||
// package p
|
||||
//
|
||||
// type T interface {
|
||||
// f() (a string, b struct{ X int })
|
||||
// }
|
||||
//
|
||||
// For(X) would return a path that denotes the following sequence of operations:
|
||||
//
|
||||
// p.Scope().Lookup("T") (TypeName T)
|
||||
// .Type().Underlying().Method(0). (method Func f)
|
||||
// .Type().Results().At(1) (field Var b)
|
||||
// .Type().Field(0) (field Var X)
|
||||
//
|
||||
// where p is the package (*types.Package) to which X belongs.
|
||||
func For(obj types.Object) (Path, error) {
|
||||
return newEncoderFor()(obj)
|
||||
}
|
||||
|
||||
// An encoder amortizes the cost of encoding the paths of multiple objects.
|
||||
// Nonexported pending approval of proposal 58668.
|
||||
type encoder struct {
|
||||
scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names()
|
||||
namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods()
|
||||
}
|
||||
|
||||
// Exposed to gopls via golang.org/x/tools/internal/typesinternal
|
||||
// pending approval of proposal 58668.
|
||||
//
|
||||
//go:linkname newEncoderFor
|
||||
func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For }
|
||||
|
||||
func (enc *encoder) For(obj types.Object) (Path, error) {
|
||||
pkg := obj.Pkg()
|
||||
|
||||
// This table lists the cases of interest.
|
||||
//
|
||||
// Object Action
|
||||
// ------ ------
|
||||
// nil reject
|
||||
// builtin reject
|
||||
// pkgname reject
|
||||
// label reject
|
||||
// var
|
||||
// package-level accept
|
||||
// func param/result accept
|
||||
// local reject
|
||||
// struct field accept
|
||||
// const
|
||||
// package-level accept
|
||||
// local reject
|
||||
// func
|
||||
// package-level accept
|
||||
// init functions reject
|
||||
// concrete method accept
|
||||
// interface method accept
|
||||
// type
|
||||
// package-level accept
|
||||
// local reject
|
||||
//
|
||||
// The only accessible package-level objects are members of pkg itself.
|
||||
//
|
||||
// The cases are handled in four steps:
|
||||
//
|
||||
// 1. reject nil and builtin
|
||||
// 2. accept package-level objects
|
||||
// 3. reject obviously invalid objects
|
||||
// 4. search the API for the path to the param/result/field/method.
|
||||
|
||||
// 1. reference to nil or builtin?
|
||||
if pkg == nil {
|
||||
return "", fmt.Errorf("predeclared %s has no path", obj)
|
||||
}
|
||||
scope := pkg.Scope()
|
||||
|
||||
// 2. package-level object?
|
||||
if scope.Lookup(obj.Name()) == obj {
|
||||
// Only exported objects (and non-exported types) have a path.
|
||||
// Non-exported types may be referenced by other objects.
|
||||
if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
|
||||
return "", fmt.Errorf("no path for non-exported %v", obj)
|
||||
}
|
||||
return Path(obj.Name()), nil
|
||||
}
|
||||
|
||||
// 3. Not a package-level object.
|
||||
// Reject obviously non-viable cases.
|
||||
switch obj := obj.(type) {
|
||||
case *types.TypeName:
|
||||
if _, ok := obj.Type().(*typeparams.TypeParam); !ok {
|
||||
// With the exception of type parameters, only package-level type names
|
||||
// have a path.
|
||||
return "", fmt.Errorf("no path for %v", obj)
|
||||
}
|
||||
case *types.Const, // Only package-level constants have a path.
|
||||
*types.Label, // Labels are function-local.
|
||||
*types.PkgName: // PkgNames are file-local.
|
||||
return "", fmt.Errorf("no path for %v", obj)
|
||||
|
||||
case *types.Var:
|
||||
// Could be:
|
||||
// - a field (obj.IsField())
|
||||
// - a func parameter or result
|
||||
// - a local var.
|
||||
// Sadly there is no way to distinguish
|
||||
// a param/result from a local
|
||||
// so we must proceed to the find.
|
||||
|
||||
case *types.Func:
|
||||
// A func, if not package-level, must be a method.
|
||||
if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
|
||||
return "", fmt.Errorf("func is not a method: %v", obj)
|
||||
}
|
||||
|
||||
if path, ok := enc.concreteMethod(obj); ok {
|
||||
// Fast path for concrete methods that avoids looping over scope.
|
||||
return path, nil
|
||||
}
|
||||
|
||||
default:
|
||||
panic(obj)
|
||||
}
|
||||
|
||||
// 4. Search the API for the path to the var (field/param/result) or method.
|
||||
|
||||
// First inspect package-level named types.
|
||||
// In the presence of path aliases, these give
|
||||
// the best paths because non-types may
|
||||
// refer to types, but not the reverse.
|
||||
empty := make([]byte, 0, 48) // initial space
|
||||
names := enc.scopeNames(scope)
|
||||
for _, name := range names {
|
||||
o := scope.Lookup(name)
|
||||
tname, ok := o.(*types.TypeName)
|
||||
if !ok {
|
||||
continue // handle non-types in second pass
|
||||
}
|
||||
|
||||
path := append(empty, name...)
|
||||
path = append(path, opType)
|
||||
|
||||
T := o.Type()
|
||||
|
||||
if tname.IsAlias() {
|
||||
// type alias
|
||||
if r := find(obj, T, path, nil); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
} else {
|
||||
if named, _ := T.(*types.Named); named != nil {
|
||||
if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil {
|
||||
// generic named type
|
||||
return Path(r), nil
|
||||
}
|
||||
}
|
||||
// defined (named) type
|
||||
if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Then inspect everything else:
|
||||
// non-types, and declared methods of defined types.
|
||||
for _, name := range names {
|
||||
o := scope.Lookup(name)
|
||||
path := append(empty, name...)
|
||||
if _, ok := o.(*types.TypeName); !ok {
|
||||
if o.Exported() {
|
||||
// exported non-type (const, var, func)
|
||||
if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Inspect declared methods of defined types.
|
||||
if T, ok := o.Type().(*types.Named); ok {
|
||||
path = append(path, opType)
|
||||
// Note that method index here is always with respect
|
||||
// to canonical ordering of methods, regardless of how
|
||||
// they appear in the underlying type.
|
||||
for i, m := range enc.namedMethods(T) {
|
||||
path2 := appendOpArg(path, opMethod, i)
|
||||
if m == obj {
|
||||
return Path(path2), nil // found declared method
|
||||
}
|
||||
if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
|
||||
}
|
||||
|
||||
func appendOpArg(path []byte, op byte, arg int) []byte {
|
||||
path = append(path, op)
|
||||
path = strconv.AppendInt(path, int64(arg), 10)
|
||||
return path
|
||||
}
|
||||
|
||||
// concreteMethod returns the path for meth, which must have a non-nil receiver.
|
||||
// The second return value indicates success and may be false if the method is
|
||||
// an interface method or if it is an instantiated method.
|
||||
//
|
||||
// This function is just an optimization that avoids the general scope walking
|
||||
// approach. You are expected to fall back to the general approach if this
|
||||
// function fails.
|
||||
func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) {
|
||||
// Concrete methods can only be declared on package-scoped named types. For
|
||||
// that reason we can skip the expensive walk over the package scope: the
|
||||
// path will always be package -> named type -> method. We can trivially get
|
||||
// the type name from the receiver, and only have to look over the type's
|
||||
// methods to find the method index.
|
||||
//
|
||||
// Methods on generic types require special consideration, however. Consider
|
||||
// the following package:
|
||||
//
|
||||
// L1: type S[T any] struct{}
|
||||
// L2: func (recv S[A]) Foo() { recv.Bar() }
|
||||
// L3: func (recv S[B]) Bar() { }
|
||||
// L4: type Alias = S[int]
|
||||
// L5: func _[T any]() { var s S[int]; s.Foo() }
|
||||
//
|
||||
// The receivers of methods on generic types are instantiations. L2 and L3
|
||||
// instantiate S with the type-parameters A and B, which are scoped to the
|
||||
// respective methods. L4 and L5 each instantiate S with int. Each of these
|
||||
// instantiations has its own method set, full of methods (and thus objects)
|
||||
// with receivers whose types are the respective instantiations. In other
|
||||
// words, we have
|
||||
//
|
||||
// S[A].Foo, S[A].Bar
|
||||
// S[B].Foo, S[B].Bar
|
||||
// S[int].Foo, S[int].Bar
|
||||
//
|
||||
// We may thus be trying to produce object paths for any of these objects.
|
||||
//
|
||||
// S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo
|
||||
// and S.Bar, which are the paths that this function naturally produces.
|
||||
//
|
||||
// S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that
|
||||
// don't correspond to the origin methods. For S[int], this is significant.
|
||||
// The most precise object path for S[int].Foo, for example, is Alias.Foo,
|
||||
// not S.Foo. Our function, however, would produce S.Foo, which would
|
||||
// resolve to a different object.
|
||||
//
|
||||
// For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are
|
||||
// still the correct paths, since only the origin methods have meaningful
|
||||
// paths. But this is likely only true for trivial cases and has edge cases.
|
||||
// Since this function is only an optimization, we err on the side of giving
|
||||
// up, deferring to the slower but definitely correct algorithm. Most users
|
||||
// of objectpath will only be giving us origin methods, anyway, as referring
|
||||
// to instantiated methods is usually not useful.
|
||||
|
||||
if typeparams.OriginMethod(meth) != meth {
|
||||
return "", false
|
||||
}
|
||||
|
||||
recvT := meth.Type().(*types.Signature).Recv().Type()
|
||||
if ptr, ok := recvT.(*types.Pointer); ok {
|
||||
recvT = ptr.Elem()
|
||||
}
|
||||
|
||||
named, ok := recvT.(*types.Named)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
|
||||
if types.IsInterface(named) {
|
||||
// Named interfaces don't have to be package-scoped
|
||||
//
|
||||
// TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface
|
||||
// methods, too, I think.
|
||||
return "", false
|
||||
}
|
||||
|
||||
// Preallocate space for the name, opType, opMethod, and some digits.
|
||||
name := named.Obj().Name()
|
||||
path := make([]byte, 0, len(name)+8)
|
||||
path = append(path, name...)
|
||||
path = append(path, opType)
|
||||
for i, m := range enc.namedMethods(named) {
|
||||
if m == meth {
|
||||
path = appendOpArg(path, opMethod, i)
|
||||
return Path(path), true
|
||||
}
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named))
|
||||
}
|
||||
|
||||
// find finds obj within type T, returning the path to it, or nil if not found.
|
||||
//
|
||||
// The seen map is used to short circuit cycles through type parameters. If
|
||||
// nil, it will be allocated as necessary.
|
||||
func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
|
||||
switch T := T.(type) {
|
||||
case *types.Basic, *types.Named:
|
||||
// Named types belonging to pkg were handled already,
|
||||
// so T must belong to another package. No path.
|
||||
return nil
|
||||
case *types.Pointer:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
case *types.Slice:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
case *types.Array:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
case *types.Chan:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
case *types.Map:
|
||||
if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
|
||||
return r
|
||||
}
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
case *types.Signature:
|
||||
if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil {
|
||||
return r
|
||||
}
|
||||
if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
|
||||
return r
|
||||
}
|
||||
return find(obj, T.Results(), append(path, opResults), seen)
|
||||
case *types.Struct:
|
||||
for i := 0; i < T.NumFields(); i++ {
|
||||
fld := T.Field(i)
|
||||
path2 := appendOpArg(path, opField, i)
|
||||
if fld == obj {
|
||||
return path2 // found field var
|
||||
}
|
||||
if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case *types.Tuple:
|
||||
for i := 0; i < T.Len(); i++ {
|
||||
v := T.At(i)
|
||||
path2 := appendOpArg(path, opAt, i)
|
||||
if v == obj {
|
||||
return path2 // found param/result var
|
||||
}
|
||||
if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case *types.Interface:
|
||||
for i := 0; i < T.NumMethods(); i++ {
|
||||
m := T.Method(i)
|
||||
path2 := appendOpArg(path, opMethod, i)
|
||||
if m == obj {
|
||||
return path2 // found interface method
|
||||
}
|
||||
if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case *typeparams.TypeParam:
|
||||
name := T.Obj()
|
||||
if name == obj {
|
||||
return append(path, opObj)
|
||||
}
|
||||
if seen[name] {
|
||||
return nil
|
||||
}
|
||||
if seen == nil {
|
||||
seen = make(map[*types.TypeName]bool)
|
||||
}
|
||||
seen[name] = true
|
||||
if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
|
||||
return r
|
||||
}
|
||||
return nil
|
||||
}
|
||||
panic(T)
|
||||
}
|
||||
|
||||
func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
|
||||
for i := 0; i < list.Len(); i++ {
|
||||
tparam := list.At(i)
|
||||
path2 := appendOpArg(path, opTypeParam, i)
|
||||
if r := find(obj, tparam, path2, seen); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Object returns the object denoted by path p within the package pkg.
|
||||
func Object(pkg *types.Package, p Path) (types.Object, error) {
|
||||
if p == "" {
|
||||
return nil, fmt.Errorf("empty path")
|
||||
}
|
||||
|
||||
pathstr := string(p)
|
||||
var pkgobj, suffix string
|
||||
if dot := strings.IndexByte(pathstr, opType); dot < 0 {
|
||||
pkgobj = pathstr
|
||||
} else {
|
||||
pkgobj = pathstr[:dot]
|
||||
suffix = pathstr[dot:] // suffix starts with "."
|
||||
}
|
||||
|
||||
obj := pkg.Scope().Lookup(pkgobj)
|
||||
if obj == nil {
|
||||
return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
|
||||
}
|
||||
|
||||
// abstraction of *types.{Pointer,Slice,Array,Chan,Map}
|
||||
type hasElem interface {
|
||||
Elem() types.Type
|
||||
}
|
||||
// abstraction of *types.{Named,Signature}
|
||||
type hasTypeParams interface {
|
||||
TypeParams() *typeparams.TypeParamList
|
||||
}
|
||||
// abstraction of *types.{Named,TypeParam}
|
||||
type hasObj interface {
|
||||
Obj() *types.TypeName
|
||||
}
|
||||
|
||||
// The loop state is the pair (t, obj),
|
||||
// exactly one of which is non-nil, initially obj.
|
||||
// All suffixes start with '.' (the only object->type operation),
|
||||
// followed by optional type->type operations,
|
||||
// then a type->object operation.
|
||||
// The cycle then repeats.
|
||||
var t types.Type
|
||||
for suffix != "" {
|
||||
code := suffix[0]
|
||||
suffix = suffix[1:]
|
||||
|
||||
// Codes [AFM] have an integer operand.
|
||||
var index int
|
||||
switch code {
|
||||
case opAt, opField, opMethod, opTypeParam:
|
||||
rest := strings.TrimLeft(suffix, "0123456789")
|
||||
numerals := suffix[:len(suffix)-len(rest)]
|
||||
suffix = rest
|
||||
i, err := strconv.Atoi(numerals)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
|
||||
}
|
||||
index = int(i)
|
||||
case opObj:
|
||||
// no operand
|
||||
default:
|
||||
// The suffix must end with a type->object operation.
|
||||
if suffix == "" {
|
||||
return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
|
||||
}
|
||||
}
|
||||
|
||||
if code == opType {
|
||||
if t != nil {
|
||||
return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
|
||||
}
|
||||
t = obj.Type()
|
||||
obj = nil
|
||||
continue
|
||||
}
|
||||
|
||||
if t == nil {
|
||||
return nil, fmt.Errorf("invalid path: code %q in object context", code)
|
||||
}
|
||||
|
||||
// Inv: t != nil, obj == nil
|
||||
|
||||
switch code {
|
||||
case opElem:
|
||||
hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
|
||||
}
|
||||
t = hasElem.Elem()
|
||||
|
||||
case opKey:
|
||||
mapType, ok := t.(*types.Map)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
|
||||
}
|
||||
t = mapType.Key()
|
||||
|
||||
case opParams:
|
||||
sig, ok := t.(*types.Signature)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
|
||||
}
|
||||
t = sig.Params()
|
||||
|
||||
case opResults:
|
||||
sig, ok := t.(*types.Signature)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
|
||||
}
|
||||
t = sig.Results()
|
||||
|
||||
case opUnderlying:
|
||||
named, ok := t.(*types.Named)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
|
||||
}
|
||||
t = named.Underlying()
|
||||
|
||||
case opTypeParam:
|
||||
hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
|
||||
}
|
||||
tparams := hasTypeParams.TypeParams()
|
||||
if n := tparams.Len(); index >= n {
|
||||
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
|
||||
}
|
||||
t = tparams.At(index)
|
||||
|
||||
case opConstraint:
|
||||
tparam, ok := t.(*typeparams.TypeParam)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
|
||||
}
|
||||
t = tparam.Constraint()
|
||||
|
||||
case opAt:
|
||||
tuple, ok := t.(*types.Tuple)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
|
||||
}
|
||||
if n := tuple.Len(); index >= n {
|
||||
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
|
||||
}
|
||||
obj = tuple.At(index)
|
||||
t = nil
|
||||
|
||||
case opField:
|
||||
structType, ok := t.(*types.Struct)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
|
||||
}
|
||||
if n := structType.NumFields(); index >= n {
|
||||
return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
|
||||
}
|
||||
obj = structType.Field(index)
|
||||
t = nil
|
||||
|
||||
case opMethod:
|
||||
switch t := t.(type) {
|
||||
case *types.Interface:
|
||||
if index >= t.NumMethods() {
|
||||
return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
|
||||
}
|
||||
obj = t.Method(index) // Id-ordered
|
||||
|
||||
case *types.Named:
|
||||
methods := namedMethods(t) // (unmemoized)
|
||||
if index >= len(methods) {
|
||||
return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods))
|
||||
}
|
||||
obj = methods[index] // Id-ordered
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
|
||||
}
|
||||
t = nil
|
||||
|
||||
case opObj:
|
||||
hasObj, ok := t.(hasObj)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
|
||||
}
|
||||
obj = hasObj.Obj()
|
||||
t = nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid path: unknown code %q", code)
|
||||
}
|
||||
}
|
||||
|
||||
if obj.Pkg() != pkg {
|
||||
return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
|
||||
}
|
||||
|
||||
return obj, nil // success
|
||||
}
|
||||
|
||||
// namedMethods returns the methods of a Named type in ascending Id order.
|
||||
func namedMethods(named *types.Named) []*types.Func {
|
||||
methods := make([]*types.Func, named.NumMethods())
|
||||
for i := range methods {
|
||||
methods[i] = named.Method(i)
|
||||
}
|
||||
sort.Slice(methods, func(i, j int) bool {
|
||||
return methods[i].Id() < methods[j].Id()
|
||||
})
|
||||
return methods
|
||||
}
|
||||
|
||||
// scopeNames is a memoization of scope.Names. Callers must not modify the result.
|
||||
func (enc *encoder) scopeNames(scope *types.Scope) []string {
|
||||
m := enc.scopeNamesMemo
|
||||
if m == nil {
|
||||
m = make(map[*types.Scope][]string)
|
||||
enc.scopeNamesMemo = m
|
||||
}
|
||||
names, ok := m[scope]
|
||||
if !ok {
|
||||
names = scope.Names() // allocates and sorts
|
||||
m[scope] = names
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// namedMethods is a memoization of the namedMethods function. Callers must not modify the result.
|
||||
func (enc *encoder) namedMethods(named *types.Named) []*types.Func {
|
||||
m := enc.namedMethodsMemo
|
||||
if m == nil {
|
||||
m = make(map[*types.Named][]*types.Func)
|
||||
enc.namedMethodsMemo = m
|
||||
}
|
||||
methods, ok := m[named]
|
||||
if !ok {
|
||||
methods = namedMethods(named) // allocates and sorts
|
||||
m[named] = methods
|
||||
}
|
||||
return methods
|
||||
|
||||
}
|
59 vendor/golang.org/x/tools/internal/event/tag/tag.go generated vendored Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tag provides the labels used for telemetry throughout gopls.
|
||||
package tag
|
||||
|
||||
import (
|
||||
"golang.org/x/tools/internal/event/keys"
|
||||
)
|
||||
|
||||
var (
|
||||
// create the label keys we use
|
||||
Method = keys.NewString("method", "")
|
||||
StatusCode = keys.NewString("status.code", "")
|
||||
StatusMessage = keys.NewString("status.message", "")
|
||||
RPCID = keys.NewString("id", "")
|
||||
RPCDirection = keys.NewString("direction", "")
|
||||
File = keys.NewString("file", "")
|
||||
Directory = keys.New("directory", "")
|
||||
URI = keys.New("URI", "")
|
||||
Package = keys.NewString("package", "") // Package ID
|
||||
PackagePath = keys.NewString("package_path", "")
|
||||
Query = keys.New("query", "")
|
||||
Snapshot = keys.NewUInt64("snapshot", "")
|
||||
Operation = keys.NewString("operation", "")
|
||||
|
||||
Position = keys.New("position", "")
|
||||
Category = keys.NewString("category", "")
|
||||
PackageCount = keys.NewInt("packages", "")
|
||||
Files = keys.New("files", "")
|
||||
Port = keys.NewInt("port", "")
|
||||
Type = keys.New("type", "")
|
||||
HoverKind = keys.NewString("hoverkind", "")
|
||||
|
||||
NewServer = keys.NewString("new_server", "A new server was added")
|
||||
EndServer = keys.NewString("end_server", "A server was shut down")
|
||||
|
||||
ServerID = keys.NewString("server", "The server ID an event is related to")
|
||||
Logfile = keys.NewString("logfile", "")
|
||||
DebugAddress = keys.NewString("debug_address", "")
|
||||
GoplsPath = keys.NewString("gopls_path", "")
|
||||
ClientID = keys.NewString("client_id", "")
|
||||
|
||||
Level = keys.NewInt("level", "The logging level")
|
||||
)
|
||||
|
||||
var (
|
||||
// create the stats we measure
|
||||
Started = keys.NewInt64("started", "Count of started RPCs.")
|
||||
ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes)
|
||||
SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes)
|
||||
Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds)
|
||||
)
|
||||
|
||||
const (
|
||||
Inbound = "in"
|
||||
Outbound = "out"
|
||||
)
|
852 vendor/golang.org/x/tools/internal/gcimporter/bexport.go generated vendored
@@ -1,852 +0,0 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Binary package export.
|
||||
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
|
||||
// see that file for specification of the format.
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"math"
|
||||
"math/big"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// If debugFormat is set, each integer and string value is preceded by a marker
|
||||
// and position information in the encoding. This mechanism permits an importer
|
||||
// to recognize immediately when it is out of sync. The importer recognizes this
|
||||
// mode automatically (i.e., it can import export data produced with debugging
|
||||
// support even if debugFormat is not set at the time of import). This mode will
|
||||
// lead to massively larger export data (by a factor of 2 to 3) and should only
|
||||
// be enabled during development and debugging.
|
||||
//
|
||||
// NOTE: This flag is the first flag to enable if importing dies because of
|
||||
// (suspected) format errors, and whenever a change is made to the format.
|
||||
const debugFormat = false // default: false
|
||||
|
||||
// Current export format version. Increase with each format change.
|
||||
//
|
||||
// Note: The latest binary (non-indexed) export format is at version 6.
|
||||
// This exporter is still at level 4, but it doesn't matter since
|
||||
// the binary importer can handle older versions just fine.
|
||||
//
|
||||
// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
|
||||
// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
|
||||
// 4: type name objects support type aliases, uses aliasTag
|
||||
// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
|
||||
// 2: removed unused bool in ODCL export (compiler only)
|
||||
// 1: header format change (more regular), export package for _ struct fields
|
||||
// 0: Go1.7 encoding
|
||||
const exportVersion = 4
|
||||
|
||||
// trackAllTypes enables cycle tracking for all types, not just named
|
||||
// types. The existing compiler invariants assume that unnamed types
|
||||
// that are not completely set up are not used, or else there are spurious
|
||||
// errors.
|
||||
// If disabled, only named types are tracked, possibly leading to slightly
|
||||
// less efficient encoding in rare cases. It also prevents the export of
|
||||
// some corner-case type declarations (but those are not handled correctly
|
||||
// with with the textual export format either).
|
||||
// TODO(gri) enable and remove once issues caused by it are fixed
|
||||
const trackAllTypes = false
|
||||
|
||||
type exporter struct {
|
||||
fset *token.FileSet
|
||||
out bytes.Buffer
|
||||
|
||||
// object -> index maps, indexed in order of serialization
|
||||
strIndex map[string]int
|
||||
pkgIndex map[*types.Package]int
|
||||
typIndex map[types.Type]int
|
||||
|
||||
// position encoding
|
||||
posInfoFormat bool
|
||||
prevFile string
|
||||
prevLine int
|
||||
|
||||
// debugging support
|
||||
written int // bytes written
|
||||
indent int // for trace
|
||||
}
|
||||
|
||||
// internalError represents an error generated inside this package.
|
||||
type internalError string
|
||||
|
||||
func (e internalError) Error() string { return "gcimporter: " + string(e) }
|
||||
|
||||
func internalErrorf(format string, args ...interface{}) error {
|
||||
return internalError(fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// BExportData returns binary export data for pkg.
|
||||
// If no file set is provided, position info will be missing.
|
||||
func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
|
||||
if !debug {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
if ierr, ok := e.(internalError); ok {
|
||||
err = ierr
|
||||
return
|
||||
}
|
||||
// Not an internal error; panic again.
|
||||
panic(e)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
p := exporter{
|
||||
fset: fset,
|
||||
strIndex: map[string]int{"": 0}, // empty string is mapped to 0
|
||||
pkgIndex: make(map[*types.Package]int),
|
||||
typIndex: make(map[types.Type]int),
|
||||
posInfoFormat: true, // TODO(gri) might become a flag, eventually
|
||||
}
|
||||
|
||||
// write version info
|
||||
// The version string must start with "version %d" where %d is the version
|
||||
// number. Additional debugging information may follow after a blank; that
|
||||
// text is ignored by the importer.
|
||||
p.rawStringln(fmt.Sprintf("version %d", exportVersion))
|
||||
var debug string
|
||||
if debugFormat {
|
||||
debug = "debug"
|
||||
}
|
||||
p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
|
||||
p.bool(trackAllTypes)
|
||||
p.bool(p.posInfoFormat)
|
||||
|
||||
// --- generic export data ---
|
||||
|
||||
// populate type map with predeclared "known" types
|
||||
for index, typ := range predeclared() {
|
||||
p.typIndex[typ] = index
|
||||
}
|
||||
if len(p.typIndex) != len(predeclared()) {
|
||||
return nil, internalError("duplicate entries in type map?")
|
||||
}
|
||||
|
||||
// write package data
|
||||
p.pkg(pkg, true)
|
||||
if trace {
|
||||
p.tracef("\n")
|
||||
}
|
||||
|
||||
// write objects
|
||||
objcount := 0
|
||||
scope := pkg.Scope()
|
||||
for _, name := range scope.Names() {
|
||||
if !token.IsExported(name) {
|
||||
continue
|
||||
}
|
||||
if trace {
|
||||
p.tracef("\n")
|
||||
}
|
||||
p.obj(scope.Lookup(name))
|
||||
objcount++
|
||||
}
|
||||
|
||||
// indicate end of list
|
||||
if trace {
|
||||
p.tracef("\n")
|
||||
}
|
||||
p.tag(endTag)
|
||||
|
||||
// for self-verification only (redundant)
|
||||
p.int(objcount)
|
||||
|
||||
if trace {
|
||||
p.tracef("\n")
|
||||
}
|
||||
|
||||
// --- end of export data ---
|
||||
|
||||
return p.out.Bytes(), nil
|
||||
}
|
||||
|
||||
func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
|
||||
if pkg == nil {
|
||||
panic(internalError("unexpected nil pkg"))
|
||||
}
|
||||
|
||||
// if we saw the package before, write its index (>= 0)
|
||||
if i, ok := p.pkgIndex[pkg]; ok {
|
||||
p.index('P', i)
|
||||
return
|
||||
}
|
||||
|
||||
// otherwise, remember the package, write the package tag (< 0) and package data
|
||||
if trace {
|
||||
p.tracef("P%d = { ", len(p.pkgIndex))
|
||||
defer p.tracef("} ")
|
||||
}
|
||||
p.pkgIndex[pkg] = len(p.pkgIndex)
|
||||
|
||||
p.tag(packageTag)
|
||||
p.string(pkg.Name())
|
||||
if emptypath {
|
||||
p.string("")
|
||||
} else {
|
||||
p.string(pkg.Path())
|
||||
}
|
||||
}
|
||||
|
||||
func (p *exporter) obj(obj types.Object) {
|
||||
switch obj := obj.(type) {
|
||||
case *types.Const:
|
||||
p.tag(constTag)
|
||||
p.pos(obj)
|
||||
p.qualifiedName(obj)
|
||||
p.typ(obj.Type())
|
||||
p.value(obj.Val())
|
||||
|
||||
case *types.TypeName:
|
||||
if obj.IsAlias() {
|
||||
p.tag(aliasTag)
|
||||
p.pos(obj)
|
||||
p.qualifiedName(obj)
|
||||
} else {
|
||||
p.tag(typeTag)
|
||||
}
|
||||
p.typ(obj.Type())
|
||||
|
||||
case *types.Var:
|
||||
p.tag(varTag)
|
||||
p.pos(obj)
|
||||
p.qualifiedName(obj)
|
||||
p.typ(obj.Type())
|
||||
|
||||
case *types.Func:
|
||||
p.tag(funcTag)
|
||||
p.pos(obj)
|
||||
p.qualifiedName(obj)
|
||||
sig := obj.Type().(*types.Signature)
|
||||
p.paramList(sig.Params(), sig.Variadic())
|
||||
p.paramList(sig.Results(), false)
|
||||
|
||||
default:
|
||||
panic(internalErrorf("unexpected object %v (%T)", obj, obj))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *exporter) pos(obj types.Object) {
|
||||
if !p.posInfoFormat {
|
||||
return
|
||||
}
|
||||
|
||||
file, line := p.fileLine(obj)
|
||||
if file == p.prevFile {
|
||||
// common case: write line delta
|
||||
// delta == 0 means different file or no line change
|
||||
delta := line - p.prevLine
|
||||
p.int(delta)
|
||||
if delta == 0 {
|
||||
p.int(-1) // -1 means no file change
|
||||
}
|
||||
} else {
|
||||
// different file
|
||||
p.int(0)
|
||||
// Encode filename as length of common prefix with previous
|
||||
// filename, followed by (possibly empty) suffix. Filenames
|
||||
// frequently share path prefixes, so this can save a lot
|
||||
// of space and make export data size less dependent on file
|
||||
// path length. The suffix is unlikely to be empty because
|
||||
// file names tend to end in ".go".
|
||||
n := commonPrefixLen(p.prevFile, file)
|
||||
p.int(n) // n >= 0
|
||||
p.string(file[n:]) // write suffix only
|
||||
p.prevFile = file
|
||||
p.int(line)
|
||||
}
|
||||
p.prevLine = line
|
||||
}
|
||||
|
||||
func (p *exporter) fileLine(obj types.Object) (file string, line int) {
|
||||
if p.fset != nil {
|
||||
pos := p.fset.Position(obj.Pos())
|
||||
file = pos.Filename
|
||||
line = pos.Line
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func commonPrefixLen(a, b string) int {
|
||||
if len(a) > len(b) {
|
||||
a, b = b, a
|
||||
}
|
||||
// len(a) <= len(b)
|
||||
i := 0
|
||||
for i < len(a) && a[i] == b[i] {
|
||||
i++
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func (p *exporter) qualifiedName(obj types.Object) {
|
||||
p.string(obj.Name())
|
||||
p.pkg(obj.Pkg(), false)
|
||||
}
|
||||
|
||||
func (p *exporter) typ(t types.Type) {
|
||||
if t == nil {
|
||||
panic(internalError("nil type"))
|
||||
}
|
||||
|
||||
// Possible optimization: Anonymous pointer types *T where
|
||||
// T is a named type are common. We could canonicalize all
|
||||
// such types *T to a single type PT = *T. This would lead
|
||||
// to at most one *T entry in typIndex, and all future *T's
|
||||
// would be encoded as the respective index directly. Would
|
||||
// save 1 byte (pointerTag) per *T and reduce the typIndex
|
||||
// size (at the cost of a canonicalization map). We can do
|
||||
// this later, without encoding format change.
|
||||
|
||||
// if we saw the type before, write its index (>= 0)
|
||||
if i, ok := p.typIndex[t]; ok {
|
||||
p.index('T', i)
|
||||
return
|
||||
}
|
||||
|
||||
// otherwise, remember the type, write the type tag (< 0) and type data
|
||||
if trackAllTypes {
|
||||
if trace {
|
||||
p.tracef("T%d = {>\n", len(p.typIndex))
|
||||
defer p.tracef("<\n} ")
|
||||
}
|
||||
p.typIndex[t] = len(p.typIndex)
|
||||
}
|
||||
|
||||
switch t := t.(type) {
|
||||
case *types.Named:
|
||||
if !trackAllTypes {
|
||||
// if we don't track all types, track named types now
|
||||
p.typIndex[t] = len(p.typIndex)
|
||||
}
|
||||
|
||||
p.tag(namedTag)
|
||||
p.pos(t.Obj())
|
||||
p.qualifiedName(t.Obj())
|
||||
p.typ(t.Underlying())
|
||||
if !types.IsInterface(t) {
|
||||
p.assocMethods(t)
|
||||
}
|
||||
|
||||
case *types.Array:
|
||||
p.tag(arrayTag)
|
||||
p.int64(t.Len())
|
||||
p.typ(t.Elem())
|
||||
|
||||
case *types.Slice:
|
||||
p.tag(sliceTag)
|
||||
p.typ(t.Elem())
|
||||
|
||||
case *dddSlice:
|
||||
p.tag(dddTag)
|
||||
p.typ(t.elem)
|
||||
|
||||
case *types.Struct:
|
||||
p.tag(structTag)
|
||||
p.fieldList(t)
|
||||
|
||||
case *types.Pointer:
|
||||
p.tag(pointerTag)
|
||||
p.typ(t.Elem())
|
||||
|
||||
case *types.Signature:
|
||||
p.tag(signatureTag)
|
||||
p.paramList(t.Params(), t.Variadic())
|
||||
p.paramList(t.Results(), false)
|
||||
|
||||
case *types.Interface:
|
||||
p.tag(interfaceTag)
|
||||
p.iface(t)
|
||||
|
||||
case *types.Map:
|
||||
p.tag(mapTag)
|
||||
p.typ(t.Key())
|
||||
p.typ(t.Elem())
|
||||
|
||||
case *types.Chan:
|
||||
p.tag(chanTag)
|
||||
p.int(int(3 - t.Dir())) // hack
|
||||
p.typ(t.Elem())
|
||||
|
||||
default:
|
||||
panic(internalErrorf("unexpected type %T: %s", t, t))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *exporter) assocMethods(named *types.Named) {
|
||||
// Sort methods (for determinism).
|
||||
var methods []*types.Func
|
||||
for i := 0; i < named.NumMethods(); i++ {
|
||||
methods = append(methods, named.Method(i))
|
||||
}
|
||||
sort.Sort(methodsByName(methods))
|
||||
|
||||
p.int(len(methods))
|
||||
|
||||
if trace && methods != nil {
|
||||
p.tracef("associated methods {>\n")
|
||||
}
|
||||
|
||||
for i, m := range methods {
|
||||
if trace && i > 0 {
|
||||
p.tracef("\n")
|
||||
}
|
||||
|
||||
p.pos(m)
|
||||
name := m.Name()
|
||||
p.string(name)
|
||||
if !exported(name) {
|
||||
p.pkg(m.Pkg(), false)
|
||||
}
|
||||
|
||||
sig := m.Type().(*types.Signature)
|
||||
p.paramList(types.NewTuple(sig.Recv()), false)
|
||||
p.paramList(sig.Params(), sig.Variadic())
|
||||
p.paramList(sig.Results(), false)
|
||||
p.int(0) // dummy value for go:nointerface pragma - ignored by importer
|
||||
}
|
||||
|
||||
if trace && methods != nil {
|
||||
p.tracef("<\n} ")
|
||||
}
|
||||
}
|
||||
|
||||
type methodsByName []*types.Func
|
||||
|
||||
func (x methodsByName) Len() int { return len(x) }
|
||||
func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }

func (p *exporter) fieldList(t *types.Struct) {
if trace && t.NumFields() > 0 {
p.tracef("fields {>\n")
defer p.tracef("<\n} ")
}

p.int(t.NumFields())
for i := 0; i < t.NumFields(); i++ {
if trace && i > 0 {
p.tracef("\n")
}
p.field(t.Field(i))
p.string(t.Tag(i))
}
}

func (p *exporter) field(f *types.Var) {
if !f.IsField() {
panic(internalError("field expected"))
}

p.pos(f)
p.fieldName(f)
p.typ(f.Type())
}

func (p *exporter) iface(t *types.Interface) {
// TODO(gri): enable importer to load embedded interfaces,
// then emit Embeddeds and ExplicitMethods separately here.
p.int(0)

n := t.NumMethods()
if trace && n > 0 {
p.tracef("methods {>\n")
defer p.tracef("<\n} ")
}
p.int(n)
for i := 0; i < n; i++ {
if trace && i > 0 {
p.tracef("\n")
}
p.method(t.Method(i))
}
}

func (p *exporter) method(m *types.Func) {
sig := m.Type().(*types.Signature)
if sig.Recv() == nil {
panic(internalError("method expected"))
}

p.pos(m)
p.string(m.Name())
if m.Name() != "_" && !token.IsExported(m.Name()) {
p.pkg(m.Pkg(), false)
}

// interface method; no need to encode receiver.
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
}

func (p *exporter) fieldName(f *types.Var) {
name := f.Name()

if f.Anonymous() {
// anonymous field - we distinguish between 3 cases:
// 1) field name matches base type name and is exported
// 2) field name matches base type name and is not exported
// 3) field name doesn't match base type name (alias name)
bname := basetypeName(f.Type())
if name == bname {
if token.IsExported(name) {
name = "" // 1) we don't need to know the field name or package
} else {
name = "?" // 2) use unexported name "?" to force package export
}
} else {
// 3) indicate alias and export name as is
// (this requires an extra "@" but this is a rare case)
p.string("@")
}
}

p.string(name)
if name != "" && !token.IsExported(name) {
p.pkg(f.Pkg(), false)
}
}

func basetypeName(typ types.Type) string {
switch typ := deref(typ).(type) {
case *types.Basic:
return typ.Name()
case *types.Named:
return typ.Obj().Name()
default:
return "" // unnamed type
}
}

func (p *exporter) paramList(params *types.Tuple, variadic bool) {
// use negative length to indicate unnamed parameters
// (look at the first parameter only since either all
// names are present or all are absent)
n := params.Len()
if n > 0 && params.At(0).Name() == "" {
n = -n
}
p.int(n)
for i := 0; i < params.Len(); i++ {
q := params.At(i)
t := q.Type()
if variadic && i == params.Len()-1 {
t = &dddSlice{t.(*types.Slice).Elem()}
}
p.typ(t)
if n > 0 {
name := q.Name()
p.string(name)
if name != "_" {
p.pkg(q.Pkg(), false)
}
}
p.string("") // no compiler-specific info
}
}

func (p *exporter) value(x constant.Value) {
if trace {
p.tracef("= ")
}

switch x.Kind() {
case constant.Bool:
tag := falseTag
if constant.BoolVal(x) {
tag = trueTag
}
p.tag(tag)

case constant.Int:
if v, exact := constant.Int64Val(x); exact {
// common case: x fits into an int64 - use compact encoding
p.tag(int64Tag)
p.int64(v)
return
}
// uncommon case: large x - use float encoding
// (powers of 2 will be encoded efficiently with exponent)
p.tag(floatTag)
p.float(constant.ToFloat(x))

case constant.Float:
p.tag(floatTag)
p.float(x)

case constant.Complex:
p.tag(complexTag)
p.float(constant.Real(x))
p.float(constant.Imag(x))

case constant.String:
p.tag(stringTag)
p.string(constant.StringVal(x))

case constant.Unknown:
// package contains type errors
p.tag(unknownTag)

default:
panic(internalErrorf("unexpected value %v (%T)", x, x))
}
}

func (p *exporter) float(x constant.Value) {
if x.Kind() != constant.Float {
panic(internalErrorf("unexpected constant %v, want float", x))
}
// extract sign (there is no -0)
sign := constant.Sign(x)
if sign == 0 {
// x == 0
p.int(0)
return
}
// x != 0

var f big.Float
if v, exact := constant.Float64Val(x); exact {
// float64
f.SetFloat64(v)
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
// TODO(gri): add big.Rat accessor to constant.Value.
r := valueToRat(num)
f.SetRat(r.Quo(r, valueToRat(denom)))
} else {
// Value too large to represent as a fraction => inaccessible.
// TODO(gri): add big.Float accessor to constant.Value.
f.SetFloat64(math.MaxFloat64) // FIXME
}

// extract exponent such that 0.5 <= m < 1.0
var m big.Float
exp := f.MantExp(&m)

// extract mantissa as *big.Int
// - set exponent large enough so mant satisfies mant.IsInt()
// - get *big.Int from mant
m.SetMantExp(&m, int(m.MinPrec()))
mant, acc := m.Int(nil)
if acc != big.Exact {
panic(internalError("internal error"))
}

p.int(sign)
p.int(exp)
p.string(string(mant.Bytes()))
}

func valueToRat(x constant.Value) *big.Rat {
// Convert little-endian to big-endian.
// I can't believe this is necessary.
bytes := constant.Bytes(x)
for i := 0; i < len(bytes)/2; i++ {
bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
}
return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
}

func (p *exporter) bool(b bool) bool {
if trace {
p.tracef("[")
defer p.tracef("= %v] ", b)
}

x := 0
if b {
x = 1
}
p.int(x)
return b
}

// ----------------------------------------------------------------------------
// Low-level encoders

func (p *exporter) index(marker byte, index int) {
if index < 0 {
panic(internalError("invalid index < 0"))
}
if debugFormat {
p.marker('t')
}
if trace {
p.tracef("%c%d ", marker, index)
}
p.rawInt64(int64(index))
}

func (p *exporter) tag(tag int) {
if tag >= 0 {
panic(internalError("invalid tag >= 0"))
}
if debugFormat {
p.marker('t')
}
if trace {
p.tracef("%s ", tagString[-tag])
}
p.rawInt64(int64(tag))
}

func (p *exporter) int(x int) {
p.int64(int64(x))
}

func (p *exporter) int64(x int64) {
if debugFormat {
p.marker('i')
}
if trace {
p.tracef("%d ", x)
}
p.rawInt64(x)
}

func (p *exporter) string(s string) {
if debugFormat {
p.marker('s')
}
if trace {
p.tracef("%q ", s)
}
// if we saw the string before, write its index (>= 0)
// (the empty string is mapped to 0)
if i, ok := p.strIndex[s]; ok {
p.rawInt64(int64(i))
return
}
// otherwise, remember string and write its negative length and bytes
p.strIndex[s] = len(p.strIndex)
p.rawInt64(-int64(len(s)))
for i := 0; i < len(s); i++ {
p.rawByte(s[i])
}
}

// marker emits a marker byte and position information which makes
// it easy for a reader to detect if it is "out of sync". Used for
// debugFormat format only.
func (p *exporter) marker(m byte) {
p.rawByte(m)
// Enable this for help tracking down the location
// of an incorrect marker when running in debugFormat.
if false && trace {
p.tracef("#%d ", p.written)
}
p.rawInt64(int64(p.written))
}

// rawInt64 should only be used by low-level encoders.
func (p *exporter) rawInt64(x int64) {
var tmp [binary.MaxVarintLen64]byte
n := binary.PutVarint(tmp[:], x)
for i := 0; i < n; i++ {
p.rawByte(tmp[i])
}
}

// rawStringln should only be used to emit the initial version string.
func (p *exporter) rawStringln(s string) {
for i := 0; i < len(s); i++ {
p.rawByte(s[i])
}
p.rawByte('\n')
}

// rawByte is the bottleneck interface to write to p.out.
// rawByte escapes b as follows (any encoding does that
// hides '$'):
//
// '$' => '|' 'S'
// '|' => '|' '|'
//
// Necessary so other tools can find the end of the
// export data by searching for "$$".
// rawByte should only be used by low-level encoders.
func (p *exporter) rawByte(b byte) {
switch b {
case '$':
// write '$' as '|' 'S'
b = 'S'
fallthrough
case '|':
// write '|' as '|' '|'
p.out.WriteByte('|')
p.written++
}
p.out.WriteByte(b)
p.written++
}

// tracef is like fmt.Printf but it rewrites the format string
// to take care of indentation.
func (p *exporter) tracef(format string, args ...interface{}) {
if strings.ContainsAny(format, "<>\n") {
var buf bytes.Buffer
for i := 0; i < len(format); i++ {
// no need to deal with runes
ch := format[i]
switch ch {
case '>':
p.indent++
continue
case '<':
p.indent--
continue
}
buf.WriteByte(ch)
if ch == '\n' {
for j := p.indent; j > 0; j-- {
buf.WriteString(". ")
}
}
}
format = buf.String()
}
fmt.Printf(format, args...)
}

// Debugging support.
// (tagString is only used when tracing is enabled)
var tagString = [...]string{
// Packages
-packageTag: "package",

// Types
-namedTag: "named type",
-arrayTag: "array",
-sliceTag: "slice",
-dddTag: "ddd",
-structTag: "struct",
-pointerTag: "pointer",
-signatureTag: "signature",
-interfaceTag: "interface",
-mapTag: "map",
-chanTag: "chan",

// Values
-falseTag: "false",
-trueTag: "true",
-int64Tag: "int64",
-floatTag: "float",
-fractionTag: "fraction",
-complexTag: "complex",
-stringTag: "string",
-unknownTag: "unknown",

// Type aliases
-aliasTag: "alias",
}
907 vendor/golang.org/x/tools/internal/gcimporter/bimport.go generated vendored
@ -2,340 +2,24 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go.
// This file contains the remaining vestiges of
// $GOROOT/src/go/internal/gcimporter/bimport.go.

package gcimporter

import (
"encoding/binary"
"fmt"
"go/constant"
"go/token"
"go/types"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)

type importer struct {
imports map[string]*types.Package
data []byte
importpath string
buf []byte // for reading strings
version int // export format version

// object lists
strList []string // in order of appearance
pathList []string // in order of appearance
pkgList []*types.Package // in order of appearance
typList []types.Type // in order of appearance
interfaceList []*types.Interface // for delayed completion only
trackAllTypes bool

// position encoding
posInfoFormat bool
prevFile string
prevLine int
fake fakeFileSet

// debugging support
debugFormat bool
read int // bytes read
}

// BImportData imports a package from the serialized package data
// and returns the number of bytes consumed and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
// catch panics and return them as errors
const currentVersion = 6
version := -1 // unknown version
defer func() {
if e := recover(); e != nil {
// Return a (possibly nil or incomplete) package unchanged (see #16088).
if version > currentVersion {
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
} else {
err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
}
}
}()

p := importer{
imports: imports,
data: data,
importpath: path,
version: version,
strList: []string{""}, // empty string is mapped to 0
pathList: []string{""}, // empty string is mapped to 0
fake: fakeFileSet{
fset: fset,
files: make(map[string]*fileInfo),
},
}
defer p.fake.setLines() // set lines for files in fset

// read version info
var versionstr string
if b := p.rawByte(); b == 'c' || b == 'd' {
// Go1.7 encoding; first byte encodes low-level
// encoding format (compact vs debug).
// For backward-compatibility only (avoid problems with
// old installed packages). Newly compiled packages use
// the extensible format string.
// TODO(gri) Remove this support eventually; after Go1.8.
if b == 'd' {
p.debugFormat = true
}
p.trackAllTypes = p.rawByte() == 'a'
p.posInfoFormat = p.int() != 0
versionstr = p.string()
if versionstr == "v1" {
version = 0
}
} else {
// Go1.8 extensible encoding
// read version string and extract version number (ignore anything after the version number)
versionstr = p.rawStringln(b)
if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
version = v
}
}
}
p.version = version

// read version specific flags - extend as necessary
switch p.version {
// case currentVersion:
// ...
// fallthrough
case currentVersion, 5, 4, 3, 2, 1:
p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
p.trackAllTypes = p.int() != 0
p.posInfoFormat = p.int() != 0
case 0:
// Go1.7 encoding format - nothing to do here
default:
errorf("unknown bexport format version %d (%q)", p.version, versionstr)
}

// --- generic export data ---

// populate typList with predeclared "known" types
p.typList = append(p.typList, predeclared()...)

// read package data
pkg = p.pkg()

// read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
objcount := 0
for {
tag := p.tagOrIndex()
if tag == endTag {
break
}
p.obj(tag)
objcount++
}

// self-verification
if count := p.int(); count != objcount {
errorf("got %d objects; want %d", objcount, count)
}

// ignore compiler-specific import data

// complete interfaces
// TODO(gri) re-investigate if we still need to do this in a delayed fashion
for _, typ := range p.interfaceList {
typ.Complete()
}

// record all referenced packages as imports
list := append(([]*types.Package)(nil), p.pkgList[1:]...)
sort.Sort(byPath(list))
pkg.SetImports(list)

// package was imported completely and without errors
pkg.MarkComplete()

return p.read, pkg, nil
}

func errorf(format string, args ...interface{}) {
panic(fmt.Sprintf(format, args...))
}

func (p *importer) pkg() *types.Package {
// if the package was seen before, i is its index (>= 0)
i := p.tagOrIndex()
if i >= 0 {
return p.pkgList[i]
}

// otherwise, i is the package tag (< 0)
if i != packageTag {
errorf("unexpected package tag %d version %d", i, p.version)
}

// read package data
name := p.string()
var path string
if p.version >= 5 {
path = p.path()
} else {
path = p.string()
}
if p.version >= 6 {
p.int() // package height; unused by go/types
}

// we should never see an empty package name
if name == "" {
errorf("empty package name in import")
}

// an empty path denotes the package we are currently importing;
// it must be the first package we see
if (path == "") != (len(p.pkgList) == 0) {
errorf("package path %q for pkg index %d", path, len(p.pkgList))
}

// if the package was imported before, use that one; otherwise create a new one
if path == "" {
path = p.importpath
}
pkg := p.imports[path]
if pkg == nil {
pkg = types.NewPackage(path, name)
p.imports[path] = pkg
} else if pkg.Name() != name {
errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path)
}
p.pkgList = append(p.pkgList, pkg)

return pkg
}

// objTag returns the tag value for each object kind.
func objTag(obj types.Object) int {
switch obj.(type) {
case *types.Const:
return constTag
case *types.TypeName:
return typeTag
case *types.Var:
return varTag
case *types.Func:
return funcTag
default:
errorf("unexpected object: %v (%T)", obj, obj) // panics
panic("unreachable")
}
}

func sameObj(a, b types.Object) bool {
// Because unnamed types are not canonicalized, we cannot simply compare types for
// (pointer) identity.
// Ideally we'd check equality of constant values as well, but this is good enough.
return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type())
}

func (p *importer) declare(obj types.Object) {
pkg := obj.Pkg()
if alt := pkg.Scope().Insert(obj); alt != nil {
// This can only trigger if we import a (non-type) object a second time.
// Excluding type aliases, this cannot happen because 1) we only import a package
// once; and b) we ignore compiler-specific export data which may contain
// functions whose inlined function bodies refer to other functions that
// were already imported.
// However, type aliases require reexporting the original type, so we need
// to allow it (see also the comment in cmd/compile/internal/gc/bimport.go,
// method importer.obj, switch case importing functions).
// TODO(gri) review/update this comment once the gc compiler handles type aliases.
if !sameObj(obj, alt) {
errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt)
}
}
}

func (p *importer) obj(tag int) {
switch tag {
case constTag:
pos := p.pos()
pkg, name := p.qualifiedName()
typ := p.typ(nil, nil)
val := p.value()
p.declare(types.NewConst(pos, pkg, name, typ, val))

case aliasTag:
// TODO(gri) verify type alias hookup is correct
pos := p.pos()
pkg, name := p.qualifiedName()
typ := p.typ(nil, nil)
p.declare(types.NewTypeName(pos, pkg, name, typ))

case typeTag:
p.typ(nil, nil)

case varTag:
pos := p.pos()
pkg, name := p.qualifiedName()
typ := p.typ(nil, nil)
p.declare(types.NewVar(pos, pkg, name, typ))

case funcTag:
pos := p.pos()
pkg, name := p.qualifiedName()
params, isddd := p.paramList()
result, _ := p.paramList()
sig := types.NewSignature(nil, params, result, isddd)
p.declare(types.NewFunc(pos, pkg, name, sig))

default:
errorf("unexpected object tag %d", tag)
}
}

const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go

func (p *importer) pos() token.Pos {
if !p.posInfoFormat {
return token.NoPos
}

file := p.prevFile
line := p.prevLine
delta := p.int()
line += delta
if p.version >= 5 {
if delta == deltaNewFile {
if n := p.int(); n >= 0 {
// file changed
file = p.path()
line = n
}
}
} else {
if delta == 0 {
if n := p.int(); n >= 0 {
// file changed
file = p.prevFile[:n] + p.string()
line = p.int()
}
}
}
p.prevFile = file
p.prevLine = line

return p.fake.pos(file, line, 0)
}

// Synthesize a token.Pos
type fakeFileSet struct {
fset *token.FileSet
@ -389,205 +73,6 @@ var (
fakeLinesOnce sync.Once
)

func (p *importer) qualifiedName() (pkg *types.Package, name string) {
name = p.string()
pkg = p.pkg()
return
}

func (p *importer) record(t types.Type) {
p.typList = append(p.typList, t)
}

// A dddSlice is a types.Type representing ...T parameters.
// It only appears for parameter types and does not escape
// the importer.
type dddSlice struct {
elem types.Type
}

func (t *dddSlice) Underlying() types.Type { return t }
func (t *dddSlice) String() string { return "..." + t.elem.String() }

// parent is the package which declared the type; parent == nil means
// the package currently imported. The parent package is needed for
// exported struct fields and interface methods which don't contain
// explicit package information in the export data.
//
// A non-nil tname is used as the "owner" of the result type; i.e.,
// the result type is the underlying type of tname. tname is used
// to give interface methods a named receiver type where possible.
func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type {
// if the type was seen before, i is its index (>= 0)
i := p.tagOrIndex()
if i >= 0 {
return p.typList[i]
}

// otherwise, i is the type tag (< 0)
switch i {
case namedTag:
// read type object
pos := p.pos()
parent, name := p.qualifiedName()
scope := parent.Scope()
obj := scope.Lookup(name)

// if the object doesn't exist yet, create and insert it
if obj == nil {
obj = types.NewTypeName(pos, parent, name, nil)
scope.Insert(obj)
}

if _, ok := obj.(*types.TypeName); !ok {
errorf("pkg = %s, name = %s => %s", parent, name, obj)
}

// associate new named type with obj if it doesn't exist yet
t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)

// but record the existing type, if any
tname := obj.Type().(*types.Named) // tname is either t0 or the existing type
p.record(tname)

// read underlying type
t0.SetUnderlying(p.typ(parent, t0))

// interfaces don't have associated methods
if types.IsInterface(t0) {
return tname
}

// read associated methods
for i := p.int(); i > 0; i-- {
// TODO(gri) replace this with something closer to fieldName
pos := p.pos()
name := p.string()
if !exported(name) {
p.pkg()
}

recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver?
params, isddd := p.paramList()
result, _ := p.paramList()
p.int() // go:nointerface pragma - discarded

sig := types.NewSignature(recv.At(0), params, result, isddd)
t0.AddMethod(types.NewFunc(pos, parent, name, sig))
}

return tname

case arrayTag:
t := new(types.Array)
if p.trackAllTypes {
p.record(t)
}

n := p.int64()
*t = *types.NewArray(p.typ(parent, nil), n)
return t

case sliceTag:
t := new(types.Slice)
if p.trackAllTypes {
p.record(t)
}

*t = *types.NewSlice(p.typ(parent, nil))
return t

case dddTag:
t := new(dddSlice)
if p.trackAllTypes {
p.record(t)
}

t.elem = p.typ(parent, nil)
return t

case structTag:
t := new(types.Struct)
if p.trackAllTypes {
p.record(t)
}

*t = *types.NewStruct(p.fieldList(parent))
return t

case pointerTag:
t := new(types.Pointer)
if p.trackAllTypes {
p.record(t)
}

*t = *types.NewPointer(p.typ(parent, nil))
return t

case signatureTag:
t := new(types.Signature)
if p.trackAllTypes {
p.record(t)
}

params, isddd := p.paramList()
result, _ := p.paramList()
*t = *types.NewSignature(nil, params, result, isddd)
return t

case interfaceTag:
// Create a dummy entry in the type list. This is safe because we
// cannot expect the interface type to appear in a cycle, as any
// such cycle must contain a named type which would have been
// first defined earlier.
// TODO(gri) Is this still true now that we have type aliases?
// See issue #23225.
n := len(p.typList)
if p.trackAllTypes {
p.record(nil)
}

var embeddeds []types.Type
for n := p.int(); n > 0; n-- {
p.pos()
embeddeds = append(embeddeds, p.typ(parent, nil))
}

t := newInterface(p.methodList(parent, tname), embeddeds)
p.interfaceList = append(p.interfaceList, t)
if p.trackAllTypes {
p.typList[n] = t
}
return t

case mapTag:
t := new(types.Map)
if p.trackAllTypes {
p.record(t)
}

key := p.typ(parent, nil)
val := p.typ(parent, nil)
*t = *types.NewMap(key, val)
return t

case chanTag:
t := new(types.Chan)
if p.trackAllTypes {
p.record(t)
}

dir := chanDir(p.int())
val := p.typ(parent, nil)
*t = *types.NewChan(dir, val)
return t

default:
errorf("unexpected type tag %d", i) // panics
panic("unreachable")
}
}

func chanDir(d int) types.ChanDir {
// tag values must match the constants in cmd/compile/internal/gc/go.go
switch d {
@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir {
}
}

func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
if n := p.int(); n > 0 {
fields = make([]*types.Var, n)
tags = make([]string, n)
for i := range fields {
fields[i], tags[i] = p.field(parent)
}
}
return
}

func (p *importer) field(parent *types.Package) (*types.Var, string) {
pos := p.pos()
pkg, name, alias := p.fieldName(parent)
typ := p.typ(parent, nil)
tag := p.string()

anonymous := false
if name == "" {
// anonymous field - typ must be T or *T and T must be a type name
switch typ := deref(typ).(type) {
case *types.Basic: // basic types are named types
pkg = nil // // objects defined in Universe scope have no package
name = typ.Name()
case *types.Named:
name = typ.Obj().Name()
default:
errorf("named base type expected")
}
anonymous = true
} else if alias {
// anonymous field: we have an explicit name because it's an alias
anonymous = true
}

return types.NewField(pos, pkg, name, typ, anonymous), tag
}

func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) {
if n := p.int(); n > 0 {
methods = make([]*types.Func, n)
for i := range methods {
methods[i] = p.method(parent, baseType)
}
}
return
}

func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func {
pos := p.pos()
pkg, name, _ := p.fieldName(parent)
// If we don't have a baseType, use a nil receiver.
// A receiver using the actual interface type (which
// we don't know yet) will be filled in when we call
// types.Interface.Complete.
var recv *types.Var
if baseType != nil {
recv = types.NewVar(token.NoPos, parent, "", baseType)
}
params, isddd := p.paramList()
result, _ := p.paramList()
sig := types.NewSignature(recv, params, result, isddd)
return types.NewFunc(pos, pkg, name, sig)
}

func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) {
name = p.string()
pkg = parent
if pkg == nil {
// use the imported package instead
pkg = p.pkgList[0]
}
if p.version == 0 && name == "_" {
// version 0 didn't export a package for _ fields
return
}
switch name {
case "":
// 1) field name matches base type name and is exported: nothing to do
case "?":
// 2) field name matches base type name and is not exported: need package
name = ""
pkg = p.pkg()
case "@":
// 3) field name doesn't match type name (alias)
name = p.string()
alias = true
fallthrough
default:
if !exported(name) {
pkg = p.pkg()
}
}
return
}

func (p *importer) paramList() (*types.Tuple, bool) {
n := p.int()
if n == 0 {
return nil, false
}
// negative length indicates unnamed parameters
named := true
if n < 0 {
n = -n
named = false
}
// n > 0
params := make([]*types.Var, n)
isddd := false
for i := range params {
params[i], isddd = p.param(named)
}
return types.NewTuple(params...), isddd
}

func (p *importer) param(named bool) (*types.Var, bool) {
t := p.typ(nil, nil)
td, isddd := t.(*dddSlice)
if isddd {
t = types.NewSlice(td.elem)
}

var pkg *types.Package
var name string
if named {
name = p.string()
if name == "" {
errorf("expected named parameter")
}
if name != "_" {
pkg = p.pkg()
}
if i := strings.Index(name, "·"); i > 0 {
name = name[:i] // cut off gc-specific parameter numbering
}
}

// read and discard compiler-specific info
p.string()

return types.NewVar(token.NoPos, pkg, name, t), isddd
}

func exported(name string) bool {
ch, _ := utf8.DecodeRuneInString(name)
return unicode.IsUpper(ch)
}

func (p *importer) value() constant.Value {
switch tag := p.tagOrIndex(); tag {
case falseTag:
return constant.MakeBool(false)
case trueTag:
return constant.MakeBool(true)
case int64Tag:
return constant.MakeInt64(p.int64())
case floatTag:
return p.float()
case complexTag:
re := p.float()
im := p.float()
return constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
case stringTag:
return constant.MakeString(p.string())
case unknownTag:
return constant.MakeUnknown()
default:
errorf("unexpected value tag %d", tag) // panics
panic("unreachable")
}
}

func (p *importer) float() constant.Value {
sign := p.int()
if sign == 0 {
return constant.MakeInt64(0)
}

exp := p.int()
mant := []byte(p.string()) // big endian

// remove leading 0's if any
for len(mant) > 0 && mant[0] == 0 {
mant = mant[1:]
}

// convert to little endian
// TODO(gri) go/constant should have a more direct conversion function
// (e.g., once it supports a big.Float based implementation)
for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 {
mant[i], mant[j] = mant[j], mant[i]
}

// adjust exponent (constant.MakeFromBytes creates an integer value,
// but mant represents the mantissa bits such that 0.5 <= mant < 1.0)
exp -= len(mant) << 3
if len(mant) > 0 {
for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 {
exp++
}
}

x := constant.MakeFromBytes(mant)
switch {
case exp < 0:
d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
x = constant.BinaryOp(x, token.QUO, d)
case exp > 0:
x = constant.Shift(x, token.SHL, uint(exp))
}

if sign < 0 {
x = constant.UnaryOp(token.SUB, x, 0)
}
return x
}

// ----------------------------------------------------------------------------
// Low-level decoders

func (p *importer) tagOrIndex() int {
if p.debugFormat {
p.marker('t')
}

return int(p.rawInt64())
}

func (p *importer) int() int {
x := p.int64()
if int64(int(x)) != x {
errorf("exported integer too large")
}
return int(x)
}

func (p *importer) int64() int64 {
if p.debugFormat {
p.marker('i')
}

return p.rawInt64()
}

func (p *importer) path() string {
if p.debugFormat {
p.marker('p')
}
// if the path was seen before, i is its index (>= 0)
// (the empty string is at index 0)
i := p.rawInt64()
if i >= 0 {
return p.pathList[i]
}
// otherwise, i is the negative path length (< 0)
a := make([]string, -i)
for n := range a {
a[n] = p.string()
}
s := strings.Join(a, "/")
p.pathList = append(p.pathList, s)
return s
}

func (p *importer) string() string {
if p.debugFormat {
p.marker('s')
}
// if the string was seen before, i is its index (>= 0)
// (the empty string is at index 0)
i := p.rawInt64()
if i >= 0 {
return p.strList[i]
}
// otherwise, i is the negative string length (< 0)
if n := int(-i); n <= cap(p.buf) {
p.buf = p.buf[:n]
} else {
p.buf = make([]byte, n)
}
for i := range p.buf {
p.buf[i] = p.rawByte()
}
s := string(p.buf)
p.strList = append(p.strList, s)
return s
}

func (p *importer) marker(want byte) {
if got := p.rawByte(); got != want {
errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
}

pos := p.read
if n := int(p.rawInt64()); n != pos {
errorf("incorrect position: got %d; want %d", n, pos)
}
}

// rawInt64 should only be used by low-level decoders.
func (p *importer) rawInt64() int64 {
i, err := binary.ReadVarint(p)
if err != nil {
errorf("read error: %v", err)
}
return i
}

// rawStringln should only be used to read the initial version string.
func (p *importer) rawStringln(b byte) string {
p.buf = p.buf[:0]
for b != '\n' {
p.buf = append(p.buf, b)
b = p.rawByte()
}
return string(p.buf)
}

// needed for binary.ReadVarint in rawInt64
func (p *importer) ReadByte() (byte, error) {
return p.rawByte(), nil
}

// byte is the bottleneck interface for reading p.data.
// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
// rawByte should only be used by low-level decoders.
func (p *importer) rawByte() byte {
b := p.data[0]
r := 1
if b == '|' {
b = p.data[1]
r = 2
switch b {
case 'S':
b = '$'
case '|':
// nothing to do
default:
errorf("unexpected escape sequence in export data")
}
}
p.data = p.data[r:]
p.read += r
return b

}

// ----------------------------------------------------------------------------
// Export format

// Tags. Must be < 0.
const (
// Objects
packageTag = -(iota + 1)
constTag
typeTag
varTag
funcTag
endTag

// Types
namedTag
arrayTag
sliceTag
dddTag
structTag
pointerTag
signatureTag
interfaceTag
mapTag
chanTag

// Values
falseTag
trueTag
int64Tag
floatTag
fractionTag // not used by gc
complexTag
stringTag
nilTag // only used by gc (appears in exported inlined function bodies)
unknownTag // not used by gc (only appears in packages with errors)

// Type aliases
aliasTag
)

var predeclOnce sync.Once
var predecl []types.Type // initialized lazily
15 vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go generated vendored
@ -230,20 +230,17 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()

// The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
// Select appropriate importer.
if len(data) > 0 {
switch data[0] {
case 'i':
case 'v', 'c', 'd': // binary, till go1.10
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])

case 'i': // indexed, till go1.19
_, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err

case 'v', 'c', 'd':
_, pkg, err := BImportData(fset, packages, data, id)
return pkg, err

case 'u':
case 'u': // unified, from go1.20
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err
19 vendor/golang.org/x/tools/internal/gcimporter/iexport.go generated vendored
@ -969,6 +969,16 @@ func constantToFloat(x constant.Value) *big.Float {
return &f
}

func valueToRat(x constant.Value) *big.Rat {
// Convert little-endian to big-endian.
// I can't believe this is necessary.
bytes := constant.Bytes(x)
for i := 0; i < len(bytes)/2; i++ {
bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
}
return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
}

// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
@ -1178,3 +1188,12 @@ func (q *objQueue) popHead() types.Object {
q.head++
return obj
}

// internalError represents an error generated inside this package.
type internalError string

func (e internalError) Error() string { return "gcimporter: " + string(e) }

func internalErrorf(format string, args ...interface{}) error {
return internalError(fmt.Sprintf(format, args...))
}
9 vendor/golang.org/x/tools/internal/gcimporter/iimport.go generated vendored
@ -131,7 +131,7 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte,
} else if version > currentVersion {
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
} else {
err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e)
}
}
}()
@ -140,11 +140,8 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte,
r := &intReader{bytes.NewReader(data), path}

if bundle {
bundleVersion := r.uint64()
switch bundleVersion {
case bundleVersion:
default:
errorf("unknown bundle format version %d", bundleVersion)
if v := r.uint64(); v != bundleVersion {
errorf("unknown bundle format version %d", v)
}
}
9 vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go generated vendored
@ -10,6 +10,7 @@
package gcimporter

import (
"fmt"
"go/token"
"go/types"
"sort"
@ -63,6 +64,14 @@ type typeInfo struct {
}

func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
if !debug {
defer func() {
if x := recover(); x != nil {
err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
}
}()
}

s := string(data)
s = s[:strings.LastIndex(s, "\n$$\n")]
input := pkgbits.NewPkgDecoder(path, s)
146 vendor/golang.org/x/tools/internal/gocommand/invoke.go generated vendored
@ -8,10 +8,12 @@ package gocommand
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"log"
"os"
"reflect"
"regexp"
"runtime"
"strconv"
@ -22,6 +24,9 @@ import (
exec "golang.org/x/sys/execabs"

"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/event/label"
"golang.org/x/tools/internal/event/tag"
)

// An Runner will run go command invocations and serialize
@ -51,9 +56,19 @@ func (runner *Runner) initialize() {
// 1.14: go: updating go.mod: existing contents have changed since last read
var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)

// verb is an event label for the go command verb.
var verb = keys.NewString("verb", "go command verb")

func invLabels(inv Invocation) []label.Label {
return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)}
}

// Run is a convenience wrapper around RunRaw.
// It returns only stdout and a "friendly" error.
func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
defer done()

stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
return stdout, friendly
}
@ -61,6 +76,9 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e
// RunPiped runs the invocation serially, always waiting for any concurrent
// invocations to complete first.
func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
defer done()

_, err := runner.runPiped(ctx, inv, stdout, stderr)
return err
}
@ -68,6 +86,8 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde
// RunRaw runs the invocation, serializing requests only if they fight over
// go.mod changes.
func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...)
defer done()
// Make sure the runner is always initialized.
runner.initialize()

@ -215,6 +235,18 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
cmd := exec.Command("go", goArgs...)
cmd.Stdout = stdout
cmd.Stderr = stderr

// cmd.WaitDelay was added only in go1.20 (see #50436).
if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
// https://go.dev/issue/59541: don't wait forever copying stderr
// after the command has exited.
// After CL 484741 we copy stdout manually, so we we'll stop reading that as
// soon as ctx is done. However, we also don't want to wait around forever
// for stderr. Give a much-longer-than-reasonable delay and then assume that
// something has wedged in the kernel or runtime.
waitDelay.Set(reflect.ValueOf(30 * time.Second))
}

// On darwin the cwd gets resolved to the real path, which breaks anything that
// expects the working directory to keep the original path, including the
// go command when dealing with modules.
@ -229,6 +261,7 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
cmd.Dir = i.WorkingDir
}

defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())

return runCmdContext(ctx, cmd)
@ -242,10 +275,85 @@ var DebugHangingGoCommands = false

// runCmdContext is like exec.CommandContext except it sends os.Interrupt
// before os.Kill.
func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
if err := cmd.Start(); err != nil {
func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
// If cmd.Stdout is not an *os.File, the exec package will create a pipe and
// copy it to the Writer in a goroutine until the process has finished and
// either the pipe reaches EOF or command's WaitDelay expires.
//
// However, the output from 'go list' can be quite large, and we don't want to
// keep reading (and allocating buffers) if we've already decided we don't
// care about the output. We don't want to wait for the process to finish, and
// we don't wait to wait for the WaitDelay to expire either.
//
// Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
// it with a pipe (which is an *os.File), which we can close in order to stop
// copying output as soon as we realize we don't care about it.
var stdoutW *os.File
if cmd.Stdout != nil {
if _, ok := cmd.Stdout.(*os.File); !ok {
var stdoutR *os.File
stdoutR, stdoutW, err = os.Pipe()
if err != nil {
return err
}
prevStdout := cmd.Stdout
cmd.Stdout = stdoutW

stdoutErr := make(chan error, 1)
go func() {
_, err := io.Copy(prevStdout, stdoutR)
if err != nil {
err = fmt.Errorf("copying stdout: %w", err)
}
stdoutErr <- err
}()
defer func() {
// We started a goroutine to copy a stdout pipe.
// Wait for it to finish, or terminate it if need be.
var err2 error
select {
case err2 = <-stdoutErr:
stdoutR.Close()
case <-ctx.Done():
stdoutR.Close()
// Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
// should cause the Read call in io.Copy to unblock and return
// immediately, but we still need to receive from stdoutErr to confirm
// that that has happened.
<-stdoutErr
err2 = ctx.Err()
}
if err == nil {
err = err2
}
}()

// Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the
// same writer, and have a type that can be compared with ==, at most
// one goroutine at a time will call Write.”
//
// Since we're starting a goroutine that writes to cmd.Stdout, we must
// also update cmd.Stderr so that that still holds.
func() {
defer func() { recover() }()
if cmd.Stderr == prevStdout {
cmd.Stderr = cmd.Stdout
}
}()
}
}

err = cmd.Start()
if stdoutW != nil {
// The child process has inherited the pipe file,
// so close the copy held in this process.
stdoutW.Close()
stdoutW = nil
}
if err != nil {
return err
}

resChan := make(chan error, 1)
go func() {
resChan <- cmd.Wait()
@ -253,11 +361,14 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {

// If we're interested in debugging hanging Go commands, stop waiting after a
// minute and panic with interesting information.
if DebugHangingGoCommands {
debug := DebugHangingGoCommands
if debug {
timer := time.NewTimer(1 * time.Minute)
defer timer.Stop()
select {
case err := <-resChan:
return err
case <-time.After(1 * time.Minute):
case <-timer.C:
HandleHangingGoCommand(cmd.Process)
case <-ctx.Done():
}
@ -270,30 +381,25 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
}

// Cancelled. Interrupt and see if it ends voluntarily.
cmd.Process.Signal(os.Interrupt)
select {
case err := <-resChan:
return err
case <-time.After(time.Second):
if err := cmd.Process.Signal(os.Interrupt); err == nil {
// (We used to wait only 1s but this proved
// fragile on loaded builder machines.)
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case err := <-resChan:
return err
case <-timer.C:
}
}

// Didn't shut down in response to interrupt. Kill it hard.
// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
// on certain platforms, such as unix.
if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands {
// Don't panic here as this reliably fails on windows with EINVAL.
if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
log.Printf("error killing the Go command: %v", err)
}

// See above: don't wait indefinitely if we're debugging hanging Go commands.
if DebugHangingGoCommands {
select {
case err := <-resChan:
return err
case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill
HandleHangingGoCommand(cmd.Process)
}
}
return <-resChan
}
18 vendor/golang.org/x/tools/internal/gocommand/version.go generated vendored
@ -23,21 +23,11 @@ import (
func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
inv.Verb = "list"
inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`}
inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off")
// Unset any unneeded flags, and remove them from BuildFlags, if they're
// present.
inv.ModFile = ""
inv.BuildFlags = nil // This is not a build command.
inv.ModFlag = ""
var buildFlags []string
for _, flag := range inv.BuildFlags {
// Flags can be prefixed by one or two dashes.
f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-")
if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") {
continue
}
buildFlags = append(buildFlags, flag)
}
inv.BuildFlags = buildFlags
inv.ModFile = ""
inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off")

stdoutBytes, err := r.Run(ctx, inv)
if err != nil {
return 0, err
9 vendor/golang.org/x/tools/internal/typesinternal/types.go generated vendored
@ -11,8 +11,6 @@ import (
"go/types"
"reflect"
"unsafe"

"golang.org/x/tools/go/types/objectpath"
)

func SetUsesCgo(conf *types.Config) bool {
@ -52,10 +50,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
}

var SetGoVersion = func(conf *types.Config, version string) bool { return false }

// NewObjectpathEncoder returns a function closure equivalent to
// objectpath.For but amortized for multiple (sequential) calls.
// It is a temporary workaround, pending the approval of proposal 58668.
//
//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor
func NewObjectpathFunc() func(types.Object) (objectpath.Path, error)
16 vendor/modules.txt vendored
@ -1,8 +1,6 @@
# dario.cat/mergo v1.0.0
## explicit; go 1.13
dario.cat/mergo
# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161
## explicit; go 1.16
# github.com/BurntSushi/toml v1.3.2
## explicit; go 1.16
github.com/BurntSushi/toml
@ -17,7 +15,7 @@ github.com/Microsoft/go-winio/internal/stringbuffer
github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/tools/mkwinsyscall
github.com/Microsoft/go-winio/vhd
# github.com/Microsoft/hcsshim v0.10.0-rc.7
# github.com/Microsoft/hcsshim v0.10.0-rc.8
## explicit; go 1.18
github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/computestorage
@ -56,11 +54,15 @@ github.com/asaskevich/govalidator
# github.com/containerd/cgroups v1.1.0
## explicit; go 1.17
github.com/containerd/cgroups/stats/v1
# github.com/containerd/containerd v1.7.2
## explicit; go 1.19
github.com/containerd/containerd/errdefs
github.com/containerd/containerd/log
# github.com/containerd/stargz-snapshotter/estargz v0.14.3
## explicit; go 1.19
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
# github.com/containers/common v0.53.0
# github.com/containers/common v0.54.0
## explicit; go 1.18
github.com/containers/common/pkg/auth
github.com/containers/common/pkg/capabilities
@ -340,8 +342,6 @@ github.com/google/go-containerregistry/pkg/name
# github.com/google/go-intervals v0.0.2
## explicit; go 1.12
github.com/google/go-intervals/intervalset
# github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c
## explicit; go 1.18
# github.com/google/uuid v1.3.0
## explicit
github.com/google/uuid
@ -655,17 +655,17 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/tools v0.8.0
# golang.org/x/tools v0.9.3
## explicit; go 1.18
golang.org/x/tools/cmd/stringer
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/internal/packagesdriver
golang.org/x/tools/go/packages
golang.org/x/tools/go/types/objectpath
golang.org/x/tools/internal/event
golang.org/x/tools/internal/event/core
golang.org/x/tools/internal/event/keys
golang.org/x/tools/internal/event/label
golang.org/x/tools/internal/event/tag
golang.org/x/tools/internal/gcimporter
golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/packagesinternal