Mirror of https://github.com/k8snetworkplumbingwg/multus-cni.git (synced 2025-07-05 11:56:15 +00:00)
Bump ginkgo to v2
This commit bumps ginkgo to version v2.5.1.
parent 344fb0ae09
commit 048438e0ef
19 go.mod
@@ -9,12 +9,12 @@ require (
github.com/fsnotify/fsnotify v1.6.0
github.com/go-logr/logr v1.2.3 // indirect
github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.3.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.17.0
github.com/onsi/ginkgo/v2 v2.5.1
github.com/onsi/gomega v1.24.0
github.com/pkg/errors v0.9.1
github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4
golang.org/x/sys v0.0.0-20220908164124-27713097b956
golang.org/x/net v0.1.0
golang.org/x/sys v0.2.0
google.golang.org/grpc v1.40.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
k8s.io/api v0.22.8
@@ -37,7 +37,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/imdario/mergo v0.3.11 // indirect
@@ -45,24 +45,21 @@ require (
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/term v0.1.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
)
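For consumers of this change, the practical effect of the ginkgo v1 to v2 bump is usually limited to the import path, since the v2 DSL keeps the familiar top-level functions. A minimal sketch of a migrated test file follows (illustrative only; the package name and suite description are hypothetical, and this code is not part of the commit):

```go
package example_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2" // before the bump: "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// TestExample boots the suite; ginkgo v2 keeps RunSpecs but drops the
// v1 custom-reporter variants such as RunSpecsWithDefaultAndCustomReporters.
func TestExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Example Suite")
}

var _ = Describe("after the bump", func() {
	It("uses the same matcher DSL", func() {
		Expect(1 + 1).To(Equal(2))
	})
})
```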
30 go.sum
@@ -181,8 +181,9 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -305,14 +306,15 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw=
github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg=
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
@@ -499,8 +501,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -573,13 +575,14 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956 h1:XeJjHH1KiLpKGb6lvMiksZ9l0fVUh+AmGcm0nOMEBOY=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -587,8 +590,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -645,8 +648,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -766,8 +767,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
787 vendor/github.com/go-logr/logr/funcr/funcr.go (generated, vendored, new file)
@@ -0,0 +1,787 @@
|
||||
/*
|
||||
Copyright 2021 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package funcr implements formatting of structured log messages and
|
||||
// optionally captures the call site and timestamp.
|
||||
//
|
||||
// The simplest way to use it is via its implementation of a
|
||||
// github.com/go-logr/logr.LogSink with output through an arbitrary
|
||||
// "write" function. See New and NewJSON for details.
|
||||
//
|
||||
// Custom LogSinks
|
||||
//
|
||||
// For users who need more control, a funcr.Formatter can be embedded inside
|
||||
// your own custom LogSink implementation. This is useful when the LogSink
|
||||
// needs to implement additional methods, for example.
|
||||
//
|
||||
// Formatting
|
||||
//
|
||||
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
|
||||
// values which are being logged. When rendering a struct, funcr will use Go's
|
||||
// standard JSON tags (all except "string").
|
||||
package funcr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
// New returns a logr.Logger which is implemented by an arbitrary function.
|
||||
func New(fn func(prefix, args string), opts Options) logr.Logger {
|
||||
return logr.New(newSink(fn, NewFormatter(opts)))
|
||||
}
|
||||
|
||||
// NewJSON returns a logr.Logger which is implemented by an arbitrary function
|
||||
// and produces JSON output.
|
||||
func NewJSON(fn func(obj string), opts Options) logr.Logger {
|
||||
fnWrapper := func(_, obj string) {
|
||||
fn(obj)
|
||||
}
|
||||
return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
|
||||
}
|
||||
|
||||
// Underlier exposes access to the underlying logging function. Since
|
||||
// callers only have a logr.Logger, they have to know which
|
||||
// implementation is in use, so this interface is less of an
|
||||
// abstraction and more of a way to test type conversion.
|
||||
type Underlier interface {
|
||||
GetUnderlying() func(prefix, args string)
|
||||
}
|
||||
|
||||
func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
|
||||
l := &fnlogger{
|
||||
Formatter: formatter,
|
||||
write: fn,
|
||||
}
|
||||
// For skipping fnlogger.Info and fnlogger.Error.
|
||||
l.Formatter.AddCallDepth(1)
|
||||
return l
|
||||
}
|
||||
|
||||
// Options carries parameters which influence the way logs are generated.
|
||||
type Options struct {
|
||||
// LogCaller tells funcr to add a "caller" key to some or all log lines.
|
||||
// This has some overhead, so some users might not want it.
|
||||
LogCaller MessageClass
|
||||
|
||||
// LogCallerFunc tells funcr to also log the calling function name. This
|
||||
// has no effect if caller logging is not enabled (see Options.LogCaller).
|
||||
LogCallerFunc bool
|
||||
|
||||
// LogTimestamp tells funcr to add a "ts" key to log lines. This has some
|
||||
// overhead, so some users might not want it.
|
||||
LogTimestamp bool
|
||||
|
||||
// TimestampFormat tells funcr how to render timestamps when LogTimestamp
|
||||
// is enabled. If not specified, a default format will be used. For more
|
||||
// details, see docs for Go's time.Layout.
|
||||
TimestampFormat string
|
||||
|
||||
// Verbosity tells funcr which V logs to produce. Higher values enable
|
||||
// more logs. Info logs at or below this level will be written, while logs
|
||||
// above this level will be discarded.
|
||||
Verbosity int
|
||||
|
||||
// RenderBuiltinsHook allows users to mutate the list of key-value pairs
|
||||
// while a log line is being rendered. The kvList argument follows logr
|
||||
// conventions - each pair of slice elements is comprised of a string key
|
||||
// and an arbitrary value (verified and sanitized before calling this
|
||||
// hook). The value returned must follow the same conventions. This hook
|
||||
// can be used to audit or modify logged data. For example, you might want
|
||||
// to prefix all of funcr's built-in keys with some string. This hook is
|
||||
// only called for built-in (provided by funcr itself) key-value pairs.
|
||||
// Equivalent hooks are offered for key-value pairs saved via
|
||||
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
|
||||
// for user-provided pairs (see RenderArgsHook).
|
||||
RenderBuiltinsHook func(kvList []interface{}) []interface{}
|
||||
|
||||
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
|
||||
// only called for key-value pairs saved via logr.Logger.WithValues. See
|
||||
// RenderBuiltinsHook for more details.
|
||||
RenderValuesHook func(kvList []interface{}) []interface{}
|
||||
|
||||
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
|
||||
// called for key-value pairs passed directly to Info and Error. See
|
||||
// RenderBuiltinsHook for more details.
|
||||
RenderArgsHook func(kvList []interface{}) []interface{}
|
||||
|
||||
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
|
||||
// that contains a struct, etc.) it may log. Every time it finds a struct,
|
||||
// slice, array, or map the depth is increased by one. When the maximum is
|
||||
// reached, the value will be converted to a string indicating that the max
|
||||
// depth has been exceeded. If this field is not specified, a default
|
||||
// value will be used.
|
||||
MaxLogDepth int
|
||||
}
|
||||
|
||||
// MessageClass indicates which category or categories of messages to consider.
|
||||
type MessageClass int
|
||||
|
||||
const (
|
||||
// None ignores all message classes.
|
||||
None MessageClass = iota
|
||||
// All considers all message classes.
|
||||
All
|
||||
// Info only considers info messages.
|
||||
Info
|
||||
// Error only considers error messages.
|
||||
Error
|
||||
)
|
||||
|
||||
// fnlogger inherits some of its LogSink implementation from Formatter
|
||||
// and just needs to add some glue code.
|
||||
type fnlogger struct {
|
||||
Formatter
|
||||
write func(prefix, args string)
|
||||
}
|
||||
|
||||
func (l fnlogger) WithName(name string) logr.LogSink {
|
||||
l.Formatter.AddName(name)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
|
||||
l.Formatter.AddValues(kvList)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
|
||||
l.Formatter.AddCallDepth(depth)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
|
||||
prefix, args := l.FormatInfo(level, msg, kvList)
|
||||
l.write(prefix, args)
|
||||
}
|
||||
|
||||
func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
|
||||
prefix, args := l.FormatError(err, msg, kvList)
|
||||
l.write(prefix, args)
|
||||
}
|
||||
|
||||
func (l fnlogger) GetUnderlying() func(prefix, args string) {
|
||||
return l.write
|
||||
}
|
||||
|
||||
// Assert conformance to the interfaces.
|
||||
var _ logr.LogSink = &fnlogger{}
|
||||
var _ logr.CallDepthLogSink = &fnlogger{}
|
||||
var _ Underlier = &fnlogger{}
|
||||
|
||||
// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
|
||||
func NewFormatter(opts Options) Formatter {
|
||||
return newFormatter(opts, outputKeyValue)
|
||||
}
|
||||
|
||||
// NewFormatterJSON constructs a Formatter which emits strict JSON.
|
||||
func NewFormatterJSON(opts Options) Formatter {
|
||||
return newFormatter(opts, outputJSON)
|
||||
}
|
||||
|
||||
// Defaults for Options.
|
||||
const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
|
||||
const defaultMaxLogDepth = 16
|
||||
|
||||
func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
||||
if opts.TimestampFormat == "" {
|
||||
opts.TimestampFormat = defaultTimestampFormat
|
||||
}
|
||||
if opts.MaxLogDepth == 0 {
|
||||
opts.MaxLogDepth = defaultMaxLogDepth
|
||||
}
|
||||
f := Formatter{
|
||||
outputFormat: outfmt,
|
||||
prefix: "",
|
||||
values: nil,
|
||||
depth: 0,
|
||||
opts: opts,
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// Formatter is an opaque struct which can be embedded in a LogSink
|
||||
// implementation. It should be constructed with NewFormatter. Some of
|
||||
// its methods directly implement logr.LogSink.
|
||||
type Formatter struct {
|
||||
outputFormat outputFormat
|
||||
prefix string
|
||||
values []interface{}
|
||||
valuesStr string
|
||||
depth int
|
||||
opts Options
|
||||
}
|
||||
|
||||
// outputFormat indicates which outputFormat to use.
|
||||
type outputFormat int
|
||||
|
||||
const (
|
||||
// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
|
||||
outputKeyValue outputFormat = iota
|
||||
// outputJSON emits strict JSON.
|
||||
outputJSON
|
||||
)
|
||||
|
||||
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
|
||||
type PseudoStruct []interface{}
|
||||
|
||||
// render produces a log line, ready to use.
|
||||
func (f Formatter) render(builtins, args []interface{}) string {
|
||||
// Empirically bytes.Buffer is faster than strings.Builder for this.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('{')
|
||||
}
|
||||
vals := builtins
|
||||
if hook := f.opts.RenderBuiltinsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
|
||||
continuing := len(builtins) > 0
|
||||
if len(f.valuesStr) > 0 {
|
||||
if continuing {
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(',')
|
||||
} else {
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
continuing = true
|
||||
buf.WriteString(f.valuesStr)
|
||||
}
|
||||
vals = args
|
||||
if hook := f.opts.RenderArgsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, continuing, true) // escape user-provided keys
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('}')
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// flatten renders a list of key-value pairs into a buffer. If continuing is
|
||||
// true, it assumes that the buffer has previous values and will emit a
|
||||
// separator (which depends on the output format) before the first pair it
|
||||
// writes. If escapeKeys is true, the keys are assumed to have
|
||||
// non-JSON-compatible characters in them and must be evaluated for escapes.
|
||||
//
|
||||
// This function returns a potentially modified version of kvList, which
|
||||
// ensures that there is a value for every key (adding a value if needed) and
|
||||
// that each key is a string (substituting a key if needed).
|
||||
func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
|
||||
// This logic overlaps with sanitize() but saves one type-cast per key,
|
||||
// which can be measurable.
|
||||
if len(kvList)%2 != 0 {
|
||||
kvList = append(kvList, noValue)
|
||||
}
|
||||
for i := 0; i < len(kvList); i += 2 {
|
||||
k, ok := kvList[i].(string)
|
||||
if !ok {
|
||||
k = f.nonStringKey(kvList[i])
|
||||
kvList[i] = k
|
||||
}
|
||||
v := kvList[i+1]
|
||||
|
||||
if i > 0 || continuing {
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(',')
|
||||
} else {
|
||||
// In theory the format could be something we don't understand. In
|
||||
// practice, we control it, so it won't be.
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
|
||||
if escapeKeys {
|
||||
buf.WriteString(prettyString(k))
|
||||
} else {
|
||||
// this is faster
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(k)
|
||||
buf.WriteByte('"')
|
||||
}
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(':')
|
||||
} else {
|
||||
buf.WriteByte('=')
|
||||
}
|
||||
buf.WriteString(f.pretty(v))
|
||||
}
|
||||
return kvList
|
||||
}
|
||||
|
||||
func (f Formatter) pretty(value interface{}) string {
|
||||
return f.prettyWithFlags(value, 0, 0)
|
||||
}
|
||||
|
||||
const (
|
||||
flagRawStruct = 0x1 // do not print braces on structs
|
||||
)
|
||||
|
||||
// TODO: This is not fast. Most of the overhead goes here.
|
||||
func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
|
||||
if depth > f.opts.MaxLogDepth {
|
||||
return `"<max-log-depth-exceeded>"`
|
||||
}
|
||||
|
||||
// Handle types that take full control of logging.
|
||||
if v, ok := value.(logr.Marshaler); ok {
|
||||
// Replace the value with what the type wants to get logged.
|
||||
// That then gets handled below via reflection.
|
||||
value = invokeMarshaler(v)
|
||||
}
|
||||
|
||||
// Handle types that want to format themselves.
|
||||
switch v := value.(type) {
|
||||
case fmt.Stringer:
|
||||
value = invokeStringer(v)
|
||||
case error:
|
||||
value = invokeError(v)
|
||||
}
|
||||
|
||||
// Handling the most common types without reflect is a small perf win.
|
||||
switch v := value.(type) {
|
||||
case bool:
|
||||
return strconv.FormatBool(v)
|
||||
case string:
|
||||
return prettyString(v)
|
||||
case int:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int8:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int16:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int32:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int64:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case uint:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint8:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint16:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint32:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint64:
|
||||
return strconv.FormatUint(v, 10)
|
||||
case uintptr:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case float32:
|
||||
return strconv.FormatFloat(float64(v), 'f', -1, 32)
|
||||
case float64:
|
||||
return strconv.FormatFloat(v, 'f', -1, 64)
|
||||
case complex64:
|
||||
return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
|
||||
case complex128:
|
||||
return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
|
||||
case PseudoStruct:
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
v = f.sanitize(v)
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('{')
|
||||
}
|
||||
for i := 0; i < len(v); i += 2 {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
k, _ := v[i].(string) // sanitize() above means no need to check success
|
||||
// arbitrary keys might need escaping
|
||||
buf.WriteString(prettyString(k))
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('}')
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 256))
|
||||
t := reflect.TypeOf(value)
|
||||
if t == nil {
|
||||
return "null"
|
||||
}
|
||||
v := reflect.ValueOf(value)
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return strconv.FormatBool(v.Bool())
|
||||
case reflect.String:
|
||||
return prettyString(v.String())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.FormatInt(int64(v.Int()), 10)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return strconv.FormatUint(uint64(v.Uint()), 10)
|
||||
case reflect.Float32:
|
||||
return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
|
||||
case reflect.Float64:
|
||||
return strconv.FormatFloat(v.Float(), 'f', -1, 64)
|
||||
case reflect.Complex64:
|
||||
return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
|
||||
case reflect.Complex128:
|
||||
return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
|
||||
case reflect.Struct:
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('{')
|
||||
}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
fld := t.Field(i)
|
||||
if fld.PkgPath != "" {
|
||||
// reflect says this field is only defined for non-exported fields.
|
||||
continue
|
||||
}
|
||||
if !v.Field(i).CanInterface() {
|
||||
// reflect isn't clear exactly what this means, but we can't use it.
|
||||
continue
|
||||
}
|
||||
name := ""
|
||||
omitempty := false
|
||||
if tag, found := fld.Tag.Lookup("json"); found {
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
if comma := strings.Index(tag, ","); comma != -1 {
|
||||
if n := tag[:comma]; n != "" {
|
||||
name = n
|
||||
}
|
||||
rest := tag[comma:]
|
||||
if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
|
||||
omitempty = true
|
||||
}
|
||||
} else {
|
||||
name = tag
|
||||
}
|
||||
}
|
||||
if omitempty && isEmpty(v.Field(i)) {
|
||||
continue
|
||||
}
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
|
||||
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
|
||||
continue
|
||||
}
|
||||
if name == "" {
|
||||
name = fld.Name
|
||||
}
|
||||
// field names can't contain characters which need escaping
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(name)
|
||||
buf.WriteByte('"')
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('}')
|
||||
}
|
||||
return buf.String()
|
||||
case reflect.Slice, reflect.Array:
|
||||
buf.WriteByte('[')
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
e := v.Index(i)
|
||||
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
|
||||
}
|
||||
buf.WriteByte(']')
|
||||
return buf.String()
|
||||
case reflect.Map:
|
||||
buf.WriteByte('{')
|
||||
// This does not sort the map keys, for best perf.
|
||||
it := v.MapRange()
|
||||
i := 0
|
||||
for it.Next() {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
}
|
||||
// If a map key supports TextMarshaler, use it.
|
||||
keystr := ""
|
||||
if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
|
||||
txt, err := m.MarshalText()
|
||||
if err != nil {
|
||||
keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
|
||||
} else {
|
||||
keystr = string(txt)
|
||||
}
|
||||
keystr = prettyString(keystr)
|
||||
} else {
|
||||
// prettyWithFlags will produce already-escaped values
|
||||
keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
|
||||
if t.Key().Kind() != reflect.String {
|
||||
// JSON only does string keys. Unlike Go's standard JSON, we'll
|
||||
// convert just about anything to a string.
|
||||
keystr = prettyString(keystr)
|
||||
}
|
||||
}
|
||||
buf.WriteString(keystr)
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
|
||||
i++
|
||||
}
|
||||
buf.WriteByte('}')
|
||||
return buf.String()
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
if v.IsNil() {
|
||||
return "null"
|
||||
}
|
||||
return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
|
||||
}
|
||||
return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
|
||||
}
|
||||
|
||||
func prettyString(s string) string {
|
||||
// Avoid escaping (which does allocations) if we can.
|
||||
if needsEscape(s) {
|
||||
return strconv.Quote(s)
|
||||
}
|
||||
b := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
b.WriteByte('"')
|
||||
b.WriteString(s)
|
||||
b.WriteByte('"')
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// needsEscape determines whether the input string needs to be escaped or not,
|
||||
// without doing any allocations.
|
||||
func needsEscape(s string) bool {
|
||||
for _, r := range s {
|
||||
if !strconv.IsPrint(r) || r == '\\' || r == '"' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isEmpty(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return v.Complex() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = fmt.Sprintf("<panic: %s>", r)
|
||||
}
|
||||
}()
|
||||
return m.MarshalLog()
|
||||
}
|
||||
|
||||
func invokeStringer(s fmt.Stringer) (ret string) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = fmt.Sprintf("<panic: %s>", r)
|
||||
}
|
||||
}()
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func invokeError(e error) (ret string) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = fmt.Sprintf("<panic: %s>", r)
|
||||
}
|
||||
}()
|
||||
return e.Error()
|
||||
}
|
||||
|
||||
// Caller represents the original call site for a log line, after considering
|
||||
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
|
||||
// Line fields will always be provided, while the Func field is optional.
|
||||
// Users can set the render hook fields in Options to examine logged key-value
|
||||
// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
|
||||
// field is enabled for the given MessageClass.
|
||||
type Caller struct {
|
||||
// File is the basename of the file for this call site.
|
||||
File string `json:"file"`
|
||||
// Line is the line number in the file for this call site.
|
||||
Line int `json:"line"`
|
||||
// Func is the function name for this call site, or empty if
|
||||
// Options.LogCallerFunc is not enabled.
|
||||
Func string `json:"function,omitempty"`
|
||||
}
|
||||
|
||||
func (f Formatter) caller() Caller {
|
||||
// +1 for this frame, +1 for Info/Error.
|
||||
pc, file, line, ok := runtime.Caller(f.depth + 2)
|
||||
if !ok {
|
||||
return Caller{"<unknown>", 0, ""}
|
||||
}
|
||||
fn := ""
|
||||
if f.opts.LogCallerFunc {
|
||||
if fp := runtime.FuncForPC(pc); fp != nil {
|
||||
fn = fp.Name()
|
||||
}
|
||||
}
|
||||
|
||||
return Caller{filepath.Base(file), line, fn}
|
||||
}
|
||||
|
||||
const noValue = "<no-value>"
|
||||
|
||||
func (f Formatter) nonStringKey(v interface{}) string {
|
||||
return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
|
||||
}
|
||||
|
||||
// snippet produces a short snippet string of an arbitrary value.
|
||||
func (f Formatter) snippet(v interface{}) string {
|
||||
const snipLen = 16
|
||||
|
||||
snip := f.pretty(v)
|
||||
if len(snip) > snipLen {
|
||||
snip = snip[:snipLen]
|
||||
}
|
||||
return snip
|
||||
}
|
||||
|
||||
// sanitize ensures that a list of key-value pairs has a value for every key
|
||||
// (adding a value if needed) and that each key is a string (substituting a key
|
||||
// if needed).
|
||||
func (f Formatter) sanitize(kvList []interface{}) []interface{} {
|
||||
if len(kvList)%2 != 0 {
|
||||
kvList = append(kvList, noValue)
|
||||
}
|
||||
for i := 0; i < len(kvList); i += 2 {
|
||||
_, ok := kvList[i].(string)
|
||||
if !ok {
|
||||
kvList[i] = f.nonStringKey(kvList[i])
|
||||
}
|
||||
}
|
||||
return kvList
|
||||
}
|
||||
|
||||
// Init configures this Formatter from runtime info, such as the call depth
|
||||
// imposed by logr itself.
|
||||
// Note that this receiver is a pointer, so depth can be saved.
|
||||
func (f *Formatter) Init(info logr.RuntimeInfo) {
|
||||
f.depth += info.CallDepth
|
||||
}
|
||||
|
||||
// Enabled checks whether an info message at the given level should be logged.
|
||||
func (f Formatter) Enabled(level int) bool {
|
||||
return level <= f.opts.Verbosity
|
||||
}
|
||||
|
||||
// GetDepth returns the current depth of this Formatter. This is useful for
|
||||
// implementations which do their own caller attribution.
|
||||
func (f Formatter) GetDepth() int {
|
||||
return f.depth
|
||||
}
|
||||
|
||||
// FormatInfo renders an Info log message into strings. The prefix will be
|
||||
// empty when no names were set (via AddNames), or when the output is
|
||||
// configured for JSON.
|
||||
func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
|
||||
args := make([]interface{}, 0, 64) // using a constant here impacts perf
|
||||
prefix = f.prefix
|
||||
if f.outputFormat == outputJSON {
|
||||
args = append(args, "logger", prefix)
|
||||
prefix = ""
|
||||
}
|
||||
if f.opts.LogTimestamp {
|
||||
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
|
||||
}
|
||||
if policy := f.opts.LogCaller; policy == All || policy == Info {
|
||||
args = append(args, "caller", f.caller())
|
||||
}
|
||||
args = append(args, "level", level, "msg", msg)
|
||||
return prefix, f.render(args, kvList)
|
||||
}
|
||||
|
||||
// FormatError renders an Error log message into strings. The prefix will be
|
||||
// empty when no names were set (via AddNames), or when the output is
|
||||
// configured for JSON.
|
||||
func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
|
||||
args := make([]interface{}, 0, 64) // using a constant here impacts perf
|
||||
prefix = f.prefix
|
||||
if f.outputFormat == outputJSON {
|
||||
args = append(args, "logger", prefix)
|
||||
prefix = ""
|
||||
}
|
||||
if f.opts.LogTimestamp {
|
||||
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
|
||||
}
|
||||
if policy := f.opts.LogCaller; policy == All || policy == Error {
|
||||
args = append(args, "caller", f.caller())
|
||||
}
|
||||
args = append(args, "msg", msg)
|
||||
var loggableErr interface{}
|
||||
if err != nil {
|
||||
loggableErr = err.Error()
|
||||
}
|
||||
args = append(args, "error", loggableErr)
|
||||
return f.prefix, f.render(args, kvList)
|
||||
}
|
||||
|
||||
// AddName appends the specified name. funcr uses '/' characters to separate
|
||||
// name elements. Callers should not pass '/' in the provided name string, but
|
||||
// this library does not actually enforce that.
|
||||
func (f *Formatter) AddName(name string) {
|
||||
if len(f.prefix) > 0 {
|
||||
f.prefix += "/"
|
||||
}
|
||||
f.prefix += name
|
||||
}
|
||||
|
||||
// AddValues adds key-value pairs to the set of saved values to be logged with
|
||||
// each log line.
|
||||
func (f *Formatter) AddValues(kvList []interface{}) {
|
||||
// Three slice args forces a copy.
|
||||
n := len(f.values)
|
||||
f.values = append(f.values[:n:n], kvList...)
|
||||
|
||||
vals := f.values
|
||||
if hook := f.opts.RenderValuesHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
|
||||
// Pre-render values, so we don't have to do it on each Info/Error call.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
f.flatten(buf, vals, false, true) // escape user-provided keys
|
||||
f.valuesStr = buf.String()
|
||||
}
|
||||
|
||||
// AddCallDepth increases the number of stack-frames to skip when attributing
|
||||
// the log line to a file and line.
|
||||
func (f *Formatter) AddCallDepth(depth int) {
|
||||
f.depth += depth
|
||||
}
|
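The funcr package vendored above as a new file adapts an arbitrary write function into a logr.Logger, as its package comment describes. A minimal usage sketch based on the New and Options definitions shown in that file; this is an illustration, not code used by multus-cni:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr.New wraps a func(prefix, args string) as a logr.Logger.
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 1, LogTimestamp: true})

	log.Info("hello", "key", "value")       // level 0 <= Verbosity, written
	log.V(2).Info("too verbose", "k", "v")  // level 2 > Verbosity, discarded
	log.Error(errors.New("boom"), "failed") // errors are always written
}
```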
83 vendor/github.com/google/go-cmp/cmp/compare.go (generated, vendored)
@@ -13,21 +13,21 @@
//
// The primary features of cmp are:
//
// • When the default behavior of equality does not suit the needs of the test,
// custom equality functions can override the equality operation.
// For example, an equality function may report floats as equal so long as they
// are within some tolerance of each other.
// - When the default behavior of equality does not suit the test's needs,
// custom equality functions can override the equality operation.
// For example, an equality function may report floats as equal so long as
// they are within some tolerance of each other.
//
// • Types that have an Equal method may use that method to determine equality.
// This allows package authors to determine the equality operation for the types
// that they define.
// - Types with an Equal method may use that method to determine equality.
// This allows package authors to determine the equality operation
// for the types that they define.
//
// • If no custom equality functions are used and no Equal method is defined,
// equality is determined by recursively comparing the primitive kinds on both
// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
// fields are not compared by default; they result in panics unless suppressed
// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly
// compared using the Exporter option.
// - If no custom equality functions are used and no Equal method is defined,
// equality is determined by recursively comparing the primitive kinds on
// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual,
// unexported fields are not compared by default; they result in panics
// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported)
// or explicitly compared using the Exporter option.
package cmp

import (
@@ -36,33 +36,34 @@ import (
"strings"

"github.com/google/go-cmp/cmp/internal/diff"
"github.com/google/go-cmp/cmp/internal/flags"
"github.com/google/go-cmp/cmp/internal/function"
"github.com/google/go-cmp/cmp/internal/value"
)

// TODO(≥go1.18): Use any instead of interface{}.

// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
// • Let S be the set of all Ignore, Transformer, and Comparer options that
// remain after applying all path filters, value filters, and type filters.
// If at least one Ignore exists in S, then the comparison is ignored.
// If the number of Transformer and Comparer options in S is greater than one,
// then Equal panics because it is ambiguous which option to use.
// If S contains a single Transformer, then use that to transform the current
// values and recursively call Equal on the output values.
// If S contains a single Comparer, then use that to compare the current values.
// Otherwise, evaluation proceeds to the next rule.
// - Let S be the set of all Ignore, Transformer, and Comparer options that
// remain after applying all path filters, value filters, and type filters.
// If at least one Ignore exists in S, then the comparison is ignored.
// If the number of Transformer and Comparer options in S is non-zero,
// then Equal panics because it is ambiguous which option to use.
// If S contains a single Transformer, then use that to transform
// the current values and recursively call Equal on the output values.
// If S contains a single Comparer, then use that to compare the current values.
// Otherwise, evaluation proceeds to the next rule.
//
// • If the values have an Equal method of the form "(T) Equal(T) bool" or
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
// evaluation proceeds to the next rule.
// - If the values have an Equal method of the form "(T) Equal(T) bool" or
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
// evaluation proceeds to the next rule.
//
// • Lastly, try to compare x and y based on their basic kinds.
// Simple kinds like booleans, integers, floats, complex numbers, strings, and
// channels are compared using the equivalent of the == operator in Go.
// Functions are only equal if they are both nil, otherwise they are unequal.
// - Lastly, try to compare x and y based on their basic kinds.
// Simple kinds like booleans, integers, floats, complex numbers, strings,
// and channels are compared using the equivalent of the == operator in Go.
// Functions are only equal if they are both nil, otherwise they are unequal.
//
// Structs are equal if recursively calling Equal on all fields report equal.
// If a struct contains unexported fields, Equal panics unless an Ignore option
@@ -143,7 +144,7 @@ func rootStep(x, y interface{}) PathStep {
// so that they have the same parent type.
var t reflect.Type
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
t = reflect.TypeOf((*interface{})(nil)).Elem()
t = anyType
if vx.IsValid() {
vvx := reflect.New(t).Elem()
vvx.Set(vx)
@@ -319,7 +320,6 @@ func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
}

func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
v = sanitizeValue(v, f.Type().In(0))
if !s.dynChecker.Next() {
return f.Call([]reflect.Value{v})[0]
}
@@ -343,8 +343,6 @@ func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
}

func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
x = sanitizeValue(x, f.Type().In(0))
y = sanitizeValue(y, f.Type().In(1))
if !s.dynChecker.Next() {
return f.Call([]reflect.Value{x, y})[0].Bool()
}
@@ -372,19 +370,6 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
ret = f.Call(vs)[0]
}

// sanitizeValue converts nil interfaces of type T to those of type R,
// assuming that T is assignable to R.
// Otherwise, it returns the input value as is.
func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
// TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143).
if !flags.AtLeastGo110 {
if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
return reflect.New(t).Elem()
}
}
return v
}

func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
var addr bool
var vax, vay reflect.Value // Addressable versions of vx and vy
@@ -654,7 +639,9 @@ type dynChecker struct{ curr, next int }
// Next increments the state and reports whether a check should be performed.
//
// Checks occur every Nth function call, where N is a triangular number:
//
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
//
// See https://en.wikipedia.org/wiki/Triangular_number
//
// This sequence ensures that the cost of checks drops significantly as
1 vendor/github.com/google/go-cmp/cmp/export_panic.go (generated, vendored)
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build purego
// +build purego

package cmp
1 vendor/github.com/google/go-cmp/cmp/export_unsafe.go (generated, vendored)
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !purego
// +build !purego

package cmp
1 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go (generated, vendored)
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !cmp_debug
// +build !cmp_debug

package diff
1 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go (generated, vendored)
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build cmp_debug
// +build cmp_debug

package diff
44 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go (generated, vendored)
@@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
// This function returns an edit-script, which is a sequence of operations
// needed to convert one list into the other. The following invariants for
// the edit-script are maintained:
// • eq == (es.Dist()==0)
// • nx == es.LenX()
// • ny == es.LenY()
// - eq == (es.Dist()==0)
// - nx == es.LenX()
// - ny == es.LenY()
//
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
// produces an edit-script with a minimal Levenshtein distance). This algorithm
@@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// A diagonal edge is equivalent to a matching symbol between both X and Y.

// Invariants:
// • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
// • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
// - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
// - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
// • fwdFrontier.X < revFrontier.X
// • fwdFrontier.Y < revFrontier.Y
// - fwdFrontier.X < revFrontier.X
// - fwdFrontier.Y < revFrontier.Y
//
// Unless, it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
@@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
// • Searching for differences switches back-and-forth between
// a search that starts at the beginning (the top-left corner), and
// a search that starts at the end (the bottom-right corner). The goal of
// the search is connect with the search from the opposite corner.
// • As we search, we build a path in a greedy manner, where the first
// match seen is added to the path (this is sub-optimal, but provides a
// decent result in practice). When matches are found, we try the next pair
// of symbols in the lists and follow all matches as far as possible.
// • When searching for matches, we search along a diagonal going through
// through the "frontier" point. If no matches are found, we advance the
// frontier towards the opposite corner.
// • This algorithm terminates when either the X coordinates or the
// Y coordinates of the forward and reverse frontier points ever intersect.
// - Searching for differences switches back-and-forth between
// a search that starts at the beginning (the top-left corner), and
// a search that starts at the end (the bottom-right corner).
// The goal of the search is connect with the search
// from the opposite corner.
// - As we search, we build a path in a greedy manner,
// where the first match seen is added to the path (this is sub-optimal,
// but provides a decent result in practice). When matches are found,
// we try the next pair of symbols in the lists and follow all matches
// as far as possible.
// - When searching for matches, we search along a diagonal going through
// through the "frontier" point. If no matches are found,
// we advance the frontier towards the opposite corner.
// - This algorithm terminates when either the X coordinates or the
// Y coordinates of the forward and reverse frontier points ever intersect.

// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
@@ -389,6 +392,7 @@ type point struct{ X, Y int }
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }

// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
//
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
func zigzag(x int) int {
if x&1 != 0 {
10 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go (generated, vendored)
@@ -1,10 +0,0 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.10

package flags

// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
const AtLeastGo110 = false
10 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go (generated, vendored)
@@ -1,10 +0,0 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.10

package flags

// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
const AtLeastGo110 = true
7 vendor/github.com/google/go-cmp/cmp/internal/value/name.go (generated, vendored)
@@ -9,6 +9,8 @@ import (
"strconv"
)

var anyType = reflect.TypeOf((*interface{})(nil)).Elem()

// TypeString is nearly identical to reflect.Type.String,
// but has an additional option to specify that full type names be used.
func TypeString(t reflect.Type, qualified bool) string {
@@ -20,6 +22,11 @@ func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte
// of the same name and within the same package,
// but declared within the namespace of different functions.

// Use the "any" alias instead of "interface{}" for better readability.
if t == anyType {
return append(b, "any"...)
}

// Named type.
if t.Name() != "" {
if qualified && t.PkgPath() != "" {
1 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go (generated, vendored)
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build purego
// +build purego

package value
1 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go generated vendored
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !purego
// +build !purego

package value
48 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go generated vendored
@ -1,48 +0,0 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// IsZero reports whether v is the zero value.
|
||||
// This does not rely on Interface and so can be used on unexported fields.
|
||||
func IsZero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return v.Bool() == false
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return math.Float64bits(v.Float()) == 0
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0
|
||||
case reflect.String:
|
||||
return v.String() == ""
|
||||
case reflect.UnsafePointer:
|
||||
return v.Pointer() == 0
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
|
||||
return v.IsNil()
|
||||
case reflect.Array:
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if !IsZero(v.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Struct:
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
if !IsZero(v.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
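The deleted IsZero helper above predates reflect.Value.IsZero, which the standard library has provided since Go 1.13; presumably that is why the report_compare.go and report_reflect.go hunks below can call the method directly instead. A minimal illustration of the standard-library call:

```go
package main

import (
	"fmt"
	"reflect"
)

type config struct {
	Name  string
	Count int
}

func main() {
	// reflect.Value.IsZero (Go 1.13+) covers the same cases the removed
	// helper handled by hand: numbers, strings, pointers, structs, arrays.
	fmt.Println(reflect.ValueOf(config{}).IsZero())          // true
	fmt.Println(reflect.ValueOf(config{Count: 1}).IsZero()) // false
}
```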
10 vendor/github.com/google/go-cmp/cmp/options.go generated vendored
@ -33,6 +33,7 @@ type Option interface {
|
||||
}
|
||||
|
||||
// applicableOption represents the following types:
|
||||
//
|
||||
// Fundamental: ignore | validator | *comparer | *transformer
|
||||
// Grouping: Options
|
||||
type applicableOption interface {
|
||||
@ -43,6 +44,7 @@ type applicableOption interface {
|
||||
}
|
||||
|
||||
// coreOption represents the following types:
|
||||
//
|
||||
// Fundamental: ignore | validator | *comparer | *transformer
|
||||
// Filters: *pathFilter | *valuesFilter
|
||||
type coreOption interface {
|
||||
@ -336,9 +338,9 @@ func (tr transformer) String() string {
|
||||
// both implement T.
|
||||
//
|
||||
// The equality function must be:
|
||||
// • Symmetric: equal(x, y) == equal(y, x)
|
||||
// • Deterministic: equal(x, y) == equal(x, y)
|
||||
// • Pure: equal(x, y) does not modify x or y
|
||||
// - Symmetric: equal(x, y) == equal(y, x)
|
||||
// - Deterministic: equal(x, y) == equal(x, y)
|
||||
// - Pure: equal(x, y) does not modify x or y
|
||||
func Comparer(f interface{}) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
|
||||
@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option {
|
||||
}
|
||||
|
||||
// Result represents the comparison result for a single node and
|
||||
// is provided by cmp when calling Result (see Reporter).
|
||||
// is provided by cmp when calling Report (see Reporter).
|
||||
type Result struct {
|
||||
_ [0]func() // Make Result incomparable
|
||||
flags resultFlags
|
||||
|
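The Comparer documentation above spells out the properties the supplied equality function must satisfy (symmetric, deterministic, pure). A short, hypothetical usage sketch with an approximate float comparer; the tolerance value is chosen arbitrarily:

```go
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// The function passed to cmp.Comparer must be symmetric, deterministic,
	// and pure; comparing the absolute difference against a fixed tolerance
	// satisfies all three.
	approx := cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) < 1e-9
	})
	fmt.Println(cmp.Equal(0.1+0.2, 0.3, approx)) // true
}
```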
24 vendor/github.com/google/go-cmp/cmp/path.go generated vendored
@ -41,13 +41,13 @@ type PathStep interface {
|
||||
// The type of each valid value is guaranteed to be identical to Type.
|
||||
//
|
||||
// In some cases, one or both may be invalid or have restrictions:
|
||||
// • For StructField, both are not interface-able if the current field
|
||||
// is unexported and the struct type is not explicitly permitted by
|
||||
// an Exporter to traverse unexported fields.
|
||||
// • For SliceIndex, one may be invalid if an element is missing from
|
||||
// either the x or y slice.
|
||||
// • For MapIndex, one may be invalid if an entry is missing from
|
||||
// either the x or y map.
|
||||
// - For StructField, both are not interface-able if the current field
|
||||
// is unexported and the struct type is not explicitly permitted by
|
||||
// an Exporter to traverse unexported fields.
|
||||
// - For SliceIndex, one may be invalid if an element is missing from
|
||||
// either the x or y slice.
|
||||
// - For MapIndex, one may be invalid if an entry is missing from
|
||||
// either the x or y map.
|
||||
//
|
||||
// The provided values must not be mutated.
|
||||
Values() (vx, vy reflect.Value)
|
||||
@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep {
|
||||
// The simplified path only contains struct field accesses.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// MyMap.MySlices.MyField
|
||||
func (pa Path) String() string {
|
||||
var ss []string
|
||||
@ -108,6 +109,7 @@ func (pa Path) String() string {
|
||||
// GoString returns the path to a specific node using Go syntax.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
|
||||
func (pa Path) GoString() string {
|
||||
var ssPre, ssPost []string
|
||||
@ -159,7 +161,7 @@ func (ps pathStep) String() string {
|
||||
if ps.typ == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
s := ps.typ.String()
|
||||
s := value.TypeString(ps.typ, false)
|
||||
if s == "" || strings.ContainsAny(s, "{}\n") {
|
||||
return "root" // Type too simple or complex to print
|
||||
}
|
||||
@ -178,7 +180,7 @@ type structField struct {
|
||||
unexported bool
|
||||
mayForce bool // Forcibly allow visibility
|
||||
paddr bool // Was parent addressable?
|
||||
pvx, pvy reflect.Value // Parent values (always addressible)
|
||||
pvx, pvy reflect.Value // Parent values (always addressable)
|
||||
field reflect.StructField // Field information
|
||||
}
|
||||
|
||||
@ -282,7 +284,7 @@ type typeAssertion struct {
|
||||
|
||||
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
|
||||
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
|
||||
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
|
||||
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
|
||||
|
||||
// Transform is a transformation from the parent type to the current type.
|
||||
type Transform struct{ *transform }
|
||||
@ -315,7 +317,7 @@ func (tf Transform) Option() Option { return tf.trans }
|
||||
// pops the address from the stack. Thus, when traversing into a pointer from
|
||||
// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
|
||||
// by checking whether the pointer has already been visited. The cycle detection
|
||||
// uses a seperate stack for the x and y values.
|
||||
// uses a separate stack for the x and y values.
|
||||
//
|
||||
// If a cycle is detected we need to determine whether the two pointers
|
||||
// should be considered equal. The definition of equality chosen by Equal
|
||||
|
13 vendor/github.com/google/go-cmp/cmp/report_compare.go generated vendored
@ -7,8 +7,6 @@ package cmp
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// numContextRecords is the number of surrounding equal records to print.
|
||||
@ -116,7 +114,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out
|
||||
}
|
||||
|
||||
// For leaf nodes, format the value based on the reflect.Values alone.
|
||||
if v.MaxDepth == 0 {
|
||||
// As a special case, treat equal []byte as a leaf nodes.
|
||||
isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
|
||||
isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
|
||||
if v.MaxDepth == 0 || isEqualBytes {
|
||||
switch opts.DiffMode {
|
||||
case diffUnknown, diffIdentical:
|
||||
// Format Equal.
|
||||
@ -245,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt
|
||||
var isZero bool
|
||||
switch opts.DiffMode {
|
||||
case diffIdentical:
|
||||
isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
|
||||
isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
|
||||
case diffRemoved:
|
||||
isZero = value.IsZero(r.Value.ValueX)
|
||||
isZero = r.Value.ValueX.IsZero()
|
||||
case diffInserted:
|
||||
isZero = value.IsZero(r.Value.ValueY)
|
||||
isZero = r.Value.ValueY.IsZero()
|
||||
}
|
||||
if isZero {
|
||||
continue
|
||||
|
24 vendor/github.com/google/go-cmp/cmp/report_reflect.go generated vendored
@ -16,6 +16,13 @@ import (
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
var (
|
||||
anyType = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
stringType = reflect.TypeOf((*string)(nil)).Elem()
|
||||
bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
|
||||
byteType = reflect.TypeOf((*byte)(nil)).Elem()
|
||||
)
|
||||
|
||||
type formatValueOptions struct {
|
||||
// AvoidStringer controls whether to avoid calling custom stringer
|
||||
// methods like error.Error or fmt.Stringer.String.
|
||||
@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
|
||||
}
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
vv := v.Field(i)
|
||||
if value.IsZero(vv) {
|
||||
if vv.IsZero() {
|
||||
continue // Elide fields with zero values
|
||||
}
|
||||
if len(list) == maxLen {
|
||||
@ -205,12 +212,13 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
|
||||
}
|
||||
|
||||
// Check whether this is a []byte of text data.
|
||||
if t.Elem() == reflect.TypeOf(byte(0)) {
|
||||
if t.Elem() == byteType {
|
||||
b := v.Bytes()
|
||||
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) }
|
||||
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
|
||||
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
|
||||
out = opts.formatString("", string(b))
|
||||
return opts.WithTypeMode(emitType).FormatType(t, out)
|
||||
skipType = true
|
||||
return opts.FormatType(t, out)
|
||||
}
|
||||
}
|
||||
|
||||
@ -281,7 +289,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
skipType = true // Let the underlying value print the type instead
|
||||
// Skip the name only if this is an unnamed pointer type.
|
||||
// Otherwise taking the address of a value does not reproduce
|
||||
// the named pointer type.
|
||||
if v.Type().Name() == "" {
|
||||
skipType = true // Let the underlying value print the type instead
|
||||
}
|
||||
out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
@ -292,7 +305,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
|
||||
}
|
||||
// Interfaces accept different concrete types,
|
||||
// so configure the underlying value to explicitly print the type.
|
||||
skipType = true // Print the concrete type instead
|
||||
return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v kind not handled", v.Kind()))
|
||||
|
221 vendor/github.com/google/go-cmp/cmp/report_slices.go generated vendored
@ -7,6 +7,7 @@ package cmp
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -79,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
|
||||
}
|
||||
|
||||
// Use specialized string diffing for longer slices or strings.
|
||||
const minLength = 64
|
||||
const minLength = 32
|
||||
return vx.Len() >= minLength && vy.Len() >= minLength
|
||||
}
|
||||
|
||||
@ -96,15 +97,16 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
}
|
||||
|
||||
// Auto-detect the type of the data.
|
||||
var isLinedText, isText, isBinary bool
|
||||
var sx, sy string
|
||||
var ssx, ssy []string
|
||||
var isString, isMostlyText, isPureLinedText, isBinary bool
|
||||
switch {
|
||||
case t.Kind() == reflect.String:
|
||||
sx, sy = vx.String(), vy.String()
|
||||
isText = true // Initial estimate, verify later
|
||||
case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
|
||||
isString = true
|
||||
case t.Kind() == reflect.Slice && t.Elem() == byteType:
|
||||
sx, sy = string(vx.Bytes()), string(vy.Bytes())
|
||||
isBinary = true // Initial estimate, verify later
|
||||
isString = true
|
||||
case t.Kind() == reflect.Array:
|
||||
// Arrays need to be addressable for slice operations to work.
|
||||
vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
|
||||
@ -112,13 +114,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
vy2.Set(vy)
|
||||
vx, vy = vx2, vy2
|
||||
}
|
||||
if isText || isBinary {
|
||||
var numLines, lastLineIdx, maxLineLen int
|
||||
isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy)
|
||||
if isString {
|
||||
var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
|
||||
for i, r := range sx + sy {
|
||||
if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
|
||||
isBinary = true
|
||||
break
|
||||
numTotalRunes++
|
||||
if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
|
||||
numValidRunes++
|
||||
}
|
||||
if r == '\n' {
|
||||
if maxLineLen < i-lastLineIdx {
|
||||
@ -128,8 +129,29 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
numLines++
|
||||
}
|
||||
}
|
||||
isText = !isBinary
|
||||
isLinedText = isText && numLines >= 4 && maxLineLen <= 1024
|
||||
isPureText := numValidRunes == numTotalRunes
|
||||
isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
|
||||
isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
|
||||
isBinary = !isMostlyText
|
||||
|
||||
// Avoid diffing by lines if it produces a significantly more complex
|
||||
// edit script than diffing by bytes.
|
||||
if isPureLinedText {
|
||||
ssx = strings.Split(sx, "\n")
|
||||
ssy = strings.Split(sy, "\n")
|
||||
esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(ssx[ix] == ssy[iy])
|
||||
})
|
||||
esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(sx[ix] == sy[iy])
|
||||
})
|
||||
efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
|
||||
efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
|
||||
quotedLength := len(strconv.Quote(sx + sy))
|
||||
unquotedLength := len(sx) + len(sy)
|
||||
escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
|
||||
isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
|
||||
}
|
||||
}
|
||||
|
||||
// Format the string into printable records.
|
||||
@ -138,9 +160,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
switch {
|
||||
// If the text appears to be multi-lined text,
|
||||
// then perform differencing across individual lines.
|
||||
case isLinedText:
|
||||
ssx := strings.Split(sx, "\n")
|
||||
ssy := strings.Split(sy, "\n")
|
||||
case isPureLinedText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
@ -154,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
// differences in a string literal. This format is more readable,
|
||||
// but has edge-cases where differences are visually indistinguishable.
|
||||
// This format is avoided under the following conditions:
|
||||
// • A line starts with `"""`
|
||||
// • A line starts with "..."
|
||||
// • A line contains non-printable characters
|
||||
// • Adjacent different lines differ only by whitespace
|
||||
// - A line starts with `"""`
|
||||
// - A line starts with "..."
|
||||
// - A line contains non-printable characters
|
||||
// - Adjacent different lines differ only by whitespace
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// """
|
||||
// ... // 3 identical lines
|
||||
// foo
|
||||
@ -214,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
if t != reflect.TypeOf(string("")) {
|
||||
if t != stringType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
@ -229,7 +250,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
// If the text appears to be single-lined text,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is printed as quoted strings.
|
||||
case isText:
|
||||
case isMostlyText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
@ -237,7 +258,6 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
delim = ""
|
||||
|
||||
// If the text appears to be binary data,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
@ -299,7 +319,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
|
||||
// Wrap the output with appropriate type information.
|
||||
var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if !isText {
|
||||
if !isMostlyText {
|
||||
// The "{...}" byte-sequence literal is not valid Go syntax for strings.
|
||||
// Emit the type for extra clarity (e.g. "string{...}").
|
||||
if t.Kind() == reflect.String {
|
||||
@ -310,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != reflect.TypeOf(string("")) {
|
||||
if t != stringType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != reflect.TypeOf([]byte(nil)) {
|
||||
if t != bytesType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
}
|
||||
@ -338,8 +358,11 @@ func (opts formatOptions) formatDiffSlice(
|
||||
vx, vy reflect.Value, chunkSize int, name string,
|
||||
makeRec func(reflect.Value, diffMode) textRecord,
|
||||
) (list textList) {
|
||||
es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result {
|
||||
return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface())
|
||||
eq := func(ix, iy int) bool {
|
||||
return vx.Index(ix).Interface() == vy.Index(iy).Interface()
|
||||
}
|
||||
es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(eq(ix, iy))
|
||||
})
|
||||
|
||||
appendChunks := func(v reflect.Value, d diffMode) int {
|
||||
@ -364,6 +387,7 @@ func (opts formatOptions) formatDiffSlice(
|
||||
|
||||
groups := coalesceAdjacentEdits(name, es)
|
||||
groups = coalesceInterveningIdentical(groups, chunkSize/4)
|
||||
groups = cleanupSurroundingIdentical(groups, eq)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
@ -416,25 +440,35 @@ func (opts formatOptions) formatDiffSlice(
|
||||
|
||||
// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
|
||||
// equal or unequal counts.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: "..XXY...Y"
|
||||
// Output: [
|
||||
// {NumIdentical: 2},
|
||||
// {NumRemoved: 2, NumInserted 1},
|
||||
// {NumIdentical: 3},
|
||||
// {NumInserted: 1},
|
||||
// ]
|
||||
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
|
||||
var prevCase int // Arbitrary index into which case last occurred
|
||||
lastStats := func(i int) *diffStats {
|
||||
if prevCase != i {
|
||||
var prevMode byte
|
||||
lastStats := func(mode byte) *diffStats {
|
||||
if prevMode != mode {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevCase = i
|
||||
prevMode = mode
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, e := range es {
|
||||
switch e {
|
||||
case diff.Identity:
|
||||
lastStats(1).NumIdentical++
|
||||
lastStats('=').NumIdentical++
|
||||
case diff.UniqueX:
|
||||
lastStats(2).NumRemoved++
|
||||
lastStats('!').NumRemoved++
|
||||
case diff.UniqueY:
|
||||
lastStats(2).NumInserted++
|
||||
lastStats('!').NumInserted++
|
||||
case diff.Modified:
|
||||
lastStats(2).NumModified++
|
||||
lastStats('!').NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
@ -444,6 +478,34 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats)
|
||||
// equal groups into adjacent unequal groups that currently result in a
|
||||
// dual inserted/removed printout. This acts as a high-pass filter to smooth
|
||||
// out high-frequency changes within the windowSize.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// WindowSize: 16,
|
||||
// Input: [
|
||||
// {NumIdentical: 61}, // group 0
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 1
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 9}, // └── coalesce
|
||||
// {NumIdentical: 64}, // group 2
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 3
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 7}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 2}, // └── coalesce
|
||||
// {NumIdentical: 63}, // group 4
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 64},
|
||||
// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 63},
|
||||
// ]
|
||||
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
|
||||
groups, groupsOrig := groups[:0], groups
|
||||
for i, ds := range groupsOrig {
|
||||
@ -463,3 +525,90 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// cleanupSurroundingIdentical scans through all unequal groups, and
|
||||
// moves any leading sequence of equal elements to the preceding equal group and
|
||||
// moves and trailing sequence of equal elements to the succeeding equal group.
|
||||
//
|
||||
// This is necessary since coalesceInterveningIdentical may coalesce edit groups
|
||||
// together such that leading/trailing spans of equal elements becomes possible.
|
||||
// Note that this can occur even with an optimal diffing algorithm.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
|
||||
// {NumIdentical: 67},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
|
||||
// {NumIdentical: 54},
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 64}, // incremented by 3
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 67},
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 64}, // incremented by 10
|
||||
// ]
|
||||
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
|
||||
var ix, iy int // indexes into sequence x and y
|
||||
for i, ds := range groups {
|
||||
// Handle equal group.
|
||||
if ds.NumDiff() == 0 {
|
||||
ix += ds.NumIdentical
|
||||
iy += ds.NumIdentical
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle unequal group.
|
||||
nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
|
||||
ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
|
||||
var numLeadingIdentical, numTrailingIdentical int
|
||||
for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
|
||||
numLeadingIdentical++
|
||||
}
|
||||
for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
|
||||
numTrailingIdentical++
|
||||
}
|
||||
if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
|
||||
if numLeadingIdentical > 0 {
|
||||
// Remove leading identical span from this group and
|
||||
// insert it into the preceding group.
|
||||
if i-1 >= 0 {
|
||||
groups[i-1].NumIdentical += numLeadingIdentical
|
||||
} else {
|
||||
// No preceding group exists, so prepend a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
|
||||
}()
|
||||
}
|
||||
// Increment indexes since the preceding group would have handled this.
|
||||
ix += numLeadingIdentical
|
||||
iy += numLeadingIdentical
|
||||
}
|
||||
if numTrailingIdentical > 0 {
|
||||
// Remove trailing identical span from this group and
|
||||
// insert it into the succeeding group.
|
||||
if i+1 < len(groups) {
|
||||
groups[i+1].NumIdentical += numTrailingIdentical
|
||||
} else {
|
||||
// No succeeding group exists, so append a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
|
||||
}()
|
||||
}
|
||||
// Do not increment indexes since the succeeding group will handle this.
|
||||
}
|
||||
|
||||
// Update this group since some identical elements were removed.
|
||||
nx -= numIdentical
|
||||
ny -= numIdentical
|
||||
groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
|
||||
}
|
||||
ix += nx
|
||||
iy += ny
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
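The coalescing helpers in this file fold a raw edit script into alternating runs of identical and non-identical elements before printing. A compact standalone sketch of that grouping idea over a string of '=' (equal) and '!' (unequal) marks; this is not the vendored diffStats machinery:

```go
package main

import "fmt"

type group struct {
	identical, different int
}

// coalesce groups a script such as "==!!!==!" into alternating runs,
// mirroring what coalesceAdjacentEdits does with diffStats.
func coalesce(script string) []group {
	var groups []group
	var prev byte
	for i := 0; i < len(script); i++ {
		c := script[i]
		if len(groups) == 0 || c != prev {
			groups = append(groups, group{})
			prev = c
		}
		g := &groups[len(groups)-1]
		if c == '=' {
			g.identical++
		} else {
			g.different++
		}
	}
	return groups
}

func main() {
	fmt.Println(coalesce("==!!!==!")) // [{2 0} {0 3} {2 0} {0 1}]
}
```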
1 vendor/github.com/google/go-cmp/cmp/report_text.go generated vendored
@@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats {
// String prints a humanly-readable summary of coalesced records.
//
// Example:
//
// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
func (s diffStats) String() string {
var ss []string
3 vendor/github.com/nxadm/tail/.gitignore generated vendored
@@ -1,3 +0,0 @@
.idea/
.test/
examples/_*
56 vendor/github.com/nxadm/tail/CHANGES.md generated vendored
@ -1,56 +0,0 @@
|
||||
# Version v1.4.7-v1.4.8
|
||||
* Documentation updates.
|
||||
* Small linter cleanups.
|
||||
* Added example in test.
|
||||
|
||||
# Version v1.4.6
|
||||
|
||||
* Document the usage of Cleanup when re-reading a file (thanks to @lesovsky) for issue #18.
|
||||
* Add example directories with example and tests for issues.
|
||||
|
||||
# Version v1.4.4-v1.4.5
|
||||
|
||||
* Fix of checksum problem because of forced tag. No changes to the code.
|
||||
|
||||
# Version v1.4.1
|
||||
|
||||
* Incorporated PR 162 by by Mohammed902: "Simplify non-Windows build tag".
|
||||
|
||||
# Version v1.4.0
|
||||
|
||||
* Incorporated PR 9 by mschneider82: "Added seekinfo to Tail".
|
||||
|
||||
# Version v1.3.1
|
||||
|
||||
* Incorporated PR 7: "Fix deadlock when stopping on non-empty file/buffer",
|
||||
fixes upstream issue 93.
|
||||
|
||||
|
||||
# Version v1.3.0
|
||||
|
||||
* Incorporated changes of unmerged upstream PR 149 by mezzi: "added line num
|
||||
to Line struct".
|
||||
|
||||
# Version v1.2.1
|
||||
|
||||
* Incorporated changes of unmerged upstream PR 128 by jadekler: "Compile-able
|
||||
code in readme".
|
||||
* Incorporated changes of unmerged upstream PR 130 by fgeller: "small change
|
||||
to comment wording".
|
||||
* Incorporated changes of unmerged upstream PR 133 by sm3142: "removed
|
||||
spurious newlines from log messages".
|
||||
|
||||
# Version v1.2.0
|
||||
|
||||
* Incorporated changes of unmerged upstream PR 126 by Code-Hex: "Solved the
|
||||
problem for never return the last line if it's not followed by a newline".
|
||||
* Incorporated changes of unmerged upstream PR 131 by StoicPerlman: "Remove
|
||||
deprecated os.SEEK consts". The changes bumped the minimal supported Go
|
||||
release to 1.9.
|
||||
|
||||
# Version v1.1.0
|
||||
|
||||
* migration to go modules.
|
||||
* release of master branch of the dormant upstream, because it contains
|
||||
fixes and improvement no present in the tagged release.
|
||||
|
19 vendor/github.com/nxadm/tail/Dockerfile generated vendored
@ -1,19 +0,0 @@
|
||||
FROM golang
|
||||
|
||||
RUN mkdir -p $GOPATH/src/github.com/nxadm/tail/
|
||||
ADD . $GOPATH/src/github.com/nxadm/tail/
|
||||
|
||||
# expecting to fetch dependencies successfully.
|
||||
RUN go get -v github.com/nxadm/tail
|
||||
|
||||
# expecting to run the test successfully.
|
||||
RUN go test -v github.com/nxadm/tail
|
||||
|
||||
# expecting to install successfully
|
||||
RUN go install -v github.com/nxadm/tail
|
||||
RUN go install -v github.com/nxadm/tail/cmd/gotail
|
||||
|
||||
RUN $GOPATH/bin/gotail -h || true
|
||||
|
||||
ENV PATH $GOPATH/bin:$PATH
|
||||
CMD ["gotail"]
|
21 vendor/github.com/nxadm/tail/LICENSE generated vendored
@ -1,21 +0,0 @@
|
||||
# The MIT License (MIT)
|
||||
|
||||
# © Copyright 2015 Hewlett Packard Enterprise Development LP
|
||||
Copyright (c) 2014 ActiveState
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
44 vendor/github.com/nxadm/tail/README.md generated vendored
@ -1,44 +0,0 @@
|
||||
[](https://pkg.go.dev/github.com/nxadm/tail)
|
||||
|
||||
# tail functionality in Go
|
||||
|
||||
nxadm/tail provides a Go library that emulates the features of the BSD `tail`
|
||||
program. The library comes with full support for truncation/move detection as
|
||||
it is designed to work with log rotation tools. The library works on all
|
||||
operating systems supported by Go, including POSIX systems like Linux and
|
||||
*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.
|
||||
|
||||
A simple example:
|
||||
|
||||
```Go
|
||||
// Create a tail
|
||||
t, err := tail.TailFile(
|
||||
"/var/log/nginx.log", tail.Config{Follow: true, ReOpen: true})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Print the text of each received line
|
||||
for line := range t.Lines {
|
||||
fmt.Println(line.Text)
|
||||
}
|
||||
```
|
||||
|
||||
See [API documentation](https://pkg.go.dev/github.com/nxadm/tail).
|
||||
|
||||
## Installing
|
||||
|
||||
go get github.com/nxadm/tail/...
|
||||
|
||||
## History
|
||||
|
||||
This project is an active, drop-in replacement for the
|
||||
[abandoned](https://en.wikipedia.org/wiki/HPE_Helion) Go tail library at
|
||||
[hpcloud](https://github.com/hpcloud/tail). Next to
|
||||
[addressing open issues/PRs of the original project](https://github.com/nxadm/tail/issues/6),
|
||||
nxadm/tail continues the development by keeping up to date with the Go toolchain
|
||||
(e.g. go modules) and dependencies, completing the documentation, adding features
|
||||
and fixing bugs.
|
||||
|
||||
## Examples
|
||||
Examples, e.g. used to debug an issue, are kept in the [examples directory](/examples).
|
7 vendor/github.com/nxadm/tail/ratelimiter/Licence generated vendored
@@ -1,7 +0,0 @@
Copyright (C) 2013 99designs

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
97 vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go generated vendored
@ -1,97 +0,0 @@
|
||||
// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
|
||||
package ratelimiter
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type LeakyBucket struct {
|
||||
Size uint16
|
||||
Fill float64
|
||||
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
|
||||
Lastupdate time.Time
|
||||
Now func() time.Time
|
||||
}
|
||||
|
||||
func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
|
||||
bucket := LeakyBucket{
|
||||
Size: size,
|
||||
Fill: 0,
|
||||
LeakInterval: leakInterval,
|
||||
Now: time.Now,
|
||||
Lastupdate: time.Now(),
|
||||
}
|
||||
|
||||
return &bucket
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) updateFill() {
|
||||
now := b.Now()
|
||||
if b.Fill > 0 {
|
||||
elapsed := now.Sub(b.Lastupdate)
|
||||
|
||||
b.Fill -= float64(elapsed) / float64(b.LeakInterval)
|
||||
if b.Fill < 0 {
|
||||
b.Fill = 0
|
||||
}
|
||||
}
|
||||
b.Lastupdate = now
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) Pour(amount uint16) bool {
|
||||
b.updateFill()
|
||||
|
||||
var newfill float64 = b.Fill + float64(amount)
|
||||
|
||||
if newfill > float64(b.Size) {
|
||||
return false
|
||||
}
|
||||
|
||||
b.Fill = newfill
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// The time at which this bucket will be completely drained
|
||||
func (b *LeakyBucket) DrainedAt() time.Time {
|
||||
return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
|
||||
}
|
||||
|
||||
// The duration until this bucket is completely drained
|
||||
func (b *LeakyBucket) TimeToDrain() time.Duration {
|
||||
return b.DrainedAt().Sub(b.Now())
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
|
||||
return b.Now().Sub(b.Lastupdate)
|
||||
}
|
||||
|
||||
type LeakyBucketSer struct {
|
||||
Size uint16
|
||||
Fill float64
|
||||
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
|
||||
Lastupdate time.Time
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) Serialise() *LeakyBucketSer {
|
||||
bucket := LeakyBucketSer{
|
||||
Size: b.Size,
|
||||
Fill: b.Fill,
|
||||
LeakInterval: b.LeakInterval,
|
||||
Lastupdate: b.Lastupdate,
|
||||
}
|
||||
|
||||
return &bucket
|
||||
}
|
||||
|
||||
func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
|
||||
bucket := LeakyBucket{
|
||||
Size: b.Size,
|
||||
Fill: b.Fill,
|
||||
LeakInterval: b.LeakInterval,
|
||||
Lastupdate: b.Lastupdate,
|
||||
Now: time.Now,
|
||||
}
|
||||
|
||||
return &bucket
|
||||
}
|
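The deleted ratelimiter package above implements a classic leaky bucket: Pour adds to the fill level and the bucket drains one unit per LeakInterval. A stripped-down sketch of the same bookkeeping, illustrative rather than the removed API:

```go
package main

import (
	"fmt"
	"time"
)

// bucket is a minimal leaky bucket: fill grows on pour and drains at a
// rate of one unit per leakInterval, the same bookkeeping the removed
// ratelimiter package performed.
type bucket struct {
	size         float64
	fill         float64
	leakInterval time.Duration
	lastUpdate   time.Time
}

func (b *bucket) pour(amount float64) bool {
	now := time.Now()
	b.fill -= float64(now.Sub(b.lastUpdate)) / float64(b.leakInterval)
	if b.fill < 0 {
		b.fill = 0
	}
	b.lastUpdate = now
	if b.fill+amount > b.size {
		return false // over capacity: caller should back off
	}
	b.fill += amount
	return true
}

func main() {
	b := &bucket{size: 2, leakInterval: time.Second, lastUpdate: time.Now()}
	fmt.Println(b.pour(1), b.pour(1), b.pour(1)) // true true false
}
```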
60 vendor/github.com/nxadm/tail/ratelimiter/memory.go generated vendored
@ -1,60 +0,0 @@
|
||||
package ratelimiter
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
GC_SIZE int = 100
|
||||
GC_PERIOD time.Duration = 60 * time.Second
|
||||
)
|
||||
|
||||
type Memory struct {
|
||||
store map[string]LeakyBucket
|
||||
lastGCCollected time.Time
|
||||
}
|
||||
|
||||
func NewMemory() *Memory {
|
||||
m := new(Memory)
|
||||
m.store = make(map[string]LeakyBucket)
|
||||
m.lastGCCollected = time.Now()
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
|
||||
|
||||
bucket, ok := m.store[key]
|
||||
if !ok {
|
||||
return nil, errors.New("miss")
|
||||
}
|
||||
|
||||
return &bucket, nil
|
||||
}
|
||||
|
||||
func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
|
||||
|
||||
if len(m.store) > GC_SIZE {
|
||||
m.GarbageCollect()
|
||||
}
|
||||
|
||||
m.store[key] = bucket
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Memory) GarbageCollect() {
|
||||
now := time.Now()
|
||||
|
||||
// rate limit GC to once per minute
|
||||
if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() {
|
||||
for key, bucket := range m.store {
|
||||
// if the bucket is drained, then GC
|
||||
if bucket.DrainedAt().Unix() < now.Unix() {
|
||||
delete(m.store, key)
|
||||
}
|
||||
}
|
||||
|
||||
m.lastGCCollected = now
|
||||
}
|
||||
}
|
6 vendor/github.com/nxadm/tail/ratelimiter/storage.go generated vendored
@@ -1,6 +0,0 @@
package ratelimiter

type Storage interface {
GetBucketFor(string) (*LeakyBucket, error)
SetBucketFor(string, LeakyBucket) error
}
455 vendor/github.com/nxadm/tail/tail.go generated vendored
@ -1,455 +0,0 @@
|
||||
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
//nxadm/tail provides a Go library that emulates the features of the BSD `tail`
|
||||
//program. The library comes with full support for truncation/move detection as
|
||||
//it is designed to work with log rotation tools. The library works on all
|
||||
//operating systems supported by Go, including POSIX systems like Linux and
|
||||
//*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.
|
||||
package tail
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/nxadm/tail/ratelimiter"
|
||||
"github.com/nxadm/tail/util"
|
||||
"github.com/nxadm/tail/watch"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrStop is returned when the tail of a file has been marked to be stopped.
|
||||
ErrStop = errors.New("tail should now stop")
|
||||
)
|
||||
|
||||
type Line struct {
|
||||
Text string // The contents of the file
|
||||
Num int // The line number
|
||||
SeekInfo SeekInfo // SeekInfo
|
||||
Time time.Time // Present time
|
||||
Err error // Error from tail
|
||||
}
|
||||
|
||||
// Deprecated: this function is no longer used internally and it has little of no
|
||||
// use in the API. As such, it will be removed from the API in a future major
|
||||
// release.
|
||||
//
|
||||
// NewLine returns a * pointer to a Line struct.
|
||||
func NewLine(text string, lineNum int) *Line {
|
||||
return &Line{text, lineNum, SeekInfo{}, time.Now(), nil}
|
||||
}
|
||||
|
||||
// SeekInfo represents arguments to io.Seek. See: https://golang.org/pkg/io/#SectionReader.Seek
|
||||
type SeekInfo struct {
|
||||
Offset int64
|
||||
Whence int
|
||||
}
|
||||
|
||||
type logger interface {
|
||||
Fatal(v ...interface{})
|
||||
Fatalf(format string, v ...interface{})
|
||||
Fatalln(v ...interface{})
|
||||
Panic(v ...interface{})
|
||||
Panicf(format string, v ...interface{})
|
||||
Panicln(v ...interface{})
|
||||
Print(v ...interface{})
|
||||
Printf(format string, v ...interface{})
|
||||
Println(v ...interface{})
|
||||
}
|
||||
|
||||
// Config is used to specify how a file must be tailed.
|
||||
type Config struct {
|
||||
// File-specifc
|
||||
Location *SeekInfo // Tail from this location. If nil, start at the beginning of the file
|
||||
ReOpen bool // Reopen recreated files (tail -F)
|
||||
MustExist bool // Fail early if the file does not exist
|
||||
Poll bool // Poll for file changes instead of using the default inotify
|
||||
Pipe bool // The file is a named pipe (mkfifo)
|
||||
|
||||
// Generic IO
|
||||
Follow bool // Continue looking for new lines (tail -f)
|
||||
MaxLineSize int // If non-zero, split longer lines into multiple lines
|
||||
|
||||
// Optionally, use a ratelimiter (e.g. created by the ratelimiter/NewLeakyBucket function)
|
||||
RateLimiter *ratelimiter.LeakyBucket
|
||||
|
||||
// Optionally use a Logger. When nil, the Logger is set to tail.DefaultLogger.
|
||||
// To disable logging, set it to tail.DiscardingLogger
|
||||
Logger logger
|
||||
}
|
||||
|
||||
type Tail struct {
|
||||
Filename string // The filename
|
||||
Lines chan *Line // A consumable channel of *Line
|
||||
Config // Tail.Configuration
|
||||
|
||||
file *os.File
|
||||
reader *bufio.Reader
|
||||
lineNum int
|
||||
|
||||
watcher watch.FileWatcher
|
||||
changes *watch.FileChanges
|
||||
|
||||
tomb.Tomb // provides: Done, Kill, Dying
|
||||
|
||||
lk sync.Mutex
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultLogger logs to os.Stderr and it is used when Config.Logger == nil
|
||||
DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
// DiscardingLogger can be used to disable logging output
|
||||
DiscardingLogger = log.New(ioutil.Discard, "", 0)
|
||||
)
|
||||
|
||||
// TailFile begins tailing the file. And returns a pointer to a Tail struct
|
||||
// and an error. An output stream is made available via the Tail.Lines
|
||||
// channel (e.g. to be looped and printed). To handle errors during tailing,
|
||||
// after finishing reading from the Lines channel, invoke the `Wait` or `Err`
|
||||
// method on the returned *Tail.
|
||||
func TailFile(filename string, config Config) (*Tail, error) {
|
||||
if config.ReOpen && !config.Follow {
|
||||
util.Fatal("cannot set ReOpen without Follow.")
|
||||
}
|
||||
|
||||
t := &Tail{
|
||||
Filename: filename,
|
||||
Lines: make(chan *Line),
|
||||
Config: config,
|
||||
}
|
||||
|
||||
// when Logger was not specified in config, use default logger
|
||||
if t.Logger == nil {
|
||||
t.Logger = DefaultLogger
|
||||
}
|
||||
|
||||
if t.Poll {
|
||||
t.watcher = watch.NewPollingFileWatcher(filename)
|
||||
} else {
|
||||
t.watcher = watch.NewInotifyFileWatcher(filename)
|
||||
}
|
||||
|
||||
if t.MustExist {
|
||||
var err error
|
||||
t.file, err = OpenFile(t.Filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
go t.tailFileSync()
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// Tell returns the file's current position, like stdio's ftell() and an error.
|
||||
// Beware that this value may not be completely accurate because one line from
|
||||
// the chan(tail.Lines) may have been read already.
|
||||
func (tail *Tail) Tell() (offset int64, err error) {
|
||||
if tail.file == nil {
|
||||
return
|
||||
}
|
||||
offset, err = tail.file.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
tail.lk.Lock()
|
||||
defer tail.lk.Unlock()
|
||||
if tail.reader == nil {
|
||||
return
|
||||
}
|
||||
|
||||
offset -= int64(tail.reader.Buffered())
|
||||
return
|
||||
}
|
||||
|
||||
// Stop stops the tailing activity.
|
||||
func (tail *Tail) Stop() error {
|
||||
tail.Kill(nil)
|
||||
return tail.Wait()
|
||||
}
|
||||
|
||||
// StopAtEOF stops tailing as soon as the end of the file is reached. The function
|
||||
// returns an error,
|
||||
func (tail *Tail) StopAtEOF() error {
|
||||
tail.Kill(errStopAtEOF)
|
||||
return tail.Wait()
|
||||
}
|
||||
|
||||
var errStopAtEOF = errors.New("tail: stop at eof")
|
||||
|
||||
func (tail *Tail) close() {
|
||||
close(tail.Lines)
|
||||
tail.closeFile()
|
||||
}
|
||||
|
||||
func (tail *Tail) closeFile() {
|
||||
if tail.file != nil {
|
||||
tail.file.Close()
|
||||
tail.file = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (tail *Tail) reopen() error {
|
||||
tail.closeFile()
|
||||
tail.lineNum = 0
|
||||
for {
|
||||
var err error
|
||||
tail.file, err = OpenFile(tail.Filename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
|
||||
if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
|
||||
if err == tomb.ErrDying {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
|
||||
}
|
||||
break
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tail *Tail) readLine() (string, error) {
|
||||
tail.lk.Lock()
|
||||
line, err := tail.reader.ReadString('\n')
|
||||
tail.lk.Unlock()
|
||||
if err != nil {
|
||||
// Note ReadString "returns the data read before the error" in
|
||||
// case of an error, including EOF, so we return it as is. The
|
||||
// caller is expected to process it if err is EOF.
|
||||
return line, err
|
||||
}
|
||||
|
||||
line = strings.TrimRight(line, "\n")
|
||||
|
||||
return line, err
|
||||
}
|
||||
|
||||
func (tail *Tail) tailFileSync() {
|
||||
defer tail.Done()
|
||||
defer tail.close()
|
||||
|
||||
if !tail.MustExist {
|
||||
// deferred first open.
|
||||
err := tail.reopen()
|
||||
if err != nil {
|
||||
if err != tomb.ErrDying {
|
||||
tail.Kill(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Seek to requested location on first open of the file.
|
||||
if tail.Location != nil {
|
||||
_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
|
||||
if err != nil {
|
||||
tail.Killf("Seek error on %s: %s", tail.Filename, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
tail.openReader()
|
||||
|
||||
// Read line by line.
|
||||
for {
|
||||
// do not seek in named pipes
|
||||
if !tail.Pipe {
|
||||
// grab the position in case we need to back up in the event of a half-line
|
||||
if _, err := tail.Tell(); err != nil {
|
||||
tail.Kill(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
line, err := tail.readLine()
|
||||
|
||||
// Process `line` even if err is EOF.
|
||||
if err == nil {
|
||||
cooloff := !tail.sendLine(line)
|
||||
if cooloff {
|
||||
// Wait a second before seeking till the end of
|
||||
// file when rate limit is reached.
|
||||
msg := ("Too much log activity; waiting a second before resuming tailing")
|
||||
offset, _ := tail.Tell()
|
||||
tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)}
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
case <-tail.Dying():
|
||||
return
|
||||
}
|
||||
if err := tail.seekEnd(); err != nil {
|
||||
tail.Kill(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
} else if err == io.EOF {
|
||||
if !tail.Follow {
|
||||
if line != "" {
|
||||
tail.sendLine(line)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if tail.Follow && line != "" {
|
||||
tail.sendLine(line)
|
||||
if err := tail.seekEnd(); err != nil {
|
||||
tail.Kill(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// When EOF is reached, wait for more data to become
|
||||
// available. Wait strategy is based on the `tail.watcher`
|
||||
// implementation (inotify or polling).
|
||||
err := tail.waitForChanges()
|
||||
if err != nil {
|
||||
if err != ErrStop {
|
||||
tail.Kill(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// non-EOF error
|
||||
tail.Killf("Error reading %s: %s", tail.Filename, err)
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-tail.Dying():
|
||||
if tail.Err() == errStopAtEOF {
|
||||
continue
|
||||
}
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// waitForChanges waits until the file has been appended, deleted,
|
||||
// moved or truncated. When moved or deleted - the file will be
|
||||
// reopened if ReOpen is true. Truncated files are always reopened.
|
||||
func (tail *Tail) waitForChanges() error {
|
||||
if tail.changes == nil {
|
||||
pos, err := tail.file.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-tail.changes.Modified:
|
||||
return nil
|
||||
case <-tail.changes.Deleted:
|
||||
tail.changes = nil
|
||||
if tail.ReOpen {
|
||||
// XXX: we must not log from a library.
|
||||
tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
|
||||
if err := tail.reopen(); err != nil {
|
||||
return err
|
||||
}
|
||||
tail.Logger.Printf("Successfully reopened %s", tail.Filename)
|
||||
tail.openReader()
|
||||
return nil
|
||||
}
|
||||
tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
|
||||
return ErrStop
|
||||
case <-tail.changes.Truncated:
|
||||
// Always reopen truncated files (Follow is true)
|
||||
tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
|
||||
if err := tail.reopen(); err != nil {
|
||||
return err
|
||||
}
|
||||
tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
|
||||
tail.openReader()
|
||||
return nil
|
||||
case <-tail.Dying():
|
||||
return ErrStop
|
||||
}
|
||||
}
|
||||
|
||||
func (tail *Tail) openReader() {
|
||||
tail.lk.Lock()
|
||||
if tail.MaxLineSize > 0 {
|
||||
// add 2 to account for newline characters
|
||||
tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
|
||||
} else {
|
||||
tail.reader = bufio.NewReader(tail.file)
|
||||
}
|
||||
tail.lk.Unlock()
|
||||
}
|
||||
|
||||
func (tail *Tail) seekEnd() error {
|
||||
return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd})
|
||||
}
|
||||
|
||||
func (tail *Tail) seekTo(pos SeekInfo) error {
|
||||
_, err := tail.file.Seek(pos.Offset, pos.Whence)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
|
||||
}
|
||||
// Reset the read buffer whenever the file is re-seek'ed
|
||||
tail.reader.Reset(tail.file)
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendLine sends the line(s) to Lines channel, splitting longer lines
|
||||
// if necessary. Return false if rate limit is reached.
|
||||
func (tail *Tail) sendLine(line string) bool {
|
||||
now := time.Now()
|
||||
lines := []string{line}
|
||||
|
||||
// Split longer lines
|
||||
if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
|
||||
lines = util.PartitionString(line, tail.MaxLineSize)
|
||||
}
|
||||
|
||||
for _, line := range lines {
|
||||
tail.lineNum++
|
||||
offset, _ := tail.Tell()
|
||||
select {
|
||||
case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}:
|
||||
case <-tail.Dying():
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if tail.Config.RateLimiter != nil {
|
||||
ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
|
||||
if !ok {
|
||||
tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.",
|
||||
tail.Filename)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Cleanup removes inotify watches added by the tail package. This function is
|
||||
// meant to be invoked from a process's exit handler. Linux kernel may not
|
||||
// automatically remove inotify watches after the process exits.
|
||||
// If you plan to re-read a file, don't call Cleanup in between.
|
||||
func (tail *Tail) Cleanup() {
|
||||
watch.Cleanup(tail.Filename)
|
||||
}
|
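The TailFile documentation above says to call Wait or Err after draining the Lines channel to observe tailing errors. A minimal, hypothetical usage sketch of that pattern; the path is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/nxadm/tail"
)

func main() {
	// With Follow unset the file is read once through to EOF; errors are
	// surfaced via Wait after the Lines channel has been drained.
	t, err := tail.TailFile("/var/log/example.log", tail.Config{})
	if err != nil {
		log.Fatal(err)
	}
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
	if err := t.Wait(); err != nil {
		log.Println("tail ended with error:", err)
	}
	t.Cleanup() // drop any inotify watches before exiting
}
```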
17 vendor/github.com/nxadm/tail/tail_posix.go generated vendored
@@ -1,17 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build !windows

package tail

import (
"os"
)

// Deprecated: this function is only useful internally and, as such,
// it will be removed from the API in a future major release.
//
// OpenFile proxies a os.Open call for a file so it can be correctly tailed
// on POSIX and non-POSIX OSes like MS Windows.
func OpenFile(name string) (file *os.File, err error) {
return os.Open(name)
}
19 vendor/github.com/nxadm/tail/tail_windows.go generated vendored
@@ -1,19 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build windows

package tail

import (
"os"

"github.com/nxadm/tail/winfile"
)

// Deprecated: this function is only useful internally and, as such,
// it will be removed from the API in a future major release.
//
// OpenFile proxies a os.Open call for a file so it can be correctly tailed
// on POSIX and non-POSIX OSes like MS Windows.
func OpenFile(name string) (file *os.File, err error) {
return winfile.OpenFile(name, os.O_RDONLY, 0)
}
49 vendor/github.com/nxadm/tail/util/util.go generated vendored
@@ -1,49 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package util

import (
	"fmt"
	"log"
	"os"
	"runtime/debug"
)

type Logger struct {
	*log.Logger
}

var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}

// fatal is like panic except it displays only the current goroutine's stack.
func Fatal(format string, v ...interface{}) {
	// https://github.com/nxadm/log/blob/master/log.go#L45
	LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
	os.Exit(1)
}

// partitionString partitions the string into chunks of given size,
// with the last chunk of variable size.
func PartitionString(s string, chunkSize int) []string {
	if chunkSize <= 0 {
		panic("invalid chunkSize")
	}
	length := len(s)
	chunks := 1 + length/chunkSize
	start := 0
	end := chunkSize
	parts := make([]string, 0, chunks)
	for {
		if end > length {
			end = length
		}
		parts = append(parts, s[start:end])
		if end == length {
			break
		}
		start, end = end, end+chunkSize
	}
	return parts
}
37
vendor/github.com/nxadm/tail/watch/filechanges.go
generated
vendored
@@ -1,37 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
package watch

type FileChanges struct {
	Modified  chan bool // Channel to get notified of modifications
	Truncated chan bool // Channel to get notified of truncations
	Deleted   chan bool // Channel to get notified of deletions/renames
}

func NewFileChanges() *FileChanges {
	return &FileChanges{
		make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)}
}

func (fc *FileChanges) NotifyModified() {
	sendOnlyIfEmpty(fc.Modified)
}

func (fc *FileChanges) NotifyTruncated() {
	sendOnlyIfEmpty(fc.Truncated)
}

func (fc *FileChanges) NotifyDeleted() {
	sendOnlyIfEmpty(fc.Deleted)
}

// sendOnlyIfEmpty sends on a bool channel only if the channel has no
// backlog to be read by other goroutines. This concurrency pattern
// can be used to notify other goroutines if and only if they are
// looking for it (i.e., subsequent notifications can be compressed
// into one).
func sendOnlyIfEmpty(ch chan bool) {
	select {
	case ch <- true:
	default:
	}
}
136
vendor/github.com/nxadm/tail/watch/inotify.go
generated
vendored
@@ -1,136 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/nxadm/tail/util"

	"github.com/fsnotify/fsnotify"
	"gopkg.in/tomb.v1"
)

// InotifyFileWatcher uses inotify to monitor file changes.
type InotifyFileWatcher struct {
	Filename string
	Size     int64
}

func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
	fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
	return fw
}

func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
	err := WatchCreate(fw.Filename)
	if err != nil {
		return err
	}
	defer RemoveWatchCreate(fw.Filename)

	// Do a real check now as the file might have been created before
	// calling `WatchFlags` above.
	if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
		// file exists, or stat returned an error.
		return err
	}

	events := Events(fw.Filename)

	for {
		select {
		case evt, ok := <-events:
			if !ok {
				return fmt.Errorf("inotify watcher has been closed")
			}
			evtName, err := filepath.Abs(evt.Name)
			if err != nil {
				return err
			}
			fwFilename, err := filepath.Abs(fw.Filename)
			if err != nil {
				return err
			}
			if evtName == fwFilename {
				return nil
			}
		case <-t.Dying():
			return tomb.ErrDying
		}
	}
	panic("unreachable")
}

func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
	err := Watch(fw.Filename)
	if err != nil {
		return nil, err
	}

	changes := NewFileChanges()
	fw.Size = pos

	go func() {

		events := Events(fw.Filename)

		for {
			prevSize := fw.Size

			var evt fsnotify.Event
			var ok bool

			select {
			case evt, ok = <-events:
				if !ok {
					RemoveWatch(fw.Filename)
					return
				}
			case <-t.Dying():
				RemoveWatch(fw.Filename)
				return
			}

			switch {
			case evt.Op&fsnotify.Remove == fsnotify.Remove:
				fallthrough

			case evt.Op&fsnotify.Rename == fsnotify.Rename:
				RemoveWatch(fw.Filename)
				changes.NotifyDeleted()
				return

			//With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod)
			case evt.Op&fsnotify.Chmod == fsnotify.Chmod:
				fallthrough

			case evt.Op&fsnotify.Write == fsnotify.Write:
				fi, err := os.Stat(fw.Filename)
				if err != nil {
					if os.IsNotExist(err) {
						RemoveWatch(fw.Filename)
						changes.NotifyDeleted()
						return
					}
					// XXX: report this error back to the user
					util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
				}
				fw.Size = fi.Size()

				if prevSize > 0 && prevSize > fw.Size {
					changes.NotifyTruncated()
				} else {
					changes.NotifyModified()
				}
				prevSize = fw.Size
			}
		}
	}()

	return changes, nil
}
249
vendor/github.com/nxadm/tail/watch/inotify_tracker.go
generated
vendored
@@ -1,249 +0,0 @@
|
||||
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/nxadm/tail/util"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
type InotifyTracker struct {
|
||||
mux sync.Mutex
|
||||
watcher *fsnotify.Watcher
|
||||
chans map[string]chan fsnotify.Event
|
||||
done map[string]chan bool
|
||||
watchNums map[string]int
|
||||
watch chan *watchInfo
|
||||
remove chan *watchInfo
|
||||
error chan error
|
||||
}
|
||||
|
||||
type watchInfo struct {
|
||||
op fsnotify.Op
|
||||
fname string
|
||||
}
|
||||
|
||||
func (this *watchInfo) isCreate() bool {
|
||||
return this.op == fsnotify.Create
|
||||
}
|
||||
|
||||
var (
|
||||
// globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
|
||||
shared *InotifyTracker
|
||||
|
||||
// these are used to ensure the shared InotifyTracker is run exactly once
|
||||
once = sync.Once{}
|
||||
goRun = func() {
|
||||
shared = &InotifyTracker{
|
||||
mux: sync.Mutex{},
|
||||
chans: make(map[string]chan fsnotify.Event),
|
||||
done: make(map[string]chan bool),
|
||||
watchNums: make(map[string]int),
|
||||
watch: make(chan *watchInfo),
|
||||
remove: make(chan *watchInfo),
|
||||
error: make(chan error),
|
||||
}
|
||||
go shared.run()
|
||||
}
|
||||
|
||||
logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
)
|
||||
|
||||
// Watch signals the run goroutine to begin watching the input filename
|
||||
func Watch(fname string) error {
|
||||
return watch(&watchInfo{
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
// Watch create signals the run goroutine to begin watching the input filename
|
||||
// if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate
|
||||
func WatchCreate(fname string) error {
|
||||
return watch(&watchInfo{
|
||||
op: fsnotify.Create,
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
func watch(winfo *watchInfo) error {
|
||||
// start running the shared InotifyTracker if not already running
|
||||
once.Do(goRun)
|
||||
|
||||
winfo.fname = filepath.Clean(winfo.fname)
|
||||
shared.watch <- winfo
|
||||
return <-shared.error
|
||||
}
|
||||
|
||||
// RemoveWatch signals the run goroutine to remove the watch for the input filename
|
||||
func RemoveWatch(fname string) error {
|
||||
return remove(&watchInfo{
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
// RemoveWatch create signals the run goroutine to remove the watch for the input filename
|
||||
func RemoveWatchCreate(fname string) error {
|
||||
return remove(&watchInfo{
|
||||
op: fsnotify.Create,
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
func remove(winfo *watchInfo) error {
|
||||
// start running the shared InotifyTracker if not already running
|
||||
once.Do(goRun)
|
||||
|
||||
winfo.fname = filepath.Clean(winfo.fname)
|
||||
shared.mux.Lock()
|
||||
done := shared.done[winfo.fname]
|
||||
if done != nil {
|
||||
delete(shared.done, winfo.fname)
|
||||
close(done)
|
||||
}
|
||||
shared.mux.Unlock()
|
||||
|
||||
shared.remove <- winfo
|
||||
return <-shared.error
|
||||
}
|
||||
|
||||
// Events returns a channel to which FileEvents corresponding to the input filename
|
||||
// will be sent. This channel will be closed when removeWatch is called on this
|
||||
// filename.
|
||||
func Events(fname string) <-chan fsnotify.Event {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
return shared.chans[fname]
|
||||
}
|
||||
|
||||
// Cleanup removes the watch for the input filename if necessary.
|
||||
func Cleanup(fname string) error {
|
||||
return RemoveWatch(fname)
|
||||
}
|
||||
|
||||
// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating
|
||||
// a new Watcher if the previous Watcher was closed.
|
||||
func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
if shared.chans[winfo.fname] == nil {
|
||||
shared.chans[winfo.fname] = make(chan fsnotify.Event)
|
||||
}
|
||||
if shared.done[winfo.fname] == nil {
|
||||
shared.done[winfo.fname] = make(chan bool)
|
||||
}
|
||||
|
||||
fname := winfo.fname
|
||||
if winfo.isCreate() {
|
||||
// Watch for new files to be created in the parent directory.
|
||||
fname = filepath.Dir(fname)
|
||||
}
|
||||
|
||||
var err error
|
||||
// already in inotify watch
|
||||
if shared.watchNums[fname] == 0 {
|
||||
err = shared.watcher.Add(fname)
|
||||
}
|
||||
if err == nil {
|
||||
shared.watchNums[fname]++
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the
|
||||
// corresponding events channel.
|
||||
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error {
|
||||
shared.mux.Lock()
|
||||
|
||||
ch := shared.chans[winfo.fname]
|
||||
if ch != nil {
|
||||
delete(shared.chans, winfo.fname)
|
||||
close(ch)
|
||||
}
|
||||
|
||||
fname := winfo.fname
|
||||
if winfo.isCreate() {
|
||||
// Watch for new files to be created in the parent directory.
|
||||
fname = filepath.Dir(fname)
|
||||
}
|
||||
shared.watchNums[fname]--
|
||||
watchNum := shared.watchNums[fname]
|
||||
if watchNum == 0 {
|
||||
delete(shared.watchNums, fname)
|
||||
}
|
||||
shared.mux.Unlock()
|
||||
|
||||
var err error
|
||||
// If we were the last ones to watch this file, unsubscribe from inotify.
|
||||
// This needs to happen after releasing the lock because fsnotify waits
|
||||
// synchronously for the kernel to acknowledge the removal of the watch
|
||||
// for this file, which causes us to deadlock if we still held the lock.
|
||||
if watchNum == 0 {
|
||||
err = shared.watcher.Remove(fname)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// sendEvent sends the input event to the appropriate Tail.
|
||||
func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
|
||||
name := filepath.Clean(event.Name)
|
||||
|
||||
shared.mux.Lock()
|
||||
ch := shared.chans[name]
|
||||
done := shared.done[name]
|
||||
shared.mux.Unlock()
|
||||
|
||||
if ch != nil && done != nil {
|
||||
select {
|
||||
case ch <- event:
|
||||
case <-done:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// run starts the goroutine in which the shared struct reads events from its
|
||||
// Watcher's Event channel and sends the events to the appropriate Tail.
|
||||
func (shared *InotifyTracker) run() {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
util.Fatal("failed to create Watcher")
|
||||
}
|
||||
shared.watcher = watcher
|
||||
|
||||
for {
|
||||
select {
|
||||
case winfo := <-shared.watch:
|
||||
shared.error <- shared.addWatch(winfo)
|
||||
|
||||
case winfo := <-shared.remove:
|
||||
shared.error <- shared.removeWatch(winfo)
|
||||
|
||||
case event, open := <-shared.watcher.Events:
|
||||
if !open {
|
||||
return
|
||||
}
|
||||
shared.sendEvent(event)
|
||||
|
||||
case err, open := <-shared.watcher.Errors:
|
||||
if !open {
|
||||
return
|
||||
} else if err != nil {
|
||||
sysErr, ok := err.(*os.SyscallError)
|
||||
if !ok || sysErr.Err != syscall.EINTR {
|
||||
logger.Printf("Error in Watcher Error channel: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
119
vendor/github.com/nxadm/tail/watch/polling.go
generated
vendored
@@ -1,119 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import (
	"os"
	"runtime"
	"time"

	"github.com/nxadm/tail/util"
	"gopkg.in/tomb.v1"
)

// PollingFileWatcher polls the file for changes.
type PollingFileWatcher struct {
	Filename string
	Size     int64
}

func NewPollingFileWatcher(filename string) *PollingFileWatcher {
	fw := &PollingFileWatcher{filename, 0}
	return fw
}

var POLL_DURATION time.Duration

func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
	for {
		if _, err := os.Stat(fw.Filename); err == nil {
			return nil
		} else if !os.IsNotExist(err) {
			return err
		}
		select {
		case <-time.After(POLL_DURATION):
			continue
		case <-t.Dying():
			return tomb.ErrDying
		}
	}
	panic("unreachable")
}

func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
	origFi, err := os.Stat(fw.Filename)
	if err != nil {
		return nil, err
	}

	changes := NewFileChanges()
	var prevModTime time.Time

	// XXX: use tomb.Tomb to cleanly manage these goroutines. replace
	// the fatal (below) with tomb's Kill.

	fw.Size = pos

	go func() {
		prevSize := fw.Size
		for {
			select {
			case <-t.Dying():
				return
			default:
			}

			time.Sleep(POLL_DURATION)
			fi, err := os.Stat(fw.Filename)
			if err != nil {
				// Windows cannot delete a file if a handle is still open (tail keeps one open)
				// so it gives access denied to anything trying to read it until all handles are released.
				if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
					// File does not exist (has been deleted).
					changes.NotifyDeleted()
					return
				}

				// XXX: report this error back to the user
				util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
			}

			// File got moved/renamed?
			if !os.SameFile(origFi, fi) {
				changes.NotifyDeleted()
				return
			}

			// File got truncated?
			fw.Size = fi.Size()
			if prevSize > 0 && prevSize > fw.Size {
				changes.NotifyTruncated()
				prevSize = fw.Size
				continue
			}
			// File got bigger?
			if prevSize > 0 && prevSize < fw.Size {
				changes.NotifyModified()
				prevSize = fw.Size
				continue
			}
			prevSize = fw.Size

			// File was appended to (changed)?
			modTime := fi.ModTime()
			if modTime != prevModTime {
				prevModTime = modTime
				changes.NotifyModified()
			}
		}
	}()

	return changes, nil
}

func init() {
	POLL_DURATION = 250 * time.Millisecond
}
21
vendor/github.com/nxadm/tail/watch/watch.go
generated
vendored
@@ -1,21 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import "gopkg.in/tomb.v1"

// FileWatcher monitors file-level events.
type FileWatcher interface {
	// BlockUntilExists blocks until the file comes into existence.
	BlockUntilExists(*tomb.Tomb) error

	// ChangeEvents reports on changes to a file, be it modification,
	// deletion, renames or truncations. Returned FileChanges group of
	// channels will be closed, thus become unusable, after a deletion
	// or truncation event.
	// In order to properly report truncations, ChangeEvents requires
	// the caller to pass their current offset in the file.
	ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
}
93
vendor/github.com/nxadm/tail/winfile/winfile.go
generated
vendored
@@ -1,93 +0,0 @@
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build windows

package winfile

import (
	"os"
	"syscall"
	"unsafe"
)

// issue also described here
//https://codereview.appspot.com/8203043/

// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
	if len(path) == 0 {
		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
	}
	pathp, err := syscall.UTF16PtrFromString(path)
	if err != nil {
		return syscall.InvalidHandle, err
	}
	var access uint32
	switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
	case syscall.O_RDONLY:
		access = syscall.GENERIC_READ
	case syscall.O_WRONLY:
		access = syscall.GENERIC_WRITE
	case syscall.O_RDWR:
		access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
	}
	if mode&syscall.O_CREAT != 0 {
		access |= syscall.GENERIC_WRITE
	}
	if mode&syscall.O_APPEND != 0 {
		access &^= syscall.GENERIC_WRITE
		access |= syscall.FILE_APPEND_DATA
	}
	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
	var sa *syscall.SecurityAttributes
	if mode&syscall.O_CLOEXEC == 0 {
		sa = makeInheritSa()
	}
	var createmode uint32
	switch {
	case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
		createmode = syscall.CREATE_NEW
	case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
		createmode = syscall.CREATE_ALWAYS
	case mode&syscall.O_CREAT == syscall.O_CREAT:
		createmode = syscall.OPEN_ALWAYS
	case mode&syscall.O_TRUNC == syscall.O_TRUNC:
		createmode = syscall.TRUNCATE_EXISTING
	default:
		createmode = syscall.OPEN_EXISTING
	}
	h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
	return h, e
}

// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
func makeInheritSa() *syscall.SecurityAttributes {
	var sa syscall.SecurityAttributes
	sa.Length = uint32(unsafe.Sizeof(sa))
	sa.InheritHandle = 1
	return &sa
}

// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
	r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
	if e != nil {
		return nil, e
	}
	return os.NewFile(uintptr(r), name), nil
}

// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
func syscallMode(i os.FileMode) (o uint32) {
	o |= uint32(i.Perm())
	if i&os.ModeSetuid != 0 {
		o |= syscall.S_ISUID
	}
	if i&os.ModeSetgid != 0 {
		o |= syscall.S_ISGID
	}
	if i&os.ModeSticky != 0 {
		o |= syscall.S_ISVTX
	}
	// No mapping for Go's ModeTemporary (plan9 only).
	return
}
24
vendor/github.com/onsi/ginkgo/.travis.yml
generated
vendored
@@ -1,24 +0,0 @@
language: go
go:
  - tip
  - 1.16.x
  - 1.15.x

cache:
  directories:
    - $GOPATH/pkg/mod

# allow internal package imports, necessary for forked repositories
go_import_path: github.com/onsi/ginkgo

install:
  - GO111MODULE="off" go get -v -t ./...
  - GO111MODULE="off" go get golang.org/x/tools/cmd/cover
  - GO111MODULE="off" go get github.com/onsi/gomega
  - GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo
  - export PATH=$GOPATH/bin:$PATH

script:
  - GO111MODULE="on" go mod tidy && git diff --exit-code go.mod go.sum
  - go vet
  - ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
33
vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
generated
vendored
@@ -1,33 +0,0 @@
# Contributing to Ginkgo

Your contributions to Ginkgo are essential for its long-term maintenance and improvement.

- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
- Ensure adequate test coverage:
    - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
    - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
- Update the documentation. Ginko uses `godoc` comments and documentation on the `gh-pages` branch.
  If relevant, please submit a docs PR to that branch alongside your code PR.

Thanks for supporting Ginkgo!

## Setup

Fork the repo, then:

```
go get github.com/onsi/ginkgo
go get github.com/onsi/gomega/...
cd $GOPATH/src/github.com/onsi/ginkgo
git remote add fork git@github.com:<NAME>/ginkgo.git

ginkgo -r -p # ensure tests are green
go vet ./... # ensure linter is happy
```

## Making the PR
- go to a new branch `git checkout -b my-feature`
- make your changes
- run tests and linter again (see above)
- `git push fork`
- open PR 🎉
169
vendor/github.com/onsi/ginkgo/README.md
generated
vendored
@@ -1,169 +0,0 @@
|
||||

|
||||
|
||||
[](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster)
|
||||
|
||||
Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
|
||||
|
||||
If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).
|
||||
|
||||
# Ginkgo 2.0 Release Candidate is available!
|
||||
|
||||
An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [ver2](https://github.com/onsi/ginkgo/tree/ver2) branch and a changelog and migration guide is being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion.
|
||||
|
||||
As described in the [changelog](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711).
|
||||
|
||||
Please start exploring and using the V2 release! To get started follow the [Using the Release Candidate](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta) directions in the migration guide.
|
||||
|
||||
## TLDR
|
||||
Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests.
|
||||
It is typically (and optionally) paired with the [Gomega](https://github.com/onsi/gomega) matcher library.
|
||||
|
||||
```go
|
||||
Describe("the strings package", func() {
|
||||
Context("strings.Contains()", func() {
|
||||
When("the string contains the substring in the middle", func() {
|
||||
It("returns `true`", func() {
|
||||
Expect(strings.Contains("Ginkgo is awesome", "is")).To(BeTrue())
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
```
|
||||
|
||||
## Feature List
|
||||
|
||||
- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
|
||||
|
||||
- Ginkgo allows you to write tests in Go using expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style:
|
||||
- Nestable [`Describe`, `Context` and `When` container blocks](https://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
|
||||
- [`BeforeEach` and `AfterEach` blocks](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
|
||||
- [`It` and `Specify` blocks](https://onsi.github.io/ginkgo/#individual-specs-it) that hold your assertions
|
||||
- [`JustBeforeEach` blocks](https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
|
||||
- [`BeforeSuite` and `AfterSuite` blocks](https://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
|
||||
|
||||
- A comprehensive test runner that lets you:
|
||||
- Mark specs as [pending](https://onsi.github.io/ginkgo/#pending-specs)
|
||||
- [Focus](https://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
|
||||
- Run your tests in [random order](https://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
|
||||
- Break up your test suite into parallel processes for straightforward [test parallelization](https://onsi.github.io/ginkgo/#parallel-specs)
|
||||
|
||||
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](https://onsi.github.io/ginkgo/#running-tests) and [generating](https://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
|
||||
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime
|
||||
- `ginkgo -cover` runs your tests using Go's code coverage tool
|
||||
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
|
||||
- `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
|
||||
- `ginkgo -r` runs all tests suites under the current directory
|
||||
- `ginkgo -v` prints out identifying information for each tests just before it runs
|
||||
|
||||
And much more: run `ginkgo help` for details!
|
||||
|
||||
The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
|
||||
|
||||
- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
|
||||
|
||||
- Built-in support for testing [asynchronicity](https://onsi.github.io/ginkgo/#asynchronous-tests)
|
||||
|
||||
- Built-in support for [benchmarking](https://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
|
||||
|
||||
- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
|
||||
|
||||
- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.
|
||||
|
||||
- [Ginkgo tools for VSCode](https://marketplace.visualstudio.com/items?itemName=joselitofilho.ginkgotestexplorer): just use VSCode's extension installer to install `ginkgoTestExplorer`.
|
||||
|
||||
- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details.
|
||||
|
||||
- A modular architecture that lets you easily:
|
||||
- Write [custom reporters](https://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](https://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
|
||||
- [Adapt an existing matcher library (or write your own!)](https://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
|
||||
|
||||
## [Gomega](https://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
|
||||
|
||||
Ginkgo is best paired with Gomega. Learn more about Gomega [here](https://onsi.github.io/gomega/)
|
||||
|
||||
## [Agouti](https://github.com/sclevine/agouti): A Go Acceptance Testing Framework
|
||||
|
||||
Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](https://agouti.org)
|
||||
|
||||
## Getting Started
|
||||
|
||||
You'll need the Go command-line tools. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed.
|
||||
|
||||
### Global installation
|
||||
To install the Ginkgo command line interface:
|
||||
```bash
|
||||
go get -u github.com/onsi/ginkgo/ginkgo
|
||||
```
|
||||
Note that this will install it to `$GOBIN`, which will need to be in the `$PATH` (or equivalent). Run `go help install` for more information.
|
||||
|
||||
### Go module ["tools package"](https://github.com/golang/go/issues/25922):
|
||||
Create (or update) a file called `tools/tools.go` with the following contents:
|
||||
```go
|
||||
// +build tools
|
||||
|
||||
package tools
|
||||
|
||||
import (
|
||||
_ "github.com/onsi/ginkgo/ginkgo"
|
||||
)
|
||||
|
||||
// This file imports packages that are used when running go generate, or used
|
||||
// during the development process but not otherwise depended on by built code.
|
||||
```
|
||||
The Ginkgo command can then be run via `go run github.com/onsi/ginkgo/ginkgo`.
|
||||
This approach allows the version of Ginkgo to be maintained under source control for reproducible results,
|
||||
and is well suited to automated test pipelines.
|
||||
|
||||
### Bootstrapping
|
||||
```bash
|
||||
cd path/to/package/you/want/to/test
|
||||
|
||||
ginkgo bootstrap # set up a new ginkgo suite
|
||||
ginkgo generate # will create a sample test file. edit this file and add your tests then...
|
||||
|
||||
go test # to run your tests
|
||||
|
||||
ginkgo # also runs your tests
|
||||
|
||||
```
|
||||
|
||||
## I'm new to Go: What are my testing options?
|
||||
|
||||
Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
|
||||
|
||||
With that said, it's great to know what your options are :)
|
||||
|
||||
### What Go gives you out of the box
|
||||
|
||||
Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](https://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
|
||||
|
||||
### Matcher libraries for Go's XUnit style tests
|
||||
|
||||
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
|
||||
|
||||
- [testify](https://github.com/stretchr/testify)
|
||||
- [gocheck](https://labix.org/gocheck)
|
||||
|
||||
You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](https://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
|
||||
|
||||
### BDD style testing frameworks
|
||||
|
||||
There are a handful of BDD-style testing frameworks written for Go. Here are a few:
|
||||
|
||||
- [Ginkgo](https://github.com/onsi/ginkgo) ;)
|
||||
- [GoConvey](https://github.com/smartystreets/goconvey)
|
||||
- [Goblin](https://github.com/franela/goblin)
|
||||
- [Mao](https://github.com/azer/mao)
|
||||
- [Zen](https://github.com/pranavraja/zen)
|
||||
|
||||
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.
|
||||
|
||||
Go explore!
|
||||
|
||||
## License
|
||||
|
||||
Ginkgo is MIT-Licensed
|
||||
|
||||
## Contributing
|
||||
|
||||
See [CONTRIBUTING.md](CONTRIBUTING.md)
|
232
vendor/github.com/onsi/ginkgo/config/config.go
generated
vendored
@@ -1,232 +0,0 @@
|
||||
/*
|
||||
Ginkgo accepts a number of configuration options.
|
||||
|
||||
These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli)
|
||||
|
||||
You can also learn more via
|
||||
|
||||
ginkgo help
|
||||
|
||||
or (I kid you not):
|
||||
|
||||
go test -asdf
|
||||
*/
|
||||
package config
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const VERSION = "1.16.5"
|
||||
|
||||
type GinkgoConfigType struct {
|
||||
RandomSeed int64
|
||||
RandomizeAllSpecs bool
|
||||
RegexScansFilePath bool
|
||||
FocusStrings []string
|
||||
SkipStrings []string
|
||||
SkipMeasurements bool
|
||||
FailOnPending bool
|
||||
FailFast bool
|
||||
FlakeAttempts int
|
||||
EmitSpecProgress bool
|
||||
DryRun bool
|
||||
DebugParallel bool
|
||||
|
||||
ParallelNode int
|
||||
ParallelTotal int
|
||||
SyncHost string
|
||||
StreamHost string
|
||||
}
|
||||
|
||||
var GinkgoConfig = GinkgoConfigType{}
|
||||
|
||||
type DefaultReporterConfigType struct {
|
||||
NoColor bool
|
||||
SlowSpecThreshold float64
|
||||
NoisyPendings bool
|
||||
NoisySkippings bool
|
||||
Succinct bool
|
||||
Verbose bool
|
||||
FullTrace bool
|
||||
ReportPassed bool
|
||||
ReportFile string
|
||||
}
|
||||
|
||||
var DefaultReporterConfig = DefaultReporterConfigType{}
|
||||
|
||||
func processPrefix(prefix string) string {
|
||||
if prefix != "" {
|
||||
prefix += "."
|
||||
}
|
||||
return prefix
|
||||
}
|
||||
|
||||
type flagFunc func(string)
|
||||
|
||||
func (f flagFunc) String() string { return "" }
|
||||
func (f flagFunc) Set(s string) error { f(s); return nil }
|
||||
|
||||
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
|
||||
prefix = processPrefix(prefix)
|
||||
flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
|
||||
|
||||
flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
|
||||
|
||||
flagSet.Var(flagFunc(flagFocus), prefix+"focus", "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed.")
|
||||
flagSet.Var(flagFunc(flagSkip), prefix+"skip", "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed.")
|
||||
|
||||
flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).")
|
||||
|
||||
flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.")
|
||||
|
||||
flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
|
||||
|
||||
flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")
|
||||
|
||||
if includeParallelFlags {
|
||||
flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
|
||||
flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
|
||||
flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
|
||||
flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
|
||||
}
|
||||
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
|
||||
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.")
|
||||
flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.")
|
||||
|
||||
}
|
||||
|
||||
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
|
||||
prefix = processPrefix(prefix)
|
||||
result := make([]string, 0)
|
||||
|
||||
if ginkgo.RandomSeed > 0 {
|
||||
result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
|
||||
}
|
||||
|
||||
if ginkgo.RandomizeAllSpecs {
|
||||
result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.SkipMeasurements {
|
||||
result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.FailOnPending {
|
||||
result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.FailFast {
|
||||
result = append(result, fmt.Sprintf("--%sfailFast", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.DryRun {
|
||||
result = append(result, fmt.Sprintf("--%sdryRun", prefix))
|
||||
}
|
||||
|
||||
for _, s := range ginkgo.FocusStrings {
|
||||
result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, s))
|
||||
}
|
||||
|
||||
for _, s := range ginkgo.SkipStrings {
|
||||
result = append(result, fmt.Sprintf("--%sskip=%s", prefix, s))
|
||||
}
|
||||
|
||||
if ginkgo.FlakeAttempts > 1 {
|
||||
result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts))
|
||||
}
|
||||
|
||||
if ginkgo.EmitSpecProgress {
|
||||
result = append(result, fmt.Sprintf("--%sprogress", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.DebugParallel {
|
||||
result = append(result, fmt.Sprintf("--%sdebug", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.ParallelNode != 0 {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
|
||||
}
|
||||
|
||||
if ginkgo.ParallelTotal != 0 {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
|
||||
}
|
||||
|
||||
if ginkgo.StreamHost != "" {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
|
||||
}
|
||||
|
||||
if ginkgo.SyncHost != "" {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
|
||||
}
|
||||
|
||||
if ginkgo.RegexScansFilePath {
|
||||
result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix))
|
||||
}
|
||||
|
||||
if reporter.NoColor {
|
||||
result = append(result, fmt.Sprintf("--%snoColor", prefix))
|
||||
}
|
||||
|
||||
if reporter.SlowSpecThreshold > 0 {
|
||||
result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
|
||||
}
|
||||
|
||||
if !reporter.NoisyPendings {
|
||||
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
|
||||
}
|
||||
|
||||
if !reporter.NoisySkippings {
|
||||
result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
|
||||
}
|
||||
|
||||
if reporter.Verbose {
|
||||
result = append(result, fmt.Sprintf("--%sv", prefix))
|
||||
}
|
||||
|
||||
if reporter.Succinct {
|
||||
result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
|
||||
}
|
||||
|
||||
if reporter.FullTrace {
|
||||
result = append(result, fmt.Sprintf("--%strace", prefix))
|
||||
}
|
||||
|
||||
if reporter.ReportPassed {
|
||||
result = append(result, fmt.Sprintf("--%sreportPassed", prefix))
|
||||
}
|
||||
|
||||
if reporter.ReportFile != "" {
|
||||
result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile))
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// flagFocus implements the -focus flag.
|
||||
func flagFocus(arg string) {
|
||||
if arg != "" {
|
||||
GinkgoConfig.FocusStrings = append(GinkgoConfig.FocusStrings, arg)
|
||||
}
|
||||
}
|
||||
|
||||
// flagSkip implements the -skip flag.
|
||||
func flagSkip(arg string) {
|
||||
if arg != "" {
|
||||
GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg)
|
||||
}
|
||||
}
|
681
vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
generated
vendored
@@ -1,681 +0,0 @@
|
||||
/*
|
||||
Ginkgo is a BDD-style testing framework for Golang
|
||||
|
||||
The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/
|
||||
|
||||
Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)
|
||||
|
||||
Ginkgo on Github: http://github.com/onsi/ginkgo
|
||||
|
||||
Ginkgo is MIT-Licensed
|
||||
*/
|
||||
package ginkgo
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/internal/codelocation"
|
||||
"github.com/onsi/ginkgo/internal/global"
|
||||
"github.com/onsi/ginkgo/internal/remote"
|
||||
"github.com/onsi/ginkgo/internal/testingtproxy"
|
||||
"github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
var deprecationTracker = types.NewDeprecationTracker()
|
||||
|
||||
const GINKGO_VERSION = config.VERSION
|
||||
const GINKGO_PANIC = `
|
||||
Your test failed.
|
||||
Ginkgo panics to prevent subsequent assertions from running.
|
||||
Normally Ginkgo rescues this panic so you shouldn't see it.
|
||||
|
||||
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
|
||||
To circumvent this, you should call
|
||||
|
||||
defer GinkgoRecover()
|
||||
|
||||
at the top of the goroutine that caused this panic.
|
||||
`
|
||||
|
||||
func init() {
|
||||
config.Flags(flag.CommandLine, "ginkgo", true)
|
||||
GinkgoWriter = writer.New(os.Stdout)
|
||||
}
|
||||
|
||||
//GinkgoWriter implements an io.Writer
|
||||
//When running in verbose mode any writes to GinkgoWriter will be immediately printed
|
||||
//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
|
||||
//only if the current test fails.
|
||||
var GinkgoWriter io.Writer
|
||||
|
||||
//The interface by which Ginkgo receives *testing.T
|
||||
type GinkgoTestingT interface {
|
||||
Fail()
|
||||
}
|
||||
|
||||
//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
|
||||
//useful for seeding your own pseudorandom number generators (PRNGs) to ensure
|
||||
//consistent executions from run to run, where your tests contain variability (for
|
||||
//example, when selecting random test data).
|
||||
func GinkgoRandomSeed() int64 {
|
||||
return config.GinkgoConfig.RandomSeed
|
||||
}
|
||||
|
||||
//GinkgoParallelNode is deprecated, use GinkgoParallelProcess instead
|
||||
func GinkgoParallelNode() int {
|
||||
deprecationTracker.TrackDeprecation(types.Deprecations.ParallelNode(), codelocation.New(1))
|
||||
return GinkgoParallelProcess()
|
||||
}
|
||||
|
||||
//GinkgoParallelProcess returns the parallel process number for the current ginkgo process
|
||||
//The process number is 1-indexed
|
||||
func GinkgoParallelProcess() int {
|
||||
return config.GinkgoConfig.ParallelNode
|
||||
}
|
||||
|
||||
//Some matcher libraries or legacy codebases require a *testing.T
|
||||
//GinkgoT implements an interface analogous to *testing.T and can be used if
|
||||
//the library in question accepts *testing.T through an interface
|
||||
//
|
||||
// For example, with testify:
|
||||
// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
|
||||
//
|
||||
// Or with gomock:
|
||||
// gomock.NewController(GinkgoT())
|
||||
//
|
||||
// GinkgoT() takes an optional offset argument that can be used to get the
|
||||
// correct line number associated with the failure.
|
||||
func GinkgoT(optionalOffset ...int) GinkgoTInterface {
|
||||
offset := 3
|
||||
if len(optionalOffset) > 0 {
|
||||
offset = optionalOffset[0]
|
||||
}
|
||||
failedFunc := func() bool {
|
||||
return CurrentGinkgoTestDescription().Failed
|
||||
}
|
||||
nameFunc := func() string {
|
||||
return CurrentGinkgoTestDescription().FullTestText
|
||||
}
|
||||
return testingtproxy.New(GinkgoWriter, Fail, Skip, failedFunc, nameFunc, offset)
|
||||
}
|
||||
|
||||
//The interface returned by GinkgoT(). This covers most of the methods
|
||||
//in the testing package's T.
|
||||
type GinkgoTInterface interface {
|
||||
Cleanup(func())
|
||||
Setenv(key, value string)
|
||||
Error(args ...interface{})
|
||||
Errorf(format string, args ...interface{})
|
||||
Fail()
|
||||
FailNow()
|
||||
Failed() bool
|
||||
Fatal(args ...interface{})
|
||||
Fatalf(format string, args ...interface{})
|
||||
Helper()
|
||||
Log(args ...interface{})
|
||||
Logf(format string, args ...interface{})
|
||||
Name() string
|
||||
Parallel()
|
||||
Skip(args ...interface{})
|
||||
SkipNow()
|
||||
Skipf(format string, args ...interface{})
|
||||
Skipped() bool
|
||||
TempDir() string
|
||||
}
|
||||
|
||||
//Custom Ginkgo test reporters must implement the Reporter interface.
|
||||
//
|
||||
//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
|
||||
//and a SpecSummary just before a spec begins and just after a spec ends
|
||||
type Reporter reporters.Reporter
|
||||
|
||||
//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
|
||||
//to tell Ginkgo that your async test is done.
|
||||
type Done chan<- interface{}
|
||||
|
||||
//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
|
||||
// FullTestText: a concatenation of ComponentTexts and the TestText
|
||||
// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
|
||||
// TestText: the text in the actual It or Measure node
|
||||
// IsMeasurement: true if the current test is a measurement
|
||||
// FileName: the name of the file containing the current test
|
||||
// LineNumber: the line number for the current test
|
||||
// Failed: if the current test has failed, this will be true (useful in an AfterEach)
|
||||
type GinkgoTestDescription struct {
|
||||
FullTestText string
|
||||
ComponentTexts []string
|
||||
TestText string
|
||||
|
||||
IsMeasurement bool
|
||||
|
||||
FileName string
|
||||
LineNumber int
|
||||
|
||||
Failed bool
|
||||
Duration time.Duration
|
||||
}
|
||||
|
||||
//CurrentGinkgoTestDescripton returns information about the current running test.
|
||||
func CurrentGinkgoTestDescription() GinkgoTestDescription {
|
||||
summary, ok := global.Suite.CurrentRunningSpecSummary()
|
||||
if !ok {
|
||||
return GinkgoTestDescription{}
|
||||
}
|
||||
|
||||
subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
|
||||
|
||||
return GinkgoTestDescription{
|
||||
ComponentTexts: summary.ComponentTexts[1:],
|
||||
FullTestText: strings.Join(summary.ComponentTexts[1:], " "),
|
||||
TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1],
|
||||
IsMeasurement: summary.IsMeasurement,
|
||||
FileName: subjectCodeLocation.FileName,
|
||||
LineNumber: subjectCodeLocation.LineNumber,
|
||||
Failed: summary.HasFailureState(),
|
||||
Duration: summary.RunTime,
|
||||
}
|
||||
}
|
||||
|
||||
//Measurement tests receive a Benchmarker.
|
||||
//
|
||||
//You use the Time() function to time how long the passed in body function takes to run
|
||||
//You use the RecordValue() function to track arbitrary numerical measurements.
|
||||
//The RecordValueWithPrecision() function can be used alternatively to provide the unit
|
||||
//and resolution of the numeric measurement.
|
||||
//The optional info argument is passed to the test reporter and can be used to
|
||||
// provide the measurement data to a custom reporter with context.
|
||||
//
|
||||
//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
|
||||
type Benchmarker interface {
|
||||
Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
|
||||
RecordValue(name string, value float64, info ...interface{})
|
||||
RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
|
||||
}
|
||||
|
||||
//RunSpecs is the entry point for the Ginkgo test runner.
|
||||
//You must call this within a Golang testing TestX(t *testing.T) function.
|
||||
//
|
||||
//To bootstrap a test suite you can use the Ginkgo CLI:
|
||||
//
|
||||
// ginkgo bootstrap
|
||||
func RunSpecs(t GinkgoTestingT, description string) bool {
|
||||
specReporters := []Reporter{buildDefaultReporter()}
|
||||
if config.DefaultReporterConfig.ReportFile != "" {
|
||||
reportFile := config.DefaultReporterConfig.ReportFile
|
||||
specReporters[0] = reporters.NewJUnitReporter(reportFile)
|
||||
specReporters = append(specReporters, buildDefaultReporter())
|
||||
}
|
||||
return runSpecsWithCustomReporters(t, description, specReporters)
|
||||
}
|
||||
|
||||
//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
|
||||
//RunSpecs() with this method.
|
||||
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
|
||||
deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
|
||||
specReporters = append(specReporters, buildDefaultReporter())
|
||||
return runSpecsWithCustomReporters(t, description, specReporters)
|
||||
}
|
||||
|
||||
//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
|
||||
//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter
|
||||
func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
|
||||
deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
|
||||
return runSpecsWithCustomReporters(t, description, specReporters)
|
||||
}
|
||||
|
||||
func runSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
|
||||
writer := GinkgoWriter.(*writer.Writer)
|
||||
writer.SetStream(config.DefaultReporterConfig.Verbose)
|
||||
reporters := make([]reporters.Reporter, len(specReporters))
|
||||
for i, reporter := range specReporters {
|
||||
reporters[i] = reporter
|
||||
}
|
||||
passed, hasFocusedTests := global.Suite.Run(t, description, reporters, writer, config.GinkgoConfig)
|
||||
|
||||
if deprecationTracker.DidTrackDeprecations() {
|
||||
fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport())
|
||||
}
|
||||
|
||||
if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
|
||||
fmt.Println("PASS | FOCUSED")
|
||||
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
|
||||
}
|
||||
return passed
|
||||
}
|
||||
|
||||
func buildDefaultReporter() Reporter {
|
||||
remoteReportingServer := config.GinkgoConfig.StreamHost
|
||||
if remoteReportingServer == "" {
|
||||
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
|
||||
return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
|
||||
} else {
|
||||
debugFile := ""
|
||||
if config.GinkgoConfig.DebugParallel {
|
||||
debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode)
|
||||
}
|
||||
return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile)
|
||||
}
|
||||
}
|
||||
|
||||
//Skip notifies Ginkgo that the current spec was skipped.
|
||||
func Skip(message string, callerSkip ...int) {
|
||||
skip := 0
|
||||
if len(callerSkip) > 0 {
|
||||
skip = callerSkip[0]
|
||||
}
|
||||
|
||||
global.Failer.Skip(message, codelocation.New(skip+1))
|
||||
panic(GINKGO_PANIC)
|
||||
}
|
||||
|
||||
//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
|
||||
func Fail(message string, callerSkip ...int) {
|
||||
skip := 0
|
||||
if len(callerSkip) > 0 {
|
||||
skip = callerSkip[0]
|
||||
}
|
||||
|
||||
global.Failer.Fail(message, codelocation.New(skip+1))
|
||||
panic(GINKGO_PANIC)
|
||||
}
|
||||
|
||||
//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
|
||||
//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
|
||||
//calls out to Gomega
|
||||
//
|
||||
//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
|
||||
//further assertions from running. This panic must be recovered. Ginkgo does this for you
|
||||
//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
|
||||
//
|
||||
//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
|
||||
//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
|
||||
func GinkgoRecover() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
global.Failer.Panic(codelocation.New(1), e)
|
||||
}
|
||||
}
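A hedged sketch of the pattern this comment describes (a fragment of a _test file with ginkgo and gomega dot-imported; doWork is a hypothetical helper): the spawned goroutine defers GinkgoRecover so a Fail raised by the assertion is recovered rather than panicking out of the process.

var _ = It("collects a result from a worker goroutine", func(done Done) {
	go func() {
		// Without this deferred call, a failing assertion on this goroutine
		// would panic straight out of the test process.
		defer GinkgoRecover()

		err := doWork() // hypothetical helper
		Expect(err).NotTo(HaveOccurred())
		close(done)
	}()
}, 5.0)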
|
||||
|
||||
//Describe blocks allow you to organize your specs. A Describe block can contain any number of
|
||||
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||
//
|
||||
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
|
||||
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
|
||||
//or method and, within that Describe, outline a number of Contexts and Whens.
|
||||
func Describe(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus the tests within a describe block using FDescribe
|
||||
func FDescribe(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using PDescribe
|
||||
func PDescribe(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using XDescribe
|
||||
func XDescribe(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//Context blocks allow you to organize your specs. A Context block can contain any number of
|
||||
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||
//
|
||||
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
|
||||
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
|
||||
//or method and, within that Describe, outline a number of Contexts and Whens.
|
||||
func Context(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus the tests within a describe block using FContext
|
||||
func FContext(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using PContext
|
||||
func PContext(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using XContext
|
||||
func XContext(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//When blocks allow you to organize your specs. A When block can contain any number of
|
||||
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||
//
|
||||
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
|
||||
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
|
||||
//or method and, within that Describe, outline a number of Contexts and Whens.
|
||||
func When(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus the tests within a describe block using FWhen
|
||||
func FWhen(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using PWhen
|
||||
func PWhen(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using XWhen
|
||||
func XWhen(text string, body func()) bool {
|
||||
global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
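A small sketch (fragment of a _test file with this package dot-imported) showing the three container types nested around one It; they behave identically, only the wording differs, and When prepends "when " to its description.

var _ = Describe("the parser", func() {
	Context("with empty input", func() {
		When("strict mode is enabled", func() { // reported as "when strict mode is enabled"
			It("returns an error", func() {
				// assertions go here
			})
		})
	})
})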
|
||||
|
||||
//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
|
||||
//within an It block.
|
||||
//
|
||||
//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
|
||||
//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
|
||||
func It(text string, body interface{}, timeout ...float64) bool {
|
||||
validateBodyFunc(body, codelocation.New(1))
|
||||
global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus individual Its using FIt
|
||||
func FIt(text string, body interface{}, timeout ...float64) bool {
|
||||
validateBodyFunc(body, codelocation.New(1))
|
||||
global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Its as pending using PIt
|
||||
func PIt(text string, _ ...interface{}) bool {
|
||||
global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Its as pending using XIt
|
||||
func XIt(text string, _ ...interface{}) bool {
|
||||
global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
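The asynchronous form mentioned above takes a Done channel plus an optional timeout in seconds; a sketch (runJob is a hypothetical helper):

var _ = It("finishes the background job in time", func(done Done) {
	go func() {
		defer GinkgoRecover()
		runJob()    // hypothetical helper
		close(done) // signal completion before the 2.5s timeout elapses
	}()
}, 2.5)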
|
||||
|
||||
//Specify blocks are aliases for It blocks and allow for more natural wording in situations
//in which "It" does not fit into a natural sentence flow. All the same rules that apply to
//It blocks also apply to Specify blocks.
|
||||
func Specify(text string, body interface{}, timeout ...float64) bool {
|
||||
validateBodyFunc(body, codelocation.New(1))
|
||||
global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus individual Specifys using FSpecify
|
||||
func FSpecify(text string, body interface{}, timeout ...float64) bool {
|
||||
validateBodyFunc(body, codelocation.New(1))
|
||||
global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Specifys as pending using PSpecify
|
||||
func PSpecify(text string, is ...interface{}) bool {
|
||||
global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Specifys as pending using XSpecify
|
||||
func XSpecify(text string, is ...interface{}) bool {
|
||||
global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//By allows you to better document large Its.
|
||||
//
|
||||
//Generally you should try to keep your Its short and to the point. This is not always possible, however,
|
||||
//especially in the context of integration tests that capture a particular workflow.
|
||||
//
|
||||
//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
|
||||
//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
|
||||
func By(text string, callbacks ...func()) {
|
||||
preamble := "\x1b[1mSTEP\x1b[0m"
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
preamble = "STEP"
|
||||
}
|
||||
fmt.Fprintln(GinkgoWriter, preamble+": "+text)
|
||||
if len(callbacks) == 1 {
|
||||
callbacks[0]()
|
||||
}
|
||||
if len(callbacks) > 1 {
|
||||
panic("just one callback per By, please")
|
||||
}
|
||||
}
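A short sketch of By inside a longer It; the optional callback form runs the callback immediately after logging the STEP line.

var _ = It("registers and then logs in a user", func() {
	By("creating the user")
	// ... create the user and assert on the result ...

	By("logging the user in", func() {
		// runs immediately; the STEP line is still written to GinkgoWriter
	})
})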
|
||||
|
||||
//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
|
||||
//and accumulate metrics provided to the Benchmarker by the body function.
|
||||
//
|
||||
//The body function must have the signature:
|
||||
// func(b Benchmarker)
|
||||
func Measure(text string, body interface{}, samples int) bool {
|
||||
deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
|
||||
global.Suite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus individual Measures using FMeasure
|
||||
func FMeasure(text string, body interface{}, samples int) bool {
|
||||
deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
|
||||
global.Suite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Measurements as pending using PMeasure
|
||||
func PMeasure(text string, _ ...interface{}) bool {
|
||||
deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
|
||||
global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Measurements as pending using XMeasure
|
||||
func XMeasure(text string, _ ...interface{}) bool {
|
||||
deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
|
||||
global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
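A sketch of the (deprecated) Measure API documented above; the body receives a Benchmarker and is run once per sample (sortSmallSlice is a hypothetical helper):

var _ = Measure("sorting a small slice", func(b Benchmarker) {
	b.Time("sort", func() {
		sortSmallSlice() // hypothetical code under measurement
	})
	b.RecordValue("items sorted", 42) // any numeric metric can be recorded alongside the timing
}, 10) // 10 samples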
|
||||
|
||||
//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
|
||||
//parallel node process will call BeforeSuite.
|
||||
//
|
||||
//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
|
||||
//
|
||||
//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||
func BeforeSuite(body interface{}, timeout ...float64) bool {
|
||||
validateBodyFunc(body, codelocation.New(1))
|
||||
global.Suite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
|
||||
//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
|
||||
//
|
||||
//When running in parallel, each parallel node process will call AfterSuite.
|
||||
//
|
||||
//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
|
||||
//
|
||||
//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||
func AfterSuite(body interface{}, timeout ...float64) bool {
|
||||
validateBodyFunc(body, codelocation.New(1))
|
||||
global.Suite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
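Typical top-level registration in a bootstrap file, as a sketch (the database helpers are illustrative):

var _ = BeforeSuite(func() {
	startTestDatabase() // hypothetical helper; runs once per parallel node before any spec
})

var _ = AfterSuite(func() {
	stopTestDatabase() // hypothetical helper; always runs after the specs, even on ^C
})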
|
||||
|
||||
//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
|
||||
//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that
|
||||
//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
|
||||
//until that node is done before running.
|
||||
//
|
||||
//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
|
||||
//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
|
||||
//to the second function (on all the other nodes).
|
||||
//
|
||||
//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
|
||||
//
|
||||
// func() []byte
|
||||
//
|
||||
//or, to run asynchronously:
|
||||
//
|
||||
// func(done Done) []byte
|
||||
//
|
||||
//The byte array returned by the first function is then passed to the second function, which has the signature:
|
||||
//
|
||||
// func(data []byte)
|
||||
//
|
||||
//or, to run asynchronously:
|
||||
//
|
||||
// func(data []byte, done Done)
|
||||
//
|
||||
//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
|
||||
//
|
||||
// var dbClient db.Client
|
||||
// var dbRunner db.Runner
|
||||
//
|
||||
// var _ = SynchronizedBeforeSuite(func() []byte {
|
||||
// dbRunner = db.NewRunner()
|
||||
// err := dbRunner.Start()
|
||||
// Ω(err).ShouldNot(HaveOccurred())
|
||||
// return []byte(dbRunner.URL)
|
||||
// }, func(data []byte) {
|
||||
// dbClient = db.NewClient()
|
||||
// err := dbClient.Connect(string(data))
|
||||
// Ω(err).ShouldNot(HaveOccurred())
|
||||
// })
|
||||
func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
|
||||
global.Suite.SetSynchronizedBeforeSuiteNode(
|
||||
node1Body,
|
||||
allNodesBody,
|
||||
codelocation.New(1),
|
||||
parseTimeout(timeout...),
|
||||
)
|
||||
return true
|
||||
}
|
||||
|
||||
//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
|
||||
//external singleton resources shared across nodes when running tests in parallel.
|
||||
//
|
||||
//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
|
||||
//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
|
||||
//all other nodes are finished.
|
||||
//
|
||||
//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
|
||||
//
|
||||
//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
|
||||
//only after all nodes have finished:
|
||||
//
|
||||
// var _ = SynchronizedAfterSuite(func() {
|
||||
// dbClient.Cleanup()
|
||||
// }, func() {
|
||||
// dbRunner.Stop()
|
||||
// })
|
||||
func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
|
||||
global.Suite.SetSynchronizedAfterSuiteNode(
|
||||
allNodesBody,
|
||||
node1Body,
|
||||
codelocation.New(1),
|
||||
parseTimeout(timeout...),
|
||||
)
|
||||
return true
|
||||
}
|
||||
|
||||
//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
|
||||
//Describe and Context blocks the outermost BeforeEach blocks are run first.
|
||||
//
|
||||
//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func BeforeEach(body interface{}, timeout ...float64) bool {
|
||||
validateBodyFunc(body, codelocation.New(1))
|
||||
global.Suite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
|
||||
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
|
||||
//
|
||||
//Like It blocks, JustBeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func JustBeforeEach(body interface{}, timeout ...float64) bool {
|
||||
validateBodyFunc(body, codelocation.New(1))
|
||||
global.Suite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. For more details,
|
||||
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
|
||||
//
|
||||
//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func JustAfterEach(body interface{}, timeout ...float64) bool {
|
||||
validateBodyFunc(body, codelocation.New(1))
|
||||
global.Suite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
|
||||
//Describe and Context blocks the innermost AfterEach blocks are run first.
|
||||
//
|
||||
//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func AfterEach(body interface{}, timeout ...float64) bool {
|
||||
validateBodyFunc(body, codelocation.New(1))
|
||||
global.Suite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
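Putting the setup nodes together, the ordering described above plays out as in this sketch: outer BeforeEach, inner BeforeEach, JustBeforeEach, the It itself, then AfterEach blocks from the innermost outwards.

var _ = Describe("ordering", func() {
	BeforeEach(func()     { /* 1: outer BeforeEach */ })
	JustBeforeEach(func() { /* 3: after all BeforeEach blocks */ })
	AfterEach(func()      { /* 6: outer AfterEach runs last */ })

	Context("nested", func() {
		BeforeEach(func() { /* 2: inner BeforeEach */ })
		AfterEach(func()  { /* 5: inner AfterEach runs first */ })

		It("runs in the middle", func() { /* 4 */ })
	})
})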
|
||||
|
||||
func validateBodyFunc(body interface{}, cl types.CodeLocation) {
|
||||
t := reflect.TypeOf(body)
|
||||
if t.Kind() != reflect.Func {
|
||||
return
|
||||
}
|
||||
|
||||
if t.NumOut() > 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if t.NumIn() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if t.In(0) == reflect.TypeOf(make(Done)) {
|
||||
deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl)
|
||||
}
|
||||
}
|
||||
|
||||
func parseTimeout(timeout ...float64) time.Duration {
|
||||
if len(timeout) == 0 {
|
||||
return global.DefaultTimeout
|
||||
} else {
|
||||
return time.Duration(timeout[0] * float64(time.Second))
|
||||
}
|
||||
}
|
48
vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
generated
vendored
@ -1,48 +0,0 @@
|
||||
package codelocation
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
func New(skip int) types.CodeLocation {
|
||||
_, file, line, _ := runtime.Caller(skip + 1)
|
||||
stackTrace := PruneStack(string(debug.Stack()), skip+1)
|
||||
return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
|
||||
}
|
||||
|
||||
// PruneStack removes references to functions that are internal to Ginkgo
|
||||
// and the Go runtime from a stack string and a certain number of stack entries
|
||||
// at the beginning of the stack. The stack string has the format
|
||||
// as returned by runtime/debug.Stack. The leading goroutine information is
|
||||
// optional and always removed if present. Beware that runtime/debug.Stack
|
||||
// adds itself as first entry, so typically skip must be >= 1 to remove that
|
||||
// entry.
|
||||
func PruneStack(fullStackTrace string, skip int) string {
|
||||
stack := strings.Split(fullStackTrace, "\n")
|
||||
// Ensure that the even entries are the method names and the
|
||||
// odd entries the source code information.
|
||||
if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
|
||||
// Ignore "goroutine 29 [running]:" line.
|
||||
stack = stack[1:]
|
||||
}
|
||||
// The "+1" is for skipping over the initial entry, which is
|
||||
// runtime/debug.Stack() itself.
|
||||
if len(stack) > 2*(skip+1) {
|
||||
stack = stack[2*(skip+1):]
|
||||
}
|
||||
prunedStack := []string{}
|
||||
re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
|
||||
for i := 0; i < len(stack)/2; i++ {
|
||||
// We filter out based on the source code file name.
|
||||
if !re.Match([]byte(stack[i*2+1])) {
|
||||
prunedStack = append(prunedStack, stack[i*2])
|
||||
prunedStack = append(prunedStack, stack[i*2+1])
|
||||
}
|
||||
}
|
||||
return strings.Join(prunedStack, "\n")
|
||||
}
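For orientation, a hedged sketch of how ginkgo's own DSL uses this package (the internal/ import path means code outside the ginkgo module cannot import it): New(skip) records the caller's file and line, where skip counts frames above New itself, and FullStackTrace has already been filtered by PruneStack.

// Fragment, in the spirit of ginkgo_dsl.go; the printing is purely illustrative:
cl := codelocation.New(1) // skip the exported DSL function's own frame
fmt.Printf("%s:%d\n", cl.FileName, cl.LineNumber)
fmt.Println(cl.FullStackTrace) // frames matching /ginkgo/, /pkg/testing/ or /pkg/runtime/ are pruned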
|
151
vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
generated
vendored
@ -1,151 +0,0 @@
|
||||
package containernode
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sort"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type subjectOrContainerNode struct {
|
||||
containerNode *ContainerNode
|
||||
subjectNode leafnodes.SubjectNode
|
||||
}
|
||||
|
||||
func (n subjectOrContainerNode) text() string {
|
||||
if n.containerNode != nil {
|
||||
return n.containerNode.Text()
|
||||
} else {
|
||||
return n.subjectNode.Text()
|
||||
}
|
||||
}
|
||||
|
||||
type CollatedNodes struct {
|
||||
Containers []*ContainerNode
|
||||
Subject leafnodes.SubjectNode
|
||||
}
|
||||
|
||||
type ContainerNode struct {
|
||||
text string
|
||||
flag types.FlagType
|
||||
codeLocation types.CodeLocation
|
||||
|
||||
setupNodes []leafnodes.BasicNode
|
||||
subjectAndContainerNodes []subjectOrContainerNode
|
||||
}
|
||||
|
||||
func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
|
||||
return &ContainerNode{
|
||||
text: text,
|
||||
flag: flag,
|
||||
codeLocation: codeLocation,
|
||||
}
|
||||
}
|
||||
|
||||
func (container *ContainerNode) Shuffle(r *rand.Rand) {
|
||||
sort.Sort(container)
|
||||
permutation := r.Perm(len(container.subjectAndContainerNodes))
|
||||
shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
|
||||
for i, j := range permutation {
|
||||
shuffledNodes[i] = container.subjectAndContainerNodes[j]
|
||||
}
|
||||
container.subjectAndContainerNodes = shuffledNodes
|
||||
}
|
||||
|
||||
func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
|
||||
if node.flag == types.FlagTypePending {
|
||||
return false
|
||||
}
|
||||
|
||||
shouldUnfocus := false
|
||||
for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
|
||||
if subjectOrContainerNode.containerNode != nil {
|
||||
shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
|
||||
} else {
|
||||
shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
|
||||
}
|
||||
}
|
||||
|
||||
if shouldUnfocus {
|
||||
if node.flag == types.FlagTypeFocused {
|
||||
node.flag = types.FlagTypeNone
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return node.flag == types.FlagTypeFocused
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Collate() []CollatedNodes {
|
||||
return node.collate([]*ContainerNode{})
|
||||
}
|
||||
|
||||
func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
|
||||
collated := make([]CollatedNodes, 0)
|
||||
|
||||
containers := make([]*ContainerNode, len(enclosingContainers))
|
||||
copy(containers, enclosingContainers)
|
||||
containers = append(containers, node)
|
||||
|
||||
for _, subjectOrContainer := range node.subjectAndContainerNodes {
|
||||
if subjectOrContainer.containerNode != nil {
|
||||
collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
|
||||
} else {
|
||||
collated = append(collated, CollatedNodes{
|
||||
Containers: containers,
|
||||
Subject: subjectOrContainer.subjectNode,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return collated
|
||||
}
|
||||
|
||||
func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
|
||||
node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
|
||||
}
|
||||
|
||||
func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
|
||||
node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
|
||||
}
|
||||
|
||||
func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
|
||||
node.setupNodes = append(node.setupNodes, setupNode)
|
||||
}
|
||||
|
||||
func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
|
||||
nodes := []leafnodes.BasicNode{}
|
||||
for _, setupNode := range node.setupNodes {
|
||||
if setupNode.Type() == nodeType {
|
||||
nodes = append(nodes, setupNode)
|
||||
}
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Text() string {
|
||||
return node.text
|
||||
}
|
||||
|
||||
func (node *ContainerNode) CodeLocation() types.CodeLocation {
|
||||
return node.codeLocation
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Flag() types.FlagType {
|
||||
return node.flag
|
||||
}
|
||||
|
||||
//sort.Interface
|
||||
|
||||
func (node *ContainerNode) Len() int {
|
||||
return len(node.subjectAndContainerNodes)
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Less(i, j int) bool {
|
||||
return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Swap(i, j int) {
|
||||
node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
|
||||
}
|
22
vendor/github.com/onsi/ginkgo/internal/global/init.go
generated
vendored
@ -1,22 +0,0 @@
|
||||
package global
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/internal/suite"
|
||||
)
|
||||
|
||||
const DefaultTimeout = time.Duration(1 * time.Second)
|
||||
|
||||
var Suite *suite.Suite
|
||||
var Failer *failer.Failer
|
||||
|
||||
func init() {
|
||||
InitializeGlobals()
|
||||
}
|
||||
|
||||
func InitializeGlobals() {
|
||||
Failer = failer.New()
|
||||
Suite = suite.New(Failer)
|
||||
}
|
103
vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
generated
vendored
@ -1,103 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type benchmarker struct {
|
||||
mu sync.Mutex
|
||||
measurements map[string]*types.SpecMeasurement
|
||||
orderCounter int
|
||||
}
|
||||
|
||||
func newBenchmarker() *benchmarker {
|
||||
return &benchmarker{
|
||||
measurements: make(map[string]*types.SpecMeasurement),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
|
||||
t := time.Now()
|
||||
body()
|
||||
elapsedTime = time.Since(t)
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...)
|
||||
measurement.Results = append(measurement.Results, elapsedTime.Seconds())
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
|
||||
b.mu.Lock()
|
||||
measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
|
||||
defer b.mu.Unlock()
|
||||
measurement.Results = append(measurement.Results, value)
|
||||
}
|
||||
|
||||
func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
|
||||
b.mu.Lock()
|
||||
measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
|
||||
defer b.mu.Unlock()
|
||||
measurement.Results = append(measurement.Results, value)
|
||||
}
|
||||
|
||||
func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement {
|
||||
measurement, ok := b.measurements[name]
|
||||
if !ok {
|
||||
var computedInfo interface{}
|
||||
computedInfo = nil
|
||||
if len(info) > 0 {
|
||||
computedInfo = info[0]
|
||||
}
|
||||
measurement = &types.SpecMeasurement{
|
||||
Name: name,
|
||||
Info: computedInfo,
|
||||
Order: b.orderCounter,
|
||||
SmallestLabel: smallestLabel,
|
||||
LargestLabel: largestLabel,
|
||||
AverageLabel: averageLabel,
|
||||
Units: units,
|
||||
Precision: precision,
|
||||
Results: make([]float64, 0),
|
||||
}
|
||||
b.measurements[name] = measurement
|
||||
b.orderCounter++
|
||||
}
|
||||
|
||||
return measurement
|
||||
}
|
||||
|
||||
func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
for _, measurement := range b.measurements {
|
||||
measurement.Smallest = math.MaxFloat64
|
||||
measurement.Largest = -math.MaxFloat64
|
||||
sum := float64(0)
|
||||
sumOfSquares := float64(0)
|
||||
|
||||
for _, result := range measurement.Results {
|
||||
if result > measurement.Largest {
|
||||
measurement.Largest = result
|
||||
}
|
||||
if result < measurement.Smallest {
|
||||
measurement.Smallest = result
|
||||
}
|
||||
sum += result
|
||||
sumOfSquares += result * result
|
||||
}
|
||||
|
||||
n := float64(len(measurement.Results))
|
||||
measurement.Average = sum / n
|
||||
measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
|
||||
}
|
||||
|
||||
return b.measurements
|
||||
}
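The report derives a population standard deviation from the running sums, i.e. sqrt(sumOfSquares/n - (sum/n)^2); a small standalone check of the same arithmetic with made-up values:

package main

import (
	"fmt"
	"math"
)

func main() {
	results := []float64{1.0, 2.0, 4.0}
	sum, sumOfSquares := 0.0, 0.0
	for _, r := range results {
		sum += r
		sumOfSquares += r * r
	}
	n := float64(len(results))
	mean := sum / n
	stdDev := math.Sqrt(sumOfSquares/n - mean*mean)
	fmt.Printf("average=%.4f stddev=%.4f\n", mean, stdDev) // average=2.3333 stddev=1.2472
}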
|
19
vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
generated
vendored
@ -1,19 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type BasicNode interface {
|
||||
Type() types.SpecComponentType
|
||||
Run() (types.SpecState, types.SpecFailure)
|
||||
CodeLocation() types.CodeLocation
|
||||
}
|
||||
|
||||
type SubjectNode interface {
|
||||
BasicNode
|
||||
|
||||
Text() string
|
||||
Flag() types.FlagType
|
||||
Samples() int
|
||||
}
|
47
vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
generated
vendored
@ -1,47 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type ItNode struct {
|
||||
runner *runner
|
||||
|
||||
flag types.FlagType
|
||||
text string
|
||||
}
|
||||
|
||||
func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
|
||||
return &ItNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
|
||||
flag: flag,
|
||||
text: text,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
return node.runner.run()
|
||||
}
|
||||
|
||||
func (node *ItNode) Type() types.SpecComponentType {
|
||||
return types.SpecComponentTypeIt
|
||||
}
|
||||
|
||||
func (node *ItNode) Text() string {
|
||||
return node.text
|
||||
}
|
||||
|
||||
func (node *ItNode) Flag() types.FlagType {
|
||||
return node.flag
|
||||
}
|
||||
|
||||
func (node *ItNode) CodeLocation() types.CodeLocation {
|
||||
return node.runner.codeLocation
|
||||
}
|
||||
|
||||
func (node *ItNode) Samples() int {
|
||||
return 1
|
||||
}
|
62
vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
generated
vendored
@ -1,62 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type MeasureNode struct {
|
||||
runner *runner
|
||||
|
||||
text string
|
||||
flag types.FlagType
|
||||
samples int
|
||||
benchmarker *benchmarker
|
||||
}
|
||||
|
||||
func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
|
||||
benchmarker := newBenchmarker()
|
||||
|
||||
wrappedBody := func() {
|
||||
reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
|
||||
}
|
||||
|
||||
return &MeasureNode{
|
||||
runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
|
||||
|
||||
text: text,
|
||||
flag: flag,
|
||||
samples: samples,
|
||||
benchmarker: benchmarker,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
return node.runner.run()
|
||||
}
|
||||
|
||||
func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
|
||||
return node.benchmarker.measurementsReport()
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Type() types.SpecComponentType {
|
||||
return types.SpecComponentTypeMeasure
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Text() string {
|
||||
return node.text
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Flag() types.FlagType {
|
||||
return node.flag
|
||||
}
|
||||
|
||||
func (node *MeasureNode) CodeLocation() types.CodeLocation {
|
||||
return node.runner.codeLocation
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Samples() int {
|
||||
return node.samples
|
||||
}
|
117
vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
generated
vendored
@ -1,117 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/codelocation"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type runner struct {
|
||||
isAsync bool
|
||||
asyncFunc func(chan<- interface{})
|
||||
syncFunc func()
|
||||
codeLocation types.CodeLocation
|
||||
timeoutThreshold time.Duration
|
||||
nodeType types.SpecComponentType
|
||||
componentIndex int
|
||||
failer *failer.Failer
|
||||
}
|
||||
|
||||
func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
|
||||
bodyType := reflect.TypeOf(body)
|
||||
if bodyType.Kind() != reflect.Func {
|
||||
panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
|
||||
}
|
||||
|
||||
runner := &runner{
|
||||
codeLocation: codeLocation,
|
||||
timeoutThreshold: timeout,
|
||||
failer: failer,
|
||||
nodeType: nodeType,
|
||||
componentIndex: componentIndex,
|
||||
}
|
||||
|
||||
switch bodyType.NumIn() {
|
||||
case 0:
|
||||
runner.syncFunc = body.(func())
|
||||
return runner
|
||||
case 1:
|
||||
if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
|
||||
panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
|
||||
}
|
||||
|
||||
wrappedBody := func(done chan<- interface{}) {
|
||||
bodyValue := reflect.ValueOf(body)
|
||||
bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
|
||||
}
|
||||
|
||||
runner.isAsync = true
|
||||
runner.asyncFunc = wrappedBody
|
||||
return runner
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
|
||||
}
|
||||
|
||||
func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
if r.isAsync {
|
||||
return r.runAsync()
|
||||
} else {
|
||||
return r.runSync()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
done := make(chan interface{}, 1)
|
||||
|
||||
go func() {
|
||||
finished := false
|
||||
|
||||
defer func() {
|
||||
if e := recover(); e != nil || !finished {
|
||||
r.failer.Panic(codelocation.New(2), e)
|
||||
select {
|
||||
case <-done:
|
||||
break
|
||||
default:
|
||||
close(done)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
r.asyncFunc(done)
|
||||
finished = true
|
||||
}()
|
||||
|
||||
// If this goroutine gets no CPU time before the select block,
|
||||
// the <-done case may complete even if the test took longer than the timeoutThreshold.
|
||||
// This can cause flaky behaviour, but we haven't seen it in the wild.
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(r.timeoutThreshold):
|
||||
r.failer.Timeout(r.codeLocation)
|
||||
}
|
||||
|
||||
failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
|
||||
return
|
||||
}
|
||||
func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
finished := false
|
||||
|
||||
defer func() {
|
||||
if e := recover(); e != nil || !finished {
|
||||
r.failer.Panic(codelocation.New(2), e)
|
||||
}
|
||||
|
||||
failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
|
||||
}()
|
||||
|
||||
r.syncFunc()
|
||||
finished = true
|
||||
|
||||
return
|
||||
}
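The async path above is an instance of a standard Go pattern: run the body on its own goroutine, let it signal a done channel, and race that signal against time.After. A standalone sketch of just that pattern, independent of ginkgo's failer and types:

package main

import (
	"errors"
	"fmt"
	"time"
)

// runWithTimeout mirrors the runner's async flow: the body signals done,
// and the caller sees either completion or a timeout, whichever comes first.
func runWithTimeout(body func(done chan<- interface{}), timeout time.Duration) error {
	done := make(chan interface{}, 1)
	go body(done)

	select {
	case <-done:
		return nil
	case <-time.After(timeout):
		return errors.New("timed out")
	}
}

func main() {
	err := runWithTimeout(func(done chan<- interface{}) {
		time.Sleep(10 * time.Millisecond)
		close(done)
	}, time.Second)
	fmt.Println(err) // <nil>
}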
|
48
vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
generated
vendored
@ -1,48 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type SetupNode struct {
|
||||
runner *runner
|
||||
}
|
||||
|
||||
func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
return node.runner.run()
|
||||
}
|
||||
|
||||
func (node *SetupNode) Type() types.SpecComponentType {
|
||||
return node.runner.nodeType
|
||||
}
|
||||
|
||||
func (node *SetupNode) CodeLocation() types.CodeLocation {
|
||||
return node.runner.codeLocation
|
||||
}
|
||||
|
||||
func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||
return &SetupNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
|
||||
}
|
||||
}
|
||||
|
||||
func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||
return &SetupNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
|
||||
}
|
||||
}
|
||||
|
||||
func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||
return &SetupNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
|
||||
}
|
||||
}
|
||||
|
||||
func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||
return &SetupNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex),
|
||||
}
|
||||
}
|
55
vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
generated
vendored
@ -1,55 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type SuiteNode interface {
|
||||
Run(parallelNode int, parallelTotal int, syncHost string) bool
|
||||
Passed() bool
|
||||
Summary() *types.SetupSummary
|
||||
}
|
||||
|
||||
type simpleSuiteNode struct {
|
||||
runner *runner
|
||||
outcome types.SpecState
|
||||
failure types.SpecFailure
|
||||
runTime time.Duration
|
||||
}
|
||||
|
||||
func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||
t := time.Now()
|
||||
node.outcome, node.failure = node.runner.run()
|
||||
node.runTime = time.Since(t)
|
||||
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *simpleSuiteNode) Passed() bool {
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *simpleSuiteNode) Summary() *types.SetupSummary {
|
||||
return &types.SetupSummary{
|
||||
ComponentType: node.runner.nodeType,
|
||||
CodeLocation: node.runner.codeLocation,
|
||||
State: node.outcome,
|
||||
RunTime: node.runTime,
|
||||
Failure: node.failure,
|
||||
}
|
||||
}
|
||||
|
||||
func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||
return &simpleSuiteNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||
return &simpleSuiteNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||
}
|
||||
}
|
90
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
generated
vendored
@ -1,90 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type synchronizedAfterSuiteNode struct {
|
||||
runnerA *runner
|
||||
runnerB *runner
|
||||
|
||||
outcome types.SpecState
|
||||
failure types.SpecFailure
|
||||
runTime time.Duration
|
||||
}
|
||||
|
||||
func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||
return &synchronizedAfterSuiteNode{
|
||||
runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||
runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||
node.outcome, node.failure = node.runnerA.run()
|
||||
|
||||
if parallelNode == 1 {
|
||||
if parallelTotal > 1 {
|
||||
node.waitUntilOtherNodesAreDone(syncHost)
|
||||
}
|
||||
|
||||
outcome, failure := node.runnerB.run()
|
||||
|
||||
if node.outcome == types.SpecStatePassed {
|
||||
node.outcome, node.failure = outcome, failure
|
||||
}
|
||||
}
|
||||
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) Passed() bool {
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
|
||||
return &types.SetupSummary{
|
||||
ComponentType: node.runnerA.nodeType,
|
||||
CodeLocation: node.runnerA.codeLocation,
|
||||
State: node.outcome,
|
||||
RunTime: node.runTime,
|
||||
Failure: node.failure,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
|
||||
for {
|
||||
if node.canRun(syncHost) {
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
|
||||
resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
return false
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
afterSuiteData := types.RemoteAfterSuiteData{}
|
||||
err = json.Unmarshal(body, &afterSuiteData)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return afterSuiteData.CanRun
|
||||
}
|
181
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
generated
vendored
@ -1,181 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type synchronizedBeforeSuiteNode struct {
|
||||
runnerA *runner
|
||||
runnerB *runner
|
||||
|
||||
data []byte
|
||||
|
||||
outcome types.SpecState
|
||||
failure types.SpecFailure
|
||||
runTime time.Duration
|
||||
}
|
||||
|
||||
func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||
node := &synchronizedBeforeSuiteNode{}
|
||||
|
||||
node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
|
||||
node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
|
||||
|
||||
return node
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||
t := time.Now()
|
||||
defer func() {
|
||||
node.runTime = time.Since(t)
|
||||
}()
|
||||
|
||||
if parallelNode == 1 {
|
||||
node.outcome, node.failure = node.runA(parallelTotal, syncHost)
|
||||
} else {
|
||||
node.outcome, node.failure = node.waitForA(syncHost)
|
||||
}
|
||||
|
||||
if node.outcome != types.SpecStatePassed {
|
||||
return false
|
||||
}
|
||||
node.outcome, node.failure = node.runnerB.run()
|
||||
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
|
||||
outcome, failure := node.runnerA.run()
|
||||
|
||||
if parallelTotal > 1 {
|
||||
state := types.RemoteBeforeSuiteStatePassed
|
||||
if outcome != types.SpecStatePassed {
|
||||
state = types.RemoteBeforeSuiteStateFailed
|
||||
}
|
||||
json := (types.RemoteBeforeSuiteData{
|
||||
Data: node.data,
|
||||
State: state,
|
||||
}).ToJSON()
|
||||
http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
|
||||
}
|
||||
|
||||
return outcome, failure
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
|
||||
failure := func(message string) types.SpecFailure {
|
||||
return types.SpecFailure{
|
||||
Message: message,
|
||||
Location: node.runnerA.codeLocation,
|
||||
ComponentType: node.runnerA.nodeType,
|
||||
ComponentIndex: node.runnerA.componentIndex,
|
||||
ComponentCodeLocation: node.runnerA.codeLocation,
|
||||
}
|
||||
}
|
||||
for {
|
||||
resp, err := http.Get(syncHost + "/BeforeSuiteState")
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
beforeSuiteData := types.RemoteBeforeSuiteData{}
|
||||
err = json.Unmarshal(body, &beforeSuiteData)
|
||||
if err != nil {
|
||||
return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
|
||||
}
|
||||
|
||||
switch beforeSuiteData.State {
|
||||
case types.RemoteBeforeSuiteStatePassed:
|
||||
node.data = beforeSuiteData.Data
|
||||
return types.SpecStatePassed, types.SpecFailure{}
|
||||
case types.RemoteBeforeSuiteStateFailed:
|
||||
return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
|
||||
case types.RemoteBeforeSuiteStateDisappeared:
|
||||
return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) Passed() bool {
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
|
||||
return &types.SetupSummary{
|
||||
ComponentType: node.runnerA.nodeType,
|
||||
CodeLocation: node.runnerA.codeLocation,
|
||||
State: node.outcome,
|
||||
RunTime: node.runTime,
|
||||
Failure: node.failure,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
|
||||
typeA := reflect.TypeOf(bodyA)
|
||||
if typeA.Kind() != reflect.Func {
|
||||
panic("SynchronizedBeforeSuite expects a function as its first argument")
|
||||
}
|
||||
|
||||
takesNothing := typeA.NumIn() == 0
|
||||
takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
|
||||
returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
|
||||
|
||||
if !((takesNothing || takesADoneChannel) && returnsBytes) {
|
||||
panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
|
||||
}
|
||||
|
||||
if takesADoneChannel {
|
||||
return func(done chan<- interface{}) {
|
||||
out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
|
||||
node.data = out[0].Interface().([]byte)
|
||||
}
|
||||
}
|
||||
|
||||
return func() {
|
||||
out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
|
||||
node.data = out[0].Interface().([]byte)
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
|
||||
typeB := reflect.TypeOf(bodyB)
|
||||
if typeB.Kind() != reflect.Func {
|
||||
panic("SynchronizedBeforeSuite expects a function as its second argument")
|
||||
}
|
||||
|
||||
returnsNothing := typeB.NumOut() == 0
|
||||
takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
|
||||
takesBytesAndDone := typeB.NumIn() == 2 &&
|
||||
typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
|
||||
typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
|
||||
|
||||
if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
|
||||
panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
|
||||
}
|
||||
|
||||
if takesBytesAndDone {
|
||||
return func(done chan<- interface{}) {
|
||||
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
|
||||
}
|
||||
}
|
||||
|
||||
return func() {
|
||||
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
|
||||
}
|
||||
}
|
249
vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
generated
vendored
@ -1,249 +0,0 @@
|
||||
/*
|
||||
|
||||
Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
|
||||
coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
|
||||
|
||||
ginkgo -nodes=N
|
||||
|
||||
where N is the number of nodes you desire.
|
||||
*/
|
||||
package remote
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type configAndSuite struct {
|
||||
config config.GinkgoConfigType
|
||||
summary *types.SuiteSummary
|
||||
}
|
||||
|
||||
type Aggregator struct {
|
||||
nodeCount int
|
||||
config config.DefaultReporterConfigType
|
||||
stenographer stenographer.Stenographer
|
||||
result chan bool
|
||||
|
||||
suiteBeginnings chan configAndSuite
|
||||
aggregatedSuiteBeginnings []configAndSuite
|
||||
|
||||
beforeSuites chan *types.SetupSummary
|
||||
aggregatedBeforeSuites []*types.SetupSummary
|
||||
|
||||
afterSuites chan *types.SetupSummary
|
||||
aggregatedAfterSuites []*types.SetupSummary
|
||||
|
||||
specCompletions chan *types.SpecSummary
|
||||
completedSpecs []*types.SpecSummary
|
||||
|
||||
suiteEndings chan *types.SuiteSummary
|
||||
aggregatedSuiteEndings []*types.SuiteSummary
|
||||
specs []*types.SpecSummary
|
||||
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
|
||||
aggregator := &Aggregator{
|
||||
nodeCount: nodeCount,
|
||||
result: result,
|
||||
config: config,
|
||||
stenographer: stenographer,
|
||||
|
||||
suiteBeginnings: make(chan configAndSuite),
|
||||
beforeSuites: make(chan *types.SetupSummary),
|
||||
afterSuites: make(chan *types.SetupSummary),
|
||||
specCompletions: make(chan *types.SpecSummary),
|
||||
suiteEndings: make(chan *types.SuiteSummary),
|
||||
}
|
||||
|
||||
go aggregator.mux()
|
||||
|
||||
return aggregator
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
aggregator.suiteBeginnings <- configAndSuite{config, summary}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
aggregator.beforeSuites <- setupSummary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
aggregator.afterSuites <- setupSummary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
//noop
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
aggregator.specCompletions <- specSummary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
aggregator.suiteEndings <- summary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) mux() {
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case configAndSuite := <-aggregator.suiteBeginnings:
|
||||
aggregator.registerSuiteBeginning(configAndSuite)
|
||||
case setupSummary := <-aggregator.beforeSuites:
|
||||
aggregator.registerBeforeSuite(setupSummary)
|
||||
case setupSummary := <-aggregator.afterSuites:
|
||||
aggregator.registerAfterSuite(setupSummary)
|
||||
case specSummary := <-aggregator.specCompletions:
|
||||
aggregator.registerSpecCompletion(specSummary)
|
||||
case suite := <-aggregator.suiteEndings:
|
||||
finished, passed := aggregator.registerSuiteEnding(suite)
|
||||
if finished {
|
||||
aggregator.result <- passed
|
||||
break loop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
|
||||
aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
|
||||
|
||||
if len(aggregator.aggregatedSuiteBeginnings) == 1 {
|
||||
aggregator.startTime = time.Now()
|
||||
}
|
||||
|
||||
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
|
||||
return
|
||||
}
|
||||
|
||||
aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
|
||||
|
||||
totalNumberOfSpecs := 0
|
||||
if len(aggregator.aggregatedSuiteBeginnings) > 0 {
|
||||
totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
|
||||
}
|
||||
|
||||
aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
|
||||
aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
|
||||
aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
|
||||
aggregator.specs = append(aggregator.specs, specSummary)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) flushCompletedSpecs() {
|
||||
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
|
||||
return
|
||||
}
|
||||
|
||||
for _, setupSummary := range aggregator.aggregatedBeforeSuites {
|
||||
aggregator.announceBeforeSuite(setupSummary)
|
||||
}
|
||||
|
||||
for _, specSummary := range aggregator.completedSpecs {
|
||||
aggregator.announceSpec(specSummary)
|
||||
}
|
||||
|
||||
for _, setupSummary := range aggregator.aggregatedAfterSuites {
|
||||
aggregator.announceAfterSuite(setupSummary)
|
||||
}
|
||||
|
||||
aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
|
||||
aggregator.completedSpecs = []*types.SpecSummary{}
|
||||
aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
|
||||
if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
|
||||
aggregator.stenographer.AnnounceSpecWillRun(specSummary)
|
||||
}
|
||||
|
||||
aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
|
||||
|
||||
switch specSummary.State {
|
||||
case types.SpecStatePassed:
|
||||
if specSummary.IsMeasurement {
|
||||
aggregator.stenographer.AnnounceSuccessfulMeasurement(specSummary, aggregator.config.Succinct)
|
||||
} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
|
||||
aggregator.stenographer.AnnounceSuccessfulSlowSpec(specSummary, aggregator.config.Succinct)
|
||||
} else {
|
||||
aggregator.stenographer.AnnounceSuccessfulSpec(specSummary)
|
||||
}
|
||||
|
||||
case types.SpecStatePending:
|
||||
aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
|
||||
case types.SpecStateSkipped:
|
||||
aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
|
||||
case types.SpecStateTimedOut:
|
||||
aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
case types.SpecStatePanicked:
|
||||
aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
case types.SpecStateFailed:
|
||||
aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
|
||||
aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
|
||||
if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
|
||||
return false, false
|
||||
}
|
||||
|
||||
aggregatedSuiteSummary := &types.SuiteSummary{}
|
||||
aggregatedSuiteSummary.SuiteSucceeded = true
|
||||
|
||||
for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
|
||||
if !suiteSummary.SuiteSucceeded {
|
||||
aggregatedSuiteSummary.SuiteSucceeded = false
|
||||
}
|
||||
|
||||
aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
|
||||
aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
|
||||
aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
|
||||
aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
|
||||
aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
|
||||
aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
|
||||
aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
|
||||
}
|
||||
|
||||
aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
|
||||
|
||||
aggregator.stenographer.SummarizeFailures(aggregator.specs)
|
||||
aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
|
||||
|
||||
return true, aggregatedSuiteSummary.SuiteSucceeded
|
||||
}
|
147
vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
@ -1,147 +0,0 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
//An interface to net/http's client to allow the injection of fakes under test
|
||||
type Poster interface {
|
||||
Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
|
||||
}
|
||||
|
||||
/*
|
||||
The ForwardingReporter is a Ginkgo reporter that forwards information to
|
||||
a Ginkgo remote server.
|
||||
|
||||
When streaming parallel test output, this reporter is automatically installed by Ginkgo.
|
||||
|
||||
This is accomplished by passing the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`; the Ginkgo test runner
|
||||
detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
|
||||
in place of Ginkgo's DefaultReporter.
|
||||
*/
|
||||
|
||||
type ForwardingReporter struct {
|
||||
serverHost string
|
||||
poster Poster
|
||||
outputInterceptor OutputInterceptor
|
||||
debugMode bool
|
||||
debugFile *os.File
|
||||
nestedReporter *reporters.DefaultReporter
|
||||
}
|
||||
|
||||
func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
|
||||
reporter := &ForwardingReporter{
|
||||
serverHost: serverHost,
|
||||
poster: poster,
|
||||
outputInterceptor: outputInterceptor,
|
||||
}
|
||||
|
||||
if debugFile != "" {
|
||||
var err error
|
||||
reporter.debugMode = true
|
||||
reporter.debugFile, err = os.Create(debugFile)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if !config.Verbose {
|
||||
//if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
|
||||
ginkgoWriter.AndRedirectTo(reporter.debugFile)
|
||||
}
|
||||
outputInterceptor.StreamTo(reporter.debugFile) //This is not working
|
||||
|
||||
stenographer := stenographer.New(false, true, reporter.debugFile)
|
||||
config.Succinct = false
|
||||
config.Verbose = true
|
||||
config.FullTrace = true
|
||||
reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
|
||||
}
|
||||
|
||||
return reporter
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) post(path string, data interface{}) {
|
||||
encoded, _ := json.Marshal(data)
|
||||
buffer := bytes.NewBuffer(encoded)
|
||||
reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
data := struct {
|
||||
Config config.GinkgoConfigType `json:"config"`
|
||||
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||
}{
|
||||
conf,
|
||||
summary,
|
||||
}
|
||||
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecSuiteWillBegin", data)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
setupSummary.CapturedOutput = output
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/BeforeSuiteDidRun", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecWillRun(specSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecWillRun", specSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
specSummary.CapturedOutput = output
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecDidComplete(specSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecDidComplete", specSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
setupSummary.CapturedOutput = output
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/AfterSuiteDidRun", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecSuiteDidEnd(summary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecSuiteDidEnd", summary)
|
||||
}
|
13
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
generated
vendored
@ -1,13 +0,0 @@
package remote

import "os"

/*
The OutputInterceptor is used by the ForwardingReporter to
intercept and capture all stdout and stderr output during a test run.
*/
type OutputInterceptor interface {
    StartInterceptingOutput() error
    StopInterceptingAndReturnOutput() (string, error)
    StreamTo(*os.File)
}
82
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
generated
vendored
@ -1,82 +0,0 @@
|
||||
// +build freebsd openbsd netbsd dragonfly darwin linux solaris
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/nxadm/tail"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func NewOutputInterceptor() OutputInterceptor {
|
||||
return &outputInterceptor{}
|
||||
}
|
||||
|
||||
type outputInterceptor struct {
|
||||
redirectFile *os.File
|
||||
streamTarget *os.File
|
||||
intercepting bool
|
||||
tailer *tail.Tail
|
||||
doneTailing chan bool
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
|
||||
if interceptor.intercepting {
|
||||
return errors.New("Already intercepting output!")
|
||||
}
|
||||
interceptor.intercepting = true
|
||||
|
||||
var err error
|
||||
|
||||
interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// This might call Dup3 if the dup2 syscall is not available, e.g. on
|
||||
// linux/arm64 or linux/riscv64
|
||||
unix.Dup2(int(interceptor.redirectFile.Fd()), 1)
|
||||
unix.Dup2(int(interceptor.redirectFile.Fd()), 2)
|
||||
|
||||
if interceptor.streamTarget != nil {
|
||||
interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
|
||||
interceptor.doneTailing = make(chan bool)
|
||||
|
||||
go func() {
|
||||
for line := range interceptor.tailer.Lines {
|
||||
interceptor.streamTarget.Write([]byte(line.Text + "\n"))
|
||||
}
|
||||
close(interceptor.doneTailing)
|
||||
}()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
|
||||
if !interceptor.intercepting {
|
||||
return "", errors.New("Not intercepting output!")
|
||||
}
|
||||
|
||||
interceptor.redirectFile.Close()
|
||||
output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
|
||||
os.Remove(interceptor.redirectFile.Name())
|
||||
|
||||
interceptor.intercepting = false
|
||||
|
||||
if interceptor.streamTarget != nil {
|
||||
interceptor.tailer.Stop()
|
||||
interceptor.tailer.Cleanup()
|
||||
<-interceptor.doneTailing
|
||||
interceptor.streamTarget.Sync()
|
||||
}
|
||||
|
||||
return string(output), err
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StreamTo(out *os.File) {
|
||||
interceptor.streamTarget = out
|
||||
}
|
36
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
generated
vendored
@ -1,36 +0,0 @@
// +build windows

package remote

import (
    "errors"
    "os"
)

func NewOutputInterceptor() OutputInterceptor {
    return &outputInterceptor{}
}

type outputInterceptor struct {
    intercepting bool
}

func (interceptor *outputInterceptor) StartInterceptingOutput() error {
    if interceptor.intercepting {
        return errors.New("Already intercepting output!")
    }
    interceptor.intercepting = true

    // not working on windows...

    return nil
}

func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
    // not working on windows...
    interceptor.intercepting = false

    return "", nil
}

func (interceptor *outputInterceptor) StreamTo(*os.File) {}
224
vendor/github.com/onsi/ginkgo/internal/remote/server.go
generated
vendored
@ -1,224 +0,0 @@
|
||||
/*
|
||||
|
||||
The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
|
||||
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
|
||||
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/spec_iterator"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
/*
|
||||
Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
|
||||
It then forwards that communication to attached reporters.
|
||||
*/
|
||||
type Server struct {
|
||||
listener net.Listener
|
||||
reporters []reporters.Reporter
|
||||
alives []func() bool
|
||||
lock *sync.Mutex
|
||||
beforeSuiteData types.RemoteBeforeSuiteData
|
||||
parallelTotal int
|
||||
counter int
|
||||
}
|
||||
|
||||
//Create a new server, automatically selecting a port
|
||||
func NewServer(parallelTotal int) (*Server, error) {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Server{
|
||||
listener: listener,
|
||||
lock: &sync.Mutex{},
|
||||
alives: make([]func() bool, parallelTotal),
|
||||
beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
|
||||
parallelTotal: parallelTotal,
|
||||
}, nil
|
||||
}
|
||||
|
||||
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
|
||||
func (server *Server) Start() {
|
||||
httpServer := &http.Server{}
|
||||
mux := http.NewServeMux()
|
||||
httpServer.Handler = mux
|
||||
|
||||
//streaming endpoints
|
||||
mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
|
||||
mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
|
||||
mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
|
||||
mux.HandleFunc("/SpecWillRun", server.specWillRun)
|
||||
mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
|
||||
mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
|
||||
|
||||
//synchronization endpoints
|
||||
mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
|
||||
mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
|
||||
mux.HandleFunc("/counter", server.handleCounter)
|
||||
mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
|
||||
|
||||
go httpServer.Serve(server.listener)
|
||||
}
|
||||
|
||||
//Stop the server
|
||||
func (server *Server) Close() {
|
||||
server.listener.Close()
|
||||
}
|
||||
|
||||
//The address at which the server can be reached. Pass this into the `ForwardingReporter`.
|
||||
func (server *Server) Address() string {
|
||||
return "http://" + server.listener.Addr().String()
|
||||
}
|
||||
|
||||
//
|
||||
// Streaming Endpoints
|
||||
//
|
||||
|
||||
//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
|
||||
func (server *Server) readAll(request *http.Request) []byte {
|
||||
defer request.Body.Close()
|
||||
body, _ := ioutil.ReadAll(request.Body)
|
||||
return body
|
||||
}
|
||||
|
||||
func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
|
||||
server.reporters = reporters
|
||||
}
|
||||
|
||||
func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
|
||||
var data struct {
|
||||
Config config.GinkgoConfigType `json:"config"`
|
||||
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||
}
|
||||
|
||||
json.Unmarshal(body, &data)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecSuiteWillBegin(data.Config, data.Summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var setupSummary *types.SetupSummary
|
||||
json.Unmarshal(body, &setupSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.BeforeSuiteDidRun(setupSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var setupSummary *types.SetupSummary
|
||||
json.Unmarshal(body, &setupSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.AfterSuiteDidRun(setupSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var specSummary *types.SpecSummary
|
||||
json.Unmarshal(body, &specSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecWillRun(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var specSummary *types.SpecSummary
|
||||
json.Unmarshal(body, &specSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecDidComplete(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var suiteSummary *types.SuiteSummary
|
||||
json.Unmarshal(body, &suiteSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecSuiteDidEnd(suiteSummary)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Synchronization Endpoints
|
||||
//
|
||||
|
||||
func (server *Server) RegisterAlive(node int, alive func() bool) {
|
||||
server.lock.Lock()
|
||||
defer server.lock.Unlock()
|
||||
server.alives[node-1] = alive
|
||||
}
|
||||
|
||||
func (server *Server) nodeIsAlive(node int) bool {
|
||||
server.lock.Lock()
|
||||
defer server.lock.Unlock()
|
||||
alive := server.alives[node-1]
|
||||
if alive == nil {
|
||||
return true
|
||||
}
|
||||
return alive()
|
||||
}
|
||||
|
||||
func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
|
||||
if request.Method == "POST" {
|
||||
dec := json.NewDecoder(request.Body)
|
||||
dec.Decode(&(server.beforeSuiteData))
|
||||
} else {
|
||||
beforeSuiteData := server.beforeSuiteData
|
||||
if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
|
||||
beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
|
||||
}
|
||||
enc := json.NewEncoder(writer)
|
||||
enc.Encode(beforeSuiteData)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
|
||||
afterSuiteData := types.RemoteAfterSuiteData{
|
||||
CanRun: true,
|
||||
}
|
||||
for i := 2; i <= server.parallelTotal; i++ {
|
||||
afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(writer)
|
||||
enc.Encode(afterSuiteData)
|
||||
}
|
||||
|
||||
func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
|
||||
c := spec_iterator.Counter{}
|
||||
server.lock.Lock()
|
||||
c.Index = server.counter
|
||||
server.counter++
|
||||
server.lock.Unlock()
|
||||
|
||||
json.NewEncoder(writer).Encode(c)
|
||||
}
|
||||
|
||||
func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
|
||||
writer.Write([]byte(""))
|
||||
}
|
247
vendor/github.com/onsi/ginkgo/internal/spec/spec.go
generated
vendored
@ -1,247 +0,0 @@
|
||||
package spec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/containernode"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type Spec struct {
|
||||
subject leafnodes.SubjectNode
|
||||
focused bool
|
||||
announceProgress bool
|
||||
|
||||
containers []*containernode.ContainerNode
|
||||
|
||||
state types.SpecState
|
||||
runTime time.Duration
|
||||
startTime time.Time
|
||||
failure types.SpecFailure
|
||||
previousFailures bool
|
||||
|
||||
stateMutex *sync.Mutex
|
||||
}
|
||||
|
||||
func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
|
||||
spec := &Spec{
|
||||
subject: subject,
|
||||
containers: containers,
|
||||
focused: subject.Flag() == types.FlagTypeFocused,
|
||||
announceProgress: announceProgress,
|
||||
stateMutex: &sync.Mutex{},
|
||||
}
|
||||
|
||||
spec.processFlag(subject.Flag())
|
||||
for i := len(containers) - 1; i >= 0; i-- {
|
||||
spec.processFlag(containers[i].Flag())
|
||||
}
|
||||
|
||||
return spec
|
||||
}
|
||||
|
||||
func (spec *Spec) processFlag(flag types.FlagType) {
|
||||
if flag == types.FlagTypeFocused {
|
||||
spec.focused = true
|
||||
} else if flag == types.FlagTypePending {
|
||||
spec.setState(types.SpecStatePending)
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) Skip() {
|
||||
spec.setState(types.SpecStateSkipped)
|
||||
}
|
||||
|
||||
func (spec *Spec) Failed() bool {
|
||||
return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut
|
||||
}
|
||||
|
||||
func (spec *Spec) Passed() bool {
|
||||
return spec.getState() == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (spec *Spec) Flaked() bool {
|
||||
return spec.getState() == types.SpecStatePassed && spec.previousFailures
|
||||
}
|
||||
|
||||
func (spec *Spec) Pending() bool {
|
||||
return spec.getState() == types.SpecStatePending
|
||||
}
|
||||
|
||||
func (spec *Spec) Skipped() bool {
|
||||
return spec.getState() == types.SpecStateSkipped
|
||||
}
|
||||
|
||||
func (spec *Spec) Focused() bool {
|
||||
return spec.focused
|
||||
}
|
||||
|
||||
func (spec *Spec) IsMeasurement() bool {
|
||||
return spec.subject.Type() == types.SpecComponentTypeMeasure
|
||||
}
|
||||
|
||||
func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
|
||||
componentTexts := make([]string, len(spec.containers)+1)
|
||||
componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
|
||||
|
||||
for i, container := range spec.containers {
|
||||
componentTexts[i] = container.Text()
|
||||
componentCodeLocations[i] = container.CodeLocation()
|
||||
}
|
||||
|
||||
componentTexts[len(spec.containers)] = spec.subject.Text()
|
||||
componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
|
||||
|
||||
runTime := spec.runTime
|
||||
if runTime == 0 && !spec.startTime.IsZero() {
|
||||
runTime = time.Since(spec.startTime)
|
||||
}
|
||||
|
||||
return &types.SpecSummary{
|
||||
IsMeasurement: spec.IsMeasurement(),
|
||||
NumberOfSamples: spec.subject.Samples(),
|
||||
ComponentTexts: componentTexts,
|
||||
ComponentCodeLocations: componentCodeLocations,
|
||||
State: spec.getState(),
|
||||
RunTime: runTime,
|
||||
Failure: spec.failure,
|
||||
Measurements: spec.measurementsReport(),
|
||||
SuiteID: suiteID,
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) ConcatenatedString() string {
|
||||
s := ""
|
||||
for _, container := range spec.containers {
|
||||
s += container.Text() + " "
|
||||
}
|
||||
|
||||
return s + spec.subject.Text()
|
||||
}
|
||||
|
||||
func (spec *Spec) Run(writer io.Writer) {
|
||||
if spec.getState() == types.SpecStateFailed {
|
||||
spec.previousFailures = true
|
||||
}
|
||||
|
||||
spec.startTime = time.Now()
|
||||
defer func() {
|
||||
spec.runTime = time.Since(spec.startTime)
|
||||
}()
|
||||
|
||||
for sample := 0; sample < spec.subject.Samples(); sample++ {
|
||||
spec.runSample(sample, writer)
|
||||
|
||||
if spec.getState() != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) getState() types.SpecState {
|
||||
spec.stateMutex.Lock()
|
||||
defer spec.stateMutex.Unlock()
|
||||
return spec.state
|
||||
}
|
||||
|
||||
func (spec *Spec) setState(state types.SpecState) {
|
||||
spec.stateMutex.Lock()
|
||||
defer spec.stateMutex.Unlock()
|
||||
spec.state = state
|
||||
}
|
||||
|
||||
func (spec *Spec) runSample(sample int, writer io.Writer) {
|
||||
spec.setState(types.SpecStatePassed)
|
||||
spec.failure = types.SpecFailure{}
|
||||
innerMostContainerIndexToUnwind := -1
|
||||
|
||||
defer func() {
|
||||
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
|
||||
container := spec.containers[i]
|
||||
for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) {
|
||||
spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach)
|
||||
justAfterEachState, justAfterEachFailure := justAfterEach.Run()
|
||||
if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
|
||||
spec.state = justAfterEachState
|
||||
spec.failure = justAfterEachFailure
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
|
||||
container := spec.containers[i]
|
||||
for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
|
||||
spec.announceSetupNode(writer, "AfterEach", container, afterEach)
|
||||
afterEachState, afterEachFailure := afterEach.Run()
|
||||
if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
|
||||
spec.setState(afterEachState)
|
||||
spec.failure = afterEachFailure
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for i, container := range spec.containers {
|
||||
innerMostContainerIndexToUnwind = i
|
||||
for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
|
||||
spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
|
||||
s, f := beforeEach.Run()
|
||||
spec.failure = f
|
||||
spec.setState(s)
|
||||
if spec.getState() != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, container := range spec.containers {
|
||||
for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
|
||||
spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
|
||||
s, f := justBeforeEach.Run()
|
||||
spec.failure = f
|
||||
spec.setState(s)
|
||||
if spec.getState() != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spec.announceSubject(writer, spec.subject)
|
||||
s, f := spec.subject.Run()
|
||||
spec.failure = f
|
||||
spec.setState(s)
|
||||
}
|
||||
|
||||
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
|
||||
if spec.announceProgress {
|
||||
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
|
||||
writer.Write([]byte(s))
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
|
||||
if spec.announceProgress {
|
||||
nodeType := ""
|
||||
switch subject.Type() {
|
||||
case types.SpecComponentTypeIt:
|
||||
nodeType = "It"
|
||||
case types.SpecComponentTypeMeasure:
|
||||
nodeType = "Measure"
|
||||
}
|
||||
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
|
||||
writer.Write([]byte(s))
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
|
||||
if !spec.IsMeasurement() || spec.Failed() {
|
||||
return map[string]*types.SpecMeasurement{}
|
||||
}
|
||||
|
||||
return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
|
||||
}
|
144
vendor/github.com/onsi/ginkgo/internal/spec/specs.go
generated
vendored
@ -1,144 +0,0 @@
|
||||
package spec
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Specs struct {
|
||||
specs []*Spec
|
||||
names []string
|
||||
|
||||
hasProgrammaticFocus bool
|
||||
RegexScansFilePath bool
|
||||
}
|
||||
|
||||
func NewSpecs(specs []*Spec) *Specs {
|
||||
names := make([]string, len(specs))
|
||||
for i, spec := range specs {
|
||||
names[i] = spec.ConcatenatedString()
|
||||
}
|
||||
return &Specs{
|
||||
specs: specs,
|
||||
names: names,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) Specs() []*Spec {
|
||||
return e.specs
|
||||
}
|
||||
|
||||
func (e *Specs) HasProgrammaticFocus() bool {
|
||||
return e.hasProgrammaticFocus
|
||||
}
|
||||
|
||||
func (e *Specs) Shuffle(r *rand.Rand) {
|
||||
sort.Sort(e)
|
||||
permutation := r.Perm(len(e.specs))
|
||||
shuffledSpecs := make([]*Spec, len(e.specs))
|
||||
names := make([]string, len(e.specs))
|
||||
for i, j := range permutation {
|
||||
shuffledSpecs[i] = e.specs[j]
|
||||
names[i] = e.names[j]
|
||||
}
|
||||
e.specs = shuffledSpecs
|
||||
e.names = names
|
||||
}
|
||||
|
||||
func (e *Specs) ApplyFocus(description string, focus, skip []string) {
|
||||
if len(focus)+len(skip) == 0 {
|
||||
e.applyProgrammaticFocus()
|
||||
} else {
|
||||
e.applyRegExpFocusAndSkip(description, focus, skip)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) applyProgrammaticFocus() {
|
||||
e.hasProgrammaticFocus = false
|
||||
for _, spec := range e.specs {
|
||||
if spec.Focused() && !spec.Pending() {
|
||||
e.hasProgrammaticFocus = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if e.hasProgrammaticFocus {
|
||||
for _, spec := range e.specs {
|
||||
if !spec.Focused() {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// toMatch returns a []byte to be used by regex matchers. When adding new behaviours to the matching function,
|
||||
// this is the place which we append to.
|
||||
func (e *Specs) toMatch(description string, i int) []byte {
|
||||
if i > len(e.names) {
|
||||
return nil
|
||||
}
|
||||
if e.RegexScansFilePath {
|
||||
return []byte(
|
||||
description + " " +
|
||||
e.names[i] + " " +
|
||||
e.specs[i].subject.CodeLocation().FileName)
|
||||
} else {
|
||||
return []byte(
|
||||
description + " " +
|
||||
e.names[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) applyRegExpFocusAndSkip(description string, focus, skip []string) {
|
||||
var focusFilter, skipFilter *regexp.Regexp
|
||||
if len(focus) > 0 {
|
||||
focusFilter = regexp.MustCompile(strings.Join(focus, "|"))
|
||||
}
|
||||
if len(skip) > 0 {
|
||||
skipFilter = regexp.MustCompile(strings.Join(skip, "|"))
|
||||
}
|
||||
|
||||
for i, spec := range e.specs {
|
||||
matchesFocus := true
|
||||
matchesSkip := false
|
||||
|
||||
toMatch := e.toMatch(description, i)
|
||||
|
||||
if focusFilter != nil {
|
||||
matchesFocus = focusFilter.Match(toMatch)
|
||||
}
|
||||
|
||||
if skipFilter != nil {
|
||||
matchesSkip = skipFilter.Match(toMatch)
|
||||
}
|
||||
|
||||
if !matchesFocus || matchesSkip {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) SkipMeasurements() {
|
||||
for _, spec := range e.specs {
|
||||
if spec.IsMeasurement() {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//sort.Interface
|
||||
|
||||
func (e *Specs) Len() int {
|
||||
return len(e.specs)
|
||||
}
|
||||
|
||||
func (e *Specs) Less(i, j int) bool {
|
||||
return e.names[i] < e.names[j]
|
||||
}
|
||||
|
||||
func (e *Specs) Swap(i, j int) {
|
||||
e.names[i], e.names[j] = e.names[j], e.names[i]
|
||||
e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
|
||||
}
|
55
vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
generated
vendored
@ -1,55 +0,0 @@
package spec_iterator

func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
    if length == 0 {
        return 0, 0
    }

    // We have more nodes than tests. Trivial case.
    if parallelTotal >= length {
        if parallelNode > length {
            return 0, 0
        } else {
            return parallelNode - 1, 1
        }
    }

    // This is the minimum amount of tests that a node will be required to run
    minTestsPerNode := length / parallelTotal

    // This is the maximum amount of tests that a node will be required to run
    // The algorithm guarantees that this would be equal to at least the minimum amount
    // and at most one more
    maxTestsPerNode := minTestsPerNode
    if length%parallelTotal != 0 {
        maxTestsPerNode++
    }

    // Number of nodes that will have to run the maximum amount of tests per node
    numMaxLoadNodes := length % parallelTotal

    // Number of nodes that precede the current node and will have to run the maximum amount of tests per node
    var numPrecedingMaxLoadNodes int
    if parallelNode > numMaxLoadNodes {
        numPrecedingMaxLoadNodes = numMaxLoadNodes
    } else {
        numPrecedingMaxLoadNodes = parallelNode - 1
    }

    // Number of nodes that precede the current node and will have to run the minimum amount of tests per node
    var numPrecedingMinLoadNodes int
    if parallelNode <= numMaxLoadNodes {
        numPrecedingMinLoadNodes = 0
    } else {
        numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
    }

    // Evaluate the test start index and number of tests to run
    startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
    if parallelNode > numMaxLoadNodes {
        count = minTestsPerNode
    } else {
        count = maxTestsPerNode
    }
    return
}
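As a minimal standalone sketch (not part of the commit itself), the following program re-implements the partitioning formula from the deleted ParallelizedIndexRange above so it can run without the removed package; the spec count of 10 and node total of 3 are made-up example values.

package main

import "fmt"

// partition mirrors the arithmetic of the deleted ParallelizedIndexRange:
// every node gets length/total specs, and the first length%total nodes get one extra.
func partition(length, total, node int) (start, count int) {
    if length == 0 {
        return 0, 0
    }
    if total >= length {
        if node > length {
            return 0, 0
        }
        return node - 1, 1
    }
    minPerNode := length / total
    maxPerNode := minPerNode
    if length%total != 0 {
        maxPerNode++
    }
    numMaxLoadNodes := length % total
    precedingMax := node - 1
    if node > numMaxLoadNodes {
        precedingMax = numMaxLoadNodes
    }
    precedingMin := 0
    if node > numMaxLoadNodes {
        precedingMin = node - numMaxLoadNodes - 1
    }
    start = precedingMax*maxPerNode + precedingMin*minPerNode
    if node > numMaxLoadNodes {
        return start, minPerNode
    }
    return start, maxPerNode
}

func main() {
    // 10 specs across 3 parallel nodes: node 1 gets 4 specs, nodes 2 and 3 get 3 each.
    for node := 1; node <= 3; node++ {
        start, count := partition(10, 3, node)
        fmt.Printf("node %d runs specs [%d, %d)\n", node, start, start+count)
    }
}

With 10 specs and 3 nodes this prints ranges [0, 4), [4, 7) and [7, 10), so no node runs more than one extra spec.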
59
vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
generated
vendored
@ -1,59 +0,0 @@
package spec_iterator

import (
    "encoding/json"
    "fmt"
    "net/http"

    "github.com/onsi/ginkgo/internal/spec"
)

type ParallelIterator struct {
    specs  []*spec.Spec
    host   string
    client *http.Client
}

func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
    return &ParallelIterator{
        specs:  specs,
        host:   host,
        client: &http.Client{},
    }
}

func (s *ParallelIterator) Next() (*spec.Spec, error) {
    resp, err := s.client.Get(s.host + "/counter")
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
    }

    var counter Counter
    err = json.NewDecoder(resp.Body).Decode(&counter)
    if err != nil {
        return nil, err
    }

    if counter.Index >= len(s.specs) {
        return nil, ErrClosed
    }

    return s.specs[counter.Index], nil
}

func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
    return len(s.specs)
}

func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
    return -1, false
}

func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
    return -1, false
}
45
vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
generated
vendored
@ -1,45 +0,0 @@
package spec_iterator

import (
    "github.com/onsi/ginkgo/internal/spec"
)

type SerialIterator struct {
    specs []*spec.Spec
    index int
}

func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
    return &SerialIterator{
        specs: specs,
        index: 0,
    }
}

func (s *SerialIterator) Next() (*spec.Spec, error) {
    if s.index >= len(s.specs) {
        return nil, ErrClosed
    }

    spec := s.specs[s.index]
    s.index += 1
    return spec, nil
}

func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
    return len(s.specs)
}

func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
    return len(s.specs), true
}

func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
    count := 0
    for _, s := range s.specs {
        if !s.Skipped() && !s.Pending() {
            count += 1
        }
    }
    return count, true
}
47
vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
generated
vendored
@ -1,47 +0,0 @@
package spec_iterator

import "github.com/onsi/ginkgo/internal/spec"

type ShardedParallelIterator struct {
    specs    []*spec.Spec
    index    int
    maxIndex int
}

func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
    startIndex, count := ParallelizedIndexRange(len(specs), total, node)

    return &ShardedParallelIterator{
        specs:    specs,
        index:    startIndex,
        maxIndex: startIndex + count,
    }
}

func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
    if s.index >= s.maxIndex {
        return nil, ErrClosed
    }

    spec := s.specs[s.index]
    s.index += 1
    return spec, nil
}

func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
    return len(s.specs)
}

func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
    return s.maxIndex - s.index, true
}

func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
    count := 0
    for i := s.index; i < s.maxIndex; i += 1 {
        if !s.specs[i].Skipped() && !s.specs[i].Pending() {
            count += 1
        }
    }
    return count, true
}
20
vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
generated
vendored
@ -1,20 +0,0 @@
package spec_iterator

import (
    "errors"

    "github.com/onsi/ginkgo/internal/spec"
)

var ErrClosed = errors.New("no more specs to run")

type SpecIterator interface {
    Next() (*spec.Spec, error)
    NumberOfSpecsPriorToIteration() int
    NumberOfSpecsToProcessIfKnown() (int, bool)
    NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
}

type Counter struct {
    Index int `json:"index"`
}
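A minimal sketch, assuming a toy in-memory iterator, of how a consumer drains the SpecIterator contract above: Next() is called until the ErrClosed sentinel comes back, which is the same loop shape the spec runner in this commit used. The sliceIterator type and errClosed variable below are hypothetical stand-ins, not part of the deleted code.

package main

import (
    "errors"
    "fmt"
)

// errClosed stands in for spec_iterator.ErrClosed in this self-contained sketch.
var errClosed = errors.New("no more specs to run")

// sliceIterator is a toy iterator over spec names, mirroring SerialIterator's shape.
type sliceIterator struct {
    names []string
    index int
}

func (s *sliceIterator) Next() (string, error) {
    if s.index >= len(s.names) {
        return "", errClosed
    }
    name := s.names[s.index]
    s.index++
    return name, nil
}

func main() {
    it := &sliceIterator{names: []string{"spec A", "spec B", "spec C"}}
    // Drain the iterator until the closed sentinel, as the spec runner does.
    for {
        name, err := it.Next()
        if errors.Is(err, errClosed) {
            break
        }
        if err != nil {
            fmt.Println("failed to iterate:", err)
            break
        }
        fmt.Println("running", name)
    }
}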
15
vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
generated
vendored
@ -1,15 +0,0 @@
package specrunner

import (
    "crypto/rand"
    "fmt"
)

func randomID() string {
    b := make([]byte, 8)
    _, err := rand.Read(b)
    if err != nil {
        return ""
    }
    return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
}
411
vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
generated
vendored
@ -1,411 +0,0 @@
|
||||
package specrunner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/spec_iterator"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
Writer "github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
|
||||
"time"
|
||||
)
|
||||
|
||||
type SpecRunner struct {
|
||||
description string
|
||||
beforeSuiteNode leafnodes.SuiteNode
|
||||
iterator spec_iterator.SpecIterator
|
||||
afterSuiteNode leafnodes.SuiteNode
|
||||
reporters []reporters.Reporter
|
||||
startTime time.Time
|
||||
suiteID string
|
||||
runningSpec *spec.Spec
|
||||
writer Writer.WriterInterface
|
||||
config config.GinkgoConfigType
|
||||
interrupted bool
|
||||
processedSpecs []*spec.Spec
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
|
||||
return &SpecRunner{
|
||||
description: description,
|
||||
beforeSuiteNode: beforeSuiteNode,
|
||||
iterator: iterator,
|
||||
afterSuiteNode: afterSuiteNode,
|
||||
reporters: reporters,
|
||||
writer: writer,
|
||||
config: config,
|
||||
suiteID: randomID(),
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) Run() bool {
|
||||
if runner.config.DryRun {
|
||||
runner.performDryRun()
|
||||
return true
|
||||
}
|
||||
|
||||
runner.reportSuiteWillBegin()
|
||||
signalRegistered := make(chan struct{})
|
||||
go runner.registerForInterrupts(signalRegistered)
|
||||
<-signalRegistered
|
||||
|
||||
suitePassed := runner.runBeforeSuite()
|
||||
|
||||
if suitePassed {
|
||||
suitePassed = runner.runSpecs()
|
||||
}
|
||||
|
||||
runner.blockForeverIfInterrupted()
|
||||
|
||||
suitePassed = runner.runAfterSuite() && suitePassed
|
||||
|
||||
runner.reportSuiteDidEnd(suitePassed)
|
||||
|
||||
return suitePassed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) performDryRun() {
|
||||
runner.reportSuiteWillBegin()
|
||||
|
||||
if runner.beforeSuiteNode != nil {
|
||||
summary := runner.beforeSuiteNode.Summary()
|
||||
summary.State = types.SpecStatePassed
|
||||
runner.reportBeforeSuite(summary)
|
||||
}
|
||||
|
||||
for {
|
||||
spec, err := runner.iterator.Next()
|
||||
if err == spec_iterator.ErrClosed {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Println("failed to iterate over tests:\n" + err.Error())
|
||||
break
|
||||
}
|
||||
|
||||
runner.processedSpecs = append(runner.processedSpecs, spec)
|
||||
|
||||
summary := spec.Summary(runner.suiteID)
|
||||
runner.reportSpecWillRun(summary)
|
||||
if summary.State == types.SpecStateInvalid {
|
||||
summary.State = types.SpecStatePassed
|
||||
}
|
||||
runner.reportSpecDidComplete(summary, false)
|
||||
}
|
||||
|
||||
if runner.afterSuiteNode != nil {
|
||||
summary := runner.afterSuiteNode.Summary()
|
||||
summary.State = types.SpecStatePassed
|
||||
runner.reportAfterSuite(summary)
|
||||
}
|
||||
|
||||
runner.reportSuiteDidEnd(true)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runBeforeSuite() bool {
|
||||
if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
|
||||
return true
|
||||
}
|
||||
|
||||
runner.writer.Truncate()
|
||||
conf := runner.config
|
||||
passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
|
||||
if !passed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
|
||||
return passed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runAfterSuite() bool {
|
||||
if runner.afterSuiteNode == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
runner.writer.Truncate()
|
||||
conf := runner.config
|
||||
passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
|
||||
if !passed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
runner.reportAfterSuite(runner.afterSuiteNode.Summary())
|
||||
return passed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runSpecs() bool {
|
||||
suiteFailed := false
|
||||
skipRemainingSpecs := false
|
||||
for {
|
||||
spec, err := runner.iterator.Next()
|
||||
if err == spec_iterator.ErrClosed {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Println("failed to iterate over tests:\n" + err.Error())
|
||||
suiteFailed = true
|
||||
break
|
||||
}
|
||||
|
||||
runner.processedSpecs = append(runner.processedSpecs, spec)
|
||||
|
||||
if runner.wasInterrupted() {
|
||||
break
|
||||
}
|
||||
if skipRemainingSpecs {
|
||||
spec.Skip()
|
||||
}
|
||||
|
||||
if !spec.Skipped() && !spec.Pending() {
|
||||
if passed := runner.runSpec(spec); !passed {
|
||||
suiteFailed = true
|
||||
}
|
||||
} else if spec.Pending() && runner.config.FailOnPending {
|
||||
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
|
||||
suiteFailed = true
|
||||
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
|
||||
} else {
|
||||
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
|
||||
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
|
||||
}
|
||||
|
||||
if spec.Failed() && runner.config.FailFast {
|
||||
skipRemainingSpecs = true
|
||||
}
|
||||
}
|
||||
|
||||
return !suiteFailed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) {
|
||||
maxAttempts := 1
|
||||
if runner.config.FlakeAttempts > 0 {
|
||||
// uninitialized configs count as 1
|
||||
maxAttempts = runner.config.FlakeAttempts
|
||||
}
|
||||
|
||||
for i := 0; i < maxAttempts; i++ {
|
||||
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
|
||||
runner.runningSpec = spec
|
||||
spec.Run(runner.writer)
|
||||
runner.runningSpec = nil
|
||||
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
|
||||
if !spec.Failed() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
|
||||
if runner.runningSpec == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return runner.runningSpec.Summary(runner.suiteID), true
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) {
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
close(signalRegistered)
|
||||
|
||||
<-c
|
||||
signal.Stop(c)
|
||||
runner.markInterrupted()
|
||||
go runner.registerForHardInterrupts()
|
||||
runner.writer.DumpOutWithHeader(`
|
||||
Received interrupt. Emitting contents of GinkgoWriter...
|
||||
---------------------------------------------------------
|
||||
`)
|
||||
if runner.afterSuiteNode != nil {
|
||||
fmt.Fprint(os.Stderr, `
|
||||
---------------------------------------------------------
|
||||
Received interrupt. Running AfterSuite...
|
||||
^C again to terminate immediately
|
||||
`)
|
||||
runner.runAfterSuite()
|
||||
}
|
||||
runner.reportSuiteDidEnd(false)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) registerForHardInterrupts() {
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
<-c
|
||||
fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) blockForeverIfInterrupted() {
|
||||
runner.lock.Lock()
|
||||
interrupted := runner.interrupted
|
||||
runner.lock.Unlock()
|
||||
|
||||
if interrupted {
|
||||
select {}
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) markInterrupted() {
|
||||
runner.lock.Lock()
|
||||
defer runner.lock.Unlock()
|
||||
runner.interrupted = true
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) wasInterrupted() bool {
|
||||
runner.lock.Lock()
|
||||
defer runner.lock.Unlock()
|
||||
return runner.interrupted
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSuiteWillBegin() {
|
||||
runner.startTime = time.Now()
|
||||
summary := runner.suiteWillBeginSummary()
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecSuiteWillBegin(runner.config, summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.BeforeSuiteDidRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.AfterSuiteDidRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
|
||||
runner.writer.Truncate()
|
||||
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecWillRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
|
||||
if len(summary.CapturedOutput) == 0 {
|
||||
summary.CapturedOutput = string(runner.writer.Bytes())
|
||||
}
|
||||
for i := len(runner.reporters) - 1; i >= 1; i-- {
|
||||
runner.reporters[i].SpecDidComplete(summary)
|
||||
}
|
||||
|
||||
if failed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
|
||||
runner.reporters[0].SpecDidComplete(summary)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
|
||||
summary := runner.suiteDidEndSummary(success)
|
||||
summary.RunTime = time.Since(runner.startTime)
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecSuiteDidEnd(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) {
|
||||
count = 0
|
||||
|
||||
for _, spec := range runner.processedSpecs {
|
||||
if filter(spec) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary {
|
||||
numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
|
||||
return !ex.Skipped() && !ex.Pending()
|
||||
})
|
||||
|
||||
numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Pending()
|
||||
})
|
||||
|
||||
numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Skipped()
|
||||
})
|
||||
|
||||
numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Passed()
|
||||
})
|
||||
|
||||
numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Flaked()
|
||||
})
|
||||
|
||||
numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Failed()
|
||||
})
|
||||
|
||||
if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
|
||||
var known bool
|
||||
numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
|
||||
if !known {
|
||||
numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration()
|
||||
}
|
||||
numberOfFailedSpecs = numberOfSpecsThatWillBeRun
|
||||
}
|
||||
|
||||
return &types.SuiteSummary{
|
||||
SuiteDescription: runner.description,
|
||||
SuiteSucceeded: success,
|
||||
SuiteID: runner.suiteID,
|
||||
|
||||
NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
|
||||
NumberOfTotalSpecs: len(runner.processedSpecs),
|
||||
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
|
||||
NumberOfPendingSpecs: numberOfPendingSpecs,
|
||||
NumberOfSkippedSpecs: numberOfSkippedSpecs,
|
||||
NumberOfPassedSpecs: numberOfPassedSpecs,
|
||||
NumberOfFailedSpecs: numberOfFailedSpecs,
|
||||
NumberOfFlakedSpecs: numberOfFlakedSpecs,
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary {
|
||||
numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown()
|
||||
if !known {
|
||||
numTotal = -1
|
||||
}
|
||||
|
||||
numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
|
||||
if !known {
|
||||
numToRun = -1
|
||||
}
|
||||
|
||||
return &types.SuiteSummary{
|
||||
SuiteDescription: runner.description,
|
||||
SuiteID: runner.suiteID,
|
||||
|
||||
NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
|
||||
NumberOfTotalSpecs: numTotal,
|
||||
NumberOfSpecsThatWillBeRun: numToRun,
|
||||
NumberOfPendingSpecs: -1,
|
||||
NumberOfSkippedSpecs: -1,
|
||||
NumberOfPassedSpecs: -1,
|
||||
NumberOfFailedSpecs: -1,
|
||||
NumberOfFlakedSpecs: -1,
|
||||
}
|
||||
}
|
227  vendor/github.com/onsi/ginkgo/internal/suite/suite.go (generated, vendored)
@@ -1,227 +0,0 @@
|
||||
package suite
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/spec_iterator"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/internal/containernode"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
"github.com/onsi/ginkgo/internal/specrunner"
|
||||
"github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type ginkgoTestingT interface {
|
||||
Fail()
|
||||
}
|
||||
|
||||
type deferredContainerNode struct {
|
||||
text string
|
||||
body func()
|
||||
flag types.FlagType
|
||||
codeLocation types.CodeLocation
|
||||
}
|
||||
|
||||
type Suite struct {
|
||||
topLevelContainer *containernode.ContainerNode
|
||||
currentContainer *containernode.ContainerNode
|
||||
|
||||
deferredContainerNodes []deferredContainerNode
|
||||
|
||||
containerIndex int
|
||||
beforeSuiteNode leafnodes.SuiteNode
|
||||
afterSuiteNode leafnodes.SuiteNode
|
||||
runner *specrunner.SpecRunner
|
||||
failer *failer.Failer
|
||||
running bool
|
||||
expandTopLevelNodes bool
|
||||
}
|
||||
|
||||
func New(failer *failer.Failer) *Suite {
|
||||
topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
|
||||
|
||||
return &Suite{
|
||||
topLevelContainer: topLevelContainer,
|
||||
currentContainer: topLevelContainer,
|
||||
failer: failer,
|
||||
containerIndex: 1,
|
||||
deferredContainerNodes: []deferredContainerNode{},
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
|
||||
if config.ParallelTotal < 1 {
|
||||
panic("ginkgo.parallel.total must be >= 1")
|
||||
}
|
||||
|
||||
if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
|
||||
panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
|
||||
}
|
||||
|
||||
suite.expandTopLevelNodes = true
|
||||
for _, deferredNode := range suite.deferredContainerNodes {
|
||||
suite.PushContainerNode(deferredNode.text, deferredNode.body, deferredNode.flag, deferredNode.codeLocation)
|
||||
}
|
||||
|
||||
r := rand.New(rand.NewSource(config.RandomSeed))
|
||||
suite.topLevelContainer.Shuffle(r)
|
||||
iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config)
|
||||
suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config)
|
||||
|
||||
suite.running = true
|
||||
success := suite.runner.Run()
|
||||
if !success {
|
||||
t.Fail()
|
||||
}
|
||||
return success, hasProgrammaticFocus
|
||||
}
|
||||
|
||||
func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) {
|
||||
specsSlice := []*spec.Spec{}
|
||||
suite.topLevelContainer.BackPropagateProgrammaticFocus()
|
||||
for _, collatedNodes := range suite.topLevelContainer.Collate() {
|
||||
specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
|
||||
}
|
||||
|
||||
specs := spec.NewSpecs(specsSlice)
|
||||
specs.RegexScansFilePath = config.RegexScansFilePath
|
||||
|
||||
if config.RandomizeAllSpecs {
|
||||
specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
|
||||
}
|
||||
|
||||
specs.ApplyFocus(description, config.FocusStrings, config.SkipStrings)
|
||||
|
||||
if config.SkipMeasurements {
|
||||
specs.SkipMeasurements()
|
||||
}
|
||||
|
||||
var iterator spec_iterator.SpecIterator
|
||||
|
||||
if config.ParallelTotal > 1 {
|
||||
iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost)
|
||||
resp, err := http.Get(config.SyncHost + "/has-counter")
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode)
|
||||
}
|
||||
} else {
|
||||
iterator = spec_iterator.NewSerialIterator(specs.Specs())
|
||||
}
|
||||
|
||||
return iterator, specs.HasProgrammaticFocus()
|
||||
}
|
||||
|
||||
func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
|
||||
if !suite.running {
|
||||
return nil, false
|
||||
}
|
||||
return suite.runner.CurrentSpecSummary()
|
||||
}
|
||||
|
||||
func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.beforeSuiteNode != nil {
|
||||
panic("You may only call BeforeSuite once!")
|
||||
}
|
||||
suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.afterSuiteNode != nil {
|
||||
panic("You may only call AfterSuite once!")
|
||||
}
|
||||
suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.beforeSuiteNode != nil {
|
||||
panic("You may only call BeforeSuite once!")
|
||||
}
|
||||
suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.afterSuiteNode != nil {
|
||||
panic("You may only call AfterSuite once!")
|
||||
}
|
||||
suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
|
||||
/*
|
||||
We defer walking the container nodes (which immediately evaluates the `body` function)
|
||||
until `RunSpecs` is called. We do this by storing off the deferred container nodes. Then, when
|
||||
`RunSpecs` is called we actually go through and add the container nodes to the test structure.
|
||||
|
||||
This allows us to defer calling all the `body` functions until _after_ the top level functions
|
||||
have been walked, _after_ func init()s have been called, and _after_ `go test` has called `flag.Parse()`.
|
||||
|
||||
This allows users to load up configuration information in the `TestX` go test hook just before `RunSpecs`
|
||||
is invoked and solves issues like #693 and makes the lifecycle easier to reason about.
|
||||
|
||||
*/
|
||||
if !suite.expandTopLevelNodes {
|
||||
suite.deferredContainerNodes = append(suite.deferredContainerNodes, deferredContainerNode{text, body, flag, codeLocation})
|
||||
return
|
||||
}
|
||||
|
||||
container := containernode.New(text, flag, codeLocation)
|
||||
suite.currentContainer.PushContainerNode(container)
|
||||
|
||||
previousContainer := suite.currentContainer
|
||||
suite.currentContainer = container
|
||||
suite.containerIndex++
|
||||
|
||||
body()
|
||||
|
||||
suite.containerIndex--
|
||||
suite.currentContainer = previousContainer
|
||||
}
|
||||
|
||||
func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
36  vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go (generated, vendored)
@@ -1,36 +0,0 @@
package writer

type FakeGinkgoWriter struct {
	EventStream []string
}

func NewFake() *FakeGinkgoWriter {
	return &FakeGinkgoWriter{
		EventStream: []string{},
	}
}

func (writer *FakeGinkgoWriter) AddEvent(event string) {
	writer.EventStream = append(writer.EventStream, event)
}

func (writer *FakeGinkgoWriter) Truncate() {
	writer.EventStream = append(writer.EventStream, "TRUNCATE")
}

func (writer *FakeGinkgoWriter) DumpOut() {
	writer.EventStream = append(writer.EventStream, "DUMP")
}

func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
	writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
}

func (writer *FakeGinkgoWriter) Bytes() []byte {
	writer.EventStream = append(writer.EventStream, "BYTES")
	return nil
}

func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
	return 0, nil
}
89  vendor/github.com/onsi/ginkgo/internal/writer/writer.go (generated, vendored)
@@ -1,89 +0,0 @@
package writer

import (
	"bytes"
	"io"
	"sync"
)

type WriterInterface interface {
	io.Writer

	Truncate()
	DumpOut()
	DumpOutWithHeader(header string)
	Bytes() []byte
}

type Writer struct {
	buffer     *bytes.Buffer
	outWriter  io.Writer
	lock       *sync.Mutex
	stream     bool
	redirector io.Writer
}

func New(outWriter io.Writer) *Writer {
	return &Writer{
		buffer:    &bytes.Buffer{},
		lock:      &sync.Mutex{},
		outWriter: outWriter,
		stream:    true,
	}
}

func (w *Writer) AndRedirectTo(writer io.Writer) {
	w.redirector = writer
}

func (w *Writer) SetStream(stream bool) {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.stream = stream
}

func (w *Writer) Write(b []byte) (n int, err error) {
	w.lock.Lock()
	defer w.lock.Unlock()

	n, err = w.buffer.Write(b)
	if w.redirector != nil {
		w.redirector.Write(b)
	}
	if w.stream {
		return w.outWriter.Write(b)
	}
	return n, err
}

func (w *Writer) Truncate() {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.buffer.Reset()
}

func (w *Writer) DumpOut() {
	w.lock.Lock()
	defer w.lock.Unlock()
	if !w.stream {
		w.buffer.WriteTo(w.outWriter)
	}
}

func (w *Writer) Bytes() []byte {
	w.lock.Lock()
	defer w.lock.Unlock()
	b := w.buffer.Bytes()
	copied := make([]byte, len(b))
	copy(copied, b)
	return copied
}

func (w *Writer) DumpOutWithHeader(header string) {
	w.lock.Lock()
	defer w.lock.Unlock()
	if !w.stream && w.buffer.Len() > 0 {
		w.outWriter.Write([]byte(header))
		w.buffer.WriteTo(w.outWriter)
	}
}
87  vendor/github.com/onsi/ginkgo/reporters/default_reporter.go (generated, vendored)
@@ -1,87 +0,0 @@
|
||||
/*
|
||||
Ginkgo's Default Reporter
|
||||
|
||||
A number of command line flags are available to tweak Ginkgo's default output.
|
||||
|
||||
These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
|
||||
*/
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type DefaultReporter struct {
|
||||
config config.DefaultReporterConfigType
|
||||
stenographer stenographer.Stenographer
|
||||
specSummaries []*types.SpecSummary
|
||||
}
|
||||
|
||||
func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
|
||||
return &DefaultReporter{
|
||||
config: config,
|
||||
stenographer: stenographer,
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
|
||||
if config.ParallelTotal > 1 {
|
||||
reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct)
|
||||
} else {
|
||||
reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
|
||||
reporter.stenographer.AnnounceSpecWillRun(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
switch specSummary.State {
|
||||
case types.SpecStatePassed:
|
||||
if specSummary.IsMeasurement {
|
||||
reporter.stenographer.AnnounceSuccessfulMeasurement(specSummary, reporter.config.Succinct)
|
||||
} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
|
||||
reporter.stenographer.AnnounceSuccessfulSlowSpec(specSummary, reporter.config.Succinct)
|
||||
} else {
|
||||
reporter.stenographer.AnnounceSuccessfulSpec(specSummary)
|
||||
if reporter.config.ReportPassed {
|
||||
reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
|
||||
}
|
||||
}
|
||||
case types.SpecStatePending:
|
||||
reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
|
||||
case types.SpecStateSkipped:
|
||||
reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace)
|
||||
case types.SpecStateTimedOut:
|
||||
reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
case types.SpecStatePanicked:
|
||||
reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
case types.SpecStateFailed:
|
||||
reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
|
||||
reporter.specSummaries = append(reporter.specSummaries, specSummary)
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.stenographer.SummarizeFailures(reporter.specSummaries)
|
||||
reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
|
||||
}
|
59  vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go (generated, vendored)
@@ -1,59 +0,0 @@
package reporters

import (
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

//FakeReporter is useful for testing purposes
type FakeReporter struct {
	Config config.GinkgoConfigType

	BeginSummary         *types.SuiteSummary
	BeforeSuiteSummary   *types.SetupSummary
	SpecWillRunSummaries []*types.SpecSummary
	SpecSummaries        []*types.SpecSummary
	AfterSuiteSummary    *types.SetupSummary
	EndSummary           *types.SuiteSummary

	SpecWillRunStub     func(specSummary *types.SpecSummary)
	SpecDidCompleteStub func(specSummary *types.SpecSummary)
}

func NewFakeReporter() *FakeReporter {
	return &FakeReporter{
		SpecWillRunSummaries: make([]*types.SpecSummary, 0),
		SpecSummaries:        make([]*types.SpecSummary, 0),
	}
}

func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	fakeR.Config = config
	fakeR.BeginSummary = summary
}

func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	fakeR.BeforeSuiteSummary = setupSummary
}

func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
	if fakeR.SpecWillRunStub != nil {
		fakeR.SpecWillRunStub(specSummary)
	}
	fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
}

func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	if fakeR.SpecDidCompleteStub != nil {
		fakeR.SpecDidCompleteStub(specSummary)
	}
	fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
}

func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	fakeR.AfterSuiteSummary = setupSummary
}

func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	fakeR.EndSummary = summary
}
178  vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go (generated, vendored)
@@ -1,178 +0,0 @@
|
||||
/*
|
||||
|
||||
JUnit XML Reporter for Ginkgo
|
||||
|
||||
For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
|
||||
|
||||
*/
|
||||
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type JUnitTestSuite struct {
|
||||
XMLName xml.Name `xml:"testsuite"`
|
||||
TestCases []JUnitTestCase `xml:"testcase"`
|
||||
Name string `xml:"name,attr"`
|
||||
Tests int `xml:"tests,attr"`
|
||||
Failures int `xml:"failures,attr"`
|
||||
Errors int `xml:"errors,attr"`
|
||||
Time float64 `xml:"time,attr"`
|
||||
}
|
||||
|
||||
type JUnitTestCase struct {
|
||||
Name string `xml:"name,attr"`
|
||||
ClassName string `xml:"classname,attr"`
|
||||
FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
|
||||
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
|
||||
Time float64 `xml:"time,attr"`
|
||||
SystemOut string `xml:"system-out,omitempty"`
|
||||
}
|
||||
|
||||
type JUnitFailureMessage struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Message string `xml:",chardata"`
|
||||
}
|
||||
|
||||
type JUnitSkipped struct {
|
||||
Message string `xml:",chardata"`
|
||||
}
|
||||
|
||||
type JUnitReporter struct {
|
||||
suite JUnitTestSuite
|
||||
filename string
|
||||
testSuiteName string
|
||||
ReporterConfig config.DefaultReporterConfigType
|
||||
}
|
||||
|
||||
//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename.
|
||||
func NewJUnitReporter(filename string) *JUnitReporter {
|
||||
return &JUnitReporter{
|
||||
filename: filename,
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
reporter.suite = JUnitTestSuite{
|
||||
Name: summary.SuiteDescription,
|
||||
TestCases: []JUnitTestCase{},
|
||||
}
|
||||
reporter.testSuiteName = summary.SuiteDescription
|
||||
reporter.ReporterConfig = config.DefaultReporterConfig
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
reporter.handleSetupSummary("BeforeSuite", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
reporter.handleSetupSummary("AfterSuite", setupSummary)
|
||||
}
|
||||
|
||||
func failureMessage(failure types.SpecFailure) string {
|
||||
return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String())
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
testCase := JUnitTestCase{
|
||||
Name: name,
|
||||
ClassName: reporter.testSuiteName,
|
||||
}
|
||||
|
||||
testCase.FailureMessage = &JUnitFailureMessage{
|
||||
Type: reporter.failureTypeForState(setupSummary.State),
|
||||
Message: failureMessage(setupSummary.Failure),
|
||||
}
|
||||
testCase.SystemOut = setupSummary.CapturedOutput
|
||||
testCase.Time = setupSummary.RunTime.Seconds()
|
||||
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
testCase := JUnitTestCase{
|
||||
Name: strings.Join(specSummary.ComponentTexts[1:], " "),
|
||||
ClassName: reporter.testSuiteName,
|
||||
}
|
||||
if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
|
||||
testCase.SystemOut = specSummary.CapturedOutput
|
||||
}
|
||||
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
|
||||
testCase.FailureMessage = &JUnitFailureMessage{
|
||||
Type: reporter.failureTypeForState(specSummary.State),
|
||||
Message: failureMessage(specSummary.Failure),
|
||||
}
|
||||
if specSummary.State == types.SpecStatePanicked {
|
||||
testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s",
|
||||
specSummary.Failure.ForwardedPanic,
|
||||
specSummary.Failure.Location.FullStackTrace)
|
||||
}
|
||||
testCase.SystemOut = specSummary.CapturedOutput
|
||||
}
|
||||
if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
|
||||
testCase.Skipped = &JUnitSkipped{}
|
||||
if specSummary.Failure.Message != "" {
|
||||
testCase.Skipped.Message = failureMessage(specSummary.Failure)
|
||||
}
|
||||
}
|
||||
testCase.Time = specSummary.RunTime.Seconds()
|
||||
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun
|
||||
reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000
|
||||
reporter.suite.Failures = summary.NumberOfFailedSpecs
|
||||
reporter.suite.Errors = 0
|
||||
if reporter.ReporterConfig.ReportFile != "" {
|
||||
reporter.filename = reporter.ReporterConfig.ReportFile
|
||||
fmt.Printf("\nJUnit path was configured: %s\n", reporter.filename)
|
||||
}
|
||||
filePath, _ := filepath.Abs(reporter.filename)
|
||||
dirPath := filepath.Dir(filePath)
|
||||
err := os.MkdirAll(dirPath, os.ModePerm)
|
||||
if err != nil {
|
||||
fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error())
|
||||
}
|
||||
file, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error())
|
||||
}
|
||||
defer file.Close()
|
||||
file.WriteString(xml.Header)
|
||||
encoder := xml.NewEncoder(file)
|
||||
encoder.Indent(" ", " ")
|
||||
err = encoder.Encode(reporter.suite)
|
||||
if err == nil {
|
||||
fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "\nFailed to generate JUnit report data:\n\t%s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
|
||||
switch state {
|
||||
case types.SpecStateFailed:
|
||||
return "Failure"
|
||||
case types.SpecStateTimedOut:
|
||||
return "Timeout"
|
||||
case types.SpecStatePanicked:
|
||||
return "Panic"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
15  vendor/github.com/onsi/ginkgo/reporters/reporter.go (generated, vendored)
@@ -1,15 +0,0 @@
package reporters

import (
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

type Reporter interface {
	SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
	BeforeSuiteDidRun(setupSummary *types.SetupSummary)
	SpecWillRun(specSummary *types.SpecSummary)
	SpecDidComplete(specSummary *types.SpecSummary)
	AfterSuiteDidRun(setupSummary *types.SetupSummary)
	SpecSuiteDidEnd(summary *types.SuiteSummary)
}
64  vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go (generated, vendored)
@@ -1,64 +0,0 @@
package stenographer

import (
	"fmt"
	"strings"
)

func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
	var out string

	if len(args) > 0 {
		out = fmt.Sprintf(format, args...)
	} else {
		out = format
	}

	if s.color {
		return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
	} else {
		return out
	}
}

func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
	fmt.Fprintln(s.w, text)
	fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text)))
}

func (s *consoleStenographer) printNewLine() {
	fmt.Fprintln(s.w, "")
}

func (s *consoleStenographer) printDelimiter() {
	fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
}

func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
	fmt.Fprint(s.w, s.indent(indentation, format, args...))
}

func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
	fmt.Fprintln(s.w, s.indent(indentation, format, args...))
}

func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
	var text string

	if len(args) > 0 {
		text = fmt.Sprintf(format, args...)
	} else {
		text = format
	}

	stringArray := strings.Split(text, "\n")
	padding := ""
	if indentation >= 0 {
		padding = strings.Repeat(" ", indentation)
	}
	for i, s := range stringArray {
		stringArray[i] = fmt.Sprintf("%s%s", padding, s)
	}

	return strings.Join(stringArray, "\n")
}
142  vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go (generated, vendored)
@@ -1,142 +0,0 @@
|
||||
package stenographer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
|
||||
return FakeStenographerCall{
|
||||
Method: method,
|
||||
Args: args,
|
||||
}
|
||||
}
|
||||
|
||||
type FakeStenographer struct {
|
||||
calls []FakeStenographerCall
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
type FakeStenographerCall struct {
|
||||
Method string
|
||||
Args []interface{}
|
||||
}
|
||||
|
||||
func NewFakeStenographer() *FakeStenographer {
|
||||
stenographer := &FakeStenographer{
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
stenographer.Reset()
|
||||
return stenographer
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
return stenographer.calls
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) Reset() {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
stenographer.calls = make([]FakeStenographerCall, 0)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
results := make([]FakeStenographerCall, 0)
|
||||
for _, call := range stenographer.calls {
|
||||
if call.Method == method {
|
||||
results = append(results, call)
|
||||
}
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
|
||||
stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
|
||||
stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
|
||||
stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
|
||||
stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
|
||||
stenographer.registerCall("AnnounceSpecWillRun", spec)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
|
||||
}
|
||||
func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
|
||||
stenographer.registerCall("AnnounceCapturedOutput", output)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
|
||||
stenographer.registerCall("AnnounceSuccessfulSpec", spec)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSuccessfulSlowSpec", spec, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSuccessfulMeasurement", spec, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
|
||||
stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
|
||||
stenographer.registerCall("SummarizeFailures", summaries)
|
||||
}
|
572  vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go (generated, vendored)
@@ -1,572 +0,0 @@
|
||||
/*
|
||||
The stenographer is used by Ginkgo's reporters to generate output.
|
||||
|
||||
Move along, nothing to see here.
|
||||
*/
|
||||
|
||||
package stenographer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
const defaultStyle = "\x1b[0m"
|
||||
const boldStyle = "\x1b[1m"
|
||||
const redColor = "\x1b[91m"
|
||||
const greenColor = "\x1b[32m"
|
||||
const yellowColor = "\x1b[33m"
|
||||
const cyanColor = "\x1b[36m"
|
||||
const grayColor = "\x1b[90m"
|
||||
const lightGrayColor = "\x1b[37m"
|
||||
|
||||
type cursorStateType int
|
||||
|
||||
const (
|
||||
cursorStateTop cursorStateType = iota
|
||||
cursorStateStreaming
|
||||
cursorStateMidBlock
|
||||
cursorStateEndBlock
|
||||
)
|
||||
|
||||
type Stenographer interface {
|
||||
AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
|
||||
AnnounceAggregatedParallelRun(nodes int, succinct bool)
|
||||
AnnounceParallelRun(node int, nodes int, succinct bool)
|
||||
AnnounceTotalNumberOfSpecs(total int, succinct bool)
|
||||
AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
|
||||
AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
|
||||
|
||||
AnnounceSpecWillRun(spec *types.SpecSummary)
|
||||
AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
|
||||
AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
|
||||
|
||||
AnnounceCapturedOutput(output string)
|
||||
|
||||
AnnounceSuccessfulSpec(spec *types.SpecSummary)
|
||||
AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool)
|
||||
AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool)
|
||||
|
||||
AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
|
||||
AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
|
||||
AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
|
||||
SummarizeFailures(summaries []*types.SpecSummary)
|
||||
}
|
||||
|
||||
func New(color bool, enableFlakes bool, writer io.Writer) Stenographer {
|
||||
denoter := "•"
|
||||
if runtime.GOOS == "windows" {
|
||||
denoter = "+"
|
||||
}
|
||||
return &consoleStenographer{
|
||||
color: color,
|
||||
denoter: denoter,
|
||||
cursorState: cursorStateTop,
|
||||
enableFlakes: enableFlakes,
|
||||
w: writer,
|
||||
}
|
||||
}
|
||||
|
||||
type consoleStenographer struct {
|
||||
color bool
|
||||
denoter string
|
||||
cursorState cursorStateType
|
||||
enableFlakes bool
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
var alternatingColors = []string{defaultStyle, grayColor}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
|
||||
return
|
||||
}
|
||||
s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
|
||||
s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
|
||||
if randomizingAll {
|
||||
s.print(0, " - Will randomize all specs")
|
||||
}
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- node #%d ", node)
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Parallel test node %s/%s.",
|
||||
s.colorize(boldStyle, "%d", node),
|
||||
s.colorize(boldStyle, "%d", nodes),
|
||||
)
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- %d nodes ", nodes)
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Running in parallel across %s nodes",
|
||||
s.colorize(boldStyle, "%d", nodes),
|
||||
)
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- %d/%d specs ", specsToRun, total)
|
||||
s.stream()
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Will run %s of %s specs",
|
||||
s.colorize(boldStyle, "%d", specsToRun),
|
||||
s.colorize(boldStyle, "%d", total),
|
||||
)
|
||||
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- %d specs ", total)
|
||||
s.stream()
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Will run %s specs",
|
||||
s.colorize(boldStyle, "%d", total),
|
||||
)
|
||||
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
|
||||
if succinct && summary.SuiteSucceeded {
|
||||
s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
|
||||
return
|
||||
}
|
||||
s.printNewLine()
|
||||
color := greenColor
|
||||
if !summary.SuiteSucceeded {
|
||||
color = redColor
|
||||
}
|
||||
s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))
|
||||
|
||||
status := ""
|
||||
if summary.SuiteSucceeded {
|
||||
status = s.colorize(boldStyle+greenColor, "SUCCESS!")
|
||||
} else {
|
||||
status = s.colorize(boldStyle+redColor, "FAIL!")
|
||||
}
|
||||
|
||||
flakes := ""
|
||||
if s.enableFlakes {
|
||||
flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs)
|
||||
}
|
||||
|
||||
s.print(0,
|
||||
"%s -- %s | %s | %s | %s\n",
|
||||
status,
|
||||
s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
|
||||
s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes,
|
||||
s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
|
||||
s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
|
||||
s.startBlock()
|
||||
for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
|
||||
s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
|
||||
}
|
||||
|
||||
indentation := 0
|
||||
if len(spec.ComponentTexts) > 2 {
|
||||
indentation = 1
|
||||
s.printNewLine()
|
||||
}
|
||||
index := len(spec.ComponentTexts) - 1
|
||||
s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
|
||||
s.printNewLine()
|
||||
s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
|
||||
s.printNewLine()
|
||||
s.midBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.startBlock()
|
||||
var message string
|
||||
switch summary.State {
|
||||
case types.SpecStateFailed:
|
||||
message = "Failure"
|
||||
case types.SpecStatePanicked:
|
||||
message = "Panic"
|
||||
case types.SpecStateTimedOut:
|
||||
message = "Timeout"
|
||||
}
|
||||
|
||||
s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)
|
||||
|
||||
s.printNewLine()
|
||||
s.printFailure(indentation, summary.State, summary.Failure, fullTrace)
|
||||
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
|
||||
if output == "" {
|
||||
return
|
||||
}
|
||||
|
||||
s.startBlock()
|
||||
s.println(0, output)
|
||||
s.midBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
|
||||
s.print(0, s.colorize(greenColor, s.denoter))
|
||||
s.stream()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
|
||||
"",
|
||||
spec,
|
||||
succinct,
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
|
||||
s.measurementReport(spec, succinct),
|
||||
spec,
|
||||
succinct,
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
|
||||
if noisy {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(yellowColor, "P [PENDING]"),
|
||||
"",
|
||||
spec,
|
||||
false,
|
||||
)
|
||||
} else {
|
||||
s.print(0, s.colorize(yellowColor, "P"))
|
||||
s.stream()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
// Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
|
||||
if succinct || spec.Failure == (types.SpecFailure{}) {
|
||||
s.print(0, s.colorize(cyanColor, "S"))
|
||||
s.stream()
|
||||
} else {
|
||||
s.startBlock()
|
||||
s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
|
||||
|
||||
s.printNewLine()
|
||||
s.printSkip(indentation, spec.Failure)
|
||||
s.endBlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
|
||||
failingSpecs := []*types.SpecSummary{}
|
||||
|
||||
for _, summary := range summaries {
|
||||
if summary.HasFailureState() {
|
||||
failingSpecs = append(failingSpecs, summary)
|
||||
}
|
||||
}
|
||||
|
||||
if len(failingSpecs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
s.printNewLine()
|
||||
s.printNewLine()
|
||||
plural := "s"
|
||||
if len(failingSpecs) == 1 {
|
||||
plural = ""
|
||||
}
|
||||
s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
|
||||
for _, summary := range failingSpecs {
|
||||
s.printNewLine()
|
||||
if summary.HasFailureState() {
|
||||
if summary.TimedOut() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
|
||||
} else if summary.Panicked() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
|
||||
} else if summary.Failed() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
|
||||
}
|
||||
s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
|
||||
s.printNewLine()
|
||||
s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) startBlock() {
|
||||
if s.cursorState == cursorStateStreaming {
|
||||
s.printNewLine()
|
||||
s.printDelimiter()
|
||||
} else if s.cursorState == cursorStateMidBlock {
|
||||
s.printNewLine()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) midBlock() {
|
||||
s.cursorState = cursorStateMidBlock
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) endBlock() {
|
||||
s.printDelimiter()
|
||||
s.cursorState = cursorStateEndBlock
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) stream() {
|
||||
s.cursorState = cursorStateStreaming
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
|
||||
s.startBlock()
|
||||
s.println(0, header)
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)
|
||||
|
||||
if message != "" {
|
||||
s.printNewLine()
|
||||
s.println(indentation, message)
|
||||
}
|
||||
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.startBlock()
|
||||
s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
|
||||
|
||||
s.printNewLine()
|
||||
s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
|
||||
switch failedComponentType {
|
||||
case types.SpecComponentTypeBeforeSuite:
|
||||
return " in Suite Setup (BeforeSuite)"
|
||||
case types.SpecComponentTypeAfterSuite:
|
||||
return " in Suite Teardown (AfterSuite)"
|
||||
case types.SpecComponentTypeBeforeEach:
|
||||
return " in Spec Setup (BeforeEach)"
|
||||
case types.SpecComponentTypeJustBeforeEach:
|
||||
return " in Spec Setup (JustBeforeEach)"
|
||||
case types.SpecComponentTypeAfterEach:
|
||||
return " in Spec Teardown (AfterEach)"
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
|
||||
s.println(indentation, s.colorize(cyanColor, spec.Message))
|
||||
s.printNewLine()
|
||||
s.println(indentation, spec.Location.String())
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
|
||||
if state == types.SpecStatePanicked {
|
||||
s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
|
||||
s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
|
||||
s.println(indentation, failure.Location.String())
|
||||
s.printNewLine()
|
||||
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
|
||||
s.println(indentation, failure.Location.FullStackTrace)
|
||||
} else {
|
||||
s.println(indentation, s.colorize(redColor, failure.Message))
|
||||
s.printNewLine()
|
||||
s.println(indentation, failure.Location.String())
|
||||
if fullTrace {
|
||||
s.printNewLine()
|
||||
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
|
||||
s.println(indentation, failure.Location.FullStackTrace)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
|
||||
startIndex := 1
|
||||
indentation := 0
|
||||
|
||||
if len(componentTexts) == 1 {
|
||||
startIndex = 0
|
||||
}
|
||||
|
||||
for i := startIndex; i < len(componentTexts); i++ {
|
||||
if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
|
||||
color := redColor
|
||||
if state == types.SpecStateSkipped {
|
||||
color = cyanColor
|
||||
}
|
||||
blockType := ""
|
||||
switch failedComponentType {
|
||||
case types.SpecComponentTypeBeforeSuite:
|
||||
blockType = "BeforeSuite"
|
||||
case types.SpecComponentTypeAfterSuite:
|
||||
blockType = "AfterSuite"
|
||||
case types.SpecComponentTypeBeforeEach:
|
||||
blockType = "BeforeEach"
|
||||
case types.SpecComponentTypeJustBeforeEach:
|
||||
blockType = "JustBeforeEach"
|
||||
case types.SpecComponentTypeAfterEach:
|
||||
blockType = "AfterEach"
|
||||
case types.SpecComponentTypeIt:
|
||||
blockType = "It"
|
||||
case types.SpecComponentTypeMeasure:
|
||||
blockType = "Measurement"
|
||||
}
|
||||
if succinct {
|
||||
s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
|
||||
} else {
|
||||
s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
|
||||
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
|
||||
}
|
||||
} else {
|
||||
if succinct {
|
||||
s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
|
||||
} else {
|
||||
s.println(indentation, componentTexts[i])
|
||||
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
|
||||
}
|
||||
}
|
||||
indentation++
|
||||
}
|
||||
|
||||
return indentation
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
|
||||
indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)
|
||||
|
||||
if succinct {
|
||||
if len(componentTexts) > 0 {
|
||||
s.printNewLine()
|
||||
s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
|
||||
}
|
||||
s.printNewLine()
|
||||
indentation = 1
|
||||
} else {
|
||||
indentation--
|
||||
}
|
||||
|
||||
return indentation
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
|
||||
orderedKeys := make([]string, len(measurements))
|
||||
for key, measurement := range measurements {
|
||||
orderedKeys[measurement.Order] = key
|
||||
}
|
||||
return orderedKeys
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
|
||||
if len(spec.Measurements) == 0 {
|
||||
return "Found no measurements"
|
||||
}
|
||||
|
||||
message := []string{}
|
||||
orderedKeys := s.orderedMeasurementKeys(spec.Measurements)
|
||||
|
||||
if succinct {
|
||||
message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
|
||||
for _, key := range orderedKeys {
|
||||
measurement := spec.Measurements[key]
|
||||
message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
|
||||
s.colorize(boldStyle, "%s", measurement.Name),
|
||||
measurement.SmallestLabel,
|
||||
s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
|
||||
measurement.Units,
|
||||
measurement.AverageLabel,
|
||||
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
|
||||
measurement.Units,
|
||||
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
|
||||
measurement.Units,
|
||||
measurement.LargestLabel,
|
||||
s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
|
||||
measurement.Units,
|
||||
))
|
||||
}
|
||||
} else {
|
||||
message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
|
||||
for _, key := range orderedKeys {
|
||||
measurement := spec.Measurements[key]
|
||||
info := ""
|
||||
if measurement.Info != nil {
|
||||
message = append(message, fmt.Sprintf("%v", measurement.Info))
|
||||
}
|
||||
|
||||
message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s",
|
||||
s.colorize(boldStyle, "%s", measurement.Name),
|
||||
info,
|
||||
measurement.SmallestLabel,
|
||||
s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
|
||||
measurement.Units,
|
||||
measurement.LargestLabel,
|
||||
s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
|
||||
measurement.Units,
|
||||
measurement.AverageLabel,
|
||||
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
|
||||
measurement.Units,
|
||||
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
|
||||
measurement.Units,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(message, "\n")
|
||||
}
|
43
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
generated
vendored
@ -1,43 +0,0 @@
|
||||
# go-colorable
|
||||
|
||||
Colorable writer for windows.
|
||||
|
||||
For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.)
|
||||
This package is possible to handle escape sequence for ansi color on windows.
|
||||
|
||||
## Too Bad!
|
||||
|
||||

|
||||
|
||||
|
||||
## So Good!
|
||||
|
||||

|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
|
||||
logrus.SetOutput(colorable.NewColorableStdout())
|
||||
|
||||
logrus.Info("succeeded")
|
||||
logrus.Warn("not correct")
|
||||
logrus.Error("something error")
|
||||
logrus.Fatal("panic")
|
||||
```
|
||||
|
||||
You can compile above code on non-windows OSs.
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
$ go get github.com/mattn/go-colorable
|
||||
```
|
||||
|
||||
# License
|
||||
|
||||
MIT
|
||||
|
||||
# Author
|
||||
|
||||
Yasuhiro Matsumoto (a.k.a mattn)
|
@ -1,24 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
package colorable
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
func NewColorable(file *os.File) io.Writer {
|
||||
if file == nil {
|
||||
panic("nil passed instead of *os.File to NewColorable()")
|
||||
}
|
||||
|
||||
return file
|
||||
}
|
||||
|
||||
func NewColorableStdout() io.Writer {
|
||||
return os.Stdout
|
||||
}
|
||||
|
||||
func NewColorableStderr() io.Writer {
|
||||
return os.Stderr
|
||||
}
|
@ -1,57 +0,0 @@
|
||||
package colorable
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type NonColorable struct {
|
||||
out io.Writer
|
||||
lastbuf bytes.Buffer
|
||||
}
|
||||
|
||||
func NewNonColorable(w io.Writer) io.Writer {
|
||||
return &NonColorable{out: w}
|
||||
}
|
||||
|
||||
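// Write strips ANSI escape sequences (ESC '[' ... final letter or '@') from data
// and forwards only the remaining plain text to the wrapped writer.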
func (w *NonColorable) Write(data []byte) (n int, err error) {
|
||||
er := bytes.NewBuffer(data)
|
||||
loop:
|
||||
for {
|
||||
c1, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
if c1 != 0x1b {
|
||||
fmt.Fprint(w.out, string(c1))
|
||||
continue
|
||||
}
|
||||
c2, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
break loop
|
||||
}
|
||||
if c2 != 0x5b {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
continue
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
for {
|
||||
c, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
w.lastbuf.Write(buf.Bytes())
|
||||
break loop
|
||||
}
|
||||
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
|
||||
break
|
||||
}
|
||||
buf.Write([]byte(string(c)))
|
||||
}
|
||||
}
|
||||
return len(data) - w.lastbuf.Len(), nil
|
||||
}
|
9
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE
generated
vendored
@ -1,9 +0,0 @@
|
||||
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
|
||||
|
||||
MIT License (Expat)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
37
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
generated
vendored
@ -1,37 +0,0 @@
|
||||
# go-isatty
|
||||
|
||||
isatty for golang
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mattn/go-isatty"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if isatty.IsTerminal(os.Stdout.Fd()) {
|
||||
fmt.Println("Is Terminal")
|
||||
} else {
|
||||
fmt.Println("Is Not Terminal")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
$ go get github.com/mattn/go-isatty
|
||||
```
|
||||
|
||||
# License
|
||||
|
||||
MIT
|
||||
|
||||
# Author
|
||||
|
||||
Yasuhiro Matsumoto (a.k.a mattn)
|
2
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
generated
vendored
@ -1,2 +0,0 @@
|
||||
// Package isatty implements interface to isatty
|
||||
package isatty
|
@ -1,9 +0,0 @@
|
||||
// +build appengine
|
||||
|
||||
package isatty
|
||||
|
||||
// IsTerminal returns true if the file descriptor is terminal which
|
||||
// is always false on on appengine classic which is a sandboxed PaaS.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
return false
|
||||
}
|
18
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
generated
vendored
@ -1,18 +0,0 @@
|
||||
// +build darwin freebsd openbsd netbsd
|
||||
// +build !appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const ioctlReadTermios = syscall.TIOCGETA
|
||||
|
||||
// IsTerminal return true if the file descriptor is terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var termios syscall.Termios
|
||||
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
|
||||
return err == 0
|
||||
}
|
18
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
generated
vendored
@ -1,18 +0,0 @@
|
||||
// +build linux
|
||||
// +build !appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const ioctlReadTermios = syscall.TCGETS
|
||||
|
||||
// IsTerminal return true if the file descriptor is terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var termios syscall.Termios
|
||||
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
|
||||
return err == 0
|
||||
}
|
16
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
generated
vendored
@ -1,16 +0,0 @@
|
||||
// +build solaris
|
||||
// +build !appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// IsTerminal returns true if the given file descriptor is a terminal.
|
||||
// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var termio unix.Termio
|
||||
err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
|
||||
return err == nil
|
||||
}
|
19
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
generated
vendored
@ -1,19 +0,0 @@
|
||||
// +build windows
|
||||
// +build !appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
|
||||
|
||||
// IsTerminal return true if the file descriptor is terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var st uint32
|
||||
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
|
||||
return r != 0 && e == 0
|
||||
}
|
106
vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
generated
vendored
@ -1,106 +0,0 @@
|
||||
/*
|
||||
|
||||
TeamCity Reporter for Ginkgo
|
||||
|
||||
Makes use of TeamCity's support for Service Messages
|
||||
http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
|
||||
*/
|
||||
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
const (
|
||||
messageId = "##teamcity"
|
||||
)
|
||||
|
||||
type TeamCityReporter struct {
|
||||
writer io.Writer
|
||||
testSuiteName string
|
||||
ReporterConfig config.DefaultReporterConfigType
|
||||
}
|
||||
|
||||
func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
|
||||
return &TeamCityReporter{
|
||||
writer: writer,
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
reporter.testSuiteName = escape(summary.SuiteDescription)
|
||||
fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']\n", messageId, reporter.testSuiteName)
|
||||
}
|
||||
|
||||
func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
reporter.handleSetupSummary("BeforeSuite", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
reporter.handleSetupSummary("AfterSuite", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
testName := escape(name)
|
||||
fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName)
|
||||
message := reporter.failureMessage(setupSummary.Failure)
|
||||
details := reporter.failureDetails(setupSummary.Failure)
|
||||
fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details)
|
||||
durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000
|
||||
fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
|
||||
fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName)
|
||||
}
|
||||
|
||||
func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
|
||||
|
||||
if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
|
||||
details := escape(specSummary.CapturedOutput)
|
||||
fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']\n", messageId, testName, details)
|
||||
}
|
||||
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
|
||||
message := reporter.failureMessage(specSummary.Failure)
|
||||
details := reporter.failureDetails(specSummary.Failure)
|
||||
fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details)
|
||||
}
|
||||
if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
|
||||
fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']\n", messageId, testName)
|
||||
}
|
||||
|
||||
durationInMilliseconds := specSummary.RunTime.Seconds() * 1000
|
||||
fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds)
|
||||
}
|
||||
|
||||
func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']\n", messageId, reporter.testSuiteName)
|
||||
}
|
||||
|
||||
func (reporter *TeamCityReporter) failureMessage(failure types.SpecFailure) string {
|
||||
return escape(failure.ComponentCodeLocation.String())
|
||||
}
|
||||
|
||||
func (reporter *TeamCityReporter) failureDetails(failure types.SpecFailure) string {
|
||||
return escape(fmt.Sprintf("%s\n%s", failure.Message, failure.Location.String()))
|
||||
}
|
||||
|
||||
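// escape applies TeamCity service-message escaping: '|' is doubled first (it introduces
// every escape), then quotes, newlines, carriage returns, and square brackets are prefixed with '|'.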
func escape(output string) string {
|
||||
output = strings.Replace(output, "|", "||", -1)
|
||||
output = strings.Replace(output, "'", "|'", -1)
|
||||
output = strings.Replace(output, "\n", "|n", -1)
|
||||
output = strings.Replace(output, "\r", "|r", -1)
|
||||
output = strings.Replace(output, "[", "|[", -1)
|
||||
output = strings.Replace(output, "]", "|]", -1)
|
||||
return output
|
||||
}
|
15
vendor/github.com/onsi/ginkgo/types/code_location.go
generated
vendored
@ -1,15 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type CodeLocation struct {
|
||||
FileName string
|
||||
LineNumber int
|
||||
FullStackTrace string
|
||||
}
|
||||
|
||||
func (codeLocation CodeLocation) String() string {
|
||||
return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
|
||||
}
|
30
vendor/github.com/onsi/ginkgo/types/synchronization.go
generated
vendored
@ -1,30 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
type RemoteBeforeSuiteState int
|
||||
|
||||
const (
|
||||
RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota
|
||||
|
||||
RemoteBeforeSuiteStatePending
|
||||
RemoteBeforeSuiteStatePassed
|
||||
RemoteBeforeSuiteStateFailed
|
||||
RemoteBeforeSuiteStateDisappeared
|
||||
)
|
||||
|
||||
type RemoteBeforeSuiteData struct {
|
||||
Data []byte
|
||||
State RemoteBeforeSuiteState
|
||||
}
|
||||
|
||||
func (r RemoteBeforeSuiteData) ToJSON() []byte {
|
||||
data, _ := json.Marshal(r)
|
||||
return data
|
||||
}
|
||||
|
||||
type RemoteAfterSuiteData struct {
|
||||
CanRun bool
|
||||
}
|
174
vendor/github.com/onsi/ginkgo/types/types.go
generated
vendored
@ -1,174 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const GINKGO_FOCUS_EXIT_CODE = 197
|
||||
|
||||
/*
|
||||
SuiteSummary represents the a summary of the test suite and is passed to both
|
||||
Reporter.SpecSuiteWillBegin
|
||||
Reporter.SpecSuiteDidEnd
|
||||
|
||||
this is unfortunate as these two methods should receive different objects. When running in parallel
|
||||
each node does not deterministically know how many specs it will end up running.
|
||||
|
||||
Unfortunately making such a change would break backward compatibility.
|
||||
|
||||
Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields
|
||||
with -1.
|
||||
*/
|
||||
type SuiteSummary struct {
|
||||
SuiteDescription string
|
||||
SuiteSucceeded bool
|
||||
SuiteID string
|
||||
|
||||
NumberOfSpecsBeforeParallelization int
|
||||
NumberOfTotalSpecs int
|
||||
NumberOfSpecsThatWillBeRun int
|
||||
NumberOfPendingSpecs int
|
||||
NumberOfSkippedSpecs int
|
||||
NumberOfPassedSpecs int
|
||||
NumberOfFailedSpecs int
|
||||
// Flaked specs are those that failed initially, but then passed on a
|
||||
// subsequent try.
|
||||
NumberOfFlakedSpecs int
|
||||
RunTime time.Duration
|
||||
}
|
||||
|
||||
type SpecSummary struct {
|
||||
ComponentTexts []string
|
||||
ComponentCodeLocations []CodeLocation
|
||||
|
||||
State SpecState
|
||||
RunTime time.Duration
|
||||
Failure SpecFailure
|
||||
IsMeasurement bool
|
||||
NumberOfSamples int
|
||||
Measurements map[string]*SpecMeasurement
|
||||
|
||||
CapturedOutput string
|
||||
SuiteID string
|
||||
}
|
||||
|
||||
func (s SpecSummary) HasFailureState() bool {
|
||||
return s.State.IsFailure()
|
||||
}
|
||||
|
||||
func (s SpecSummary) TimedOut() bool {
|
||||
return s.State == SpecStateTimedOut
|
||||
}
|
||||
|
||||
func (s SpecSummary) Panicked() bool {
|
||||
return s.State == SpecStatePanicked
|
||||
}
|
||||
|
||||
func (s SpecSummary) Failed() bool {
|
||||
return s.State == SpecStateFailed
|
||||
}
|
||||
|
||||
func (s SpecSummary) Passed() bool {
|
||||
return s.State == SpecStatePassed
|
||||
}
|
||||
|
||||
func (s SpecSummary) Skipped() bool {
|
||||
return s.State == SpecStateSkipped
|
||||
}
|
||||
|
||||
func (s SpecSummary) Pending() bool {
|
||||
return s.State == SpecStatePending
|
||||
}
|
||||
|
||||
type SetupSummary struct {
|
||||
ComponentType SpecComponentType
|
||||
CodeLocation CodeLocation
|
||||
|
||||
State SpecState
|
||||
RunTime time.Duration
|
||||
Failure SpecFailure
|
||||
|
||||
CapturedOutput string
|
||||
SuiteID string
|
||||
}
|
||||
|
||||
type SpecFailure struct {
|
||||
Message string
|
||||
Location CodeLocation
|
||||
ForwardedPanic string
|
||||
|
||||
ComponentIndex int
|
||||
ComponentType SpecComponentType
|
||||
ComponentCodeLocation CodeLocation
|
||||
}
|
||||
|
||||
type SpecMeasurement struct {
|
||||
Name string
|
||||
Info interface{}
|
||||
Order int
|
||||
|
||||
Results []float64
|
||||
|
||||
Smallest float64
|
||||
Largest float64
|
||||
Average float64
|
||||
StdDeviation float64
|
||||
|
||||
SmallestLabel string
|
||||
LargestLabel string
|
||||
AverageLabel string
|
||||
Units string
|
||||
Precision int
|
||||
}
|
||||
|
||||
func (s SpecMeasurement) PrecisionFmt() string {
|
||||
if s.Precision == 0 {
|
||||
return "%f"
|
||||
}
|
||||
|
||||
str := strconv.Itoa(s.Precision)
|
||||
|
||||
return "%." + str + "f"
|
||||
}
|
||||
|
||||
type SpecState uint
|
||||
|
||||
const (
|
||||
SpecStateInvalid SpecState = iota
|
||||
|
||||
SpecStatePending
|
||||
SpecStateSkipped
|
||||
SpecStatePassed
|
||||
SpecStateFailed
|
||||
SpecStatePanicked
|
||||
SpecStateTimedOut
|
||||
)
|
||||
|
||||
func (state SpecState) IsFailure() bool {
|
||||
return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed
|
||||
}
|
||||
|
||||
type SpecComponentType uint
|
||||
|
||||
const (
|
||||
SpecComponentTypeInvalid SpecComponentType = iota
|
||||
|
||||
SpecComponentTypeContainer
|
||||
SpecComponentTypeBeforeSuite
|
||||
SpecComponentTypeAfterSuite
|
||||
SpecComponentTypeBeforeEach
|
||||
SpecComponentTypeJustBeforeEach
|
||||
SpecComponentTypeJustAfterEach
|
||||
SpecComponentTypeAfterEach
|
||||
SpecComponentTypeIt
|
||||
SpecComponentTypeMeasure
|
||||
)
|
||||
|
||||
type FlagType uint
|
||||
|
||||
const (
|
||||
FlagTypeNone FlagType = iota
|
||||
FlagTypeFocused
|
||||
FlagTypePending
|
||||
)
|
@ -1,5 +1,5 @@
|
||||
.DS_Store
|
||||
TODO
|
||||
TODO.md
|
||||
tmp/**/*
|
||||
*.coverprofile
|
||||
.vscode
|
231
vendor/github.com/onsi/ginkgo/CHANGELOG.md → vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
generated
vendored
@ -1,9 +1,223 @@
|
||||
## 2.5.1
|
||||
|
||||
### Fixes
|
||||
- skipped tests only show as 'S' when running with -v [3ab38ae]
|
||||
- Fix typo in docs/index.md (#1082) [55fc58d]
|
||||
- Fix typo in docs/index.md (#1081) [8a14f1f]
|
||||
- Fix link notation in docs/index.md (#1080) [2669612]
|
||||
- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]
|
||||
|
||||
### Maintenance
|
||||
- chore: Included githubactions in the dependabot config (#976) [baea341]
|
||||
- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]
|
||||
|
||||
## 2.5.0
|
||||
|
||||
### Ginkgo output now includes a timeline-view of the spec
|
||||
|
||||
This commit changes Ginkgo's default output. Spec details are now
|
||||
presented as a **timeline** that includes events that occur during the spec
|
||||
lifecycle interleaved with any GinkgoWriter content. This makes it much easier
|
||||
to understand the flow of a spec and where a given failure occurs.
|
||||
|
||||
The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags
|
||||
and the SuppressProgressReporting decorator have all been deprecated. Instead
|
||||
the existing -v and -vv flags better capture the level of verbosity to display. However,
|
||||
a new --show-node-events flag is added to include node `> Enter` and `< Exit` events
|
||||
in the spec timeline.
|
||||
|
||||
In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit
|
||||
reports can be configured and generated using
|
||||
`GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)`
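
As a hedged illustration of the function named above (not part of the upstream changelog), a minimal suite-level sketch; the suite name and output path are placeholders, and the `error` return is assumed to mirror `GenerateJUnitReport`:

```go
package books_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
	. "github.com/onsi/gomega"
)

func TestBooks(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Books Suite")
}

// ReportAfterSuite runs once, after the whole suite, and receives the full report.
var _ = ReportAfterSuite("custom junit report", func(report types.Report) {
	// A zero-value JunitReportConfig keeps the default behaviour; tweak its fields
	// (see the reporters package of the release you vendor) to customize the output.
	err := reporters.GenerateJUnitReportWithConfig(report, "junit.custom.xml", reporters.JunitReportConfig{})
	Expect(err).NotTo(HaveOccurred())
})
```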
|
||||
|
||||
Code should continue to work unchanged with this version of Ginkgo - however if you have tooling that
|
||||
was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, use Ginkgo's JSON format
|
||||
to build tooling on top of as it has stronger guarantees to be stable from version to version.
|
||||
|
||||
### Features
|
||||
- Provide details about which timeout expired [0f2fa27]
|
||||
|
||||
### Fixes
|
||||
- Add Support Policy to docs [c70867a]
|
||||
|
||||
### Maintenance
|
||||
- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2]
|
||||
|
||||
## 2.4.0
|
||||
|
||||
### Features
|
||||
|
||||
- DeferCleanup supports functions with multiple-return values [5e33c75]
|
||||
- Add GinkgoLogr (#1067) [bf78c28]
|
||||
- Introduction of 'MustPassRepeatedly' decorator (#1051) [047c02f]
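
A minimal sketch of the `MustPassRepeatedly` decorator introduced above; `flakyCall` is a hypothetical stand-in for the operation under test:

```go
package flaky_test

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// flakyCall stands in for whatever intermittently failing operation is under test.
func flakyCall() error { return nil }

// MustPassRepeatedly(3) asks Ginkgo to run this spec three times and to report
// success only if every attempt passes.
var _ = It("holds on every attempt", func() {
	Expect(flakyCall()).To(Succeed())
}, MustPassRepeatedly(3))
```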
|
||||
|
||||
### Fixes
|
||||
- correcting some typos (#1064) [1403d3c]
|
||||
- fix flaky internal_integration interupt specs [2105ba3]
|
||||
- Correct busted link in README [be6b5b9]
|
||||
|
||||
### Maintenance
|
||||
- Bump actions/checkout from 2 to 3 (#1062) [8a2f483]
|
||||
- Bump golang.org/x/tools from 0.1.12 to 0.2.0 (#1065) [529c4e8]
|
||||
- Bump github/codeql-action from 1 to 2 (#1061) [da09146]
|
||||
- Bump actions/setup-go from 2 to 3 (#1060) [918040d]
|
||||
- Bump github.com/onsi/gomega from 1.22.0 to 1.22.1 (#1053) [2098e4d]
|
||||
- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#1066) [1d74122]
|
||||
- Add GHA to dependabot config [4442772]
|
||||
|
||||
## 2.3.1
|
||||
|
||||
## Fixes
|
||||
Several users were invoking `ginkgo` by installing the latest version of the cli via `go install github.com/onsi/ginkgo/v2/ginkgo@latest`. When 2.3.0 was released this resulted in an influx of issues as CI systems failed due to a change in the internal contract between the Ginkgo CLI and the Ginkgo library. Ginkgo only supports running the same version of the library as the cli (which is why both are packaged in the same repository).
|
||||
|
||||
With this patch release, the ginkgo CLI can now identify a version mismatch and emit a helpful error message.
|
||||
|
||||
- Ginkgo cli can identify version mismatches and emit a helpful error message [bc4ae2f]
|
||||
- further emphasize that a version match is required when running Ginkgo on CI and/or locally [2691dd8]
|
||||
|
||||
### Maintenance
|
||||
- bump gomega to v1.22.0 [822a937]
|
||||
|
||||
## 2.3.0
|
||||
|
||||
### Interruptible Nodes and Timeouts
|
||||
|
||||
Ginkgo now supports per-node and per-spec timeouts on interruptible nodes. Check out the [documentation for all the details](https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes) but the gist is you can now write specs like this:
|
||||
|
||||
```go
It("is interruptible", func(ctx SpecContext) { // or context.Context instead of SpecContext, both are valid.
	// do things until `ctx.Done()` is closed, for example:
	req, err := http.NewRequestWithContext(ctx, "POST", "/build-widgets", nil)
	Expect(err).NotTo(HaveOccurred())
	_, err = http.DefaultClient.Do(req)
	Expect(err).NotTo(HaveOccurred())

	Eventually(client.WidgetCount).WithContext(ctx).Should(Equal(17))
}, NodeTimeout(time.Second*20), GracePeriod(5*time.Second))
```
|
||||
|
||||
and have Ginkgo ensure that the node completes before the timeout elapses. If it does elapse, or if an external interrupt is received (e.g. `^C`) then Ginkgo will cancel the context and wait for the Grace Period for the node to exit before proceeding with any cleanup nodes associated with the spec. The `ctx` provided by Ginkgo can also be passed down to Gomega's `Eventually` to have all assertions within the node governed by a single deadline.
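
A hedged sketch of the same mechanism applied to a cleanup node, assuming the teardown work can watch `ctx`:

```go
package cleanup_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
)

// Cleanup nodes are interruptible too: give the teardown its own deadline and
// stop work once ctx is cancelled (after the timeout or an external interrupt).
var _ = AfterEach(func(ctx SpecContext) {
	select {
	case <-ctx.Done(): // timed out or interrupted; stop and let the Grace Period handling take over
	case <-time.After(500 * time.Millisecond): // stand-in for real teardown work
	}
}, NodeTimeout(30*time.Second))
```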
|
||||
|
||||
### Features
|
||||
|
||||
- Ginkgo now records any additional failures that occur during the cleanup of a failed spec. In prior versions this information was quietly discarded, but the introduction of a more rigorous approach to timeouts and interruptions allows Ginkgo to better track subsequent failures.
|
||||
- `SpecContext` also provides a mechanism for third-party libraries to provide additional information when a Progress Report is generated. Gomega uses this to provide the current state of an `Eventually().WithContext()` assertion when a Progress Report is requested.
|
||||
- DescribeTable now exits with an error if it is not passed any Entries [a4c9865]
|
||||
|
||||
## Fixes
|
||||
- fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency [92c88d5]
|
||||
- Make the outline command able to use the DSL import [1be2427]
|
||||
|
||||
## Maintenance
|
||||
- chore(docs): delete no meaning d [57c373c]
|
||||
- chore(docs): Fix hyperlinks [30526d5]
|
||||
- chore(docs): fix code blocks without language settings [cf611c4]
|
||||
- fix intra-doc link [b541bcb]
|
||||
|
||||
## 2.2.0
|
||||
|
||||
### Generate real-time Progress Reports [f91377c]
|
||||
|
||||
Ginkgo can now generate Progress Reports to point users at the current running line of code (including a preview of the actual source code) and a best guess at the most relevant subroutines.
|
||||
|
||||
These Progress Reports allow users to debug stuck or slow tests without exiting the Ginkgo process. A Progress Report can be generated at any time by sending Ginkgo a `SIGINFO` (`^T` on MacOS/BSD) or `SIGUSR1`.
|
||||
|
||||
In addition, the user can specify `--poll-progress-after` and `--poll-progress-interval` to have Ginkgo start periodically emitting progress reports if a given node takes too long. These can be overridden/set on a per-node basis with the `PollProgressAfter` and `PollProgressInterval` decorators.
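
A minimal sketch of the per-node decorators mentioned above; the sleeping spec body is a placeholder for real work:

```go
package progress_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
)

// Per-node overrides of the global poll-progress settings: after 10s without
// completing, Ginkgo emits a Progress Report for this spec every 2s.
var _ = It("emits progress reports while it crunches", func() {
	time.Sleep(50 * time.Millisecond) // stand-in for the slow work under test
}, PollProgressAfter(10*time.Second), PollProgressInterval(2*time.Second))
```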
|
||||
|
||||
Progress Reports are emitted to stdout, and also stored in the machine-readable report formats that Ginkgo supports.
|
||||
|
||||
Ginkgo also uses this progress reporting infrastructure under the hood when handling timeouts and interrupts. This yields much more focused, useful, and informative stack traces than previously.
|
||||
|
||||
### Features
|
||||
- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
|
||||
|
||||
As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
|
||||
|
||||
### Maintenance
|
||||
- Modernize the invocation of Ginkgo in github actions [0ffde58]
|
||||
- Update reocmmended CI settings in docs [896bbb9]
|
||||
- Speed up unnecessarily slow integration test [6d3a90e]
|
||||
|
||||
## 2.1.6
|
||||
|
||||
### Fixes
|
||||
- Add `SuppressProgressReporting` decorator to turn off --progress announcements for a given node [dfef62a]
|
||||
- chore: remove duplicate word in comments [7373214]
|
||||
|
||||
## 2.1.5
|
||||
|
||||
### Fixes
|
||||
- drop -mod=mod instructions; fixes #1026 [6ad7138]
|
||||
- Ensure `CurrentSpecReport` and `AddReportEntry` are thread-safe [817c09b]
|
||||
- remove stale importmap gcflags flag test [3cd8b93]
|
||||
- Always emit spec summary [5cf23e2] - even when only one spec has failed
|
||||
- Fix ReportAfterSuite usage in docs [b1864ad]
|
||||
- fixed typo (#997) [219cc00]
|
||||
- TrimRight is not designed to trim Suffix [71ebb74]
|
||||
- refactor: replace strings.Replace with strings.ReplaceAll (#978) [143d208]
|
||||
- fix syntax in examples (#975) [b69554f]
|
||||
|
||||
### Maintenance
|
||||
- Bump github.com/onsi/gomega from 1.20.0 to 1.20.1 (#1027) [e5dfce4]
|
||||
- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#1006) [7ae91c4]
|
||||
- Bump github.com/onsi/gomega from 1.19.0 to 1.20.0 (#1005) [e87a85a]
|
||||
- test: add new Go 1.19 to test matrix (#1014) [bbefe12]
|
||||
- Bump golang.org/x/tools from 0.1.11 to 0.1.12 (#1012) [9327906]
|
||||
- Bump golang.org/x/tools from 0.1.10 to 0.1.11 (#993) [f44af96]
|
||||
- Bump nokogiri from 1.13.3 to 1.13.6 in /docs (#981) [ef336aa]
|
||||
|
||||
## 2.1.4
|
||||
|
||||
### Fixes
|
||||
- Numerous documentation typos
|
||||
- Prepend `when` when using `When` (this behavior was in 1.x but unintentionally lost during the 2.0 rewrite) [efce903]
|
||||
- improve error message when a parallel process fails to report back [a7bd1fe]
|
||||
- guard against concurrent map writes in DeprecationTracker [0976569]
|
||||
- Invoke reporting nodes during dry-run (fixes #956 and #935) [aae4480]
|
||||
- Fix ginkgo import circle [f779385]
|
||||
|
||||
## 2.1.3
|
||||
|
||||
See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
|
||||
|
||||
### Fixes
|
||||
- Calling By in a container node now emits a useful error. [ff12cee]
|
||||
|
||||
## 2.1.2
|
||||
|
||||
### Fixes
|
||||
|
||||
- Track location of focused specs correctly in `ginkgo unfocus` [a612ff1]
|
||||
- Profiling suites with focused specs no longer generates an erroneous failure message [8fbfa02]
|
||||
- Several documentation typos fixed. Big thanks to everyone who helped catch them and report/fix them!
|
||||
|
||||
## 2.1.1
|
||||
|
||||
See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
|
||||
|
||||
### Fixes
|
||||
- Suites that only import the new dsl packages are now correctly identified as Ginkgo suites [ec17e17]
|
||||
|
||||
## 2.1.0
|
||||
|
||||
See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
|
||||
|
||||
2.1.0 is a minor release with a few tweaks:
|
||||
|
||||
- Introduce new DSL packages to enable users to pick-and-choose which portions of the DSL to dot-import. [90868e2] More details [here](https://onsi.github.io/ginkgo/#alternatives-to-dot-importing-ginkgo).
|
||||
- Add error check for invalid/nil parameters to DescribeTable [6f8577e]
|
||||
- Myriad docs typos fixed (thanks everyone!) [718542a, ecb7098, 146654c, a8f9913, 6bdffde, 03dcd7e]
|
||||
|
||||
## 2.0.0
|
||||
|
||||
See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)
|
||||
|
||||
## 1.16.5
|
||||
|
||||
Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
|
||||
1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess
|
||||
|
||||
You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
|
||||
You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
|
||||
|
||||
## 1.16.4
|
||||
|
||||
@ -23,7 +237,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
|
||||
## 1.16.1
|
||||
|
||||
### Fixes
|
||||
- Supress --stream deprecation warning on windows (#793)
|
||||
- Suppress --stream deprecation warning on windows (#793)
|
||||
|
||||
## 1.16.0
|
||||
|
||||
@ -35,7 +249,6 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
|
||||
|
||||
- Add slim-sprig template functions to bootstrap/generate (#775) [9162b86]
|
||||
|
||||
### Fixes
|
||||
- Fix accidental reference to 1488 (#784) [9fb7fe4]
|
||||
|
||||
## 1.15.2
|
||||
@ -111,7 +324,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
|
||||
- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
|
||||
- improve ginkgo performance - makes progress on #644 [a14f98e]
|
||||
- fix convert integration tests [1f8ba69]
|
||||
- fix typo succesful -> successful (#663) [1ea49cf]
|
||||
- fix typo successful -> successful (#663) [1ea49cf]
|
||||
- Fix invalid link (#658) [b886136]
|
||||
- convert utility : Include comments from source (#657) [1077c6d]
|
||||
- Explain what BDD means [d79e7fb]
|
||||
@ -202,10 +415,10 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
|
||||
- fix: for `go vet` to pass [69338ec]
|
||||
- docs: fix for contributing instructions [7004cb1]
|
||||
- consolidate and streamline contribution docs (#494) [d848015]
|
||||
- Make generated Junit file compatable with "Maven Surefire" (#488) [e51bee6]
|
||||
- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
|
||||
- all: gofmt [000d317]
|
||||
- Increase eventually timeout to 30s [c73579c]
|
||||
- Clarify asynchronous test behaviour [294d8f4]
|
||||
- Clarify asynchronous test behavior [294d8f4]
|
||||
- Travis badge should only show master [26d2143]
|
||||
|
||||
## 1.5.0 5/10/2018
|
||||
@ -223,13 +436,13 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
|
||||
- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
|
||||
- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
|
||||
- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
|
||||
- Increase the threshold when checking time measuments (#455) [2f714bf, 68f622c]
|
||||
- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
|
||||
- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
|
||||
- Add an extra new line after reporting spec run completion for test2json [874520d]
|
||||
- added name name field to junit reported testsuite [ae61c63]
|
||||
- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
|
||||
- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
|
||||
- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
|
||||
- Synchronies the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
|
||||
- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
|
||||
- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
|
||||
- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
|
||||
@ -314,7 +527,7 @@ Bug Fixes:
|
||||
- Fix incorrect failure message when a panic occurs during a parallel test run
|
||||
- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
|
||||
- Be more consistent about handling SIGTERM as well as SIGINT
|
||||
- When interupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
|
||||
- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
|
||||
- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
|
||||
|
||||
## 1.1.0 (8/2/2014)
|
13
vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
# Contributing to Ginkgo
|
||||
|
||||
Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
|
||||
|
||||
- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
|
||||
- Ensure adequate test coverage:
|
||||
- When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
|
||||
- When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
|
||||
- Make sure all the tests succeed via `ginkgo -r -p`
|
||||
- Vet your changes via `go vet ./...`
|
||||
- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes.
|
||||
|
||||
Thanks for supporting Ginkgo!
|
Some files were not shown because too many files have changed in this diff.