Merge pull request #2602 from cmaf/upgrade-opentelemetry

vendor: update OpenTelemetry
James O. D. Hunt 2021-11-05 11:00:25 +00:00 committed by GitHub
commit 4be2c8b190
269 changed files with 20622 additions and 12181 deletions

View File

@ -37,17 +37,22 @@ require (
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8
github.com/sirupsen/logrus v1.8.1
github.com/smartystreets/goconvey v1.6.4 // indirect
github.com/stretchr/testify v1.6.1
github.com/stretchr/testify v1.7.0
github.com/urfave/cli v1.22.2
github.com/vishvananda/netlink v1.1.1-0.20210924202909-187053b97868
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae
go.opentelemetry.io/otel v0.15.0
go.opentelemetry.io/otel/exporters/trace/jaeger v0.15.0
go.opentelemetry.io/otel/sdk v0.15.0
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel v1.0.0
go.opentelemetry.io/otel/exporters/jaeger v1.0.0
go.opentelemetry.io/otel/sdk v1.0.0
go.opentelemetry.io/otel/trace v1.0.0
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
google.golang.org/grpc v1.33.2
golang.org/x/text v0.3.5 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb // indirect
google.golang.org/grpc v1.36.0
k8s.io/apimachinery v0.20.6
k8s.io/cri-api v0.20.6
)
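For anyone reproducing the bump locally, the standard Go module workflow applies: go get each new module at v1.0.0, then go mod tidy and go mod vendor to refresh the vendor tree. As a quick sanity check that the new SDK is actually in use, a minimal sketch (not part of this change; it only assumes the go.opentelemetry.io/otel module listed above and its Version() helper):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	// With the require block above this is expected to print "1.0.0".
	fmt.Println("otel release in use:", otel.Version())
}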

View File

@ -45,7 +45,6 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
@ -67,13 +66,10 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@ -100,6 +96,7 @@ github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
@ -179,6 +176,7 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@ -295,9 +293,11 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -517,8 +517,10 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@ -550,14 +552,17 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/otel v0.15.0 h1:CZFy2lPhxd4HlhZnYK8gRyDotksO3Ip9rBweY1vVYJw=
go.opentelemetry.io/otel v0.15.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA=
go.opentelemetry.io/otel/exporters/trace/jaeger v0.15.0 h1:OZY+lMaUJiJ6ls1dDtqKhSPJWEVNytLuRUrzuR852jc=
go.opentelemetry.io/otel/exporters/trace/jaeger v0.15.0/go.mod h1:4DeFMzRzr2l5o/Lzh4GfOYEH+mnf7ZrEGDzzJEP6ta8=
go.opentelemetry.io/otel/sdk v0.15.0 h1:Hf2dl1Ad9Hn03qjcAuAq51GP5Pv1SV5puIkS2nRhdd8=
go.opentelemetry.io/otel/sdk v0.15.0/go.mod h1:Qudkwgq81OcA9GYVlbyZ62wkLieeS1eWxIL0ufxgwoc=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/otel v1.0.0 h1:qTTn6x71GVBvoafHK/yaRUmFzI4LcONZD0/kXxl5PHI=
go.opentelemetry.io/otel v1.0.0/go.mod h1:AjRVh9A5/5DE7S+mZtTR6t8vpKKryam+0lREnfmS4cg=
go.opentelemetry.io/otel/exporters/jaeger v1.0.0 h1:cLhx8llHw02h5JTqGqaRbYn+QVKHmrzD9vEbKnSPk5U=
go.opentelemetry.io/otel/exporters/jaeger v1.0.0/go.mod h1:q10N1AolE1JjqKrFJK2tYw0iZpmX+HBaXBtuCzRnBGQ=
go.opentelemetry.io/otel/sdk v1.0.0 h1:BNPMYUONPNbLneMttKSjQhOTlFLOD9U22HNG1KrIN2Y=
go.opentelemetry.io/otel/sdk v1.0.0/go.mod h1:PCrDHlSy5x1kjezSdL37PhbFUMjrsLRshJ2zCzeXwbM=
go.opentelemetry.io/otel/trace v1.0.0 h1:TSBr8GTEtKevYMG/2d21M989r5WJYVimhTHBKVEZuh4=
go.opentelemetry.io/otel/trace v1.0.0/go.mod h1:PXTWqayeFUlJV1YDNhsJYB184+IvAH814St6o6ajzIs=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@ -646,8 +651,8 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93 h1:alLDrZkL34Y2bnGHfvC1CYBRBXCXgx8AC2vY4MRtYX4=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -657,7 +662,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -705,7 +709,6 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -716,6 +719,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@ -724,8 +728,9 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -776,7 +781,6 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -799,15 +803,14 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.32.0 h1:Le77IccnTqEa8ryp9wIpX5W3zYm7Gf9LhOp9PHcwFts=
google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -823,9 +826,9 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View File

@ -11,11 +11,12 @@ import (
"github.com/sirupsen/logrus"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/trace/jaeger"
"go.opentelemetry.io/otel/label"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/exporters/jaeger"
"go.opentelemetry.io/otel/propagation"
export "go.opentelemetry.io/otel/sdk/export/trace"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.opentelemetry.io/otel/trace"
otelTrace "go.opentelemetry.io/otel/trace"
)
@ -26,10 +27,10 @@ import (
// https://github.com/kata-containers/tests/blob/master/tracing/tracing-test.sh
type kataSpanExporter struct{}
var _ export.SpanExporter = (*kataSpanExporter)(nil)
var _ sdktrace.SpanExporter = (*kataSpanExporter)(nil)
// ExportSpans reports the given spans to the kata trace logger.
func (e *kataSpanExporter) ExportSpans(ctx context.Context, spans []*export.SpanData) error {
func (e *kataSpanExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
for _, span := range spans {
kataTraceLogger.Tracef("Reporting span %+v", span)
}
@ -40,9 +41,9 @@ func (e *kataSpanExporter) Shutdown(ctx context.Context) error {
return nil
}
// tracerCloser contains a copy of the closer returned by createTracer() which
// is used by stopTracing().
var tracerCloser func()
// tp is the trace provider created in CreateTracer() and used in StopTracing()
// to flush all spans and shut down the provider.
var tp *sdktrace.TracerProvider
var kataTraceLogger = logrus.NewEntry(logrus.New())
@ -62,10 +63,10 @@ type JaegerConfig struct {
}
// CreateTracer creates a tracer
func CreateTracer(name string, config *JaegerConfig) (func(), error) {
func CreateTracer(name string, config *JaegerConfig) (*sdktrace.TracerProvider, error) {
if !tracing {
otel.SetTracerProvider(trace.NewNoopTracerProvider())
return func() {}, nil
return nil, nil
}
// build the kata exporter to log reported span records
@ -77,37 +78,32 @@ func CreateTracer(name string, config *JaegerConfig) (func(), error) {
collectorEndpoint = "http://localhost:14268/api/traces"
}
jaegerExporter, err := jaeger.NewRawExporter(
jaeger.WithCollectorEndpoint(collectorEndpoint,
jaegerExporter, err := jaeger.New(
jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(collectorEndpoint),
jaeger.WithUsername(config.JaegerUser),
jaeger.WithPassword(config.JaegerPassword),
), jaeger.WithProcess(jaeger.Process{
ServiceName: name,
Tags: []label.KeyValue{
label.String("exporter", "jaeger"),
label.String("lib", "opentelemetry"),
},
}))
),
)
if err != nil {
return nil, err
}
// build a tracer provider that combines both the jaeger exporter and the kata exporter.
tp := sdktrace.NewTracerProvider(
sdktrace.WithConfig(
sdktrace.Config{
DefaultSampler: sdktrace.AlwaysSample(),
},
),
sdktrace.WithSampler(sdktrace.AlwaysSample()),
sdktrace.WithSyncer(kataExporter),
sdktrace.WithSyncer(jaegerExporter),
sdktrace.WithResource(resource.NewSchemaless(
semconv.ServiceNameKey.String(name),
attribute.String("exporter", "jaeger"),
attribute.String("lib", "opentelemetry"),
)),
)
tracerCloser = jaegerExporter.Flush
otel.SetTracerProvider(tp)
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))
return tracerCloser, nil
return tp, nil
}
// StopTracing ends all tracing, reporting the spans to the collector.
@ -122,9 +118,8 @@ func StopTracing(ctx context.Context) {
}
// report all possible spans to the collector
if tracerCloser != nil {
tracerCloser()
}
tp.ForceFlush(ctx)
tp.Shutdown(ctx)
}
// Trace creates a new tracing span based on the specified name and parent context.
@ -139,12 +134,12 @@ func Trace(parent context.Context, logger *logrus.Entry, name string, tags ...ma
parent = context.Background()
}
var otelTags []label.KeyValue
var otelTags []attribute.KeyValue
// do not append tags if tracing is disabled
if tracing {
for _, tagSet := range tags {
for k, v := range tagSet {
otelTags = append(otelTags, label.Key(k).String(v))
otelTags = append(otelTags, attribute.Key(k).String(v))
}
}
}
@ -170,45 +165,31 @@ func addTag(span otelTrace.Span, key string, value interface{}) {
return
}
if value == nil {
span.SetAttributes(label.String(key, "nil"))
span.SetAttributes(attribute.String(key, "nil"))
return
}
switch value := value.(type) {
case string:
span.SetAttributes(label.String(key, value))
span.SetAttributes(attribute.String(key, value))
case bool:
span.SetAttributes(label.Bool(key, value))
span.SetAttributes(attribute.Bool(key, value))
case int:
span.SetAttributes(label.Int(key, value))
span.SetAttributes(attribute.Int(key, value))
case int8:
span.SetAttributes(label.Int(key, int(value)))
span.SetAttributes(attribute.Int(key, int(value)))
case int16:
span.SetAttributes(label.Int(key, int(value)))
case int32:
span.SetAttributes(label.Int32(key, value))
span.SetAttributes(attribute.Int(key, int(value)))
case int64:
span.SetAttributes(label.Int64(key, value))
case uint:
span.SetAttributes(label.Uint(key, value))
case uint8:
span.SetAttributes(label.Uint(key, uint(value)))
case uint16:
span.SetAttributes(label.Uint(key, uint(value)))
case uint32:
span.SetAttributes(label.Uint32(key, value))
case uint64:
span.SetAttributes(label.Uint64(key, value))
case float32:
span.SetAttributes(label.Float32(key, value))
span.SetAttributes(attribute.Int64(key, value))
case float64:
span.SetAttributes(label.Float64(key, value))
span.SetAttributes(attribute.Float64(key, value))
default:
content, err := json.Marshal(value)
if content == nil && err == nil {
span.SetAttributes(label.String(key, "nil"))
span.SetAttributes(attribute.String(key, "nil"))
} else if content != nil && err == nil {
span.SetAttributes(label.String(key, string(content)))
span.SetAttributes(attribute.String(key, string(content)))
} else {
kataTraceLogger.WithField("type", "bug").Error("span attribute value error")
}
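To make the API migration concrete, here is a minimal caller-side sketch against OpenTelemetry 1.0.0 as wired up above; CreateTracer() now returns the *sdktrace.TracerProvider that StopTracing() flushes and shuts down, and spans are tagged with attribute.KeyValue instead of the removed label package. The tracer name and attribute keys below are illustrative only:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	ctx := context.Background()

	// The runtime installs its provider globally via otel.SetTracerProvider()
	// inside CreateTracer(); with no provider registered, otel.Tracer() falls
	// back to a no-op implementation, which keeps this sketch self-contained.
	tracer := otel.Tracer("example-tracer")

	_, span := tracer.Start(ctx, "do-work")
	// v0.15.0's label.String()/label.Key() calls become attribute.String()/attribute.Key().
	span.SetAttributes(
		attribute.String("exporter", "jaeger"),
		attribute.Bool("done", true),
	)
	span.End()
}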

View File

@ -1,505 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
)
type TBinaryProtocol struct {
trans TRichTransport
origTransport TTransport
strictRead bool
strictWrite bool
buffer [64]byte
}
type TBinaryProtocolFactory struct {
strictRead bool
strictWrite bool
}
func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
return NewTBinaryProtocol(t, false, true)
}
func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite}
if et, ok := t.(TRichTransport); ok {
p.trans = et
} else {
p.trans = NewTRichTransport(t)
}
return p
}
func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
return NewTBinaryProtocolFactory(false, true)
}
func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite}
}
func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
return NewTBinaryProtocol(t, p.strictRead, p.strictWrite)
}
/**
* Writing Methods
*/
func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
if p.strictWrite {
version := uint32(VERSION_1) | uint32(typeId)
e := p.WriteI32(int32(version))
if e != nil {
return e
}
e = p.WriteString(name)
if e != nil {
return e
}
e = p.WriteI32(seqId)
return e
} else {
e := p.WriteString(name)
if e != nil {
return e
}
e = p.WriteByte(int8(typeId))
if e != nil {
return e
}
e = p.WriteI32(seqId)
return e
}
return nil
}
func (p *TBinaryProtocol) WriteMessageEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteStructBegin(name string) error {
return nil
}
func (p *TBinaryProtocol) WriteStructEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
e := p.WriteByte(int8(typeId))
if e != nil {
return e
}
e = p.WriteI16(id)
return e
}
func (p *TBinaryProtocol) WriteFieldEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteFieldStop() error {
e := p.WriteByte(STOP)
return e
}
func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
e := p.WriteByte(int8(keyType))
if e != nil {
return e
}
e = p.WriteByte(int8(valueType))
if e != nil {
return e
}
e = p.WriteI32(int32(size))
return e
}
func (p *TBinaryProtocol) WriteMapEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error {
e := p.WriteByte(int8(elemType))
if e != nil {
return e
}
e = p.WriteI32(int32(size))
return e
}
func (p *TBinaryProtocol) WriteListEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error {
e := p.WriteByte(int8(elemType))
if e != nil {
return e
}
e = p.WriteI32(int32(size))
return e
}
func (p *TBinaryProtocol) WriteSetEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteBool(value bool) error {
if value {
return p.WriteByte(1)
}
return p.WriteByte(0)
}
func (p *TBinaryProtocol) WriteByte(value int8) error {
e := p.trans.WriteByte(byte(value))
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI16(value int16) error {
v := p.buffer[0:2]
binary.BigEndian.PutUint16(v, uint16(value))
_, e := p.trans.Write(v)
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI32(value int32) error {
v := p.buffer[0:4]
binary.BigEndian.PutUint32(v, uint32(value))
_, e := p.trans.Write(v)
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI64(value int64) error {
v := p.buffer[0:8]
binary.BigEndian.PutUint64(v, uint64(value))
_, err := p.trans.Write(v)
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) WriteDouble(value float64) error {
return p.WriteI64(int64(math.Float64bits(value)))
}
func (p *TBinaryProtocol) WriteString(value string) error {
e := p.WriteI32(int32(len(value)))
if e != nil {
return e
}
_, err := p.trans.WriteString(value)
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) WriteBinary(value []byte) error {
e := p.WriteI32(int32(len(value)))
if e != nil {
return e
}
_, err := p.trans.Write(value)
return NewTProtocolException(err)
}
/**
* Reading methods
*/
func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
size, e := p.ReadI32()
if e != nil {
return "", typeId, 0, NewTProtocolException(e)
}
if size < 0 {
typeId = TMessageType(size & 0x0ff)
version := int64(int64(size) & VERSION_MASK)
if version != VERSION_1 {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
}
name, e = p.ReadString()
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
seqId, e = p.ReadI32()
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
return name, typeId, seqId, nil
}
if p.strictRead {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
}
name, e2 := p.readStringBody(size)
if e2 != nil {
return name, typeId, seqId, e2
}
b, e3 := p.ReadByte()
if e3 != nil {
return name, typeId, seqId, e3
}
typeId = TMessageType(b)
seqId, e4 := p.ReadI32()
if e4 != nil {
return name, typeId, seqId, e4
}
return name, typeId, seqId, nil
}
func (p *TBinaryProtocol) ReadMessageEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) {
return
}
func (p *TBinaryProtocol) ReadStructEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) {
t, err := p.ReadByte()
typeId = TType(t)
if err != nil {
return name, typeId, seqId, err
}
if t != STOP {
seqId, err = p.ReadI16()
}
return name, typeId, seqId, err
}
func (p *TBinaryProtocol) ReadFieldEnd() error {
return nil
}
var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) {
k, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
kType = TType(k)
v, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
vType = TType(v)
size32, e := p.ReadI32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return kType, vType, size, nil
}
func (p *TBinaryProtocol) ReadMapEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) {
b, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
size32, e := p.ReadI32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return
}
func (p *TBinaryProtocol) ReadListEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
b, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
size32, e := p.ReadI32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return elemType, size, nil
}
func (p *TBinaryProtocol) ReadSetEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadBool() (bool, error) {
b, e := p.ReadByte()
v := true
if b != 1 {
v = false
}
return v, e
}
func (p *TBinaryProtocol) ReadByte() (int8, error) {
v, err := p.trans.ReadByte()
return int8(v), err
}
func (p *TBinaryProtocol) ReadI16() (value int16, err error) {
buf := p.buffer[0:2]
err = p.readAll(buf)
value = int16(binary.BigEndian.Uint16(buf))
return value, err
}
func (p *TBinaryProtocol) ReadI32() (value int32, err error) {
buf := p.buffer[0:4]
err = p.readAll(buf)
value = int32(binary.BigEndian.Uint32(buf))
return value, err
}
func (p *TBinaryProtocol) ReadI64() (value int64, err error) {
buf := p.buffer[0:8]
err = p.readAll(buf)
value = int64(binary.BigEndian.Uint64(buf))
return value, err
}
func (p *TBinaryProtocol) ReadDouble() (value float64, err error) {
buf := p.buffer[0:8]
err = p.readAll(buf)
value = math.Float64frombits(binary.BigEndian.Uint64(buf))
return value, err
}
func (p *TBinaryProtocol) ReadString() (value string, err error) {
size, e := p.ReadI32()
if e != nil {
return "", e
}
if size < 0 {
err = invalidDataLength
return
}
return p.readStringBody(size)
}
func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
size, e := p.ReadI32()
if e != nil {
return nil, e
}
if size < 0 {
return nil, invalidDataLength
}
isize := int(size)
buf := make([]byte, isize)
_, err := io.ReadFull(p.trans, buf)
return buf, NewTProtocolException(err)
}
func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
return NewTProtocolException(p.trans.Flush(ctx))
}
func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
return SkipDefaultDepth(p, fieldType)
}
func (p *TBinaryProtocol) Transport() TTransport {
return p.origTransport
}
func (p *TBinaryProtocol) readAll(buf []byte) error {
_, err := io.ReadFull(p.trans, buf)
return NewTProtocolException(err)
}
const readLimit = 32768
func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
if size < 0 {
return "", nil
}
var (
buf bytes.Buffer
e error
b []byte
)
switch {
case int(size) <= len(p.buffer):
b = p.buffer[:size] // avoids allocation for small reads
case int(size) < readLimit:
b = make([]byte, size)
default:
b = make([]byte, readLimit)
}
for size > 0 {
_, e = io.ReadFull(p.trans, b)
buf.Write(b)
if e != nil {
break
}
size -= readLimit
if size < readLimit && size > 0 {
b = b[:size]
}
}
return buf.String(), NewTProtocolException(e)
}

View File

@ -1,270 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"log"
)
type TDebugProtocol struct {
Delegate TProtocol
LogPrefix string
}
type TDebugProtocolFactory struct {
Underlying TProtocolFactory
LogPrefix string
}
func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
return &TDebugProtocolFactory{
Underlying: underlying,
LogPrefix: logPrefix,
}
}
func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return &TDebugProtocol{
Delegate: t.Underlying.GetProtocol(trans),
LogPrefix: t.LogPrefix,
}
}
func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid)
log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
return err
}
func (tdp *TDebugProtocol) WriteMessageEnd() error {
err := tdp.Delegate.WriteMessageEnd()
log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteStructBegin(name string) error {
err := tdp.Delegate.WriteStructBegin(name)
log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
return err
}
func (tdp *TDebugProtocol) WriteStructEnd() error {
err := tdp.Delegate.WriteStructEnd()
log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
err := tdp.Delegate.WriteFieldBegin(name, typeId, id)
log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
return err
}
func (tdp *TDebugProtocol) WriteFieldEnd() error {
err := tdp.Delegate.WriteFieldEnd()
log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteFieldStop() error {
err := tdp.Delegate.WriteFieldStop()
log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
err := tdp.Delegate.WriteMapBegin(keyType, valueType, size)
log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
return err
}
func (tdp *TDebugProtocol) WriteMapEnd() error {
err := tdp.Delegate.WriteMapEnd()
log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error {
err := tdp.Delegate.WriteListBegin(elemType, size)
log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
return err
}
func (tdp *TDebugProtocol) WriteListEnd() error {
err := tdp.Delegate.WriteListEnd()
log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error {
err := tdp.Delegate.WriteSetBegin(elemType, size)
log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
return err
}
func (tdp *TDebugProtocol) WriteSetEnd() error {
err := tdp.Delegate.WriteSetEnd()
log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteBool(value bool) error {
err := tdp.Delegate.WriteBool(value)
log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteByte(value int8) error {
err := tdp.Delegate.WriteByte(value)
log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteI16(value int16) error {
err := tdp.Delegate.WriteI16(value)
log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteI32(value int32) error {
err := tdp.Delegate.WriteI32(value)
log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteI64(value int64) error {
err := tdp.Delegate.WriteI64(value)
log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteDouble(value float64) error {
err := tdp.Delegate.WriteDouble(value)
log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteString(value string) error {
err := tdp.Delegate.WriteString(value)
log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteBinary(value []byte) error {
err := tdp.Delegate.WriteBinary(value)
log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin()
log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
return
}
func (tdp *TDebugProtocol) ReadMessageEnd() (err error) {
err = tdp.Delegate.ReadMessageEnd()
log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) {
name, err = tdp.Delegate.ReadStructBegin()
log.Printf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err)
return
}
func (tdp *TDebugProtocol) ReadStructEnd() (err error) {
err = tdp.Delegate.ReadStructEnd()
log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
name, typeId, id, err = tdp.Delegate.ReadFieldBegin()
log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
return
}
func (tdp *TDebugProtocol) ReadFieldEnd() (err error) {
err = tdp.Delegate.ReadFieldEnd()
log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
keyType, valueType, size, err = tdp.Delegate.ReadMapBegin()
log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
return
}
func (tdp *TDebugProtocol) ReadMapEnd() (err error) {
err = tdp.Delegate.ReadMapEnd()
log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) {
elemType, size, err = tdp.Delegate.ReadListBegin()
log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
return
}
func (tdp *TDebugProtocol) ReadListEnd() (err error) {
err = tdp.Delegate.ReadListEnd()
log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) {
elemType, size, err = tdp.Delegate.ReadSetBegin()
log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
return
}
func (tdp *TDebugProtocol) ReadSetEnd() (err error) {
err = tdp.Delegate.ReadSetEnd()
log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadBool() (value bool, err error) {
value, err = tdp.Delegate.ReadBool()
log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadByte() (value int8, err error) {
value, err = tdp.Delegate.ReadByte()
log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadI16() (value int16, err error) {
value, err = tdp.Delegate.ReadI16()
log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadI32() (value int32, err error) {
value, err = tdp.Delegate.ReadI32()
log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadI64() (value int64, err error) {
value, err = tdp.Delegate.ReadI64()
log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) {
value, err = tdp.Delegate.ReadDouble()
log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadString() (value string, err error) {
value, err = tdp.Delegate.ReadString()
log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) {
value, err = tdp.Delegate.ReadBinary()
log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) {
err = tdp.Delegate.Skip(fieldType)
log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
return
}
func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) {
err = tdp.Delegate.Flush(ctx)
log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) Transport() TTransport {
return tdp.Delegate.Transport()
}

View File

@ -1,58 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
type TDeserializer struct {
Transport TTransport
Protocol TProtocol
}
func NewTDeserializer() *TDeserializer {
var transport TTransport
transport = NewTMemoryBufferLen(1024)
protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
return &TDeserializer{
transport,
protocol}
}
func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) {
err = nil
if _, err = t.Transport.Write([]byte(s)); err != nil {
return
}
if err = msg.Read(t.Protocol); err != nil {
return
}
return
}
func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) {
err = nil
if _, err = t.Transport.Write(b); err != nil {
return
}
if err = msg.Read(t.Protocol); err != nil {
return
}
return
}

View File

@ -1,79 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
// Helper class that encapsulates field metadata.
type field struct {
name string
typeId TType
id int
}
func newField(n string, t TType, i int) *field {
return &field{name: n, typeId: t, id: i}
}
func (p *field) Name() string {
if p == nil {
return ""
}
return p.name
}
func (p *field) TypeId() TType {
if p == nil {
return TType(VOID)
}
return p.typeId
}
func (p *field) Id() int {
if p == nil {
return -1
}
return p.id
}
func (p *field) String() string {
if p == nil {
return "<nil>"
}
return "<TField name:'" + p.name + "' type:" + string(p.typeId) + " field-id:" + string(p.id) + ">"
}
var ANONYMOUS_FIELD *field
type fieldSlice []field
func (p fieldSlice) Len() int {
return len(p)
}
func (p fieldSlice) Less(i, j int) bool {
return p[i].Id() < p[j].Id()
}
func (p fieldSlice) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
func init() {
ANONYMOUS_FIELD = newField("", STOP, 0)
}

View File

@ -1,187 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bufio"
"bytes"
"context"
"encoding/binary"
"fmt"
"io"
)
const DEFAULT_MAX_LENGTH = 16384000
type TFramedTransport struct {
transport TTransport
buf bytes.Buffer
reader *bufio.Reader
frameSize uint32 //Current remaining size of the frame. if ==0 read next frame header
buffer [4]byte
maxLength uint32
}
type tFramedTransportFactory struct {
factory TTransportFactory
maxLength uint32
}
func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH}
}
func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory {
return &tFramedTransportFactory{factory: factory, maxLength: maxLength}
}
func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) {
tt, err := p.factory.GetTransport(base)
if err != nil {
return nil, err
}
return NewTFramedTransportMaxLength(tt, p.maxLength), nil
}
func NewTFramedTransport(transport TTransport) *TFramedTransport {
return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH}
}
func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength}
}
func (p *TFramedTransport) Open() error {
return p.transport.Open()
}
func (p *TFramedTransport) IsOpen() bool {
return p.transport.IsOpen()
}
func (p *TFramedTransport) Close() error {
return p.transport.Close()
}
func (p *TFramedTransport) Read(buf []byte) (l int, err error) {
if p.frameSize == 0 {
p.frameSize, err = p.readFrameHeader()
if err != nil {
return
}
}
if p.frameSize < uint32(len(buf)) {
frameSize := p.frameSize
tmp := make([]byte, p.frameSize)
l, err = p.Read(tmp)
copy(buf, tmp)
if err == nil {
// Note: It's important to only return an error when l
// is zero.
// In io.Reader.Read interface, it's perfectly fine to
// return partial data and nil error, which means
// "This is all the data we have right now without
// blocking. If you need the full data, call Read again
// or use io.ReadFull instead".
// Returning partial data with an error actually means
// there's no more data after the partial data just
// returned, which is not true in this case
// (it might be that the other end just haven't written
// them yet).
if l == 0 {
err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf)))
}
return
}
}
got, err := p.reader.Read(buf)
p.frameSize = p.frameSize - uint32(got)
//sanity check
if p.frameSize < 0 {
return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size")
}
return got, NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) ReadByte() (c byte, err error) {
if p.frameSize == 0 {
p.frameSize, err = p.readFrameHeader()
if err != nil {
return
}
}
if p.frameSize < 1 {
return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1))
}
c, err = p.reader.ReadByte()
if err == nil {
p.frameSize--
}
return
}
func (p *TFramedTransport) Write(buf []byte) (int, error) {
n, err := p.buf.Write(buf)
return n, NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) WriteByte(c byte) error {
return p.buf.WriteByte(c)
}
func (p *TFramedTransport) WriteString(s string) (n int, err error) {
return p.buf.WriteString(s)
}
func (p *TFramedTransport) Flush(ctx context.Context) error {
size := p.buf.Len()
buf := p.buffer[:4]
binary.BigEndian.PutUint32(buf, uint32(size))
_, err := p.transport.Write(buf)
if err != nil {
p.buf.Truncate(0)
return NewTTransportExceptionFromError(err)
}
if size > 0 {
if n, err := p.buf.WriteTo(p.transport); err != nil {
print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
p.buf.Truncate(0)
return NewTTransportExceptionFromError(err)
}
}
err = p.transport.Flush(ctx)
return NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) readFrameHeader() (uint32, error) {
buf := p.buffer[:4]
if _, err := io.ReadFull(p.reader, buf); err != nil {
return 0, err
}
size := binary.BigEndian.Uint32(buf)
if size < 0 || size > p.maxLength {
return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
}
return size, nil
}
func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) {
return uint64(p.frameSize)
}

View File

@ -1,305 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
)
// THeaderProtocol is a thrift protocol that implements THeader:
// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
//
// It supports either binary or compact protocol as the wrapped protocol.
//
// Most of the THeader handlings are happening inside THeaderTransport.
type THeaderProtocol struct {
transport *THeaderTransport
// Will be initialized on first read/write.
protocol TProtocol
}
// NewTHeaderProtocol creates a new THeaderProtocol from the underlying
// transport. The passed in transport will be wrapped with THeaderTransport.
//
// Note that THeaderTransport handles framing and zlib compression by itself,
// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
// instead of rich transports like TZlibTransport or TFramedTransport.
func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
t := NewTHeaderTransport(trans)
p, _ := THeaderProtocolDefault.GetProtocol(t)
return &THeaderProtocol{
transport: t,
protocol: p,
}
}
type tHeaderProtocolFactory struct{}
func (tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return NewTHeaderProtocol(trans)
}
// NewTHeaderProtocolFactory creates a factory for THeader.
//
// It's a wrapper for NewTHeaderProtocol
func NewTHeaderProtocolFactory() TProtocolFactory {
return tHeaderProtocolFactory{}
}
// Transport returns the underlying transport.
//
// It's guaranteed to be of type *THeaderTransport.
func (p *THeaderProtocol) Transport() TTransport {
return p.transport
}
// GetReadHeaders returns the THeaderMap read from transport.
func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
return p.transport.GetReadHeaders()
}
// SetWriteHeader sets a header for write.
func (p *THeaderProtocol) SetWriteHeader(key, value string) {
p.transport.SetWriteHeader(key, value)
}
// ClearWriteHeaders clears all write headers previously set.
func (p *THeaderProtocol) ClearWriteHeaders() {
p.transport.ClearWriteHeaders()
}
// AddTransform add a transform for writing.
func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error {
return p.transport.AddTransform(transform)
}
func (p *THeaderProtocol) Flush(ctx context.Context) error {
return p.transport.Flush(ctx)
}
func (p *THeaderProtocol) WriteMessageBegin(name string, typeID TMessageType, seqID int32) error {
newProto, err := p.transport.Protocol().GetProtocol(p.transport)
if err != nil {
return err
}
p.protocol = newProto
p.transport.SequenceID = seqID
return p.protocol.WriteMessageBegin(name, typeID, seqID)
}
func (p *THeaderProtocol) WriteMessageEnd() error {
if err := p.protocol.WriteMessageEnd(); err != nil {
return err
}
return p.transport.Flush(context.Background())
}
func (p *THeaderProtocol) WriteStructBegin(name string) error {
return p.protocol.WriteStructBegin(name)
}
func (p *THeaderProtocol) WriteStructEnd() error {
return p.protocol.WriteStructEnd()
}
func (p *THeaderProtocol) WriteFieldBegin(name string, typeID TType, id int16) error {
return p.protocol.WriteFieldBegin(name, typeID, id)
}
func (p *THeaderProtocol) WriteFieldEnd() error {
return p.protocol.WriteFieldEnd()
}
func (p *THeaderProtocol) WriteFieldStop() error {
return p.protocol.WriteFieldStop()
}
func (p *THeaderProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
return p.protocol.WriteMapBegin(keyType, valueType, size)
}
func (p *THeaderProtocol) WriteMapEnd() error {
return p.protocol.WriteMapEnd()
}
func (p *THeaderProtocol) WriteListBegin(elemType TType, size int) error {
return p.protocol.WriteListBegin(elemType, size)
}
func (p *THeaderProtocol) WriteListEnd() error {
return p.protocol.WriteListEnd()
}
func (p *THeaderProtocol) WriteSetBegin(elemType TType, size int) error {
return p.protocol.WriteSetBegin(elemType, size)
}
func (p *THeaderProtocol) WriteSetEnd() error {
return p.protocol.WriteSetEnd()
}
func (p *THeaderProtocol) WriteBool(value bool) error {
return p.protocol.WriteBool(value)
}
func (p *THeaderProtocol) WriteByte(value int8) error {
return p.protocol.WriteByte(value)
}
func (p *THeaderProtocol) WriteI16(value int16) error {
return p.protocol.WriteI16(value)
}
func (p *THeaderProtocol) WriteI32(value int32) error {
return p.protocol.WriteI32(value)
}
func (p *THeaderProtocol) WriteI64(value int64) error {
return p.protocol.WriteI64(value)
}
func (p *THeaderProtocol) WriteDouble(value float64) error {
return p.protocol.WriteDouble(value)
}
func (p *THeaderProtocol) WriteString(value string) error {
return p.protocol.WriteString(value)
}
func (p *THeaderProtocol) WriteBinary(value []byte) error {
return p.protocol.WriteBinary(value)
}
// ReadFrame calls underlying THeaderTransport's ReadFrame function.
func (p *THeaderProtocol) ReadFrame() error {
return p.transport.ReadFrame()
}
func (p *THeaderProtocol) ReadMessageBegin() (name string, typeID TMessageType, seqID int32, err error) {
if err = p.transport.ReadFrame(); err != nil {
return
}
var newProto TProtocol
newProto, err = p.transport.Protocol().GetProtocol(p.transport)
if err != nil {
tAppExc, ok := err.(TApplicationException)
if !ok {
return
}
if e := p.protocol.WriteMessageBegin("", EXCEPTION, seqID); e != nil {
return
}
if e := tAppExc.Write(p.protocol); e != nil {
return
}
if e := p.protocol.WriteMessageEnd(); e != nil {
return
}
if e := p.transport.Flush(context.Background()); e != nil {
return
}
return
}
p.protocol = newProto
return p.protocol.ReadMessageBegin()
}
func (p *THeaderProtocol) ReadMessageEnd() error {
return p.protocol.ReadMessageEnd()
}
func (p *THeaderProtocol) ReadStructBegin() (name string, err error) {
return p.protocol.ReadStructBegin()
}
func (p *THeaderProtocol) ReadStructEnd() error {
return p.protocol.ReadStructEnd()
}
func (p *THeaderProtocol) ReadFieldBegin() (name string, typeID TType, id int16, err error) {
return p.protocol.ReadFieldBegin()
}
func (p *THeaderProtocol) ReadFieldEnd() error {
return p.protocol.ReadFieldEnd()
}
func (p *THeaderProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
return p.protocol.ReadMapBegin()
}
func (p *THeaderProtocol) ReadMapEnd() error {
return p.protocol.ReadMapEnd()
}
func (p *THeaderProtocol) ReadListBegin() (elemType TType, size int, err error) {
return p.protocol.ReadListBegin()
}
func (p *THeaderProtocol) ReadListEnd() error {
return p.protocol.ReadListEnd()
}
func (p *THeaderProtocol) ReadSetBegin() (elemType TType, size int, err error) {
return p.protocol.ReadSetBegin()
}
func (p *THeaderProtocol) ReadSetEnd() error {
return p.protocol.ReadSetEnd()
}
func (p *THeaderProtocol) ReadBool() (value bool, err error) {
return p.protocol.ReadBool()
}
func (p *THeaderProtocol) ReadByte() (value int8, err error) {
return p.protocol.ReadByte()
}
func (p *THeaderProtocol) ReadI16() (value int16, err error) {
return p.protocol.ReadI16()
}
func (p *THeaderProtocol) ReadI32() (value int32, err error) {
return p.protocol.ReadI32()
}
func (p *THeaderProtocol) ReadI64() (value int64, err error) {
return p.protocol.ReadI64()
}
func (p *THeaderProtocol) ReadDouble() (value float64, err error) {
return p.protocol.ReadDouble()
}
func (p *THeaderProtocol) ReadString() (value string, err error) {
return p.protocol.ReadString()
}
func (p *THeaderProtocol) ReadBinary() (value []byte, err error) {
return p.protocol.ReadBinary()
}
func (p *THeaderProtocol) Skip(fieldType TType) error {
return p.protocol.Skip(fieldType)
}

View File

@ -1,177 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"errors"
"fmt"
)
const (
VERSION_MASK = 0xffff0000
VERSION_1 = 0x80010000
)
type TProtocol interface {
WriteMessageBegin(name string, typeId TMessageType, seqid int32) error
WriteMessageEnd() error
WriteStructBegin(name string) error
WriteStructEnd() error
WriteFieldBegin(name string, typeId TType, id int16) error
WriteFieldEnd() error
WriteFieldStop() error
WriteMapBegin(keyType TType, valueType TType, size int) error
WriteMapEnd() error
WriteListBegin(elemType TType, size int) error
WriteListEnd() error
WriteSetBegin(elemType TType, size int) error
WriteSetEnd() error
WriteBool(value bool) error
WriteByte(value int8) error
WriteI16(value int16) error
WriteI32(value int32) error
WriteI64(value int64) error
WriteDouble(value float64) error
WriteString(value string) error
WriteBinary(value []byte) error
ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error)
ReadMessageEnd() error
ReadStructBegin() (name string, err error)
ReadStructEnd() error
ReadFieldBegin() (name string, typeId TType, id int16, err error)
ReadFieldEnd() error
ReadMapBegin() (keyType TType, valueType TType, size int, err error)
ReadMapEnd() error
ReadListBegin() (elemType TType, size int, err error)
ReadListEnd() error
ReadSetBegin() (elemType TType, size int, err error)
ReadSetEnd() error
ReadBool() (value bool, err error)
ReadByte() (value int8, err error)
ReadI16() (value int16, err error)
ReadI32() (value int32, err error)
ReadI64() (value int64, err error)
ReadDouble() (value float64, err error)
ReadString() (value string, err error)
ReadBinary() (value []byte, err error)
Skip(fieldType TType) (err error)
Flush(ctx context.Context) (err error)
Transport() TTransport
}
// The maximum recursive depth the skip() function will traverse
const DEFAULT_RECURSION_DEPTH = 64
// Skips over the next data element from the provided input TProtocol object.
func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) {
return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH)
}
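// Illustrative sketch (not part of the vendored source): generated readers
// typically fall back to SkipDefaultDepth for fields they do not recognize;
// iprot below is a hypothetical TProtocol being decoded from:
//
//	_, typeId, _, err := iprot.ReadFieldBegin()
//	if err != nil {
//		return err
//	}
//	if err := SkipDefaultDepth(iprot, typeId); err != nil {
//		return err
//	}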
// Skips over the next data element from the provided input TProtocol object.
func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
if maxDepth <= 0 {
return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
}
switch fieldType {
case BOOL:
_, err = self.ReadBool()
return
case BYTE:
_, err = self.ReadByte()
return
case I16:
_, err = self.ReadI16()
return
case I32:
_, err = self.ReadI32()
return
case I64:
_, err = self.ReadI64()
return
case DOUBLE:
_, err = self.ReadDouble()
return
case STRING:
_, err = self.ReadString()
return
case STRUCT:
if _, err = self.ReadStructBegin(); err != nil {
return err
}
for {
_, typeId, _, _ := self.ReadFieldBegin()
if typeId == STOP {
break
}
err := Skip(self, typeId, maxDepth-1)
if err != nil {
return err
}
self.ReadFieldEnd()
}
return self.ReadStructEnd()
case MAP:
keyType, valueType, size, err := self.ReadMapBegin()
if err != nil {
return err
}
for i := 0; i < size; i++ {
err := Skip(self, keyType, maxDepth-1)
if err != nil {
return err
}
self.Skip(valueType)
}
return self.ReadMapEnd()
case SET:
elemType, size, err := self.ReadSetBegin()
if err != nil {
return err
}
for i := 0; i < size; i++ {
err := Skip(self, elemType, maxDepth-1)
if err != nil {
return err
}
}
return self.ReadSetEnd()
case LIST:
elemType, size, err := self.ReadListBegin()
if err != nil {
return err
}
for i := 0; i < size; i++ {
err := Skip(self, elemType, maxDepth-1)
if err != nil {
return err
}
}
return self.ReadListEnd()
default:
return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType)))
}
return nil
}

View File

@ -1,79 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
)
type TSerializer struct {
Transport *TMemoryBuffer
Protocol TProtocol
}
type TStruct interface {
Write(p TProtocol) error
Read(p TProtocol) error
}
func NewTSerializer() *TSerializer {
transport := NewTMemoryBufferLen(1024)
protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
return &TSerializer{
transport,
protocol}
}
func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
t.Transport.Reset()
if err = msg.Write(t.Protocol); err != nil {
return
}
if err = t.Protocol.Flush(ctx); err != nil {
return
}
if err = t.Transport.Flush(ctx); err != nil {
return
}
return t.Transport.String(), nil
}
func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
t.Transport.Reset()
if err = msg.Write(t.Protocol); err != nil {
return
}
if err = t.Protocol.Flush(ctx); err != nil {
return
}
if err = t.Transport.Flush(ctx); err != nil {
return
}
b = append(b, t.Transport.Bytes()...)
return
}
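// Illustrative sketch (not part of the vendored source): serializing a
// generated struct to bytes, where msg is assumed to be any value
// implementing TStruct:
//
//	s := thrift.NewTSerializer()
//	buf, err := s.Write(context.Background(), msg)
//	if err != nil {
//		return err
//	}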

View File

@ -1,166 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"net"
"time"
)
type TSocket struct {
conn net.Conn
addr net.Addr
timeout time.Duration
}
// NewTSocket creates a net.Conn-backed TTransport, given a host and port
//
// Example:
// trans, err := thrift.NewTSocket("localhost:9090")
func NewTSocket(hostPort string) (*TSocket, error) {
return NewTSocketTimeout(hostPort, 0)
}
// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port.
// It also accepts a timeout as a time.Duration.
func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) {
//conn, err := net.DialTimeout(network, address, timeout)
addr, err := net.ResolveTCPAddr("tcp", hostPort)
if err != nil {
return nil, err
}
return NewTSocketFromAddrTimeout(addr, timeout), nil
}
// Creates a TSocket from a net.Addr
func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket {
return &TSocket{addr: addr, timeout: timeout}
}
// Creates a TSocket from an existing net.Conn
func NewTSocketFromConnTimeout(conn net.Conn, timeout time.Duration) *TSocket {
return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout}
}
// Sets the socket timeout
func (p *TSocket) SetTimeout(timeout time.Duration) error {
p.timeout = timeout
return nil
}
func (p *TSocket) pushDeadline(read, write bool) {
var t time.Time
if p.timeout > 0 {
t = time.Now().Add(time.Duration(p.timeout))
}
if read && write {
p.conn.SetDeadline(t)
} else if read {
p.conn.SetReadDeadline(t)
} else if write {
p.conn.SetWriteDeadline(t)
}
}
// Connects the socket, creating a new socket object if necessary.
func (p *TSocket) Open() error {
if p.IsOpen() {
return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
}
if p.addr == nil {
return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
}
if len(p.addr.Network()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
}
if len(p.addr.String()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
}
var err error
if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
return nil
}
// Retrieve the underlying net.Conn
func (p *TSocket) Conn() net.Conn {
return p.conn
}
// Returns true if the connection is open
func (p *TSocket) IsOpen() bool {
if p.conn == nil {
return false
}
return true
}
// Closes the socket.
func (p *TSocket) Close() error {
// Close the socket
if p.conn != nil {
err := p.conn.Close()
if err != nil {
return err
}
p.conn = nil
}
return nil
}
// Returns the remote address of the socket.
func (p *TSocket) Addr() net.Addr {
return p.addr
}
func (p *TSocket) Read(buf []byte) (int, error) {
if !p.IsOpen() {
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
}
p.pushDeadline(true, false)
n, err := p.conn.Read(buf)
return n, NewTTransportExceptionFromError(err)
}
func (p *TSocket) Write(buf []byte) (int, error) {
if !p.IsOpen() {
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
}
p.pushDeadline(false, true)
return p.conn.Write(buf)
}
func (p *TSocket) Flush(ctx context.Context) error {
return nil
}
func (p *TSocket) Interrupt() error {
if !p.IsOpen() {
return nil
}
return p.conn.Close()
}
func (p *TSocket) RemainingBytes() (num_bytes uint64) {
const maxSize = ^uint64(0)
return maxSize // the truth is, we just don't know unless framed is used
}

View File

@ -1,176 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"crypto/tls"
"net"
"time"
)
type TSSLSocket struct {
conn net.Conn
// hostPort contains host:port (e.g. "asdf.com:12345"). The field is
// only valid if addr is nil.
hostPort string
// addr is nil when hostPort is not "", and is only used when the
// TSSLSocket is constructed from a net.Addr.
addr net.Addr
timeout time.Duration
cfg *tls.Config
}
// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration
//
// Example:
// trans, err := thrift.NewTSSLSocket("localhost:9090", nil)
func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) {
return NewTSSLSocketTimeout(hostPort, cfg, 0)
}
// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port.
// It also accepts a tls Configuration and a timeout as a time.Duration.
func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) {
if cfg.MinVersion == 0 {
cfg.MinVersion = tls.VersionTLS10
}
return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil
}
// Creates a TSSLSocket from a net.Addr
func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg}
}
// Creates a TSSLSocket from an existing net.Conn
func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg}
}
// Sets the socket timeout
func (p *TSSLSocket) SetTimeout(timeout time.Duration) error {
p.timeout = timeout
return nil
}
func (p *TSSLSocket) pushDeadline(read, write bool) {
var t time.Time
if p.timeout > 0 {
t = time.Now().Add(time.Duration(p.timeout))
}
if read && write {
p.conn.SetDeadline(t)
} else if read {
p.conn.SetReadDeadline(t)
} else if write {
p.conn.SetWriteDeadline(t)
}
}
// Connects the socket, creating a new socket object if necessary.
func (p *TSSLSocket) Open() error {
var err error
// If we have a hostname, we need to pass the hostname to tls.Dial for
// certificate hostname checks.
if p.hostPort != "" {
if p.conn, err = tls.DialWithDialer(&net.Dialer{
Timeout: p.timeout}, "tcp", p.hostPort, p.cfg); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
} else {
if p.IsOpen() {
return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
}
if p.addr == nil {
return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
}
if len(p.addr.Network()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
}
if len(p.addr.String()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
}
if p.conn, err = tls.DialWithDialer(&net.Dialer{
Timeout: p.timeout}, p.addr.Network(), p.addr.String(), p.cfg); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
}
return nil
}
// Retrieve the underlying net.Conn
func (p *TSSLSocket) Conn() net.Conn {
return p.conn
}
// Returns true if the connection is open
func (p *TSSLSocket) IsOpen() bool {
if p.conn == nil {
return false
}
return true
}
// Closes the socket.
func (p *TSSLSocket) Close() error {
// Close the socket
if p.conn != nil {
err := p.conn.Close()
if err != nil {
return err
}
p.conn = nil
}
return nil
}
func (p *TSSLSocket) Read(buf []byte) (int, error) {
if !p.IsOpen() {
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
}
p.pushDeadline(true, false)
n, err := p.conn.Read(buf)
return n, NewTTransportExceptionFromError(err)
}
func (p *TSSLSocket) Write(buf []byte) (int, error) {
if !p.IsOpen() {
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
}
p.pushDeadline(false, true)
return p.conn.Write(buf)
}
func (p *TSSLSocket) Flush(ctx context.Context) error {
return nil
}
func (p *TSSLSocket) Interrupt() error {
if !p.IsOpen() {
return nil
}
return p.conn.Close()
}
func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) {
const maxSize = ^uint64(0)
return maxSize // the truth is, we just don't know unless framed is used
}

View File

@ -13,12 +13,42 @@ const (
compareGreater
)
var (
intType = reflect.TypeOf(int(1))
int8Type = reflect.TypeOf(int8(1))
int16Type = reflect.TypeOf(int16(1))
int32Type = reflect.TypeOf(int32(1))
int64Type = reflect.TypeOf(int64(1))
uintType = reflect.TypeOf(uint(1))
uint8Type = reflect.TypeOf(uint8(1))
uint16Type = reflect.TypeOf(uint16(1))
uint32Type = reflect.TypeOf(uint32(1))
uint64Type = reflect.TypeOf(uint64(1))
float32Type = reflect.TypeOf(float32(1))
float64Type = reflect.TypeOf(float64(1))
stringType = reflect.TypeOf("")
)
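// Illustrative note (not part of the vendored source): the typed
// reflect.Type variables above back a Convert fallback in compare below, so
// user-defined types built on a builtin now work with the ordering
// assertions; e.g., assuming `type age int`:
//
//	assert.Greater(t, age(42), age(17))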
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
obj1Value := reflect.ValueOf(obj1)
obj2Value := reflect.ValueOf(obj2)
// throughout this switch we try and avoid calling .Convert() if possible,
// as this has a pretty big performance impact
switch kind {
case reflect.Int:
{
intobj1 := obj1.(int)
intobj2 := obj2.(int)
intobj1, ok := obj1.(int)
if !ok {
intobj1 = obj1Value.Convert(intType).Interface().(int)
}
intobj2, ok := obj2.(int)
if !ok {
intobj2 = obj2Value.Convert(intType).Interface().(int)
}
if intobj1 > intobj2 {
return compareGreater, true
}
@ -31,8 +61,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Int8:
{
int8obj1 := obj1.(int8)
int8obj2 := obj2.(int8)
int8obj1, ok := obj1.(int8)
if !ok {
int8obj1 = obj1Value.Convert(int8Type).Interface().(int8)
}
int8obj2, ok := obj2.(int8)
if !ok {
int8obj2 = obj2Value.Convert(int8Type).Interface().(int8)
}
if int8obj1 > int8obj2 {
return compareGreater, true
}
@ -45,8 +81,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Int16:
{
int16obj1 := obj1.(int16)
int16obj2 := obj2.(int16)
int16obj1, ok := obj1.(int16)
if !ok {
int16obj1 = obj1Value.Convert(int16Type).Interface().(int16)
}
int16obj2, ok := obj2.(int16)
if !ok {
int16obj2 = obj2Value.Convert(int16Type).Interface().(int16)
}
if int16obj1 > int16obj2 {
return compareGreater, true
}
@ -59,8 +101,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Int32:
{
int32obj1 := obj1.(int32)
int32obj2 := obj2.(int32)
int32obj1, ok := obj1.(int32)
if !ok {
int32obj1 = obj1Value.Convert(int32Type).Interface().(int32)
}
int32obj2, ok := obj2.(int32)
if !ok {
int32obj2 = obj2Value.Convert(int32Type).Interface().(int32)
}
if int32obj1 > int32obj2 {
return compareGreater, true
}
@ -73,8 +121,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Int64:
{
int64obj1 := obj1.(int64)
int64obj2 := obj2.(int64)
int64obj1, ok := obj1.(int64)
if !ok {
int64obj1 = obj1Value.Convert(int64Type).Interface().(int64)
}
int64obj2, ok := obj2.(int64)
if !ok {
int64obj2 = obj2Value.Convert(int64Type).Interface().(int64)
}
if int64obj1 > int64obj2 {
return compareGreater, true
}
@ -87,8 +141,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Uint:
{
uintobj1 := obj1.(uint)
uintobj2 := obj2.(uint)
uintobj1, ok := obj1.(uint)
if !ok {
uintobj1 = obj1Value.Convert(uintType).Interface().(uint)
}
uintobj2, ok := obj2.(uint)
if !ok {
uintobj2 = obj2Value.Convert(uintType).Interface().(uint)
}
if uintobj1 > uintobj2 {
return compareGreater, true
}
@ -101,8 +161,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Uint8:
{
uint8obj1 := obj1.(uint8)
uint8obj2 := obj2.(uint8)
uint8obj1, ok := obj1.(uint8)
if !ok {
uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8)
}
uint8obj2, ok := obj2.(uint8)
if !ok {
uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8)
}
if uint8obj1 > uint8obj2 {
return compareGreater, true
}
@ -115,8 +181,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Uint16:
{
uint16obj1 := obj1.(uint16)
uint16obj2 := obj2.(uint16)
uint16obj1, ok := obj1.(uint16)
if !ok {
uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16)
}
uint16obj2, ok := obj2.(uint16)
if !ok {
uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16)
}
if uint16obj1 > uint16obj2 {
return compareGreater, true
}
@ -129,8 +201,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Uint32:
{
uint32obj1 := obj1.(uint32)
uint32obj2 := obj2.(uint32)
uint32obj1, ok := obj1.(uint32)
if !ok {
uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32)
}
uint32obj2, ok := obj2.(uint32)
if !ok {
uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32)
}
if uint32obj1 > uint32obj2 {
return compareGreater, true
}
@ -143,8 +221,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Uint64:
{
uint64obj1 := obj1.(uint64)
uint64obj2 := obj2.(uint64)
uint64obj1, ok := obj1.(uint64)
if !ok {
uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64)
}
uint64obj2, ok := obj2.(uint64)
if !ok {
uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64)
}
if uint64obj1 > uint64obj2 {
return compareGreater, true
}
@ -157,8 +241,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Float32:
{
float32obj1 := obj1.(float32)
float32obj2 := obj2.(float32)
float32obj1, ok := obj1.(float32)
if !ok {
float32obj1 = obj1Value.Convert(float32Type).Interface().(float32)
}
float32obj2, ok := obj2.(float32)
if !ok {
float32obj2 = obj2Value.Convert(float32Type).Interface().(float32)
}
if float32obj1 > float32obj2 {
return compareGreater, true
}
@ -171,8 +261,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Float64:
{
float64obj1 := obj1.(float64)
float64obj2 := obj2.(float64)
float64obj1, ok := obj1.(float64)
if !ok {
float64obj1 = obj1Value.Convert(float64Type).Interface().(float64)
}
float64obj2, ok := obj2.(float64)
if !ok {
float64obj2 = obj2Value.Convert(float64Type).Interface().(float64)
}
if float64obj1 > float64obj2 {
return compareGreater, true
}
@ -185,8 +281,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.String:
{
stringobj1 := obj1.(string)
stringobj2 := obj2.(string)
stringobj1, ok := obj1.(string)
if !ok {
stringobj1 = obj1Value.Convert(stringType).Interface().(string)
}
stringobj2, ok := obj2.(string)
if !ok {
stringobj2 = obj2Value.Convert(stringType).Interface().(string)
}
if stringobj1 > stringobj2 {
return compareGreater, true
}
@ -240,6 +342,24 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
}
// Positive asserts that the specified element is positive
//
// assert.Positive(t, 1)
// assert.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
}
// Negative asserts that the specified element is negative
//
// assert.Negative(t, -1)
// assert.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
}
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()

View File

@ -114,6 +114,24 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
return Error(t, err, append([]interface{}{msg}, args...)...)
}
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
}
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
}
// Eventuallyf asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
@ -321,6 +339,54 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil
return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
}
// IsDecreasingf asserts that the collection is decreasing
//
// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsDecreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsIncreasingf asserts that the collection is increasing
//
// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsIncreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsNonDecreasingf asserts that the collection is not decreasing
//
// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsNonIncreasingf asserts that the collection is not increasing
//
// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsTypef asserts that the specified objects are of the same type.
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@ -375,6 +441,17 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
}
// Negativef asserts that the specified element is negative
//
// assert.Negativef(t, -1, "error message %s", "formatted")
// assert.Negativef(t, -1.23, "error message %s", "formatted")
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Negative(t, e, append([]interface{}{msg}, args...)...)
}
// Neverf asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
@ -476,6 +553,15 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
}
// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
@ -572,6 +658,17 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str
return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
}
// Positivef asserts that the specified element is positive
//
// assert.Positivef(t, 1, "error message %s", "formatted")
// assert.Positivef(t, 1.23, "error message %s", "formatted")
func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Positive(t, e, append([]interface{}{msg}, args...)...)
}
// Regexpf asserts that a specified regexp matches a string.
//
// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")

View File

@ -204,6 +204,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
return Error(a.t, err, msgAndArgs...)
}
// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return ErrorAs(a.t, err, target, msgAndArgs...)
}
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return ErrorAsf(a.t, err, target, msg, args...)
}
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return ErrorIs(a.t, err, target, msgAndArgs...)
}
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return ErrorIsf(a.t, err, target, msg, args...)
}
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
@ -631,6 +667,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo
return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
}
// IsDecreasing asserts that the collection is decreasing
//
// a.IsDecreasing([]int{2, 1, 0})
// a.IsDecreasing([]float{2, 1})
// a.IsDecreasing([]string{"b", "a"})
func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsDecreasing(a.t, object, msgAndArgs...)
}
// IsDecreasingf asserts that the collection is decreasing
//
// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted")
// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsDecreasingf(a.t, object, msg, args...)
}
// IsIncreasing asserts that the collection is increasing
//
// a.IsIncreasing([]int{1, 2, 3})
// a.IsIncreasing([]float{1, 2})
// a.IsIncreasing([]string{"a", "b"})
func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsIncreasing(a.t, object, msgAndArgs...)
}
// IsIncreasingf asserts that the collection is increasing
//
// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted")
// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsIncreasingf(a.t, object, msg, args...)
}
// IsNonDecreasing asserts that the collection is not decreasing
//
// a.IsNonDecreasing([]int{1, 1, 2})
// a.IsNonDecreasing([]float{1, 2})
// a.IsNonDecreasing([]string{"a", "b"})
func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNonDecreasing(a.t, object, msgAndArgs...)
}
// IsNonDecreasingf asserts that the collection is not decreasing
//
// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted")
// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNonDecreasingf(a.t, object, msg, args...)
}
// IsNonIncreasing asserts that the collection is not increasing
//
// a.IsNonIncreasing([]int{2, 1, 1})
// a.IsNonIncreasing([]float{2, 1})
// a.IsNonIncreasing([]string{"b", "a"})
func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNonIncreasing(a.t, object, msgAndArgs...)
}
// IsNonIncreasingf asserts that the collection is not increasing
//
// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted")
// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNonIncreasingf(a.t, object, msg, args...)
}
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@ -739,6 +871,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
return Lessf(a.t, e1, e2, msg, args...)
}
// Negative asserts that the specified element is negative
//
// a.Negative(-1)
// a.Negative(-1.23)
func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Negative(a.t, e, msgAndArgs...)
}
// Negativef asserts that the specified element is negative
//
// a.Negativef(-1, "error message %s", "formatted")
// a.Negativef(-1.23, "error message %s", "formatted")
func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Negativef(a.t, e, msg, args...)
}
// Never asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
@ -941,6 +1095,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
return NotEqualf(a.t, expected, actual, msg, args...)
}
// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotErrorIs(a.t, err, target, msgAndArgs...)
}
// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotErrorIsf(a.t, err, target, msg, args...)
}
// NotNil asserts that the specified object is not nil.
//
// a.NotNil(err)
@ -1133,6 +1305,28 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b
return Panicsf(a.t, f, msg, args...)
}
// Positive asserts that the specified element is positive
//
// a.Positive(1)
// a.Positive(1.23)
func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Positive(a.t, e, msgAndArgs...)
}
// Positivef asserts that the specified element is positive
//
// a.Positivef(1, "error message %s", "formatted")
// a.Positivef(1.23, "error message %s", "formatted")
func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Positivef(a.t, e, msg, args...)
}
// Regexp asserts that a specified regexp matches a string.
//
// a.Regexp(regexp.MustCompile("start"), "it's starting")

View File

@ -0,0 +1,81 @@
package assert
import (
"fmt"
"reflect"
)
// isOrdered checks that collection contains orderable elements.
func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
objKind := reflect.TypeOf(object).Kind()
if objKind != reflect.Slice && objKind != reflect.Array {
return false
}
objValue := reflect.ValueOf(object)
objLen := objValue.Len()
if objLen <= 1 {
return true
}
value := objValue.Index(0)
valueInterface := value.Interface()
firstValueKind := value.Kind()
for i := 1; i < objLen; i++ {
prevValue := value
prevValueInterface := valueInterface
value = objValue.Index(i)
valueInterface = value.Interface()
compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
}
if !containsValue(allowedComparesResults, compareResult) {
return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...)
}
}
return true
}
// IsIncreasing asserts that the collection is increasing
//
// assert.IsIncreasing(t, []int{1, 2, 3})
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
}
// IsNonIncreasing asserts that the collection is not increasing
//
// assert.IsNonIncreasing(t, []int{2, 1, 1})
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
}
// IsDecreasing asserts that the collection is decreasing
//
// assert.IsDecreasing(t, []int{2, 1, 0})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
}
// IsNonDecreasing asserts that the collection is not decreasing
//
// assert.IsNonDecreasing(t, []int{1, 1, 2})
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
}

View File

@ -172,8 +172,8 @@ func isTest(name, prefix string) bool {
if len(name) == len(prefix) { // "Test" is ok
return true
}
rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
return !unicode.IsLower(rune)
r, _ := utf8.DecodeRuneInString(name[len(prefix):])
return !unicode.IsLower(r)
}
func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
@ -1622,6 +1622,7 @@ var spewConfig = spew.ConfigState{
DisableCapacities: true,
SortKeys: true,
DisableMethods: true,
MaxDepth: 10,
}
type tHelper interface {
@ -1693,3 +1694,81 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
}
}
}
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if errors.Is(err, target) {
return true
}
var expectedText string
if target != nil {
expectedText = target.Error()
}
chain := buildErrorChainString(err)
return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
"expected: %q\n"+
"in chain: %s", expectedText, chain,
), msgAndArgs...)
}
// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if !errors.Is(err, target) {
return true
}
var expectedText string
if target != nil {
expectedText = target.Error()
}
chain := buildErrorChainString(err)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
"found: %q\n"+
"in chain: %s", expectedText, chain,
), msgAndArgs...)
}
// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if errors.As(err, target) {
return true
}
chain := buildErrorChainString(err)
return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
"expected: %q\n"+
"in chain: %s", target, chain,
), msgAndArgs...)
}
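// Illustrative sketch (not part of the vendored source): typical use from a
// test, assuming err comes from opening a missing file; note that ErrorAs
// expects a pointer to the target error type:
//
//	_, err := os.Open("missing.txt")
//	assert.ErrorIs(t, err, os.ErrNotExist)
//	var pathErr *os.PathError
//	assert.ErrorAs(t, err, &pathErr)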
func buildErrorChainString(err error) string {
if err == nil {
return ""
}
e := errors.Unwrap(err)
chain := fmt.Sprintf("%q", err.Error())
for e != nil {
chain += fmt.Sprintf("\n\t%q", e.Error())
e = errors.Unwrap(e)
}
return chain
}

View File

@ -1,17 +0,0 @@
language: go
go_import_path: go.opencensus.io
go:
- 1.11.x
env:
global:
GO111MODULE=on
before_script:
- make install-tools
script:
- make travis-ci
- go run internal/check/version.go # TODO move this to makefile

View File

@ -1,15 +1,12 @@
module go.opencensus.io
require (
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6
github.com/golang/protobuf v1.3.1
github.com/google/go-cmp v0.3.0
github.com/stretchr/testify v1.4.0
golang.org/x/net v0.0.0-20190620200207-3b0461eec859
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect
golang.org/x/text v0.3.2 // indirect
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect
google.golang.org/grpc v1.20.1
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
github.com/golang/protobuf v1.4.3
github.com/google/go-cmp v0.5.3
github.com/stretchr/testify v1.6.1
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
google.golang.org/grpc v1.33.2
)
go 1.13

View File

@ -1,25 +1,47 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@ -30,24 +52,25 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd h1:r7DufRZuZbWB7j439YfAzP8RPDa9unLkpwQKUYbIMPI=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -55,20 +78,39 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -49,6 +49,16 @@ type Attribute struct {
value interface{}
}
// Key returns the attribute's key
func (a *Attribute) Key() string {
return a.key
}
// Value returns the attribute's value
func (a *Attribute) Value() interface{} {
return a.value
}
// BoolAttribute returns a bool-valued attribute.
func BoolAttribute(key string, value bool) Attribute {
return Attribute{key: key, value: value}

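Illustrative only (not part of the diff): a minimal sketch of how a consumer of the vendored `go.opencensus.io/trace` package might use the new `Key`/`Value` accessors.

```go
package main

import (
	"fmt"

	"go.opencensus.io/trace"
)

func main() {
	// Build an attribute with one of the existing typed constructors, then
	// read it back through the accessor methods added in this update.
	attr := trace.StringAttribute("component", "agent")

	fmt.Println(attr.Key())   // component
	fmt.Println(attr.Value()) // agent
}
```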
View File

@ -48,8 +48,10 @@ func (i internalOnly) ReportActiveSpans(name string) []*SpanData {
var out []*SpanData
s.mu.Lock()
defer s.mu.Unlock()
for span := range s.active {
out = append(out, span.makeSpanData())
for activeSpan := range s.active {
if s, ok := activeSpan.(*span); ok {
out = append(out, s.makeSpanData())
}
}
return out
}
@ -185,7 +187,7 @@ func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency t
// bucketed by latency.
type spanStore struct {
mu sync.Mutex // protects everything below.
active map[*Span]struct{}
active map[SpanInterface]struct{}
errors map[int32]*bucket
latency []bucket
maxSpansPerErrorBucket int
@ -194,7 +196,7 @@ type spanStore struct {
// newSpanStore creates a span store.
func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore {
s := &spanStore{
active: make(map[*Span]struct{}),
active: make(map[SpanInterface]struct{}),
latency: make([]bucket, len(defaultLatencies)+1),
maxSpansPerErrorBucket: errorBucketSize,
}
@ -271,7 +273,7 @@ func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) {
}
// add adds a span to the active bucket of the spanStore.
func (s *spanStore) add(span *Span) {
func (s *spanStore) add(span SpanInterface) {
s.mu.Lock()
s.active[span] = struct{}{}
s.mu.Unlock()
@ -279,7 +281,7 @@ func (s *spanStore) add(span *Span) {
// finished removes a span from the active set, and adds a corresponding
// SpanData to a latency or error bucket.
func (s *spanStore) finished(span *Span, sd *SpanData) {
func (s *spanStore) finished(span SpanInterface, sd *SpanData) {
latency := sd.EndTime.Sub(sd.StartTime)
if latency < 0 {
latency = 0

View File

@ -28,12 +28,16 @@ import (
"go.opencensus.io/trace/tracestate"
)
type tracer struct{}
var _ Tracer = &tracer{}
// Span represents a span of a trace. It has an associated SpanContext, and
// stores data accumulated while the span is active.
//
// Ideally users should interact with Spans by calling the functions in this
// package that take a Context parameter.
type Span struct {
type span struct {
// data contains information recorded about the span.
//
// It will be non-nil if we are exporting the span or recording events for it.
@ -66,7 +70,7 @@ type Span struct {
// IsRecordingEvents returns true if events are being recorded for this span.
// Use this check to avoid computing expensive annotations when they will never
// be used.
func (s *Span) IsRecordingEvents() bool {
func (s *span) IsRecordingEvents() bool {
if s == nil {
return false
}
@ -109,13 +113,13 @@ type SpanContext struct {
type contextKey struct{}
// FromContext returns the Span stored in a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Span {
func (t *tracer) FromContext(ctx context.Context) *Span {
s, _ := ctx.Value(contextKey{}).(*Span)
return s
}
// NewContext returns a new context with the given Span attached.
func NewContext(parent context.Context, s *Span) context.Context {
func (t *tracer) NewContext(parent context.Context, s *Span) context.Context {
return context.WithValue(parent, contextKey{}, s)
}
@ -166,12 +170,14 @@ func WithSampler(sampler Sampler) StartOption {
//
// Returned context contains the newly created span. You can use it to
// propagate the returned span in process.
func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
var opts StartOptions
var parent SpanContext
if p := FromContext(ctx); p != nil {
p.addChild()
parent = p.spanContext
if p := t.FromContext(ctx); p != nil {
if ps, ok := p.internal.(*span); ok {
ps.addChild()
}
parent = p.SpanContext()
}
for _, op := range o {
op(&opts)
@ -180,7 +186,8 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont
ctx, end := startExecutionTracerTask(ctx, name)
span.executionTracerTaskEnd = end
return NewContext(ctx, span), span
extSpan := NewSpan(span)
return t.NewContext(ctx, extSpan), extSpan
}
// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
@ -190,7 +197,7 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont
//
// Returned context contains the newly created span. You can use it to
// propagate the returned span in process.
func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
var opts StartOptions
for _, op := range o {
op(&opts)
@ -198,19 +205,24 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont
span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts)
ctx, end := startExecutionTracerTask(ctx, name)
span.executionTracerTaskEnd = end
return NewContext(ctx, span), span
extSpan := NewSpan(span)
return t.NewContext(ctx, extSpan), extSpan
}
func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span {
span := &Span{}
span.spanContext = parent
func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span {
s := &span{}
s.spanContext = parent
cfg := config.Load().(*Config)
if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok {
// lazy initialization
gen.init()
}
if !hasParent {
span.spanContext.TraceID = cfg.IDGenerator.NewTraceID()
s.spanContext.TraceID = cfg.IDGenerator.NewTraceID()
}
span.spanContext.SpanID = cfg.IDGenerator.NewSpanID()
s.spanContext.SpanID = cfg.IDGenerator.NewSpanID()
sampler := cfg.DefaultSampler
if !hasParent || remoteParent || o.Sampler != nil {
@ -222,47 +234,47 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa
if o.Sampler != nil {
sampler = o.Sampler
}
span.spanContext.setIsSampled(sampler(SamplingParameters{
s.spanContext.setIsSampled(sampler(SamplingParameters{
ParentContext: parent,
TraceID: span.spanContext.TraceID,
SpanID: span.spanContext.SpanID,
TraceID: s.spanContext.TraceID,
SpanID: s.spanContext.SpanID,
Name: name,
HasRemoteParent: remoteParent}).Sample)
}
if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() {
return span
if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() {
return s
}
span.data = &SpanData{
SpanContext: span.spanContext,
s.data = &SpanData{
SpanContext: s.spanContext,
StartTime: time.Now(),
SpanKind: o.SpanKind,
Name: name,
HasRemoteParent: remoteParent,
}
span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan)
span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan)
span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan)
span.links = newEvictedQueue(cfg.MaxLinksPerSpan)
s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan)
s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan)
s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan)
s.links = newEvictedQueue(cfg.MaxLinksPerSpan)
if hasParent {
span.data.ParentSpanID = parent.SpanID
s.data.ParentSpanID = parent.SpanID
}
if internal.LocalSpanStoreEnabled {
var ss *spanStore
ss = spanStoreForNameCreateIfNew(name)
if ss != nil {
span.spanStore = ss
ss.add(span)
s.spanStore = ss
ss.add(s)
}
}
return span
return s
}
// End ends the span.
func (s *Span) End() {
func (s *span) End() {
if s == nil {
return
}
@ -292,7 +304,7 @@ func (s *Span) End() {
// makeSpanData produces a SpanData representing the current state of the Span.
// It requires that s.data is non-nil.
func (s *Span) makeSpanData() *SpanData {
func (s *span) makeSpanData() *SpanData {
var sd SpanData
s.mu.Lock()
sd = *s.data
@ -317,7 +329,7 @@ func (s *Span) makeSpanData() *SpanData {
}
// SpanContext returns the SpanContext of the span.
func (s *Span) SpanContext() SpanContext {
func (s *span) SpanContext() SpanContext {
if s == nil {
return SpanContext{}
}
@ -325,7 +337,7 @@ func (s *Span) SpanContext() SpanContext {
}
// SetName sets the name of the span, if it is recording events.
func (s *Span) SetName(name string) {
func (s *span) SetName(name string) {
if !s.IsRecordingEvents() {
return
}
@ -335,7 +347,7 @@ func (s *Span) SetName(name string) {
}
// SetStatus sets the status of the span, if it is recording events.
func (s *Span) SetStatus(status Status) {
func (s *span) SetStatus(status Status) {
if !s.IsRecordingEvents() {
return
}
@ -344,7 +356,7 @@ func (s *Span) SetStatus(status Status) {
s.mu.Unlock()
}
func (s *Span) interfaceArrayToLinksArray() []Link {
func (s *span) interfaceArrayToLinksArray() []Link {
linksArr := make([]Link, 0, len(s.links.queue))
for _, value := range s.links.queue {
linksArr = append(linksArr, value.(Link))
@ -352,7 +364,7 @@ func (s *Span) interfaceArrayToLinksArray() []Link {
return linksArr
}
func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent {
func (s *span) interfaceArrayToMessageEventArray() []MessageEvent {
messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue))
for _, value := range s.messageEvents.queue {
messageEventArr = append(messageEventArr, value.(MessageEvent))
@ -360,7 +372,7 @@ func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent {
return messageEventArr
}
func (s *Span) interfaceArrayToAnnotationArray() []Annotation {
func (s *span) interfaceArrayToAnnotationArray() []Annotation {
annotationArr := make([]Annotation, 0, len(s.annotations.queue))
for _, value := range s.annotations.queue {
annotationArr = append(annotationArr, value.(Annotation))
@ -368,7 +380,7 @@ func (s *Span) interfaceArrayToAnnotationArray() []Annotation {
return annotationArr
}
func (s *Span) lruAttributesToAttributeMap() map[string]interface{} {
func (s *span) lruAttributesToAttributeMap() map[string]interface{} {
attributes := make(map[string]interface{}, s.lruAttributes.len())
for _, key := range s.lruAttributes.keys() {
value, ok := s.lruAttributes.get(key)
@ -380,13 +392,13 @@ func (s *Span) lruAttributesToAttributeMap() map[string]interface{} {
return attributes
}
func (s *Span) copyToCappedAttributes(attributes []Attribute) {
func (s *span) copyToCappedAttributes(attributes []Attribute) {
for _, a := range attributes {
s.lruAttributes.add(a.key, a.value)
}
}
func (s *Span) addChild() {
func (s *span) addChild() {
if !s.IsRecordingEvents() {
return
}
@ -398,7 +410,7 @@ func (s *Span) addChild() {
// AddAttributes sets attributes in the span.
//
// Existing attributes whose keys appear in the attributes parameter are overwritten.
func (s *Span) AddAttributes(attributes ...Attribute) {
func (s *span) AddAttributes(attributes ...Attribute) {
if !s.IsRecordingEvents() {
return
}
@ -407,49 +419,27 @@ func (s *Span) AddAttributes(attributes ...Attribute) {
s.mu.Unlock()
}
// copyAttributes copies a slice of Attributes into a map.
func copyAttributes(m map[string]interface{}, attributes []Attribute) {
for _, a := range attributes {
m[a.key] = a.value
}
}
func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) {
func (s *span) printStringInternal(attributes []Attribute, str string) {
now := time.Now()
msg := fmt.Sprintf(format, a...)
var m map[string]interface{}
s.mu.Lock()
var am map[string]interface{}
if len(attributes) != 0 {
m = make(map[string]interface{}, len(attributes))
copyAttributes(m, attributes)
am = make(map[string]interface{}, len(attributes))
for _, attr := range attributes {
am[attr.key] = attr.value
}
}
s.annotations.add(Annotation{
Time: now,
Message: msg,
Attributes: m,
})
s.mu.Unlock()
}
func (s *Span) printStringInternal(attributes []Attribute, str string) {
now := time.Now()
var a map[string]interface{}
s.mu.Lock()
if len(attributes) != 0 {
a = make(map[string]interface{}, len(attributes))
copyAttributes(a, attributes)
}
s.annotations.add(Annotation{
Time: now,
Message: str,
Attributes: a,
Attributes: am,
})
s.mu.Unlock()
}
// Annotate adds an annotation with attributes.
// Attributes can be nil.
func (s *Span) Annotate(attributes []Attribute, str string) {
func (s *span) Annotate(attributes []Attribute, str string) {
if !s.IsRecordingEvents() {
return
}
@ -457,11 +447,11 @@ func (s *Span) Annotate(attributes []Attribute, str string) {
}
// Annotatef adds an annotation with attributes.
func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
if !s.IsRecordingEvents() {
return
}
s.lazyPrintfInternal(attributes, format, a...)
s.printStringInternal(attributes, fmt.Sprintf(format, a...))
}
// AddMessageSendEvent adds a message send event to the span.
@ -470,7 +460,7 @@ func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}
// unique in this span and the same between the send event and the receive
// event (this allows to identify a message between the sender and receiver).
// For example, this could be a sequence id.
func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
if !s.IsRecordingEvents() {
return
}
@ -492,7 +482,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy
// unique in this span and the same between the send event and the receive
// event (this allows to identify a message between the sender and receiver).
// For example, this could be a sequence id.
func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
if !s.IsRecordingEvents() {
return
}
@ -509,7 +499,7 @@ func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compresse
}
// AddLink adds a link to the span.
func (s *Span) AddLink(l Link) {
func (s *span) AddLink(l Link) {
if !s.IsRecordingEvents() {
return
}
@ -518,7 +508,7 @@ func (s *Span) AddLink(l Link) {
s.mu.Unlock()
}
func (s *Span) String() string {
func (s *span) String() string {
if s == nil {
return "<nil>"
}
@ -534,20 +524,9 @@ func (s *Span) String() string {
var config atomic.Value // access atomically
func init() {
gen := &defaultIDGenerator{}
// initialize traceID and spanID generators.
var rngSeed int64
for _, p := range []interface{}{
&rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc,
} {
binary.Read(crand.Reader, binary.LittleEndian, p)
}
gen.traceIDRand = rand.New(rand.NewSource(rngSeed))
gen.spanIDInc |= 1
config.Store(&Config{
DefaultSampler: ProbabilitySampler(defaultSamplingProbability),
IDGenerator: gen,
IDGenerator: &defaultIDGenerator{},
MaxAttributesPerSpan: DefaultMaxAttributesPerSpan,
MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan,
MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan,
@ -571,6 +550,24 @@ type defaultIDGenerator struct {
traceIDAdd [2]uint64
traceIDRand *rand.Rand
initOnce sync.Once
}
// init initializes the generator on the first call to avoid consuming entropy
// unnecessarily.
func (gen *defaultIDGenerator) init() {
gen.initOnce.Do(func() {
// initialize traceID and spanID generators.
var rngSeed int64
for _, p := range []interface{}{
&rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc,
} {
binary.Read(crand.Reader, binary.LittleEndian, p)
}
gen.traceIDRand = rand.New(rand.NewSource(rngSeed))
gen.spanIDInc |= 1
})
}
// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.

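For context, a standalone sketch of the lazy-initialization idiom the ID generator now follows; this is illustrative only, not the vendored code, and uses an arbitrary `lazyRand` type.

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

// lazyRand seeds its random source at most once, on first use, guarded by
// sync.Once, so no entropy is consumed if the generator is never called.
type lazyRand struct {
	once sync.Once
	rng  *rand.Rand
}

func (l *lazyRand) Int63() int64 {
	l.once.Do(func() {
		l.rng = rand.New(rand.NewSource(time.Now().UnixNano()))
	})
	return l.rng.Int63()
}

func main() {
	var l lazyRand
	fmt.Println(l.Int63())
}
```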
src/runtime/vendor/go.opencensus.io/trace/trace_api.go (new file, 265 lines, generated, vendored)
View File

@ -0,0 +1,265 @@
// Copyright 2020, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"context"
)
// DefaultTracer is the tracer used when package-level exported functions are invoked.
var DefaultTracer Tracer = &tracer{}
// Tracer can start spans and access context functions.
type Tracer interface {
// StartSpan starts a new child span of the current span in the context. If
// there is no span in the context, creates a new trace and span.
//
// Returned context contains the newly created span. You can use it to
// propagate the returned span in process.
StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span)
// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
//
// If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is
// preferred for cases where the parent is propagated via an incoming request.
//
// Returned context contains the newly created span. You can use it to
// propagate the returned span in process.
StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span)
// FromContext returns the Span stored in a context, or nil if there isn't one.
FromContext(ctx context.Context) *Span
// NewContext returns a new context with the given Span attached.
NewContext(parent context.Context, s *Span) context.Context
}
// StartSpan starts a new child span of the current span in the context. If
// there is no span in the context, creates a new trace and span.
//
// Returned context contains the newly created span. You can use it to
// propagate the returned span in process.
func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
return DefaultTracer.StartSpan(ctx, name, o...)
}
// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
//
// If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is
// preferred for cases where the parent is propagated via an incoming request.
//
// Returned context contains the newly created span. You can use it to
// propagate the returned span in process.
func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...)
}
// FromContext returns the Span stored in a context, or a Span that is not
// recording events if there isn't one.
func FromContext(ctx context.Context) *Span {
return DefaultTracer.FromContext(ctx)
}
// NewContext returns a new context with the given Span attached.
func NewContext(parent context.Context, s *Span) context.Context {
return DefaultTracer.NewContext(parent, s)
}
// SpanInterface represents a span of a trace. It has an associated SpanContext, and
// stores data accumulated while the span is active.
//
// Ideally users should interact with Spans by calling the functions in this
// package that take a Context parameter.
type SpanInterface interface {
// IsRecordingEvents returns true if events are being recorded for this span.
// Use this check to avoid computing expensive annotations when they will never
// be used.
IsRecordingEvents() bool
// End ends the span.
End()
// SpanContext returns the SpanContext of the span.
SpanContext() SpanContext
// SetName sets the name of the span, if it is recording events.
SetName(name string)
// SetStatus sets the status of the span, if it is recording events.
SetStatus(status Status)
// AddAttributes sets attributes in the span.
//
// Existing attributes whose keys appear in the attributes parameter are overwritten.
AddAttributes(attributes ...Attribute)
// Annotate adds an annotation with attributes.
// Attributes can be nil.
Annotate(attributes []Attribute, str string)
// Annotatef adds an annotation with attributes.
Annotatef(attributes []Attribute, format string, a ...interface{})
// AddMessageSendEvent adds a message send event to the span.
//
// messageID is an identifier for the message, which is recommended to be
// unique in this span and the same between the send event and the receive
// event (this allows to identify a message between the sender and receiver).
// For example, this could be a sequence id.
AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64)
// AddMessageReceiveEvent adds a message receive event to the span.
//
// messageID is an identifier for the message, which is recommended to be
// unique in this span and the same between the send event and the receive
// event (this allows to identify a message between the sender and receiver).
// For example, this could be a sequence id.
AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64)
// AddLink adds a link to the span.
AddLink(l Link)
// String returns a string representation of a span.
String() string
}
// NewSpan is a convenience function for creating a *Span out of a SpanInterface implementation
func NewSpan(s SpanInterface) *Span {
return &Span{internal: s}
}
// Span is a struct wrapper around the SpanInterface, which allows correctly handling
// nil spans, while also allowing the SpanInterface implementation to be swapped out.
type Span struct {
internal SpanInterface
}
// Internal returns the underlying implementation of the Span
func (s *Span) Internal() SpanInterface {
return s.internal
}
// IsRecordingEvents returns true if events are being recorded for this span.
// Use this check to avoid computing expensive annotations when they will never
// be used.
func (s *Span) IsRecordingEvents() bool {
if s == nil {
return false
}
return s.internal.IsRecordingEvents()
}
// End ends the span.
func (s *Span) End() {
if s == nil {
return
}
s.internal.End()
}
// SpanContext returns the SpanContext of the span.
func (s *Span) SpanContext() SpanContext {
if s == nil {
return SpanContext{}
}
return s.internal.SpanContext()
}
// SetName sets the name of the span, if it is recording events.
func (s *Span) SetName(name string) {
if !s.IsRecordingEvents() {
return
}
s.internal.SetName(name)
}
// SetStatus sets the status of the span, if it is recording events.
func (s *Span) SetStatus(status Status) {
if !s.IsRecordingEvents() {
return
}
s.internal.SetStatus(status)
}
// AddAttributes sets attributes in the span.
//
// Existing attributes whose keys appear in the attributes parameter are overwritten.
func (s *Span) AddAttributes(attributes ...Attribute) {
if !s.IsRecordingEvents() {
return
}
s.internal.AddAttributes(attributes...)
}
// Annotate adds an annotation with attributes.
// Attributes can be nil.
func (s *Span) Annotate(attributes []Attribute, str string) {
if !s.IsRecordingEvents() {
return
}
s.internal.Annotate(attributes, str)
}
// Annotatef adds an annotation with attributes.
func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
if !s.IsRecordingEvents() {
return
}
s.internal.Annotatef(attributes, format, a...)
}
// AddMessageSendEvent adds a message send event to the span.
//
// messageID is an identifier for the message, which is recommended to be
// unique in this span and the same between the send event and the receive
// event (this allows to identify a message between the sender and receiver).
// For example, this could be a sequence id.
func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
if !s.IsRecordingEvents() {
return
}
s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize)
}
// AddMessageReceiveEvent adds a message receive event to the span.
//
// messageID is an identifier for the message, which is recommended to be
// unique in this span and the same between the send event and the receive
// event (this allows to identify a message between the sender and receiver).
// For example, this could be a sequence id.
func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
if !s.IsRecordingEvents() {
return
}
s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize)
}
// AddLink adds a link to the span.
func (s *Span) AddLink(l Link) {
if !s.IsRecordingEvents() {
return
}
s.internal.AddLink(l)
}
// String returns a string representation of a span.
func (s *Span) String() string {
if s == nil {
return "<nil>"
}
return s.internal.String()
}

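Illustrative only (not part of the diff): a sketch of how application code keeps using the package-level API while the new `Span` wrapper is in place; the span name, sampler choice, and attribute values are arbitrary.

```go
package main

import (
	"context"
	"fmt"

	"go.opencensus.io/trace"
)

func main() {
	// StartSpan works as before; it now routes through DefaultTracer and
	// returns the *Span wrapper type defined in trace_api.go.
	ctx, span := trace.StartSpan(context.Background(), "example/work",
		trace.WithSampler(trace.AlwaysSample()))
	defer span.End()

	span.AddAttributes(trace.BoolAttribute("cached", false))
	span.Annotate(nil, "work started")

	// The concrete SpanInterface implementation can be inspected via Internal().
	fmt.Printf("recording=%v internal=%T\n", span.IsRecordingEvents(), span.Internal())
	_ = ctx
}
```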
View File

@ -0,0 +1,3 @@
* text=auto eol=lf
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf

View File

@ -10,14 +10,12 @@ coverage.*
gen/
/example/basic/basic
/example/grpc/client/client
/example/grpc/server/server
/example/http/client/client
/example/http/server/server
/example/fib/fib
/example/jaeger/jaeger
/example/namedtracer/namedtracer
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/prom-collector/prom-collector
/example/zipkin/zipkin
/example/otel-collector/otel-collector

View File

@ -7,7 +7,7 @@ linters:
enable:
- misspell
- goimports
- golint
- revive
- gofmt
issues:
@ -16,12 +16,12 @@ issues:
- path: _test\.go
text: "context.Context should be the first parameter of a function"
linters:
- golint
- revive
# Yes, they are, but it's okay in a test
- path: _test\.go
text: "exported func.*returns unexported type.*which can be annoying to use"
linters:
- golint
- revive
linters-settings:
misspell:

View File

@ -0,0 +1,16 @@
{
"ignorePatterns": [
{
"pattern": "^http(s)?://localhost"
}
],
"replacementPatterns": [
{
"pattern": "^/registry",
"replacement": "https://opentelemetry.io/registry"
}
],
"retryOn429": true,
"retryCount": 5,
"fallbackRetryDelay": "30s"
}

View File

@ -0,0 +1,29 @@
# Default state for all rules
default: true
# ul-style
MD004: false
# hard-tabs
MD010: false
# line-length
MD013: false
# no-duplicate-header
MD024:
siblings_only: true
#single-title
MD025: false
# ol-prefix
MD029:
style: ordered
# no-inline-html
MD033: false
# fenced-code-language
MD040: false

View File

@ -8,6 +8,569 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
### Changed
- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237)
## [1.0.0] - 2021-09-20
This is the first stable release for the project.
This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md).
### Added
- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer was created with the `WithSchemaURL` option. (#2242)
### Fixed
- Slice-valued attributes can correctly be used as map keys. (#2223)
### Removed
- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package.
Use the typed functions and methods added to the package instead. (#2235)
- The `Key.Array` method is removed.
- The `Array` function is removed.
- The `Any` function is removed.
- The `ArrayValue` function is removed.
- The `AsArray` function is removed.
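Illustrative only (not part of the upstream changelog): a short sketch of the typed constructors that replace the removed `Array`/`Any` helpers, and of the slice-valued attribute map-key fix noted under Fixed (#2223), assuming `go.opentelemetry.io/otel/attribute` at v1.0.0.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// With Array and Any removed, attributes are built with the typed helpers.
	attrs := []attribute.KeyValue{
		attribute.String("service.name", "demo"),
		attribute.Int("retries", 3),
		attribute.StringSlice("features", []string{"tracing", "vsock"}),
	}

	// Per the fix above, slice-valued attributes compare correctly and can be
	// used as map keys.
	seen := map[attribute.KeyValue]bool{}
	for _, kv := range attrs {
		seen[kv] = true
	}
	fmt.Println(len(seen)) // 3
}
```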
## [1.0.0-RC3] - 2021-09-02
### Added
- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163) Both additions are sketched in the example after this list.
- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
- `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
- Added the `go.opentelemetry.io/otel/example/fib` example package.
Included is an example application that computes Fibonacci numbers. (#2203)
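As referenced above, a brief, hedged sketch of the `ErrorHandlerFunc` and `trace.WithStackTrace` additions; it assumes `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/trace` at v1.0.0 and uses the default (no-op) global tracer.

```go
package main

import (
	"context"
	"errors"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// ErrorHandlerFunc adapts a plain function into an otel.ErrorHandler.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel error: %v", err)
	}))

	// WithStackTrace attaches a stack trace when recording an error on a span.
	_, span := otel.Tracer("example").Start(context.Background(), "op")
	defer span.End()
	span.RecordError(errors.New("boom"), trace.WithStackTrace(true))
}
```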
### Changed
- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
- ValueRecorder becomes Histogram
- ValueObserver becomes Gauge
- SumObserver becomes CounterObserver
- UpDownSumObserver becomes UpDownCounterObserver
The API exported from this project is still considered experimental. (#2202)
- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
- The Metrics SDK export record no longer contains a Resource pointer; the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, and pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter is now harmonized such that the output is "plain" JSON objects one after another, of the form `{ ... } { ... } { ... }`. Earlier, the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
### Deprecated
- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
The functions from that package should be used instead. (#2166)
- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type are deprecated.
Use the typed `*Slice` functions and types added to the package instead. (#2162)
- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
Use the typed functions instead. (#2181)
- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
### Removed
- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
### Fixed
- The `fromEnv` detector no longer returns an error when the `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
- Fixed typos in resources.go. (#2201)
## [1.0.0-RC2] - 2021-07-26
### Added
- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
- Added the `LinkFromContext` API, which returns a `Link` encapsulating the `SpanContext` from the provided context together with the provided attributes. (#2115)
- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
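A hedged sketch of the `SpanRecorder` usage described in the previous item, assuming `go.opentelemetry.io/otel/sdk` at v1.0.0; the tracer and span names are arbitrary.

```go
package main

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func main() {
	// Register the SpanRecorder as a SpanProcessor on a real TracerProvider,
	// then inspect the spans it captured.
	sr := tracetest.NewSpanRecorder()
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))

	_, span := tp.Tracer("test").Start(context.Background(), "operation")
	span.End()

	for _, s := range sr.Ended() {
		fmt.Println(s.Name()) // operation
	}
}
```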
### Changed
- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
### Deprecated
- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
Use the `trace.ParseTraceState` function instead. (#2122)
### Removed
- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
The explicit `With*` options for every built-in detector should be used instead. (#2026 #2097)
- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)
### Fixed
- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102)
- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
## [Experimental Metrics v0.22.0] - 2021-07-19
### Added
- Adds HTTP support for OTLP metrics exporter. (#2022)
### Removed
- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
## [1.0.0-RC1] / 0.21.0 - 2021-06-18
With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
with major version 0.
### Added
- Adds `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
- The following status codes are defined as transient errors:
| gRPC Status Code | Description |
| ---------------- | ----------- |
| 1 | Cancelled |
| 4 | Deadline Exceeded |
| 8 | Resource Exhausted |
| 10 | Aborted |
| 11 | Out of Range |
| 14 | Unavailable |
| 15 | Data Loss |
- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
This method returns the number of list-members the `TraceState` holds. (#1937)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data.
Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1963)
- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
- Several builtin resource detectors now correctly populate the schema URL. (#1938)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data.
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
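A small, hedged sketch of the `TracerProvider()` method described in the previous item; the `continueWork` helper and tracer name are illustrative, not from the changelog.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

// continueWork starts a follow-up span through the same trace pipeline that
// produced the span already stored in the context, using Span.TracerProvider()
// instead of the global provider.
func continueWork(ctx context.Context) {
	parent := trace.SpanFromContext(ctx)
	tracer := parent.TracerProvider().Tracer("example/library")

	_, span := tracer.Start(ctx, "follow-up")
	defer span.End()
}

func main() {
	continueWork(context.Background())
}
```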
### Changed
- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
`NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
- BatchSpanProcessor now report export failures when calling `ForceFlush()` method. (#1860)
- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
This method returns the status of a span using the new `Status` type. (#1874)
- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
- Unembed `SpanContext` in `Link`. (#1877)
- Generate Semantic conventions from the specification YAML. (#1891)
- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `span.End()` now only accepts Options that are allowed at `End()`. (#1921)
- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Refactored option types according to the contribution style guide. (#1882)
- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
### Deprecated
- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
### Removed
- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
- Unexported the types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options to include them individually. (#1810)
- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`.
The `Tracer` method is not required to be included in this interface, and given the mutable nature of the tracer associated with a span, this method is not appropriate.
The `IsRecording` method returns if the span is recording or not.
A read-only span value does not need to know if updates to it will be recorded or not.
By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of its own.
The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900)
- The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
### Fixed
- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973)
- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)
### Security
## [0.20.0] - 2021-04-23
### Added
- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, which set up and install the exporter in tracing and metrics pipelines. (#1373)
- Adds semantic conventions for exceptions. (#1492)
- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`
These environment variables can be used to override Jaeger agent hostname and port. (#1752)
- Option `ExportTimeout` was added to batch span processor. (#1755)
- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788)
- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
- `process.pid`
- `process.executable.name`
- `process.executable.path`
- `process.command_args`
- `process.owner`
- `process.runtime.name`
- `process.runtime.version`
- `process.runtime.description`
- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811)
- `OTEL_EXPORTER_OTLP_ENDPOINT`
- `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
- `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
- `OTEL_EXPORTER_OTLP_HEADERS`
- `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
- `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
- `OTEL_EXPORTER_OTLP_COMPRESSION`
- `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
- `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
- `OTEL_EXPORTER_OTLP_TIMEOUT`
- `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
- `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
- `OTEL_EXPORTER_OTLP_CERTIFICATE`
- `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
- `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821)
- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
### Fixed
- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
- The Jaeger exporter now correctly sets tags for the Span status code and message.
This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag.
Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
- Fixed typo for default service name in Jaeger Exporter. (#1797)
- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814)
- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
### Changed
- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
The Span's SpanContext can now self-identify as being remote or not.
This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
- Improve OTLP/gRPC exporter connection errors. (#1737)
- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777)
- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796)
- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately (see the sketch after this list). (#1824)
- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
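A hedged sketch of the reworked Jaeger setup (assuming the v0.20.0 `exporters/trace/jaeger` and `sdk/trace` APIs, with the documented default collector endpoint):
```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/trace/jaeger"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// The endpoint is now supplied via a CollectorEndpointOption (or the
	// OTEL_EXPORTER_JAEGER_ENDPOINT environment variable), not as an argument.
	exp, err := jaeger.NewRawExporter(
		jaeger.WithCollectorEndpoint(jaeger.WithEndpoint("http://localhost:14250")),
	)
	if err != nil {
		panic(err)
	}
	// The exporter no longer batches spans itself; register it behind the
	// SDK's BatchSpanProcessor instead.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(context.Background()) }()
}
```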
### Removed
- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`
These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
- Setting error status while recording error with Span from oteltest package. (#1729)
- The concept of a remote and local Span stored in a context is unified to just the current Span.
Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span.
If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package.
The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
- Remove the `WithDisabled` option from the Jaeger exporter.
To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
These functions for retrieving specific environment variable values are redundant of other internal functions and
are not intended for end user use. (#1824)
- Removed the Jaeger exporter `WithSDKOptions` `Option`.
This option was used to set SDK options for the exporter creation convenience functions.
These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases.
If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
- The Jaeger exporter `Option` type is removed.
The type is no longer used by the exporter to configure anything.
All the previous configurations these options provided were duplicates of SDK configuration.
They have been removed in favor of using the SDK configuration, which focuses the exporter configuration on just the endpoints it will send telemetry to. (#1830)
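For the removed `RemoteSpanContextFromContext`, a minimal sketch of the replacement pattern (assuming the v0.20.0 `trace` API):
```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

// remoteParent reports whether the SpanContext stored in ctx came from a
// remote parent, using the unified current-Span storage.
func remoteParent(ctx context.Context) bool {
	sc := trace.SpanContextFromContext(ctx)
	return sc.IsValid() && sc.IsRemote()
}
```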
## [0.19.0] - 2021-03-18
### Added
- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586)
- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702)
- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
### Changed
- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
- `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known (see the sketch after this list).
- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656)
- Added non-empty string check for trace `Attribute` keys. (#1659)
- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
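A minimal sketch of the `trace.NewSpanContext` usage mentioned above, assuming the IDs have been decoded from some incoming request:
```go
package example

import "go.opentelemetry.io/otel/trace"

// newRemoteSpanContext builds an immutable SpanContext from known IDs and
// marks it as remote.
func newRemoteSpanContext(tid trace.TraceID, sid trace.SpanID) trace.SpanContext {
	return trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    tid,
		SpanID:     sid,
		TraceFlags: trace.FlagsSampled,
		Remote:     true,
	})
}
```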
### Removed
- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549)
- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
- Removed `jaeger.WithProcess` configuration option. (#1673)
- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
### Fixed
- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626)
- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
- Synchronization issues in global trace delegate implementation. (#1686)
- Reduced excess memory usage by global `TracerProvider`. (#1687)
## [0.18.0] - 2021-03-03
### Added
- Added `resource.Default()` for use with meter and tracer providers. (#1507)
- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface (see the sketch at the end of this section). (#1544)
- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
- Compatibility testing suite in the CI system for the following systems. (#1567)
| OS | Go Version | Architecture |
| ------- | ---------- | ------------ |
| Ubuntu | 1.15 | amd64 |
| Ubuntu | 1.14 | amd64 |
| Ubuntu | 1.15 | 386 |
| Ubuntu | 1.14 | 386 |
| MacOS | 1.15 | amd64 |
| MacOS | 1.14 | amd64 |
| Windows | 1.15 | amd64 |
| Windows | 1.14 | amd64 |
| Windows | 1.15 | 386 |
| Windows | 1.14 | 386 |
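A short sketch of adapting `http.Header` with `HeaderCarrier` (assuming the v0.18.0 `propagation` API; the request wiring is only illustrative):
```go
package example

import (
	"net/http"

	"go.opentelemetry.io/otel/propagation"
)

// injectTraceContext writes W3C trace context headers onto an outgoing
// request; HeaderCarrier adapts http.Header to the TextMapCarrier interface
// and now also exposes the carried keys via Keys().
func injectTraceContext(req *http.Request) {
	carrier := propagation.HeaderCarrier(req.Header)
	propagation.TraceContext{}.Inject(req.Context(), carrier)
	_ = carrier.Keys() // names of headers present on the carrier
}
```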
### Changed
- Replaced interface `oteltest.SpanRecorder` with its existing implementation
`StandardSpanRecorder`. (#1542)
- Default span limit values to 128. (#1535)
- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
- Renamed the `otel/label` package to `otel/attribute`. (#1541)
- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
- Parallelize the CI linting and testing. (#1567)
- Stagger timestamps in exact aggregator tests. (#1569)
- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
- Prevent end-users from implementing some interfaces (#1575)
```
"otel/exporters/otlp/otlphttp".Option
"otel/exporters/stdout".Option
"otel/oteltest".Option
"otel/trace".TracerOption
"otel/trace".SpanOption
"otel/trace".EventOption
"otel/trace".LifeCycleOption
"otel/trace".InstrumentationOption
"otel/sdk/resource".Option
"otel/sdk/trace".ParentBasedSamplerOption
"otel/sdk/trace".ReadOnlySpan
"otel/sdk/trace".ReadWriteSpan
```
### Removed
- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
- Removed the `test-386` make target.
This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
### Fixed
- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572)
- Windows build of Jaeger tests now compiles with OS specific functions (#1576). (#1577)
- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue is now set up explicitly to be sequential (#1578). (#1579)
- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
## [0.17.0] - 2021-02-12
### Changed
- Rename project default branch from `master` to `main`. (#1505)
- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
### Fixed
- Fixed otlpgrpc reconnection issue.
- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and used the new `WithAddress` instead of `WithEndpoint`. (#1513)
- The otel-collector example now uses the default OTLP receiver port of the collector.
## [0.16.0] - 2021-01-13
### Added
- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
- Added documentation about the project's versioning policy. (#1388)
- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
- Added CodeQL workflow to GitHub Actions (#1428)
- Added Gosec workflow to GitHub Actions (#1429)
- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420)
- Add an OpenCensus exporter bridge. (#1444)
### Changed
- Rename `internal/testing` to `internal/internaltest`. (#1449)
- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
- Improve span duration accuracy. (#1360)
- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
- Remove duplicate checkout from GitHub Actions workflow (#1407)
- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412)
- Metric `exact` aggregator includes per-point timestamps (#1412)
- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
- Unify endpoint API that related to OTel exporter. (#1401)
- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432)
- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
- Metric Push and Pull Controller components are combined into a single "basic" Controller:
- `WithExporter()` and `Start()` to configure Push behavior
- `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
- `Start()` and `Stop()` accept Context. (#1378)
- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
### Removed
- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412)
- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)
### Fixed
- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443)
## [0.15.0] - 2020-12-10
### Added
@ -23,7 +586,6 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374)
- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375)
### Fixed
- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
@ -35,6 +597,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
- `SpanContextFromContext` returns `SpanContext` from context. (#1255)
- `TraceState` has been added to `SpanContext`. (#1340)
- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
@ -80,6 +643,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
`Tracer` and `Span` from the same module should be used in their place instead. (#1306)
- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
### Fixed
@ -508,7 +1072,7 @@ This release implements the v0.5.0 version of the OpenTelemetry specification.
- Rename `Observer` instrument to `ValueObserver`. (#734)
- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. 727)
- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
### Fixed
@ -611,7 +1175,6 @@ This release implements the v0.5.0 version of the OpenTelemetry specification.
- Create a new recorder rather than reusing one when there are multiple observations in the same epoch for asynchronous instruments. (#610)
- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
## [0.4.2] - 2020-03-31
### Fixed
@ -671,7 +1234,7 @@ There is still a possibility of breaking changes.
- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
- The zipkin trace exporter. (#495)
- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
- The `StatusMessage` field was add to the trace `Span`. (#524)
- Add `StatusMessage` field to the trace `Span`. (#524)
- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
- The `Resource` type was added to the SDK. (#528)
- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
@ -764,14 +1327,12 @@ There is still a possibility of breaking changes.
- `AlwaysParentSample` sampler to the trace API. (#455)
- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
### Changed
- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
- Move correlation context propagation to correlation package. (#479)
- Do not default to putting remote span context into links. (#480)
- Propagators extrac
- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
- Renamed the `export` package to `metric` to match directory structure. (#432)
@ -919,7 +1480,6 @@ There is still a possibility of breaking changes.
This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
This was corrected. (#333)
## [0.1.2] - 2019-11-18
### Fixed
@ -979,8 +1539,17 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v0.15.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.0.0...HEAD
[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0
[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3
[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0
[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1
[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0
[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0
[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0
[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0
[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0
[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0
[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0
[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0

View File

@ -5,13 +5,13 @@
#####################################################
#
# Learn about membership in OpenTelemetry community:
# https://github.com/open-telemetry/community/blob/master/community-membership.md
# https://github.com/open-telemetry/community/blob/main/community-membership.md
#
#
# Learn about CODEOWNERS file format:
# https://help.github.com/en/articles/about-code-owners
#
* @jmacd @lizthegrey @MrAlias @Aneurysm9 @evantorrie @XSAM
* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo @MadVikingGod @pellared
CODEOWNERS @MrAlias @Aneurysm9

View File

@ -9,13 +9,13 @@ See the [public meeting
notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b)
for a summary description of past meetings. To request edit access,
join the meeting or get in touch on
[Gitter](https://gitter.im/open-telemetry/opentelemetry-go).
[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
## Development
You can view and edit the source code by cloning this repository:
```bash
```sh
git clone https://github.com/open-telemetry/opentelemetry-go.git
```
@ -43,7 +43,7 @@ To create a new PR, fork the project in GitHub and clone the upstream
repo:
```sh
$ go get -d go.opentelemetry.io/otel
go get -d go.opentelemetry.io/otel
```
(This may print some warning about "build constraints exclude all Go
@ -53,7 +53,7 @@ This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
can alternatively use `git` directly with:
```sh
$ git clone https://github.com/open-telemetry/opentelemetry-go
git clone https://github.com/open-telemetry/opentelemetry-go
```
(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
@ -66,20 +66,20 @@ current working directory.
Enter the newly created directory and add your fork as a new remote:
```sh
$ git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
```
Check out a new branch, make modifications, run linters and tests, update
`CHANGELOG.md`, and push the branch to your fork:
```sh
$ git checkout -b <YOUR_BRANCH_NAME>
git checkout -b <YOUR_BRANCH_NAME>
# edit files
# update changelog
$ make precommit
$ git add -p
$ git commit
$ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
make precommit
git add -p
git commit
git push <YOUR_FORK> <YOUR_BRANCH_NAME>
```
Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
@ -100,13 +100,20 @@ A PR is considered to be **ready to merge** when:
different companies). This is not enforced through technical means
and a PR may be **ready to merge** with a single approval if the change
and its approach have been discussed and consensus reached.
* Major feedbacks are resolved.
* Feedback has been addressed.
* Any substantive changes to your PR will require that you clear any prior
approval reviews; this includes changes resulting from other feedback. Unless
the approver explicitly stated that their approval will persist across
changes, it should be assumed that the PR needs their review again. Other
project members (e.g. approvers, maintainers) can help with this if there are
any questions or if you forget to clear reviews.
* It has been open for review for at least one working day. This gives
people reasonable time to review.
* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for
one day and may be merged with a single Maintainer's approval.
* `CHANGELOG.md` has been updated to reflect what has been
added, changed, removed, or fixed.
* `README.md` has been updated if necessary.
* An urgent fix can take exception to this as long as it has been actively
communicated.
@ -118,7 +125,7 @@ As with other OpenTelemetry clients, opentelemetry-go follows the
[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification).
It's especially valuable to read through the [library
guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/library-guidelines.md).
guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md).
### Focus on Capabilities, Not Structure Compliance
@ -134,8 +141,28 @@ It is preferable to have contributions follow the idioms of the
language rather than conform to specific API names or argument
patterns in the spec.
For a deeper discussion, see:
https://github.com/open-telemetry/opentelemetry-specification/issues/165
For a deeper discussion, see
[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
## Documentation
Each non-example Go Module should have its own `README.md` containing:
- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/).
- Brief description.
- Installation instructions (and requirements if applicable).
- Hyperlink to an example. Depending on the component the example can be:
- An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go).
- A sample Go application with its own `README.md`, like [here](example/zipkin).
- Additional documentation sections such as:
- Configuration,
- Contributing,
- References.
[Here](exporters/jaeger/README.md) is an example of a concise `README.md`.
Moreover, it should be possible to navigate to any `README.md` from the
root `README.md`.
## Style Guide
@ -160,16 +187,16 @@ to the reasons why.
### Configuration
When creating an instantiation function for a complex `struct` it is useful
to allow variable number of options to be applied. However, the strong type
system of Go restricts the function design options. There are a few ways to
solve this problem, but we have landed on the following design.
When creating an instantiation function for a complex `type T struct`, it is
useful to allow a variable number of options to be applied. However, the strong
type system of Go restricts the function design options. There are a few ways
to solve this problem, but we have landed on the following design.
#### `config`
Configuration should be held in a `struct` named `config`, or prefixed with
the specific type name this configuration applies to if there are multiple
`config` in the package. This `struct` must contain configuration options.
`config` in the package. This type must contain configuration options.
```go
// config contains configuration options for a thing.
@ -178,14 +205,22 @@ type config struct {
}
```
In general the `config` `struct` will not need to be used externally to the
In general the `config` type will not need to be used externally to the
package and should be unexported. If, however, it is expected that the user
will likely want to build custom options for the configuration, the `config`
should be exported. Please include in the documentation for the `config`
how the user can extend the configuration.
It is important that `config` are not shared across package boundaries.
Meaning a `config` from one package should not be directly used by another.
It is important that internal `config` values are not shared across package
boundaries; a `config` from one package should not be directly used by another.
The one exception is the API packages: the configs from the base API, e.g.
`go.opentelemetry.io/otel/trace.TracerConfig` and
`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
by the SDK, therefore it is expected that these are exported.
When a config is exported we want to maintain forward and backward
compatibility. To achieve this, no fields should be exported; they should
instead be accessed by methods.
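For illustration only (the `ThingConfig` type below is hypothetical), an exported config following this rule keeps its fields unexported and exposes them through accessor methods:
```go
// ThingConfig is an exported configuration whose fields remain unexported so
// they can change without breaking users; values are read through methods.
type ThingConfig struct {
	name string
}

// Name returns the configured name.
func (c ThingConfig) Name() string {
	return c.name
}
```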
Optionally, it is common to include a `newConfig` function (with the same
naming scheme). This function wraps any defaults setting and looping over
@ -197,7 +232,7 @@ func newConfig([]Option) config {
// Set default values for config.
config := config{/* […] */}
for _, option := range options {
option.Apply(&config)
option.apply(&config)
}
// Perform any validation here.
return config
@ -218,10 +253,14 @@ To set the value of the options a `config` contains, a corresponding
```go
type Option interface {
Apply(*config)
apply(*config)
}
```
Having `apply` unexported makes sure that it will not be used externally.
Moreover, the interface becomes sealed so the user cannot easily implement
the interface on its own.
The name of the interface should be prefixed in the same way the
corresponding `config` is (if at all).
@ -244,11 +283,11 @@ func With*(…) Option { … }
```go
type defaultFalseOption bool
func (o defaultFalseOption) Apply(c *config) {
func (o defaultFalseOption) apply(c *config) {
c.Bool = bool(o)
}
// WithOption sets a T* to have an option included.
// WithOption sets a T to have an option included.
func WithOption() Option {
return defaultFalseOption(true)
}
@ -257,15 +296,15 @@ func WithOption() Option {
```go
type defaultTrueOption bool
func (o defaultTrueOption) Apply(c *config) {
func (o defaultTrueOption) apply(c *config) {
c.Bool = bool(o)
}
// WithoutOption sets a T* to have Bool option excluded.
// WithoutOption sets a T to have Bool option excluded.
func WithoutOption() Option {
return defaultTrueOption(false)
}
````
```
##### Declared Type Options
@ -274,23 +313,40 @@ type myTypeOption struct {
MyType MyType
}
func (o myTypeOption) Apply(c *config) {
func (o myTypeOption) apply(c *config) {
c.MyType = o.MyType
}
// WithMyType sets T* to have include MyType.
// WithMyType sets T to include MyType.
func WithMyType(t MyType) Option {
return myTypeOption{t}
}
```
##### Functional Options
```go
type optionFunc func(*config)
func (fn optionFunc) apply(c *config) {
fn(c)
}
// WithMyType sets t as MyType.
func WithMyType(t MyType) Option {
return optionFunc(func(c *config) {
c.MyType = t
})
}
```
#### Instantiation
Using this configuration pattern to configure instantiation with a `New*`
Using this configuration pattern to configure instantiation with a `NewT`
function.
```go
func NewT*(options ...Option) T* {…}
func NewT(options ...Option) T {…}
```
Any required parameters can be declared before the variadic `options`.
@ -314,12 +370,12 @@ type config struct {
// DogOption apply Dog specific options.
type DogOption interface {
ApplyDog(*config)
applyDog(*config)
}
// BirdOption apply Bird specific options.
type BirdOption interface {
ApplyBird(*config)
applyBird(*config)
}
// Option apply options for all animals.
@ -329,23 +385,23 @@ type Option interface {
}
type weightOption float64
func (o weightOption) ApplyDog(c *config) { c.Weight = float64(o) }
func (o weightOption) ApplyBird(c *config) { c.Weight = float64(o) }
func (o weightOption) applyDog(c *config) { c.Weight = float64(o) }
func (o weightOption) applyBird(c *config) { c.Weight = float64(o) }
func WithWeight(w float64) Option { return weightOption(w) }
type furColorOption string
func (o furColorOption) ApplyDog(c *config) { c.Color = string(o) }
func (o furColorOption) applyDog(c *config) { c.Color = string(o) }
func WithFurColor(c string) DogOption { return furColorOption(c) }
type maxAltitudeOption float64
func (o maxAltitudeOption) ApplyBird(c *config) { c.MaxAltitude = float64(o) }
func (o maxAltitudeOption) applyBird(c *config) { c.MaxAltitude = float64(o) }
func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
func NewDog(name string, o ...DogOption) Dog {…}
func NewBird(name string, o ...BirdOption) Bird {…}
```
### Interface Type
### Interfaces
To allow other developers to better comprehend the code, it is important
to ensure it is sufficiently documented. One simple measure that contributes
@ -353,21 +409,87 @@ to this aim is self-documenting by naming method parameters. Therefore,
where appropriate, methods of every exported interface type should have
their parameters appropriately named.
#### Interface Stability
All exported stable interfaces that include the following warning in their
documentation are allowed to be extended with additional methods.
> Warning: methods may be added to this interface in minor releases.
Otherwise, stable interfaces MUST NOT be modified.
If new functionality is needed for an interface that cannot be changed it MUST
be added by including an additional interface. That added interface can be a
simple interface for the specific functionality that you want to add or it can
be a super-set of the original interface. For example, if you wanted to add a
`Close` method to the `Exporter` interface:
```go
type Exporter interface {
Export()
}
```
A new interface, `Closer`, can be added:
```go
type Closer interface {
Close()
}
```
Code that is passed the `Exporter` interface can now check to see if the passed
value also satisfies the new interface. E.g.
```go
func caller(e Exporter) {
/* ... */
if c, ok := e.(Closer); ok {
c.Close()
}
/* ... */
}
```
Alternatively, a new type that is the super-set of an `Exporter` can be created.
```go
type ClosingExporter struct {
	Exporter
}

// Close releases any resources held by the wrapped Exporter.
func (e ClosingExporter) Close() { /* ... */ }
```
This new type can be used similar to the simple interface above in that a
passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type
and the `Close` method called.
This super-set approach can be useful if there is explicit behavior that needs
to be coupled with the original type and passed as a unified type to a new
function, but, because of this coupling, it also limits the applicability of
the added functionality. If there exist other interfaces where this
functionality should be added, each one will need their own super-set
interfaces and will duplicate the pattern. For this reason, the simple targeted
interface that defines the specific functionality should be preferred.
## Approvers and Maintainers
Approvers:
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Josh MacDonald](https://github.com/jmacd), LightStep
- [Sam Xie](https://github.com/XSAM)
- [David Ashpole](https://github.com/dashpole), Google
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Aaron Clawson](https://github.com/MadVikingGod)
- [Robert Pająk](https://github.com/pellared), Splunk
Maintainers:
- [Anthony Mirabella](https://github.com/Aneurysm9), Centene
- [Tyler Yahn](https://github.com/MrAlias), New Relic
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
- [Tyler Yahn](https://github.com/MrAlias), Splunk
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community
repo](https://github.com/open-telemetry/community/blob/master/community-membership.md).
repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).

View File

@ -21,141 +21,138 @@ ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example' | sort)) $(shell find ./example -type f -name 'go.mod' -exec dirname {} \; | sort)
ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
# Mac OS Catalina 10.5.x doesn't support 386. Hence skip 386 test
SKIP_386_TEST = false
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Darwin)
SW_VERS := $(shell sw_vers -productVersion)
ifeq ($(shell echo $(SW_VERS) | egrep '^(10.1[5-9]|1[1-9]|[2-9])'), $(SW_VERS))
SKIP_386_TEST = true
endif
endif
GOTEST_MIN = go test -timeout 30s
GOTEST = $(GOTEST_MIN) -race
GOTEST_WITH_COVERAGE = $(GOTEST) -coverprofile=coverage.out -covermode=atomic -coverpkg=./...
GO = go
TIMEOUT = 60
.DEFAULT_GOAL := precommit
.PHONY: precommit
.PHONY: precommit ci
precommit: dependabot-check license-check lint build examples test-default
ci: precommit check-clean-work-tree test-coverage
TOOLS_DIR := $(abspath ./.tools)
# Tools
$(TOOLS_DIR)/golangci-lint: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
TOOLS = $(CURDIR)/.tools
$(TOOLS):
@mkdir -p $@
$(TOOLS)/%: | $(TOOLS)
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/golangci-lint github.com/golangci/golangci-lint/cmd/golangci-lint
$(GO) build -o $@ $(PACKAGE)
$(TOOLS_DIR)/misspell: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/misspell github.com/client9/misspell/cmd/misspell
SEMCONVGEN = $(TOOLS)/semconvgen
$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
$(TOOLS_DIR)/stringer: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/stringer golang.org/x/tools/cmd/stringer
CROSSLINK = $(TOOLS)/crosslink
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink
$(TOOLS_DIR)/gojq: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/gojq github.com/itchyny/gojq/cmd/gojq
GOLANGCI_LINT = $(TOOLS)/golangci-lint
$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
precommit: dependabot-check license-check generate build lint examples test-benchmarks test
MISSPELL = $(TOOLS)/misspell
$(TOOLS)/misspell: PACKAGE= github.com/client9/misspell/cmd/misspell
.PHONY: test-with-coverage
test-with-coverage:
set -e; \
GOCOVMERGE = $(TOOLS)/gocovmerge
$(TOOLS)/gocovmerge: PACKAGE= github.com/wadey/gocovmerge
STRINGER = $(TOOLS)/stringer
$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
.PHONY: tools
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(TOOLS)/gojq $(SEMCONVGEN)
# Build
.PHONY: examples generate build
examples:
@set -e; for dir in $(EXAMPLES); do \
echo "$(GO) build $${dir}/..."; \
(cd "$${dir}" && \
$(GO) build .); \
done
generate: $(STRINGER)
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) generate $${dir}/..."; \
(cd "$${dir}" && \
PATH="$(TOOLS):$${PATH}" $(GO) generate ./...); \
done
build: generate
# Build all package code including testing code.
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) build $${dir}/..."; \
(cd "$${dir}" && \
$(GO) build ./... && \
$(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null); \
done
# Tests
TEST_TARGETS := test-default test-bench test-short test-verbose test-race
.PHONY: $(TEST_TARGETS) test
test-default: ARGS=-v -race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short
test-verbose: ARGS=-v
test-race: ARGS=-race
$(TEST_TARGETS): test
test:
@set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $${dir}/..."; \
(cd "$${dir}" && \
$(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)); \
done
COVERAGE_MODE = atomic
COVERAGE_PROFILE = coverage.out
.PHONY: test-coverage
test-coverage: | $(GOCOVMERGE)
@set -e; \
printf "" > coverage.txt; \
for dir in $(ALL_COVERAGE_MOD_DIRS); do \
echo "go test ./... + coverage in $${dir}"; \
echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
(cd "$${dir}" && \
$(GOTEST_WITH_COVERAGE) ./... && \
go tool cover -html=coverage.out -o coverage.html); \
[ -f "$${dir}/coverage.out" ] && cat "$${dir}/coverage.out" >> coverage.txt; \
$(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
$(GO) tool cover -html=coverage.out -o coverage.html); \
done; \
sed -i.bak -e '2,$$ { /^mode: /d; }' coverage.txt
.PHONY: ci
ci: precommit check-clean-work-tree test-with-coverage test-386
.PHONY: check-clean-work-tree
check-clean-work-tree:
@if ! git diff --quiet; then \
echo; \
echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
echo; \
git status; \
exit 1; \
fi
.PHONY: build
build:
# TODO: Fix this on windows.
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "compiling all packages in $${dir}"; \
(cd "$${dir}" && \
go build ./... && \
go test -run xxxxxMatchNothingxxxxx ./... >/dev/null); \
done
.PHONY: test
test:
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "go test ./... + race in $${dir}"; \
(cd "$${dir}" && \
$(GOTEST) ./...); \
done
.PHONY: test-386
test-386:
if [ $(SKIP_386_TEST) = true ] ; then \
echo "skipping the test for GOARCH 386 as it is not supported on the current OS"; \
else \
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "go test ./... GOARCH 386 in $${dir}"; \
(cd "$${dir}" && \
GOARCH=386 $(GOTEST_MIN) ./...); \
done; \
fi
.PHONY: examples
examples:
@set -e; for ex in $(EXAMPLES); do \
echo "Building $${ex}"; \
(cd "$${ex}" && \
go build .); \
done
.PHONY: test-benchmarks
test-benchmarks:
@set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "test benchmarks in $${dir}"; \
(cd "$${dir}" && go test -test.benchtime=1ms -run=NONE -bench=. ./...) > /dev/null; \
done
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
.PHONY: lint
lint: $(TOOLS_DIR)/golangci-lint $(TOOLS_DIR)/misspell
lint: misspell lint-modules | $(GOLANGCI_LINT)
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "golangci-lint in $${dir}"; \
(cd "$${dir}" && \
$(TOOLS_DIR)/golangci-lint run --fix && \
$(TOOLS_DIR)/golangci-lint run); \
done
$(TOOLS_DIR)/misspell -w $(ALL_DOCS)
set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
echo "go mod tidy in $${dir}"; \
(cd "$${dir}" && \
go mod tidy); \
$(GOLANGCI_LINT) run --fix && \
$(GOLANGCI_LINT) run); \
done
generate: $(TOOLS_DIR)/stringer
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "running generators in $${dir}"; \
.PHONY: misspell
misspell: | $(MISSPELL)
$(MISSPELL) -w $(ALL_DOCS)
.PHONY: lint-modules
lint-modules: | $(CROSSLINK)
set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
echo "$(GO) mod tidy in $${dir}"; \
(cd "$${dir}" && \
PATH="$(TOOLS_DIR):$${PATH}" go generate ./...); \
$(GO) mod tidy); \
done
echo "cross-linking all go modules"
$(CROSSLINK)
.PHONY: license-check
license-check:
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path './vendor/*' ! -path './exporters/otlp/internal/opentelemetry-proto/*') ; do \
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*') ; do \
awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
done); \
if [ -n "$${licRes}" ]; then \
@ -166,12 +163,23 @@ license-check:
.PHONY: dependabot-check
dependabot-check:
@result=$$( \
for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.\/\?/\//' ); \
do grep -q "$$f" .github/dependabot.yml \
for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.//' ); \
do grep -q "directory: \+$$f" .github/dependabot.yml \
|| echo "$$f"; \
done; \
); \
if [ -n "$$result" ]; then \
echo "missing go.mod dependabot check:"; echo "$$result"; \
echo "new modules need to be added to the .github/dependabot.yml file"; \
exit 1; \
fi
.PHONY: check-clean-work-tree
check-clean-work-tree:
@if ! git diff --quiet; then \
echo; \
echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
echo; \
git status; \
exit 1; \
fi

View File

@ -1,129 +0,0 @@
# -*- mode: makefile; -*-
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This Makefile.proto has rules to generate go code for otlp
# exporter. It does it by copying the proto files from
# `exporters/otlp/internal/opentelemetry-proto` (which is a
# submodule that needs to be checked out) into `gen/proto`, changing
# the go_package option to a valid string, generating the go files and
# finally copying the files into the module. The files are not
# generated in place, because protoc generates a too-deep directory
# structure.
#
# Currently, all the generated code is in
# `exporters/otlp/internal/opentelemetry-proto-gen`.
#
# Prereqs: wget (for downloading the zip file with protoc binary),
# unzip (for unpacking the archive), rsync (for copying back the
# generated files).
PROTOC_VERSION := 3.14.0
TOOLS_DIR := $(abspath ./.tools)
TOOLS_MOD_DIR := ./internal/tools
PROTOBUF_VERSION := v1
OTEL_PROTO_SUBMODULE := exporters/otlp/internal/opentelemetry-proto
GEN_TEMP_DIR := gen
SUBMODULE_PROTO_FILES := $(wildcard $(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/*/$(PROTOBUF_VERSION)/*.proto) $(wildcard $(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/collector/*/$(PROTOBUF_VERSION)/*.proto)
ifeq ($(strip $(SUBMODULE_PROTO_FILES)),)
$(error Submodule at $(OTEL_PROTO_SUBMODULE) is not checked out, use "git submodule update --init")
endif
PROTOBUF_GEN_DIR := exporters/otlp/internal/opentelemetry-proto-gen
PROTOBUF_TEMP_DIR := $(GEN_TEMP_DIR)/pb-go
PROTO_SOURCE_DIR := $(GEN_TEMP_DIR)/proto
SOURCE_PROTO_FILES := $(subst $(OTEL_PROTO_SUBMODULE),$(PROTO_SOURCE_DIR),$(SUBMODULE_PROTO_FILES))
.DEFAULT_GOAL := protobuf
UNAME_S := $(shell uname -s)
UNAME_M := $(shell uname -m)
ifeq ($(UNAME_S),Linux)
PROTOC_OS := linux
PROTOC_ARCH := $(UNAME_M)
else ifeq ($(UNAME_S),Darwin)
PROTOC_OS := osx
PROTOC_ARCH := x86_64
endif
PROTOC_ZIP_URL := https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS)-$(PROTOC_ARCH).zip
$(TOOLS_DIR)/PROTOC_$(PROTOC_VERSION):
@rm -f "$(TOOLS_DIR)"/PROTOC_* && \
touch "$@"
# Depend on a versioned file (like PROTOC_3.14.0), so when version
# gets bumped, we will depend on a nonexistent file and thus download
# a newer version.
$(TOOLS_DIR)/protoc/bin/protoc: $(TOOLS_DIR)/PROTOC_$(PROTOC_VERSION)
echo "Fetching protoc $(PROTOC_VERSION)" && \
rm -rf $(TOOLS_DIR)/protoc && \
wget -O $(TOOLS_DIR)/protoc.zip $(PROTOC_ZIP_URL) && \
unzip $(TOOLS_DIR)/protoc.zip -d $(TOOLS_DIR)/protoc-tmp && \
rm $(TOOLS_DIR)/protoc.zip && \
touch $(TOOLS_DIR)/protoc-tmp/bin/protoc && \
mv $(TOOLS_DIR)/protoc-tmp $(TOOLS_DIR)/protoc
$(TOOLS_DIR)/protoc-gen-gogofast: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/protoc-gen-gogofast github.com/gogo/protobuf/protoc-gen-gogofast && \
go mod tidy
# Return a sed expression for replacing the go_package option in proto
# file with a one that's valid for us.
#
# Example: $(call get-sed-expr,$(PROTOBUF_GEN_DIR))
define get-sed-expr
's,go_package = "github.com/open-telemetry/opentelemetry-proto/gen/go,go_package = "go.opentelemetry.io/otel/$(1),'
endef
.PHONY: protobuf
protobuf: protobuf-source gen-protobuf copy-protobufs
.PHONY: protobuf-source
protobuf-source: $(SOURCE_PROTO_FILES)
# This copies proto files from submodule into $(PROTO_SOURCE_DIR),
# thus satisfying the $(SOURCE_PROTO_FILES) prerequisite. The copies
# have their package name replaced by go.opentelemetry.io/otel.
$(PROTO_SOURCE_DIR)/%.proto: $(OTEL_PROTO_SUBMODULE)/%.proto
@ \
mkdir -p $(@D); \
sed -e $(call get-sed-expr,$(PROTOBUF_GEN_DIR)) "$<" >"$@.tmp"; \
mv "$@.tmp" "$@"
.PHONY: gen-protobuf
gen-protobuf: $(SOURCE_PROTO_FILES) $(TOOLS_DIR)/protoc-gen-gogofast $(TOOLS_DIR)/protoc/bin/protoc
@ \
mkdir -p "$(PROTOBUF_TEMP_DIR)"; \
set -e; for f in $^; do \
if [[ "$${f}" == $(TOOLS_DIR)/* ]]; then continue; fi; \
echo "protoc $${f#"$(PROTO_SOURCE_DIR)/"}"; \
PATH="$(TOOLS_DIR):$${PATH}" $(TOOLS_DIR)/protoc/bin/protoc --proto_path="$(PROTO_SOURCE_DIR)" --gogofast_out="plugins=grpc:$(PROTOBUF_TEMP_DIR)" "$${f}"; \
done
.PHONY: copy-protobufs
copy-protobufs:
@rsync -a $(PROTOBUF_TEMP_DIR)/go.opentelemetry.io/otel/exporters .
.PHONY: clean
clean:
rm -rf $(GEN_TEMP_DIR)

View File

@ -1,30 +1,57 @@
# OpenTelemetry-Go
[![Circle CI](https://circleci.com/gh/open-telemetry/opentelemetry-go.svg?style=svg)](https://circleci.com/gh/open-telemetry/opentelemetry-go)
[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel)
[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
[![Gitter](https://badges.gitter.im/open-telemetry/opentelemetry-go.svg)](https://gitter.im/open-telemetry/opentelemetry-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
The Go [OpenTelemetry](https://opentelemetry.io/) implementation.
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
## Project Status
**Warning**: this project is currently in a pre-GA phase. Backwards
incompatible changes may be introduced in subsequent minor version releases as
we work to track the evolving OpenTelemetry specification and user feedback.
| Signal | Status | Project |
| ------- | ---------- | ------- |
| Traces | Stable | N/A |
| Metrics | Alpha | N/A |
| Logs | Frozen [1] | N/A |
Our progress towards a GA release candidate is tracked in [this project
board](https://github.com/orgs/open-telemetry/projects/5). This release
candidate will follow semantic versioning and will be released with a major
version greater than zero.
- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics.
No Logs Pull Requests are currently being accepted.
Progress and status specific to this repository is tracked in our local
[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
and
[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
Project versioning information and stability guarantees can be found in the
[versioning documentation](./VERSIONING.md).
### Compatibility
This project is tested on the following systems.
| OS | Go Version | Architecture |
| ------- | ---------- | ------------ |
| Ubuntu | 1.16 | amd64 |
| Ubuntu | 1.15 | amd64 |
| Ubuntu | 1.16 | 386 |
| Ubuntu | 1.15 | 386 |
| MacOS | 1.16 | amd64 |
| MacOS | 1.15 | amd64 |
| Windows | 1.16 | amd64 |
| Windows | 1.15 | amd64 |
| Windows | 1.16 | 386 |
| Windows | 1.15 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
## Getting Started
You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
OpenTelemetry's goal is to provide a single set of APIs to capture distributed
traces and metrics from your application and send them to an observability
platform. This project allows you to do just that for applications written in
@ -37,7 +64,7 @@ To start capturing distributed traces and metric events from your application
it first needs to be instrumented. The easiest way to do this is by using an
instrumentation library for your code. Be sure to check out [the officially
supported instrumentation
libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/instrumentation).
libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
If you need to extend the telemetry an instrumentation library provides or want
to build your own instrumentation for your application directly you will need
@ -51,15 +78,17 @@ practical uses of this process.
Now that your application is instrumented to collect telemetry, it needs an
export pipeline to send that telemetry to an observability platform.
You can find officially supported exporters [here](./exporters/) and in the
companion [contrib
repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/exporters/metric).
Additionally, there are many vendor specific or 3rd party exporters for
OpenTelemetry. These exporters are broken down by
[trace](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/trace?tab=importedby)
and
[metric](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/metric?tab=importedby)
support.
All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
| Exporter | Metrics | Traces |
| :-----------------------------------: | :-----: | :----: |
| [Jaeger](./exporters/jaeger/) | | ✓ |
| [OTLP](./exporters/otlp/) | ✓ | ✓ |
| [Prometheus](./exporters/prometheus/) | ✓ | |
| [stdout](./exporters/stdout/) | ✓ | ✓ |
| [Zipkin](./exporters/zipkin/) | | ✓ |
Additionally, OpenTelemetry community supported exporters can be found in the [contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters).
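As a rough sketch of wiring one of these exporters into the SDK (here the stdout trace exporter; any exporter from the table above could be swapped in):
```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
	if err != nil {
		log.Fatal(err)
	}
	// Register the exporter with the SDK and install the provider globally.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(context.Background()) }()
	otel.SetTracerProvider(tp)
}
```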
## Contributing

View File

@ -1,5 +1,37 @@
# Release Process
## Semantic Convention Generation
If a new version of the OpenTelemetry Specification has been released it will be necessary to generate a new
semantic convention package from the YAML definitions in the specification repository. There is a `semconvgen` utility
installed by `make tools` that can be used to generate a package with the name matching the specification
version number under the `semconv` package. This will ideally be done soon after the specification release is
tagged. Make sure that the specification repo contains a checkout of the latest tagged release so that the
generated files match the released semantic conventions.
There are currently two categories of semantic conventions that must be generated, `resource` and `trace`.
```
.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/resource -t semconv/template.j2
.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/trace -t semconv/template.j2
```
Using default values for all options other than `input` will result in using the `template.j2` template to
generate `resource.go` and `trace.go` in `/path/to/otelgo/repo/semconv/<version>`.
There are several ancillary files that are not generated and should be copied into the new package from the
prior package, with updates made as appropriate to canonical import path statements and constant values.
These files include:
* doc.go
* exception.go
* http(_test)?.go
* schema.go
Uses of the previous schema version in this repository should be updated to use the newly generated version.
No tooling for this exists at present, so use find/replace in your editor of choice or craft a `grep | sed`
pipeline if you like living on the edge.
## Pre-Release
Update go.mod for submodules to depend on the new release which will happen in the next step.
@ -13,7 +45,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
2. Verify the changes.
```
git diff master
git diff main
```
This should have changed the version for all modules to be `<new tag>`.
@ -32,7 +64,6 @@ Update go.mod for submodules to depend on the new release which will happen in t
4. Push the changes to upstream and create a Pull Request on GitHub.
Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
## Tag
Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
@ -44,7 +75,7 @@ Failure to do so will leave things in a broken state.
It is critical you make sure the version you push upstream is correct.
[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
1. Run the tag.sh script using the `<commit-hash>` of the commit on the master branch for the merged Pull Request.
1. Run the tag.sh script using the `<commit-hash>` of the commit on the main branch for the merged Pull Request.
```
./tag.sh <new tag> <commit-hash>
@ -76,6 +107,13 @@ After releasing verify that examples build outside of the repository.
The script copies examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
This ensures they build with the published release, not the local copy.
## Contrib Repository
## Post-Release
Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/master/RELEASING.md) that uses this release.
### Contrib Repository
Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
### Website Documentation
Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/).
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.

View File

@ -0,0 +1,224 @@
# Versioning
This document describes the versioning policy for this repository. This policy
is designed so the following goals can be achieved.
**Users are provided a codebase of value that is stable and secure.**
## Policy
* Versioning of this project will be idiomatic of a Go project using [Go
modules](https://github.com/golang/go/wiki/Modules).
* [Semantic import
versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
will be used.
* Versions will comply with [semver
2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
* New methods may be added to exported API interfaces. All exported
interfaces that fall within this exception will include the following
paragraph in their public documentation.
> Warning: methods may be added to this interface in minor releases.
* If a module is version `v2` or higher, the major version of the module
must be included as a `/vN` at the end of the module paths used in
`go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
(e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
paths used in `go get` commands (e.g., `go get
go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a
`@v2.0.1` in that example. One way to think about it is that the module
name now includes the `/v2`, so include `/v2` whenever you are using the
module name).
* If a module is version `v0` or `v1`, do not include the major version in
either the module path or the import path.
* Modules will be used to encapsulate signals and components.
* Experimental modules still under active development will be versioned at
`v0` to imply the stability guarantee defined by
[semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
> Major version zero (0.y.z) is for initial development. Anything MAY
> change at any time. The public API SHOULD NOT be considered stable.
* Mature modules for which we guarantee a stable public API will be versioned
with a major version greater than `v0`.
* The decision to make a module stable will be made on a case-by-case
basis by the maintainers of this project.
* Experimental modules will start their versioning at `v0.0.0` and will
increment their minor version when backwards incompatible changes are
released and increment their patch version when backwards compatible
changes are released.
* All stable modules that use the same major version number will use the
same entire version number.
* Stable modules may be released with an incremented minor or patch
version even though that module has not been changed, but rather so
that it will remain at the same version as other stable modules that
did undergo change.
* When an experimental module becomes stable a new stable module version
will be released and will include this now stable module. The new
stable module version will be an increment of the minor version number
and will be applied to all existing stable modules as well as the newly
stable module being released.
* Versioning of the associated [contrib
repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
this project will be idiomatic of a Go project using [Go
modules](https://github.com/golang/go/wiki/Modules).
* [Semantic import
versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
will be used.
* Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
* If a module is version `v2` or higher, the
major version of the module must be included as a `/vN` at the end of the
module paths used in `go.mod` files (e.g., `module
go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
package import path (e.g., `import
"go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
the paths used in `go get` commands (e.g., `go get
go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
is both a `/v2` and a `@v2.0.1` in that example. One way to think about
it is that the module name now includes the `/v2`, so include `/v2`
whenever you are using the module name).
* If a module is version `v0` or `v1`, do not include the major version
in either the module path or the import path.
* In addition to public APIs, telemetry produced by stable instrumentation
will remain stable and backwards compatible. This is to avoid breaking
alerts and dashboards.
* Modules will be used to encapsulate instrumentation, detectors, exporters,
propagators, and any other independent sets of related components.
* Experimental modules still under active development will be versioned at
`v0` to imply the stability guarantee defined by
[semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
> Major version zero (0.y.z) is for initial development. Anything MAY
> change at any time. The public API SHOULD NOT be considered stable.
* Mature modules for which we guarantee a stable public API and telemetry will
be versioned with a major version greater than `v0`.
* Experimental modules will start their versioning at `v0.0.0` and will
increment their minor version when backwards incompatible changes are
released and increment their patch version when backwards compatible
changes are released.
* Stable contrib modules cannot depend on experimental modules from this
project.
* All stable contrib modules with the same major version as this project
will use the same entire version number as this project.
* Stable modules may be released with an incremented minor or patch
version even though that module's code has not been changed. Instead
the only included change will be an update to that
module's dependency on this project's stable APIs.
* When an experimental module in contrib becomes stable a new stable
module version will be released and will include this now stable
module. The new stable module version will be an increment of the minor
version number and will be applied to all existing stable contrib
modules, this project's modules, and the newly stable module being
released.
* Contrib modules will be kept up to date with this project's releases.
* Due to the dependency contrib modules will implicitly have on this
project's modules, the release of stable contrib modules to match the
released version number will be staggered after this project's release.
There is no explicit time guarantee for how long after this project's
release the contrib release will be. Effort should be made to keep them
as close in time as possible.
* No additional stable release in this project can be made until the
contrib repository has a matching stable release.
* No release can be made in the contrib repository after this project's
stable release except for a stable release of the contrib repository.
* GitHub releases will be made for all releases.
* Go modules will be made available at Go package mirrors.
## Example Versioning Lifecycle
To better understand the implementation of the above policy the following
example is provided. This project is simplified to include only the following
modules and their versions:
* `otel`: `v0.14.0`
* `otel/trace`: `v0.14.0`
* `otel/metric`: `v0.14.0`
* `otel/baggage`: `v0.14.0`
* `otel/sdk/trace`: `v0.14.0`
* `otel/sdk/metric`: `v0.14.0`
These modules have been developed to the point where the `otel/trace`,
`otel/baggage`, and `otel/sdk/trace` modules should be considered for a
stable release. The `otel/metric` and `otel/sdk/metric` modules are still
under active development and the `otel` module depends on both `otel/trace`
and `otel/metric`.
The `otel` package is refactored to remove its dependencies on `otel/metric` so
it can be released as stable as well. With that done the following release
candidates are made:
* `otel`: `v1.0.0-RC1`
* `otel/trace`: `v1.0.0-RC1`
* `otel/baggage`: `v1.0.0-RC1`
* `otel/sdk/trace`: `v1.0.0-RC1`
The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
A few minor issues are discovered in the `otel/trace` package. These issues are
resolved with some minor, but backwards incompatible, changes and are released
as a second release candidate:
* `otel`: `v1.0.0-RC2`
* `otel/trace`: `v1.0.0-RC2`
* `otel/baggage`: `v1.0.0-RC2`
* `otel/sdk/trace`: `v1.0.0-RC2`
Notice that all module version numbers are incremented to adhere to our
versioning policy.
After these release candidates have been evaluated to satisfaction, they are
released as version `v1.0.0`.
* `otel`: `v1.0.0`
* `otel/trace`: `v1.0.0`
* `otel/baggage`: `v1.0.0`
* `otel/sdk/trace`: `v1.0.0`
Since both the `go` utility and the Go module system support [the semantic
versioning definition of
precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
will correctly be interpreted as the successor to the previous release
candidates.
Active development of this project continues. The `otel/metric` module now has
backwards incompatible changes to its API that need to be released and the
`otel/baggage` module has a minor bug fix that needs to be released. The
following release is made:
* `otel`: `v1.0.1`
* `otel/trace`: `v1.0.1`
* `otel/metric`: `v0.15.0`
* `otel/baggage`: `v1.0.1`
* `otel/sdk/trace`: `v1.0.1`
* `otel/sdk/metric`: `v0.15.0`
Notice that, again, all stable module versions are incremented in unison and
the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
bumped its version. This bump of the `otel/sdk/metric` package makes sense
given their coupling, though it is not explicitly required by our versioning
policy.
As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
point where they should be evaluated for stability. The `otel` module is
reintegrated with the `otel/metric` package and the following release is made:
* `otel`: `v1.1.0-RC1`
* `otel/trace`: `v1.1.0-RC1`
* `otel/metric`: `v1.1.0-RC1`
* `otel/baggage`: `v1.1.0-RC1`
* `otel/sdk/trace`: `v1.1.0-RC1`
* `otel/sdk/metric`: `v1.1.0-RC1`
All the modules are evaluated and determined to be viable for a stable
release. They are then released as version `v1.1.0` (the minor version is
incremented to indicate the addition of a new signal).
* `otel`: `v1.1.0`
* `otel/trace`: `v1.1.0`
* `otel/metric`: `v1.1.0`
* `otel/baggage`: `v1.1.0`
* `otel/sdk/trace`: `v1.1.0`
* `otel/sdk/metric`: `v1.1.0`

View File

@ -12,14 +12,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.11
package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
)
func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
return ctx, func() {}
}
// Package attribute provides key and value attributes.
package attribute // import "go.opentelemetry.io/otel/attribute"
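// A minimal usage sketch (not part of this file; the attribute names are
// illustrative only):
//
//	kv := attribute.String("service.name", "example")
//	set := attribute.NewSet(kv, attribute.Int("retries", 3))
//	enc := set.Encoded(attribute.DefaultEncoder())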

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package label // import "go.opentelemetry.io/otel/label"
package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"bytes"
@ -28,7 +28,7 @@ type (
Encoder interface {
// Encode returns the serialized encoding of the label
// set using its Iterator. This result may be cached
// by a label.Set.
// by an attribute.Set.
Encode(iterator Iterator) string
// ID returns a value that is unique for each class of

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package label // import "go.opentelemetry.io/otel/label"
package attribute // import "go.opentelemetry.io/otel/attribute"
// Iterator allows iterating over the set of labels in order,
// sorted by key.
@ -55,7 +55,7 @@ func (i *Iterator) Attribute() KeyValue {
return i.Label()
}
// IndexedLabel returns current index and label. Must be called only
// IndexedLabel returns the current index and attribute. Must be called only
// after Next returns true.
func (i *Iterator) IndexedLabel() (int, KeyValue) {
return i.idx, i.Label()

View File

@ -0,0 +1,134 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute"
// Key represents the key part in key-value pairs. It's a string. The
// allowed character set in the key depends on the use of the key.
type Key string
// Bool creates a KeyValue instance with a BOOL Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Bool(name, value).
func (k Key) Bool(v bool) KeyValue {
return KeyValue{
Key: k,
Value: BoolValue(v),
}
}
// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- BoolSlice(name, value).
func (k Key) BoolSlice(v []bool) KeyValue {
return KeyValue{
Key: k,
Value: BoolSliceValue(v),
}
}
// Int creates a KeyValue instance with an INT64 Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int(name, value).
func (k Key) Int(v int) KeyValue {
return KeyValue{
Key: k,
Value: IntValue(v),
}
}
// IntSlice creates a KeyValue instance with an INT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- IntSlice(name, value).
func (k Key) IntSlice(v []int) KeyValue {
return KeyValue{
Key: k,
Value: IntSliceValue(v),
}
}
// Int64 creates a KeyValue instance with an INT64 Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int64(name, value).
func (k Key) Int64(v int64) KeyValue {
return KeyValue{
Key: k,
Value: Int64Value(v),
}
}
// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int64Slice(name, value).
func (k Key) Int64Slice(v []int64) KeyValue {
return KeyValue{
Key: k,
Value: Int64SliceValue(v),
}
}
// Float64 creates a KeyValue instance with a FLOAT64 Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Float64(name, value).
func (k Key) Float64(v float64) KeyValue {
return KeyValue{
Key: k,
Value: Float64Value(v),
}
}
// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Float64Slice(name, value).
func (k Key) Float64Slice(v []float64) KeyValue {
return KeyValue{
Key: k,
Value: Float64SliceValue(v),
}
}
// String creates a KeyValue instance with a STRING Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- String(name, value).
func (k Key) String(v string) KeyValue {
return KeyValue{
Key: k,
Value: StringValue(v),
}
}
// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- StringSlice(name, value).
func (k Key) StringSlice(v []string) KeyValue {
return KeyValue{
Key: k,
Value: StringSliceValue(v),
}
}
// Defined returns true for non-empty keys.
func (k Key) Defined() bool {
return len(k) != 0
}

View File

@ -0,0 +1,86 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"fmt"
)
// KeyValue holds a key and value pair.
type KeyValue struct {
Key Key
Value Value
}
// Valid reports whether kv is a valid OpenTelemetry attribute.
func (kv KeyValue) Valid() bool {
return kv.Key != "" && kv.Value.Type() != INVALID
}
// Bool creates a KeyValue with a BOOL Value type.
func Bool(k string, v bool) KeyValue {
return Key(k).Bool(v)
}
// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
func BoolSlice(k string, v []bool) KeyValue {
return Key(k).BoolSlice(v)
}
// Int creates a KeyValue with an INT64 Value type.
func Int(k string, v int) KeyValue {
return Key(k).Int(v)
}
// IntSlice creates a KeyValue with an INT64SLICE Value type.
func IntSlice(k string, v []int) KeyValue {
return Key(k).IntSlice(v)
}
// Int64 creates a KeyValue with an INT64 Value type.
func Int64(k string, v int64) KeyValue {
return Key(k).Int64(v)
}
// Int64Slice creates a KeyValue with an INT64SLICE Value type.
func Int64Slice(k string, v []int64) KeyValue {
return Key(k).Int64Slice(v)
}
// Float64 creates a KeyValue with a FLOAT64 Value type.
func Float64(k string, v float64) KeyValue {
return Key(k).Float64(v)
}
// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
func Float64Slice(k string, v []float64) KeyValue {
return Key(k).Float64Slice(v)
}
// String creates a KeyValue with a STRING Value type.
func String(k, v string) KeyValue {
return Key(k).String(v)
}
// StringSlice creates a KeyValue with a STRINGSLICE Value type.
func StringSlice(k string, v []string) KeyValue {
return Key(k).StringSlice(v)
}
// Stringer creates a new key-value pair with a passed name and a string
// value generated by the passed Stringer interface.
func Stringer(k string, v fmt.Stringer) KeyValue {
return Key(k).String(v.String())
}

View File

@ -12,13 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package label // import "go.opentelemetry.io/otel/label"
package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"encoding/json"
"reflect"
"sort"
"sync"
)
type (
@ -35,10 +34,6 @@ type (
// 3. Correlation map (TODO)
Set struct {
equivalent Distinct
lock sync.Mutex
encoders [maxConcurrentEncoders]EncoderID
encoded [maxConcurrentEncoders]string
}
// Distinct wraps a variable-size array of `KeyValue`,
@ -76,8 +71,6 @@ var (
}
)
const maxConcurrentEncoders = 3
// EmptySet returns a reference to a Set with no elements.
//
// This is a convenience provided for optimized calling utility.
@ -182,51 +175,13 @@ func (l *Set) Equals(o *Set) bool {
}
// Encoded returns the encoded form of this set, according to
// `encoder`. The result will be cached in this `*Set`.
// `encoder`.
func (l *Set) Encoded(encoder Encoder) string {
if l == nil || encoder == nil {
return ""
}
id := encoder.ID()
if !id.Valid() {
// Invalid IDs are not cached.
return encoder.Encode(l.Iter())
}
var lookup *string
l.lock.Lock()
for idx := 0; idx < maxConcurrentEncoders; idx++ {
if l.encoders[idx] == id {
lookup = &l.encoded[idx]
break
}
}
l.lock.Unlock()
if lookup != nil {
return *lookup
}
r := encoder.Encode(l.Iter())
l.lock.Lock()
defer l.lock.Unlock()
for idx := 0; idx < maxConcurrentEncoders; idx++ {
if l.encoders[idx] == id {
return l.encoded[idx]
}
if !l.encoders[idx].Valid() {
l.encoders[idx] = id
l.encoded[idx] = r
return r
}
}
// TODO: This is a performance cliff. Find a way for this to
// generate a warning.
return r
}
func empty() Set {
@ -246,7 +201,7 @@ func NewSet(kvs ...KeyValue) Set {
return empty()
}
s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil)
return s //nolint
return s
}
// NewSetWithSortable returns a new `Set`. See the documentation for
@ -259,7 +214,7 @@ func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
return empty()
}
s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
return s //nolint
return s
}
// NewSetWithFiltered returns a new `Set`. See the documentation for
@ -295,7 +250,7 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// - allocating a `Set` for storing the return value of this
// constructor.
//
// The result maintains a cache of encoded labels, by label.EncoderID.
// The result maintains a cache of encoded labels, by attribute.EncoderID.
// This value should not be copied after its first use.
//
// The second `[]KeyValue` return value is a list of labels that were

View File

@ -1,6 +1,6 @@
// Code generated by "stringer -type=Type"; DO NOT EDIT.
package label
package attribute
import "strconv"
@ -10,19 +10,18 @@ func _() {
var x [1]struct{}
_ = x[INVALID-0]
_ = x[BOOL-1]
_ = x[INT32-2]
_ = x[INT64-3]
_ = x[UINT32-4]
_ = x[UINT64-5]
_ = x[FLOAT32-6]
_ = x[FLOAT64-7]
_ = x[STRING-8]
_ = x[ARRAY-9]
_ = x[INT64-2]
_ = x[FLOAT64-3]
_ = x[STRING-4]
_ = x[BOOLSLICE-5]
_ = x[INT64SLICE-6]
_ = x[FLOAT64SLICE-7]
_ = x[STRINGSLICE-8]
}
const _Type_name = "INVALIDBOOLINT32INT64UINT32UINT64FLOAT32FLOAT64STRINGARRAY"
const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"
var _Type_index = [...]uint8{0, 7, 11, 16, 21, 27, 33, 40, 47, 53, 58}
var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
func (i Type) String() string {
if i < 0 || i >= Type(len(_Type_index)-1) {

View File

@ -0,0 +1,271 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"encoding/json"
"fmt"
"strconv"
"go.opentelemetry.io/otel/internal"
)
//go:generate stringer -type=Type
// Type describes the type of the data Value holds.
type Type int
// Value represents the value part in key-value pairs.
type Value struct {
vtype Type
numeric uint64
stringly string
slice interface{}
}
const (
// INVALID is used for a Value with no value set.
INVALID Type = iota
// BOOL is a boolean Type Value.
BOOL
// INT64 is a 64-bit signed integral Type Value.
INT64
// FLOAT64 is a 64-bit floating point Type Value.
FLOAT64
// STRING is a string Type Value.
STRING
// BOOLSLICE is a slice of booleans Type Value.
BOOLSLICE
// INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
INT64SLICE
// FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
FLOAT64SLICE
// STRINGSLICE is a slice of strings Type Value.
STRINGSLICE
)
// BoolValue creates a BOOL Value.
func BoolValue(v bool) Value {
return Value{
vtype: BOOL,
numeric: internal.BoolToRaw(v),
}
}
// BoolSliceValue creates a BOOLSLICE Value.
func BoolSliceValue(v []bool) Value {
cp := make([]bool, len(v))
copy(cp, v)
return Value{
vtype: BOOLSLICE,
slice: &cp,
}
}
// IntValue creates an INT64 Value.
func IntValue(v int) Value {
return Int64Value(int64(v))
}
// IntSliceValue creates an INT64SLICE Value.
func IntSliceValue(v []int) Value {
cp := make([]int64, 0, len(v))
for _, i := range v {
cp = append(cp, int64(i))
}
return Value{
vtype: INT64SLICE,
slice: &cp,
}
}
// Int64Value creates an INT64 Value.
func Int64Value(v int64) Value {
return Value{
vtype: INT64,
numeric: internal.Int64ToRaw(v),
}
}
// Int64SliceValue creates an INT64SLICE Value.
func Int64SliceValue(v []int64) Value {
cp := make([]int64, len(v))
copy(cp, v)
return Value{
vtype: INT64SLICE,
slice: &cp,
}
}
// Float64Value creates a FLOAT64 Value.
func Float64Value(v float64) Value {
return Value{
vtype: FLOAT64,
numeric: internal.Float64ToRaw(v),
}
}
// Float64SliceValue creates a FLOAT64SLICE Value.
func Float64SliceValue(v []float64) Value {
cp := make([]float64, len(v))
copy(cp, v)
return Value{
vtype: FLOAT64SLICE,
slice: &cp,
}
}
// StringValue creates a STRING Value.
func StringValue(v string) Value {
return Value{
vtype: STRING,
stringly: v,
}
}
// StringSliceValue creates a STRINGSLICE Value.
func StringSliceValue(v []string) Value {
cp := make([]string, len(v))
copy(cp, v)
return Value{
vtype: STRINGSLICE,
slice: &cp,
}
}
// Type returns the type of the Value.
func (v Value) Type() Type {
return v.vtype
}
// AsBool returns the bool value. Make sure that the Value's type is
// BOOL.
func (v Value) AsBool() bool {
return internal.RawToBool(v.numeric)
}
// AsBoolSlice returns the []bool value. Make sure that the Value's type is
// BOOLSLICE.
func (v Value) AsBoolSlice() []bool {
if s, ok := v.slice.(*[]bool); ok {
return *s
}
return nil
}
// AsInt64 returns the int64 value. Make sure that the Value's type is
// INT64.
func (v Value) AsInt64() int64 {
return internal.RawToInt64(v.numeric)
}
// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
// INT64SLICE.
func (v Value) AsInt64Slice() []int64 {
if s, ok := v.slice.(*[]int64); ok {
return *s
}
return nil
}
// AsFloat64 returns the float64 value. Make sure that the Value's
// type is FLOAT64.
func (v Value) AsFloat64() float64 {
return internal.RawToFloat64(v.numeric)
}
// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
// FLOAT64SLICE.
func (v Value) AsFloat64Slice() []float64 {
if s, ok := v.slice.(*[]float64); ok {
return *s
}
return nil
}
// AsString returns the string value. Make sure that the Value's type
// is STRING.
func (v Value) AsString() string {
return v.stringly
}
// AsStringSlice returns the []string value. Make sure that the Value's type is
// STRINGSLICE.
func (v Value) AsStringSlice() []string {
if s, ok := v.slice.(*[]string); ok {
return *s
}
return nil
}
type unknownValueType struct{}
// AsInterface returns Value's data as interface{}.
func (v Value) AsInterface() interface{} {
switch v.Type() {
case BOOL:
return v.AsBool()
case BOOLSLICE:
return v.AsBoolSlice()
case INT64:
return v.AsInt64()
case INT64SLICE:
return v.AsInt64Slice()
case FLOAT64:
return v.AsFloat64()
case FLOAT64SLICE:
return v.AsFloat64Slice()
case STRING:
return v.stringly
case STRINGSLICE:
return v.AsStringSlice()
}
return unknownValueType{}
}
// Emit returns a string representation of Value's data.
func (v Value) Emit() string {
switch v.Type() {
case BOOLSLICE:
return fmt.Sprint(*(v.slice.(*[]bool)))
case BOOL:
return strconv.FormatBool(v.AsBool())
case INT64SLICE:
return fmt.Sprint(*(v.slice.(*[]int64)))
case INT64:
return strconv.FormatInt(v.AsInt64(), 10)
case FLOAT64SLICE:
return fmt.Sprint(*(v.slice.(*[]float64)))
case FLOAT64:
return fmt.Sprint(v.AsFloat64())
case STRINGSLICE:
return fmt.Sprint(*(v.slice.(*[]string)))
case STRING:
return v.stringly
default:
return "unknown"
}
}
// MarshalJSON returns the JSON encoding of the Value.
func (v Value) MarshalJSON() ([]byte, error) {
var jsonVal struct {
Type string
Value interface{}
}
jsonVal.Type = v.Type().String()
jsonVal.Value = v.AsInterface()
return json.Marshal(jsonVal)
}

View File

@ -0,0 +1,509 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage
import (
"errors"
"fmt"
"net/url"
"regexp"
"strings"
"go.opentelemetry.io/otel/internal/baggage"
)
const (
maxMembers = 180
maxBytesPerMembers = 4096
maxBytesPerBaggageString = 8192
listDelimiter = ","
keyValueDelimiter = "="
propertyDelimiter = ";"
keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
)
var (
keyRe = regexp.MustCompile(`^` + keyDef + `$`)
valueRe = regexp.MustCompile(`^` + valueDef + `$`)
propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
)
var (
errInvalidKey = errors.New("invalid key")
errInvalidValue = errors.New("invalid value")
errInvalidProperty = errors.New("invalid baggage list-member property")
errInvalidMember = errors.New("invalid baggage list-member")
errMemberNumber = errors.New("too many list-members in baggage-string")
errMemberBytes = errors.New("list-member too large")
errBaggageBytes = errors.New("baggage-string too large")
)
// Property is an additional metadata entry for a baggage list-member.
type Property struct {
key, value string
// hasValue distinguishes a property whose value is explicitly set to the
// zero-value (empty string) from a property that has no value at all.
hasValue bool
}
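// NewKeyProperty returns a new Property for key. An error is returned if key
// is invalid according to the W3C Baggage specification.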
func NewKeyProperty(key string) (Property, error) {
p := Property{}
if !keyRe.MatchString(key) {
return p, fmt.Errorf("%w: %q", errInvalidKey, key)
}
p.key = key
return p, nil
}
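// NewKeyValueProperty returns a new Property for key with value. An error is
// returned if key or value are invalid according to the W3C Baggage
// specification.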
func NewKeyValueProperty(key, value string) (Property, error) {
p := Property{}
if !keyRe.MatchString(key) {
return p, fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
return p, fmt.Errorf("%w: %q", errInvalidValue, value)
}
p.key = key
p.value = value
p.hasValue = true
return p, nil
}
// parseProperty attempts to decode a Property from the passed string. It
// returns an error if the input is invalid according to the W3C Baggage
// specification.
func parseProperty(property string) (Property, error) {
p := Property{}
if property == "" {
return p, nil
}
match := propertyRe.FindStringSubmatch(property)
if len(match) != 4 {
return p, fmt.Errorf("%w: %q", errInvalidProperty, property)
}
if match[1] != "" {
p.key = match[1]
} else {
p.key = match[2]
p.value = match[3]
p.hasValue = true
}
return p, nil
}
// validate ensures p conforms to the W3C Baggage specification, returning an
// error otherwise.
func (p Property) validate() error {
errFunc := func(err error) error {
return fmt.Errorf("invalid property: %w", err)
}
if !keyRe.MatchString(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}
if p.hasValue && !valueRe.MatchString(p.value) {
return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
}
if !p.hasValue && p.value != "" {
return errFunc(errors.New("inconsistent value"))
}
return nil
}
// Key returns the Property key.
func (p Property) Key() string {
return p.key
}
// Value returns the Property value. Additionally a boolean is returned
// indicating whether the Property has a value; this distinguishes a Property
// with an empty value from one whose value is not set.
func (p Property) Value() (string, bool) {
return p.value, p.hasValue
}
// String encodes Property into a string compliant with the W3C Baggage
// specification.
func (p Property) String() string {
if p.hasValue {
return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
}
return p.key
}
type properties []Property
func fromInternalProperties(iProps []baggage.Property) properties {
if len(iProps) == 0 {
return nil
}
props := make(properties, len(iProps))
for i, p := range iProps {
props[i] = Property{
key: p.Key,
value: p.Value,
hasValue: p.HasValue,
}
}
return props
}
func (p properties) asInternal() []baggage.Property {
if len(p) == 0 {
return nil
}
iProps := make([]baggage.Property, len(p))
for i, prop := range p {
iProps[i] = baggage.Property{
Key: prop.key,
Value: prop.value,
HasValue: prop.hasValue,
}
}
return iProps
}
func (p properties) Copy() properties {
if len(p) == 0 {
return nil
}
props := make(properties, len(p))
copy(props, p)
return props
}
// validate ensures each Property in p conforms to the W3C Baggage
// specification, returning an error otherwise.
func (p properties) validate() error {
for _, prop := range p {
if err := prop.validate(); err != nil {
return err
}
}
return nil
}
// String encodes properties into a string compliant with the W3C Baggage
// specification.
func (p properties) String() string {
props := make([]string, len(p))
for i, prop := range p {
props[i] = prop.String()
}
return strings.Join(props, propertyDelimiter)
}
// Member is a list-member of a baggage-string as defined by the W3C Baggage
// specification.
type Member struct {
key, value string
properties properties
}
// NewMember returns a new Member from the passed arguments. An error is
// returned if the created Member would be invalid according to the W3C
// Baggage specification.
func NewMember(key, value string, props ...Property) (Member, error) {
m := Member{key: key, value: value, properties: properties(props).Copy()}
if err := m.validate(); err != nil {
return Member{}, err
}
return m, nil
}
// parseMember attempts to decode a Member from the passed string. It returns
// an error if the input is invalid according to the W3C Baggage
// specification.
func parseMember(member string) (Member, error) {
if n := len(member); n > maxBytesPerMembers {
return Member{}, fmt.Errorf("%w: %d", errMemberBytes, n)
}
var (
key, value string
props properties
)
parts := strings.SplitN(member, propertyDelimiter, 2)
switch len(parts) {
case 2:
// Parse the member properties.
for _, pStr := range strings.Split(parts[1], propertyDelimiter) {
p, err := parseProperty(pStr)
if err != nil {
return Member{}, err
}
props = append(props, p)
}
fallthrough
case 1:
// Parse the member key/value pair.
// Take into account a value can contain equal signs (=).
kv := strings.SplitN(parts[0], keyValueDelimiter, 2)
if len(kv) != 2 {
return Member{}, fmt.Errorf("%w: %q", errInvalidMember, member)
}
// "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure."
key, value = strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1])
if !keyRe.MatchString(key) {
return Member{}, fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
return Member{}, fmt.Errorf("%w: %q", errInvalidValue, value)
}
default:
// This should never happen unless a developer has changed the string
// splitting somehow. Panic instead of failing silently and allowing
// the bug to slip past the CI checks.
panic("failed to parse baggage member")
}
return Member{key: key, value: value, properties: props}, nil
}
// validate ensures m conforms to the W3C Baggage specification, returning an
// error otherwise.
func (m Member) validate() error {
if !keyRe.MatchString(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
}
if !valueRe.MatchString(m.value) {
return fmt.Errorf("%w: %q", errInvalidValue, m.value)
}
return m.properties.validate()
}
// Key returns the Member key.
func (m Member) Key() string { return m.key }
// Value returns the Member value.
func (m Member) Value() string { return m.value }
// Properties returns a copy of the Member properties.
func (m Member) Properties() []Property { return m.properties.Copy() }
// String encodes Member into a string compliant with the W3C Baggage
// specification.
func (m Member) String() string {
// A key is just an ASCII string, but a value is URL encoded UTF-8.
s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
if len(m.properties) > 0 {
s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
}
return s
}
// Baggage is a list of baggage members representing the baggage-string as
// defined by the W3C Baggage specification.
type Baggage struct { //nolint:golint
list baggage.List
}
// New returns a new valid Baggage. It returns an error if the passed members
// are invalid according to the W3C Baggage specification or if it results in
// a Baggage exceeding limits set in that specification.
func New(members ...Member) (Baggage, error) {
if len(members) == 0 {
return Baggage{}, nil
}
b := make(baggage.List)
for _, m := range members {
if err := m.validate(); err != nil {
return Baggage{}, err
}
// OpenTelemetry resolves duplicates by last-one-wins.
b[m.key] = baggage.Item{
Value: m.value,
Properties: m.properties.asInternal(),
}
}
// Check member numbers after deduplicating.
if len(b) > maxMembers {
return Baggage{}, errMemberNumber
}
bag := Baggage{b}
if n := len(bag.String()); n > maxBytesPerBaggageString {
return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
}
return bag, nil
}
// Parse attempts to decode a baggage-string from the passed string. It
// returns an error if the input is invalid according to the W3C Baggage
// specification.
//
// If there are duplicate list-members contained in baggage, the last one
// defined (reading left-to-right) will be the only one kept. This diverges
// from the W3C Baggage specification which allows duplicate list-members, but
// conforms to the OpenTelemetry Baggage specification.
func Parse(bStr string) (Baggage, error) {
if bStr == "" {
return Baggage{}, nil
}
if n := len(bStr); n > maxBytesPerBaggageString {
return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
}
b := make(baggage.List)
for _, memberStr := range strings.Split(bStr, listDelimiter) {
m, err := parseMember(memberStr)
if err != nil {
return Baggage{}, err
}
// OpenTelemetry resolves duplicates by last-one-wins.
b[m.key] = baggage.Item{
Value: m.value,
Properties: m.properties.asInternal(),
}
}
// OpenTelemetry does not allow for duplicate list-members, but the W3C
// specification does. Now that we have deduplicated, ensure the baggage
// does not exceed list-member limits.
if len(b) > maxMembers {
return Baggage{}, errMemberNumber
}
return Baggage{b}, nil
}
// Member returns the baggage list-member identified by key.
//
// If there is no list-member matching the passed key the returned Member will
// be a zero-value Member.
func (b Baggage) Member(key string) Member {
v, ok := b.list[key]
if !ok {
// We do not need to worry about distinguishing between a zero-valued
// Member stored in the Baggage and a missing one, because a zero-valued
// Member is invalid according to the W3C Baggage specification (it has
// an empty key).
return Member{}
}
return Member{
key: key,
value: v.Value,
properties: fromInternalProperties(v.Properties),
}
}
// Members returns all the baggage list-members.
// The order of the returned list-members does not have significance.
func (b Baggage) Members() []Member {
if len(b.list) == 0 {
return nil
}
members := make([]Member, 0, len(b.list))
for k, v := range b.list {
members = append(members, Member{
key: k,
value: v.Value,
properties: fromInternalProperties(v.Properties),
})
}
return members
}
// SetMember returns a copy of the Baggage with the member included. If the
// baggage contains a Member with the same key the existing Member is
// replaced.
//
// If member is invalid according to the W3C Baggage specification, an error
// is returned with the original Baggage.
func (b Baggage) SetMember(member Member) (Baggage, error) {
if err := member.validate(); err != nil {
return b, fmt.Errorf("%w: %s", errInvalidMember, err)
}
n := len(b.list)
if _, ok := b.list[member.key]; !ok {
n++
}
list := make(baggage.List, n)
for k, v := range b.list {
// Do not copy if we are just going to overwrite.
if k == member.key {
continue
}
list[k] = v
}
list[member.key] = baggage.Item{
Value: member.value,
Properties: member.properties.asInternal(),
}
return Baggage{list: list}, nil
}
// DeleteMember returns a copy of the Baggage with the list-member identified
// by key removed.
func (b Baggage) DeleteMember(key string) Baggage {
n := len(b.list)
if _, ok := b.list[key]; ok {
n--
}
list := make(baggage.List, n)
for k, v := range b.list {
if k == key {
continue
}
list[k] = v
}
return Baggage{list: list}
}
// Len returns the number of list-members in the Baggage.
func (b Baggage) Len() int {
return len(b.list)
}
// String encodes Baggage into a string compliant with the W3C Baggage
// specification. The returned string will be invalid if the Baggage contains
// any invalid list-members.
func (b Baggage) String() string {
members := make([]string, 0, len(b.list))
for k, v := range b.list {
members = append(members, Member{
key: k,
value: v.Value,
properties: fromInternalProperties(v.Properties),
}.String())
}
return strings.Join(members, listDelimiter)
}

View File

@ -0,0 +1,39 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage // import "go.opentelemetry.io/otel/baggage"
import (
"context"
"go.opentelemetry.io/otel/internal/baggage"
)
// ContextWithBaggage returns a copy of parent with baggage.
func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
// Delegate so any hooks for the OpenTracing bridge are handled.
return baggage.ContextWithList(parent, b.list)
}
// ContextWithoutBaggage returns a copy of parent with no baggage.
func ContextWithoutBaggage(parent context.Context) context.Context {
// Delegate so any hooks for the OpenTracing bridge are handled.
return baggage.ContextWithList(parent, nil)
}
// FromContext returns the baggage contained in ctx.
func FromContext(ctx context.Context) Baggage {
// Delegate so any hooks for the OpenTracing bridge are handled.
return Baggage{list: baggage.ListFromContext(ctx)}
}

View File

@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package unit provides units.
//
// This package is currently in a pre-GA phase. Backwards incompatible changes
// may be introduced in subsequent minor version releases as we work to track
// the evolving OpenTelemetry specification and user feedback.
package unit // import "go.opentelemetry.io/otel/unit"
/*
Package baggage provides functionality for storing and retrieving
baggage items in Go context. For propagating the baggage, see the
go.opentelemetry.io/otel/propagation package.
*/
package baggage // import "go.opentelemetry.io/otel/baggage"
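// A minimal usage sketch (assuming an existing context.Context named ctx;
// error handling omitted for brevity):
//
//	m, _ := baggage.NewMember("user_id", "42")
//	bag, _ := baggage.New(m)
//	ctx = baggage.ContextWithBaggage(ctx, bag)
//	for _, member := range baggage.FromContext(ctx).Members() {
//		fmt.Println(member.Key(), member.Value())
//	}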

View File

@ -15,11 +15,7 @@
/*
Package codes defines the canonical error codes used by OpenTelemetry.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track
the evolving OpenTelemetry specification and user feedback.
It conforms to [the OpenTelemetry
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#statuscanonicalcode).
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode).
*/
package codes // import "go.opentelemetry.io/otel/codes"
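// A hedged usage sketch (assumes a trace.Span value named span):
//
//	span.SetStatus(codes.Error, "operation failed")
//	// or, where an explicit success status is desired:
//	span.SetStatus(codes.Ok, "")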

View File

@ -16,16 +16,14 @@
Package otel provides global access to the OpenTelemetry API. The subpackages of
the otel package provide an implementation of the OpenTelemetry API.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.
The provided API is used to instrument code and measure data about that code's
performance and operation. The measured data, by default, is not processed or
transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
exporters are used to process and transport this data.
To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/.
To read more about tracing, see go.opentelemetry.io/otel/trace.
To read more about metrics, see go.opentelemetry.io/otel/metric.
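A minimal instrumentation sketch (a hedged example, assuming either a
registered SDK TracerProvider or the default no-op implementation):

	tracer := otel.Tracer("example.com/mylib")
	ctx, span := tracer.Start(context.Background(), "operation")
	defer span.End()
	_ = ctx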

View File

@ -16,7 +16,23 @@ package otel // import "go.opentelemetry.io/otel"
// ErrorHandler handles irremediable events.
type ErrorHandler interface {
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Handle handles any error deemed irremediable by an OpenTelemetry
// component.
Handle(error)
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
}
// ErrorHandlerFunc is a convenience adapter to allow the use of a function
// as an ErrorHandler.
type ErrorHandlerFunc func(error)
var _ ErrorHandler = ErrorHandlerFunc(nil)
// Handle handles the irremediable error by calling the ErrorHandlerFunc itself.
func (f ErrorHandlerFunc) Handle(err error) {
f(err)
}
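// A hedged usage sketch (not part of this file): a plain function can be
// registered as the global error handler via ErrorHandlerFunc, e.g.
//
//	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
//		log.Printf("opentelemetry error: %v", err)
//	}))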

View File

@ -0,0 +1,50 @@
# OpenTelemetry-Go Jaeger Exporter
[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/jaeger.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger)
[OpenTelemetry span exporter for Jaeger](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/jaeger.md) implementation.
## Installation
```
go get -u go.opentelemetry.io/otel/exporters/jaeger
```
## Example
See [../../example/jaeger](../../example/jaeger).
## Configuration
The exporter can be used to send spans to:
- Jaeger agent using `jaeger.thrift` over compact thrift protocol via
[`WithAgentEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentEndpoint) option.
- Jaeger collector using `jaeger.thrift` over HTTP via
[`WithCollectorEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithCollectorEndpoint) option.
### Environment Variables
The following environment variables can be used
(instead of options objects) to override the default configuration.
| Environment variable | Option | Default value |
| --------------------------------- | --------------------------------------------------------------------------------------------- | ----------------------------------- |
| `OTEL_EXPORTER_JAEGER_AGENT_HOST` | [`WithAgentHost`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentHost) | `localhost` |
| `OTEL_EXPORTER_JAEGER_AGENT_PORT` | [`WithAgentPort`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentPort) | `6831` |
| `OTEL_EXPORTER_JAEGER_ENDPOINT` | [`WithEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithEndpoint) | `http://localhost:14268/api/traces` |
| `OTEL_EXPORTER_JAEGER_USER` | [`WithUsername`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithUsername) | |
| `OTEL_EXPORTER_JAEGER_PASSWORD` | [`WithPassword`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithPassword) | |
Configuration using options have precedence over the environment variables.
## Contributing
This exporter uses a vendored copy of the Apache Thrift library (v0.14.1) at a custom import path.
When re-generating Thrift code in the future, please adapt import paths as necessary.
## References
- [Jaeger](https://www.jaegertracing.io/)
- [OpenTelemetry to Jaeger Transformation](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/jaeger.md)
- [OpenTelemetry Environment Variable Specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md)

View File

@ -0,0 +1,203 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
import (
"context"
"fmt"
"io"
"log"
"net"
"strings"
"time"
"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
genAgent "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent"
gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
)
// udpPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent
const udpPacketMaxLength = 65000
// agentClientUDP is a UDP client to Jaeger agent that implements gen.Agent interface.
type agentClientUDP struct {
genAgent.Agent
io.Closer
connUDP udpConn
client *genAgent.AgentClient
maxPacketSize int // max size of datagram in bytes
thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
thriftProtocol thrift.TProtocol
}
type udpConn interface {
Write([]byte) (int, error)
SetWriteBuffer(int) error
Close() error
}
type agentClientUDPParams struct {
Host string
Port string
MaxPacketSize int
Logger *log.Logger
AttemptReconnecting bool
AttemptReconnectInterval time.Duration
}
// newAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
func newAgentClientUDP(params agentClientUDPParams) (*agentClientUDP, error) {
hostPort := net.JoinHostPort(params.Host, params.Port)
// validate hostport
if _, _, err := net.SplitHostPort(hostPort); err != nil {
return nil, err
}
if params.MaxPacketSize <= 0 {
params.MaxPacketSize = udpPacketMaxLength
}
if params.AttemptReconnecting && params.AttemptReconnectInterval <= 0 {
params.AttemptReconnectInterval = time.Second * 30
}
thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
protocolFactory := thrift.NewTCompactProtocolFactoryConf(&thrift.TConfiguration{})
thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
client := genAgent.NewAgentClientFactory(thriftBuffer, protocolFactory)
var connUDP udpConn
var err error
if params.AttemptReconnecting {
// host is hostname, setup resolver loop in case host record changes during operation
connUDP, err = newReconnectingUDPConn(hostPort, params.MaxPacketSize, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger)
if err != nil {
return nil, err
}
} else {
destAddr, err := net.ResolveUDPAddr("udp", hostPort)
if err != nil {
return nil, err
}
connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr)
if err != nil {
return nil, err
}
}
if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil {
return nil, err
}
return &agentClientUDP{
connUDP: connUDP,
client: client,
maxPacketSize: params.MaxPacketSize,
thriftBuffer: thriftBuffer,
thriftProtocol: thriftProtocol,
}, nil
}
// EmitBatch buffers batch to fit into UDP packets and sends the data to the agent.
func (a *agentClientUDP) EmitBatch(ctx context.Context, batch *gen.Batch) error {
var errs []error
processSize, err := a.calcSizeOfSerializedThrift(ctx, batch.Process)
if err != nil {
// drop the batch if serialization of process fails.
return err
}
totalSize := processSize
var spans []*gen.Span
for _, span := range batch.Spans {
spanSize, err := a.calcSizeOfSerializedThrift(ctx, span)
if err != nil {
errs = append(errs, fmt.Errorf("thrift serialization failed: %v", span))
continue
}
if spanSize+processSize >= a.maxPacketSize {
// drop the span that exceeds the limit.
errs = append(errs, fmt.Errorf("span too large to send: %v", span))
continue
}
if totalSize+spanSize >= a.maxPacketSize {
if err := a.flush(ctx, &gen.Batch{
Process: batch.Process,
Spans: spans,
}); err != nil {
errs = append(errs, err)
}
spans = spans[:0]
totalSize = processSize
}
totalSize += spanSize
spans = append(spans, span)
}
if len(spans) > 0 {
if err := a.flush(ctx, &gen.Batch{
Process: batch.Process,
Spans: spans,
}); err != nil {
errs = append(errs, err)
}
}
if len(errs) == 1 {
return errs[0]
} else if len(errs) > 1 {
joined := a.makeJoinedErrorString(errs)
return fmt.Errorf("multiple errors during transform: %s", joined)
}
return nil
}
// makeJoinedErrorString joins all the errors into one error message.
func (a *agentClientUDP) makeJoinedErrorString(errs []error) string {
var errMsgs []string
for _, err := range errs {
errMsgs = append(errMsgs, err.Error())
}
return strings.Join(errMsgs, ", ")
}
// flush will send the batch of spans to the agent.
func (a *agentClientUDP) flush(ctx context.Context, batch *gen.Batch) error {
a.thriftBuffer.Reset()
if err := a.client.EmitBatch(ctx, batch); err != nil {
return err
}
if a.thriftBuffer.Len() > a.maxPacketSize {
return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d",
a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
}
_, err := a.connUDP.Write(a.thriftBuffer.Bytes())
return err
}
// calcSizeOfSerializedThrift calculates the serialized thrift packet size.
func (a *agentClientUDP) calcSizeOfSerializedThrift(ctx context.Context, thriftStruct thrift.TStruct) (int, error) {
a.thriftBuffer.Reset()
err := thriftStruct.Write(ctx, a.thriftProtocol)
return a.thriftBuffer.Len(), err
}
// Close implements Close() of io.Closer and closes the underlying UDP connection.
func (a *agentClientUDP) Close() error {
return a.connUDP.Close()
}

View File

@ -13,8 +13,4 @@
// limitations under the License.
// Package jaeger contains an OpenTelemetry tracing exporter for Jaeger.
//
// This package is currently in a pre-GA phase. Backwards incompatible changes
// may be introduced in subsequent minor version releases as we work to track
// the evolving OpenTelemetry specification and user feedback.
package jaeger // import "go.opentelemetry.io/otel/exporters/trace/jaeger"
package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"

View File

@ -0,0 +1,44 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
import (
"os"
)
// Environment variable names
const (
// Hostname for the Jaeger agent, part of the address where the exporter sends spans,
// e.g. "localhost"
envAgentHost = "OTEL_EXPORTER_JAEGER_AGENT_HOST"
// Port for the Jaeger agent, part of the address where the exporter sends spans,
// e.g. 6831
envAgentPort = "OTEL_EXPORTER_JAEGER_AGENT_PORT"
// The HTTP endpoint for sending spans directly to a collector,
// e.g. http://jaeger-collector:14268/api/traces.
envEndpoint = "OTEL_EXPORTER_JAEGER_ENDPOINT"
// Username to send as part of "Basic" authentication to the collector endpoint.
envUser = "OTEL_EXPORTER_JAEGER_USER"
// Password to send as part of "Basic" authentication to the collector endpoint.
envPassword = "OTEL_EXPORTER_JAEGER_PASSWORD"
)
// envOr returns an env variable's value if it exists, or the default if not.
func envOr(key, defaultValue string) string {
if v, ok := os.LookupEnv(key); ok && v != "" {
return v
}
return defaultValue
}
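
A short sketch (not part of this file) of how these constants and envOr would typically combine; the "localhost"/"6831" defaults, the helper name, and the net import are assumptions based on conventional Jaeger agent settings:

// agentAddrFromEnv is a hypothetical helper: explicit environment values win,
// otherwise conventional Jaeger agent defaults are used.
func agentAddrFromEnv() string {
    host := envOr(envAgentHost, "localhost") // assumed default
    port := envOr(envAgentPort, "6831")      // assumed default
    return net.JoinHostPort(host, port)
}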

View File

@ -0,0 +1,75 @@
module go.opentelemetry.io/otel/exporters/jaeger
go 1.15
require (
github.com/google/go-cmp v0.5.6
github.com/stretchr/testify v1.7.0
go.opentelemetry.io/otel v1.0.0
go.opentelemetry.io/otel/sdk v1.0.0
go.opentelemetry.io/otel/trace v1.0.0
)
replace go.opentelemetry.io/otel/bridge/opencensus => ../../bridge/opencensus
replace go.opentelemetry.io/otel/bridge/opentracing => ../../bridge/opentracing
replace go.opentelemetry.io/otel/example/jaeger => ../../example/jaeger
replace go.opentelemetry.io/otel/example/namedtracer => ../../example/namedtracer
replace go.opentelemetry.io/otel/example/opencensus => ../../example/opencensus
replace go.opentelemetry.io/otel/example/otel-collector => ../../example/otel-collector
replace go.opentelemetry.io/otel/example/prom-collector => ../../example/prom-collector
replace go.opentelemetry.io/otel/example/prometheus => ../../example/prometheus
replace go.opentelemetry.io/otel/example/zipkin => ../../example/zipkin
replace go.opentelemetry.io/otel/exporters/prometheus => ../prometheus
replace go.opentelemetry.io/otel/exporters/otlp => ../otlp
replace go.opentelemetry.io/otel/exporters/jaeger => ./
replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools
replace go.opentelemetry.io/otel/metric => ../../metric
replace go.opentelemetry.io/otel/sdk/export/metric => ../../sdk/export/metric
replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric
replace go.opentelemetry.io/otel/trace => ../../trace
replace go.opentelemetry.io/otel/example/passthrough => ../../example/passthrough
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../otlp/otlptrace
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../otlp/otlptrace/otlptracegrpc
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel => ../..
replace go.opentelemetry.io/otel/exporters/zipkin => ../zipkin
replace go.opentelemetry.io/otel/sdk => ../../sdk
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../otlp/otlpmetric/otlpmetricgrpc
replace go.opentelemetry.io/otel/exporters/stdout/stdoutmetric => ../stdout/stdoutmetric
replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../stdout/stdouttrace
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../otlp/otlpmetric/otlpmetrichttp
replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test
replace go.opentelemetry.io/otel/example/fib => ../../example/fib

View File

@ -0,0 +1,18 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -0,0 +1,6 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package agent
var GoUnusedProtection__ int;

View File

@ -0,0 +1,27 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package agent
import (
"bytes"
"context"
"fmt"
"time"
"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore"
"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = time.Now
var _ = bytes.Equal
var _ = jaeger.GoUnusedProtection__
var _ = zipkincore.GoUnusedProtection__
func init() {
}

View File

@ -0,0 +1,412 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package agent
import (
"bytes"
"context"
"fmt"
"time"
"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore"
"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = time.Now
var _ = bytes.Equal
var _ = jaeger.GoUnusedProtection__
var _ = zipkincore.GoUnusedProtection__
type Agent interface {
// Parameters:
// - Spans
EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error)
// Parameters:
// - Batch
EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error)
}
type AgentClient struct {
c thrift.TClient
meta thrift.ResponseMeta
}
func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
return &AgentClient{
c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
}
}
func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
return &AgentClient{
c: thrift.NewTStandardClient(iprot, oprot),
}
}
func NewAgentClient(c thrift.TClient) *AgentClient {
return &AgentClient{
c: c,
}
}
func (p *AgentClient) Client_() thrift.TClient {
return p.c
}
func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta {
return p.meta
}
func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
p.meta = meta
}
// Parameters:
// - Spans
func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) {
var _args0 AgentEmitZipkinBatchArgs
_args0.Spans = spans
p.SetLastResponseMeta_(thrift.ResponseMeta{})
if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil {
return err
}
return nil
}
// Parameters:
// - Batch
func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) {
var _args1 AgentEmitBatchArgs
_args1.Batch = batch
p.SetLastResponseMeta_(thrift.ResponseMeta{})
if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil {
return err
}
return nil
}
type AgentProcessor struct {
processorMap map[string]thrift.TProcessorFunction
handler Agent
}
func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
p.processorMap[key] = processor
}
func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
processor, ok = p.processorMap[key]
return processor, ok
}
func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
return p.processorMap
}
func NewAgentProcessor(handler Agent) *AgentProcessor {
self2 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
return self2
}
func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
if err2 != nil {
return false, thrift.WrapTException(err2)
}
if processor, ok := p.GetProcessorFunction(name); ok {
return processor.Process(ctx, seqId, iprot, oprot)
}
iprot.Skip(ctx, thrift.STRUCT)
iprot.ReadMessageEnd(ctx)
x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
x3.Write(ctx, oprot)
oprot.WriteMessageEnd(ctx)
oprot.Flush(ctx)
return false, x3
}
type agentProcessorEmitZipkinBatch struct {
handler Agent
}
func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := AgentEmitZipkinBatchArgs{}
var err2 error
if err2 = args.Read(ctx, iprot); err2 != nil {
iprot.ReadMessageEnd(ctx)
return false, thrift.WrapTException(err2)
}
iprot.ReadMessageEnd(ctx)
tickerCancel := func() {}
_ = tickerCancel
if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil {
tickerCancel()
return true, thrift.WrapTException(err2)
}
tickerCancel()
return true, nil
}
type agentProcessorEmitBatch struct {
handler Agent
}
func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := AgentEmitBatchArgs{}
var err2 error
if err2 = args.Read(ctx, iprot); err2 != nil {
iprot.ReadMessageEnd(ctx)
return false, thrift.WrapTException(err2)
}
iprot.ReadMessageEnd(ctx)
tickerCancel := func() {}
_ = tickerCancel
if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil {
tickerCancel()
return true, thrift.WrapTException(err2)
}
tickerCancel()
return true, nil
}
// HELPER FUNCTIONS AND STRUCTURES
// Attributes:
// - Spans
type AgentEmitZipkinBatchArgs struct {
Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"`
}
func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
return &AgentEmitZipkinBatchArgs{}
}
func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
return p.Spans
}
func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if fieldTypeId == thrift.LIST {
if err := p.ReadField1(ctx, iprot); err != nil {
return err
}
} else {
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
default:
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(ctx); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin(ctx)
if err != nil {
return thrift.PrependError("error reading list begin: ", err)
}
tSlice := make([]*zipkincore.Span, 0, size)
p.Spans = tSlice
for i := 0; i < size; i++ {
_elem4 := &zipkincore.Span{}
if err := _elem4.Read(ctx, iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
}
p.Spans = append(p.Spans, _elem4)
}
if err := iprot.ReadListEnd(ctx); err != nil {
return thrift.PrependError("error reading list end: ", err)
}
return nil
}
func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
if err := p.writeField1(ctx, oprot); err != nil {
return err
}
}
if err := oprot.WriteFieldStop(ctx); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(ctx); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
}
if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
return thrift.PrependError("error writing list begin: ", err)
}
for _, v := range p.Spans {
if err := v.Write(ctx, oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
}
}
if err := oprot.WriteListEnd(ctx); err != nil {
return thrift.PrependError("error writing list end: ", err)
}
if err := oprot.WriteFieldEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
}
return err
}
func (p *AgentEmitZipkinBatchArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
}
// Attributes:
// - Batch
type AgentEmitBatchArgs struct {
Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"`
}
func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
return &AgentEmitBatchArgs{}
}
var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
if !p.IsSetBatch() {
return AgentEmitBatchArgs_Batch_DEFAULT
}
return p.Batch
}
func (p *AgentEmitBatchArgs) IsSetBatch() bool {
return p.Batch != nil
}
func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if fieldTypeId == thrift.STRUCT {
if err := p.ReadField1(ctx, iprot); err != nil {
return err
}
} else {
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
default:
if err := iprot.Skip(ctx, fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(ctx); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
p.Batch = &jaeger.Batch{}
if err := p.Batch.Read(ctx, iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
}
return nil
}
func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
if err := p.writeField1(ctx, oprot); err != nil {
return err
}
}
if err := oprot.WriteFieldStop(ctx); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(ctx); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
}
if err := p.Batch.Write(ctx, oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
}
if err := oprot.WriteFieldEnd(ctx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
}
return err
}
func (p *AgentEmitBatchArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
}
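
As a rough illustration of how this generated client is used by the exporter, the sketch below mirrors the memory-buffer wiring shown earlier: serialize an emitBatch call into a TMemoryBuffer via the compact protocol, and let the caller ship buf.Bytes() itself (over UDP in the exporter's case). The buffer size, the batch variable, and the import aliases (thrift, agent) are assumptions, not taken from this diff:

buf := thrift.NewTMemoryBufferLen(65000) // assumed size; roughly one UDP datagram
factory := thrift.NewTCompactProtocolFactoryConf(&thrift.TConfiguration{})
client := agent.NewAgentClientFactory(buf, factory)

// emitBatch is a oneway call (no result struct), so EmitBatch returns as soon
// as the message has been written to the in-memory transport.
if err := client.EmitBatch(context.Background(), batch); err != nil { // batch: a *jaeger.Batch, assumed
    // handle serialization error
}
payload := buf.Bytes() // serialized thrift message, ready to be written to the wire
_ = payload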

View File

@ -0,0 +1,6 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package jaeger
var GoUnusedProtection__ int;

View File

@ -1,5 +1,4 @@
// Autogenerated by Thrift Compiler (0.11.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package jaeger
@ -7,16 +6,16 @@ import (
"bytes"
"context"
"fmt"
"reflect"
"time"
"github.com/apache/thrift/lib/go/thrift"
"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = reflect.DeepEqual
var _ = time.Now
var _ = bytes.Equal
func init() {

View File

@ -0,0 +1,6 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package zipkincore
var GoUnusedProtection__ int;

View File

@ -0,0 +1,39 @@
// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
package zipkincore
import (
"bytes"
"context"
"fmt"
"time"
"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = time.Now
var _ = bytes.Equal
const CLIENT_SEND = "cs"
const CLIENT_RECV = "cr"
const SERVER_SEND = "ss"
const SERVER_RECV = "sr"
const MESSAGE_SEND = "ms"
const MESSAGE_RECV = "mr"
const WIRE_SEND = "ws"
const WIRE_RECV = "wr"
const CLIENT_SEND_FRAGMENT = "csf"
const CLIENT_RECV_FRAGMENT = "crf"
const SERVER_SEND_FRAGMENT = "ssf"
const SERVER_RECV_FRAGMENT = "srf"
const LOCAL_COMPONENT = "lc"
const CLIENT_ADDR = "ca"
const SERVER_ADDR = "sa"
const MESSAGE_ADDR = "ma"
func init() {
}

File diff suppressed because it is too large

View File

@ -0,0 +1,306 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------
SOFTWARE DISTRIBUTED WITH THRIFT:
The Apache Thrift software includes a number of subcomponents with
separate copyright notices and license terms. Your use of the source
code for the these subcomponents is subject to the terms and
conditions of the following licenses.
--------------------------------------------------
Portions of the following files are licensed under the MIT License:
lib/erl/src/Makefile.am
Please see doc/otp-base-license.txt for the full terms of this license.
--------------------------------------------------
For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
# Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
#
# Copying and distribution of this file, with or without
# modification, are permitted in any medium without royalty provided
# the copyright notice and this notice are preserved.
--------------------------------------------------
For the lib/nodejs/lib/thrift/json_parse.js:
/*
json_parse.js
2015-05-02
Public Domain.
NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
*/
(By Douglas Crockford <douglas@crockford.com>)
--------------------------------------------------
For lib/cpp/src/thrift/windows/SocketPair.cpp
/* socketpair.c
* Copyright 2007 by Nathan C. Myers <ncm@cantrip.org>; some rights reserved.
* This code is Free Software. It may be copied freely, in original or
* modified form, subject only to the restrictions that (1) the author is
* relieved from all responsibilities for any use for any purpose, and (2)
* this copyright notice must be retained, unchanged, in its entirety. If
* for any reason the author might be held responsible for any consequences
* of copying or use, license is withheld.
*/
--------------------------------------------------
For lib/py/compat/win32/stdint.h
// ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
--------------------------------------------------
Codegen template in t_html_generator.h
* Bootstrap v2.0.3
*
* Copyright 2012 Twitter, Inc
* Licensed under the Apache License v2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Designed and built with all the love in the world @twitter by @mdo and @fat.
---------------------------------------------------
For t_cl_generator.cc
* Copyright (c) 2008- Patrick Collison <patrick@collison.ie>
* Copyright (c) 2006- Facebook
---------------------------------------------------

View File

@ -19,6 +19,10 @@
package thrift
import (
"context"
)
const (
UNKNOWN_APPLICATION_EXCEPTION = 0
UNKNOWN_METHOD = 1
@ -51,8 +55,8 @@ var defaultApplicationExceptionMessage = map[int32]string{
type TApplicationException interface {
TException
TypeId() int32
Read(iprot TProtocol) error
Write(oprot TProtocol) error
Read(ctx context.Context, iprot TProtocol) error
Write(ctx context.Context, oprot TProtocol) error
}
type tApplicationException struct {
@ -60,6 +64,12 @@ type tApplicationException struct {
type_ int32
}
var _ TApplicationException = (*tApplicationException)(nil)
func (tApplicationException) TExceptionType() TExceptionType {
return TExceptionTypeApplication
}
func (e tApplicationException) Error() string {
if e.message != "" {
return e.message
@ -75,9 +85,9 @@ func (p *tApplicationException) TypeId() int32 {
return p.type_
}
func (p *tApplicationException) Read(iprot TProtocol) error {
func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error {
// TODO: this should really be generated by the compiler
_, err := iprot.ReadStructBegin()
_, err := iprot.ReadStructBegin(ctx)
if err != nil {
return err
}
@ -86,7 +96,7 @@ func (p *tApplicationException) Read(iprot TProtocol) error {
type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
for {
_, ttype, id, err := iprot.ReadFieldBegin()
_, ttype, id, err := iprot.ReadFieldBegin(ctx)
if err != nil {
return err
}
@ -96,34 +106,34 @@ func (p *tApplicationException) Read(iprot TProtocol) error {
switch id {
case 1:
if ttype == STRING {
if message, err = iprot.ReadString(); err != nil {
if message, err = iprot.ReadString(ctx); err != nil {
return err
}
} else {
if err = SkipDefaultDepth(iprot, ttype); err != nil {
if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
return err
}
}
case 2:
if ttype == I32 {
if type_, err = iprot.ReadI32(); err != nil {
if type_, err = iprot.ReadI32(ctx); err != nil {
return err
}
} else {
if err = SkipDefaultDepth(iprot, ttype); err != nil {
if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
return err
}
}
default:
if err = SkipDefaultDepth(iprot, ttype); err != nil {
if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
return err
}
}
if err = iprot.ReadFieldEnd(); err != nil {
if err = iprot.ReadFieldEnd(ctx); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
if err := iprot.ReadStructEnd(ctx); err != nil {
return err
}
@ -133,38 +143,38 @@ func (p *tApplicationException) Read(iprot TProtocol) error {
return nil
}
func (p *tApplicationException) Write(oprot TProtocol) (err error) {
err = oprot.WriteStructBegin("TApplicationException")
func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) {
err = oprot.WriteStructBegin(ctx, "TApplicationException")
if len(p.Error()) > 0 {
err = oprot.WriteFieldBegin("message", STRING, 1)
err = oprot.WriteFieldBegin(ctx, "message", STRING, 1)
if err != nil {
return
}
err = oprot.WriteString(p.Error())
err = oprot.WriteString(ctx, p.Error())
if err != nil {
return
}
err = oprot.WriteFieldEnd()
err = oprot.WriteFieldEnd(ctx)
if err != nil {
return
}
}
err = oprot.WriteFieldBegin("type", I32, 2)
err = oprot.WriteFieldBegin(ctx, "type", I32, 2)
if err != nil {
return
}
err = oprot.WriteI32(p.type_)
err = oprot.WriteI32(ctx, p.type_)
if err != nil {
return
}
err = oprot.WriteFieldEnd()
err = oprot.WriteFieldEnd(ctx)
if err != nil {
return
}
err = oprot.WriteFieldStop()
err = oprot.WriteFieldStop(ctx)
if err != nil {
return
}
err = oprot.WriteStructEnd()
err = oprot.WriteStructEnd(ctx)
return
}

View File

@ -0,0 +1,555 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
)
type TBinaryProtocol struct {
trans TRichTransport
origTransport TTransport
cfg *TConfiguration
buffer [64]byte
}
type TBinaryProtocolFactory struct {
cfg *TConfiguration
}
// Deprecated: Use NewTBinaryProtocolConf instead.
func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
return NewTBinaryProtocolConf(t, &TConfiguration{
noPropagation: true,
})
}
// Deprecated: Use NewTBinaryProtocolConf instead.
func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
return NewTBinaryProtocolConf(t, &TConfiguration{
TBinaryStrictRead: &strictRead,
TBinaryStrictWrite: &strictWrite,
noPropagation: true,
})
}
func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol {
PropagateTConfiguration(t, conf)
p := &TBinaryProtocol{
origTransport: t,
cfg: conf,
}
if et, ok := t.(TRichTransport); ok {
p.trans = et
} else {
p.trans = NewTRichTransport(t)
}
return p
}
// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
return NewTBinaryProtocolFactoryConf(&TConfiguration{
noPropagation: true,
})
}
// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
return NewTBinaryProtocolFactoryConf(&TConfiguration{
TBinaryStrictRead: &strictRead,
TBinaryStrictWrite: &strictWrite,
noPropagation: true,
})
}
func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory {
return &TBinaryProtocolFactory{
cfg: conf,
}
}
func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
return NewTBinaryProtocolConf(t, p.cfg)
}
func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) {
p.cfg = conf
}
/**
* Writing Methods
*/
func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
if p.cfg.GetTBinaryStrictWrite() {
version := uint32(VERSION_1) | uint32(typeId)
e := p.WriteI32(ctx, int32(version))
if e != nil {
return e
}
e = p.WriteString(ctx, name)
if e != nil {
return e
}
e = p.WriteI32(ctx, seqId)
return e
} else {
e := p.WriteString(ctx, name)
if e != nil {
return e
}
e = p.WriteByte(ctx, int8(typeId))
if e != nil {
return e
}
e = p.WriteI32(ctx, seqId)
return e
}
return nil
}
func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error {
return nil
}
func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
e := p.WriteByte(ctx, int8(typeId))
if e != nil {
return e
}
e = p.WriteI16(ctx, id)
return e
}
func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error {
e := p.WriteByte(ctx, STOP)
return e
}
func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
e := p.WriteByte(ctx, int8(keyType))
if e != nil {
return e
}
e = p.WriteByte(ctx, int8(valueType))
if e != nil {
return e
}
e = p.WriteI32(ctx, int32(size))
return e
}
func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
e := p.WriteByte(ctx, int8(elemType))
if e != nil {
return e
}
e = p.WriteI32(ctx, int32(size))
return e
}
func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
e := p.WriteByte(ctx, int8(elemType))
if e != nil {
return e
}
e = p.WriteI32(ctx, int32(size))
return e
}
func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error {
if value {
return p.WriteByte(ctx, 1)
}
return p.WriteByte(ctx, 0)
}
func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error {
e := p.trans.WriteByte(byte(value))
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error {
v := p.buffer[0:2]
binary.BigEndian.PutUint16(v, uint16(value))
_, e := p.trans.Write(v)
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error {
v := p.buffer[0:4]
binary.BigEndian.PutUint32(v, uint32(value))
_, e := p.trans.Write(v)
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error {
v := p.buffer[0:8]
binary.BigEndian.PutUint64(v, uint64(value))
_, err := p.trans.Write(v)
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error {
return p.WriteI64(ctx, int64(math.Float64bits(value)))
}
func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error {
e := p.WriteI32(ctx, int32(len(value)))
if e != nil {
return e
}
_, err := p.trans.WriteString(value)
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error {
e := p.WriteI32(ctx, int32(len(value)))
if e != nil {
return e
}
_, err := p.trans.Write(value)
return NewTProtocolException(err)
}
/**
* Reading methods
*/
func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
size, e := p.ReadI32(ctx)
if e != nil {
return "", typeId, 0, NewTProtocolException(e)
}
if size < 0 {
typeId = TMessageType(size & 0x0ff)
version := int64(int64(size) & VERSION_MASK)
if version != VERSION_1 {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
}
name, e = p.ReadString(ctx)
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
seqId, e = p.ReadI32(ctx)
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
return name, typeId, seqId, nil
}
if p.cfg.GetTBinaryStrictRead() {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
}
name, e2 := p.readStringBody(size)
if e2 != nil {
return name, typeId, seqId, e2
}
b, e3 := p.ReadByte(ctx)
if e3 != nil {
return name, typeId, seqId, e3
}
typeId = TMessageType(b)
seqId, e4 := p.ReadI32(ctx)
if e4 != nil {
return name, typeId, seqId, e4
}
return name, typeId, seqId, nil
}
func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
return
}
func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) {
t, err := p.ReadByte(ctx)
typeId = TType(t)
if err != nil {
return name, typeId, seqId, err
}
if t != STOP {
seqId, err = p.ReadI16(ctx)
}
return name, typeId, seqId, err
}
func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error {
return nil
}
var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) {
k, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
kType = TType(k)
v, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
vType = TType(v)
size32, e := p.ReadI32(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return kType, vType, size, nil
}
func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
b, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
size32, e := p.ReadI32(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return
}
func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
b, e := p.ReadByte(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
size32, e := p.ReadI32(ctx)
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return elemType, size, nil
}
func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error {
return nil
}
func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) {
b, e := p.ReadByte(ctx)
v := true
if b != 1 {
v = false
}
return v, e
}
func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) {
v, err := p.trans.ReadByte()
return int8(v), err
}
func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) {
buf := p.buffer[0:2]
err = p.readAll(ctx, buf)
value = int16(binary.BigEndian.Uint16(buf))
return value, err
}
func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) {
buf := p.buffer[0:4]
err = p.readAll(ctx, buf)
value = int32(binary.BigEndian.Uint32(buf))
return value, err
}
func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) {
buf := p.buffer[0:8]
err = p.readAll(ctx, buf)
value = int64(binary.BigEndian.Uint64(buf))
return value, err
}
func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
buf := p.buffer[0:8]
err = p.readAll(ctx, buf)
value = math.Float64frombits(binary.BigEndian.Uint64(buf))
return value, err
}
func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) {
size, e := p.ReadI32(ctx)
if e != nil {
return "", e
}
err = checkSizeForProtocol(size, p.cfg)
if err != nil {
return
}
if size < 0 {
err = invalidDataLength
return
}
if size == 0 {
return "", nil
}
if size < int32(len(p.buffer)) {
// Avoid allocation on small reads
buf := p.buffer[:size]
read, e := io.ReadFull(p.trans, buf)
return string(buf[:read]), NewTProtocolException(e)
}
return p.readStringBody(size)
}
func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
size, e := p.ReadI32(ctx)
if e != nil {
return nil, e
}
if err := checkSizeForProtocol(size, p.cfg); err != nil {
return nil, err
}
buf, err := safeReadBytes(size, p.trans)
return buf, NewTProtocolException(err)
}
func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
return NewTProtocolException(p.trans.Flush(ctx))
}
func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
return SkipDefaultDepth(ctx, p, fieldType)
}
func (p *TBinaryProtocol) Transport() TTransport {
return p.origTransport
}
func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) {
var read int
_, deadlineSet := ctx.Deadline()
for {
read, err = io.ReadFull(p.trans, buf)
if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil {
// This is an I/O timeout with nothing read,
// and we still have time left, so keep retrying.
continue
}
// For anything else, don't retry
break
}
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
buf, err := safeReadBytes(size, p.trans)
return string(buf), NewTProtocolException(err)
}
func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) {
PropagateTConfiguration(p.trans, conf)
PropagateTConfiguration(p.origTransport, conf)
p.cfg = conf
}
var (
_ TConfigurationSetter = (*TBinaryProtocolFactory)(nil)
_ TConfigurationSetter = (*TBinaryProtocol)(nil)
)
// This function is shared between TBinaryProtocol and TCompactProtocol.
//
// It tries to read size bytes from trans, in a way that prevents large
// allocations when size is insanely large (mostly caused by a malformed message).
func safeReadBytes(size int32, trans io.Reader) ([]byte, error) {
if size < 0 {
return nil, nil
}
buf := new(bytes.Buffer)
_, err := io.CopyN(buf, trans, int64(size))
return buf.Bytes(), err
}
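
A minimal round-trip sketch of the context-aware protocol API used above (every Write*/Read* call now takes a context.Context); TMemoryBuffer serves as the transport and the values are illustrative:

ctx := context.Background()
buf := thrift.NewTMemoryBufferLen(64)
prot := thrift.NewTBinaryProtocolConf(buf, &thrift.TConfiguration{})

if err := prot.WriteString(ctx, "hello"); err != nil {
    // handle write error
}
s, err := prot.ReadString(ctx) // s == "hello" when err == nil
_, _ = s, err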

View File

@ -90,3 +90,10 @@ func (p *TBufferedTransport) Flush(ctx context.Context) error {
func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
return p.tp.RemainingBytes()
}
// SetTConfiguration implements TConfigurationSetter for propagation.
func (p *TBufferedTransport) SetTConfiguration(conf *TConfiguration) {
PropagateTConfiguration(p.tp, conf)
}
var _ TConfigurationSetter = (*TBufferedTransport)(nil)
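
The var assertion above is the pattern this library uses to make configuration propagation checkable at compile time. A small sketch of how it plays out, assuming NewTBufferedTransport and PropagateTConfiguration keep their usual signatures from this thrift fork:

inner := thrift.NewTMemoryBufferLen(1024)
buffered := thrift.NewTBufferedTransport(inner, 4096)

// PropagateTConfiguration calls SetTConfiguration on anything implementing
// TConfigurationSetter; the buffered wrapper then forwards the config to the
// transport it wraps (a no-op for transports without the setter).
thrift.PropagateTConfiguration(buffered, &thrift.TConfiguration{MaxMessageSize: 1 << 20})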

View File

@ -5,8 +5,15 @@ import (
"fmt"
)
// ResponseMeta represents the metadata attached to the response.
type ResponseMeta struct {
// The headers in the response, if any.
// If the underlying transport/protocol is not THeader, this will always be nil.
Headers THeaderMap
}
type TClient interface {
Call(ctx context.Context, method string, args, result TStruct) error
Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
}
type TStandardClient struct {
@ -34,20 +41,20 @@ func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32
}
}
if err := oprot.WriteMessageBegin(method, CALL, seqId); err != nil {
if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil {
return err
}
if err := args.Write(oprot); err != nil {
if err := args.Write(ctx, oprot); err != nil {
return err
}
if err := oprot.WriteMessageEnd(); err != nil {
if err := oprot.WriteMessageEnd(ctx); err != nil {
return err
}
return oprot.Flush(ctx)
}
func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, result TStruct) error {
rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin()
func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error {
rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx)
if err != nil {
return err
}
@ -58,11 +65,11 @@ func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, resu
return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
} else if rTypeId == EXCEPTION {
var exception tApplicationException
if err := exception.Read(iprot); err != nil {
if err := exception.Read(ctx, iprot); err != nil {
return err
}
if err := iprot.ReadMessageEnd(); err != nil {
if err := iprot.ReadMessageEnd(ctx); err != nil {
return err
}
@ -71,25 +78,32 @@ func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, resu
return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
}
if err := result.Read(iprot); err != nil {
if err := result.Read(ctx, iprot); err != nil {
return err
}
return iprot.ReadMessageEnd()
return iprot.ReadMessageEnd(ctx)
}
func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error {
func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
p.seqId++
seqId := p.seqId
if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
return err
return ResponseMeta{}, err
}
// method is oneway
if result == nil {
return nil
return ResponseMeta{}, nil
}
return p.Recv(p.iprot, seqId, method, result)
err := p.Recv(ctx, p.iprot, seqId, method, result)
var headers THeaderMap
if hp, ok := p.iprot.(*THeaderProtocol); ok {
headers = hp.transport.readHeaders
}
return ResponseMeta{
Headers: headers,
}, err
}
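
To summarize the interface change: Call now returns a ResponseMeta alongside the error, and Headers is populated only when the underlying protocol is THeader. A hedged sketch, assuming a TStandardClient named c and an already-built args struct:

meta, err := c.Call(ctx, "emitBatch", args, nil) // nil result => oneway, returns after Send
if err != nil {
    // handle RPC error
}
_ = meta.Headers // non-nil only when the underlying protocol is THeader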

View File

@ -22,6 +22,7 @@ package thrift
import (
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
@ -74,20 +75,37 @@ func init() {
}
}
type TCompactProtocolFactory struct{}
type TCompactProtocolFactory struct {
cfg *TConfiguration
}
// Deprecated: Use NewTCompactProtocolFactoryConf instead.
func NewTCompactProtocolFactory() *TCompactProtocolFactory {
return &TCompactProtocolFactory{}
return NewTCompactProtocolFactoryConf(&TConfiguration{
noPropagation: true,
})
}
func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory {
return &TCompactProtocolFactory{
cfg: conf,
}
}
func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return NewTCompactProtocol(trans)
return NewTCompactProtocolConf(trans, p.cfg)
}
func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) {
p.cfg = conf
}
type TCompactProtocol struct {
trans TRichTransport
origTransport TTransport
cfg *TConfiguration
// Used to keep track of the last field for the current and previous structs,
// so we can do the delta stuff.
lastField []int
@ -106,9 +124,19 @@ type TCompactProtocol struct {
buffer [64]byte
}
// Create a TCompactProtocol given a TTransport
// Deprecated: Use NewTCompactProtocolConf instead.
func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
p := &TCompactProtocol{origTransport: trans, lastField: []int{}}
return NewTCompactProtocolConf(trans, &TConfiguration{
noPropagation: true,
})
}
func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol {
PropagateTConfiguration(trans, conf)
p := &TCompactProtocol{
origTransport: trans,
cfg: conf,
}
if et, ok := trans.(TRichTransport); ok {
p.trans = et
} else {
@ -116,7 +144,6 @@ func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
}
return p
}
//
@ -125,7 +152,7 @@ func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
// Write a message header to the wire. Compact Protocol messages contain the
// protocol version so we can migrate forwards in the future if need be.
func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
if err != nil {
return NewTProtocolException(err)
@ -138,17 +165,17 @@ func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, s
if err != nil {
return NewTProtocolException(err)
}
e := p.WriteString(name)
e := p.WriteString(ctx, name)
return e
}
func (p *TCompactProtocol) WriteMessageEnd() error { return nil }
func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil }
// Write a struct begin. This doesn't actually put anything on the wire. We
// use it as an opportunity to put special placeholder markers on the field
// stack so we can get the field id deltas correct.
func (p *TCompactProtocol) WriteStructBegin(name string) error {
func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error {
p.lastField = append(p.lastField, p.lastFieldId)
p.lastFieldId = 0
return nil
@ -157,26 +184,29 @@ func (p *TCompactProtocol) WriteStructBegin(name string) error {
// Write a struct end. This doesn't actually put anything on the wire. We use
// this as an opportunity to pop the last field from the current struct off
// of the field stack.
func (p *TCompactProtocol) WriteStructEnd() error {
func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error {
if len(p.lastField) <= 0 {
return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before"))
}
p.lastFieldId = p.lastField[len(p.lastField)-1]
p.lastField = p.lastField[:len(p.lastField)-1]
return nil
}
func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
if typeId == BOOL {
// we want to possibly include the value, so we'll wait.
p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
return nil
}
_, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF)
_, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF)
return NewTProtocolException(err)
}
// The workhorse of writeFieldBegin. It has the option of doing a
// 'type override' of the type header. This is used specifically in the
// boolean field case.
func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) {
func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) {
// short lastField = lastField_.pop();
// if there's a type override, use that.
@ -201,7 +231,7 @@ func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id
if err != nil {
return 0, err
}
err = p.WriteI16(id)
err = p.WriteI16(ctx, id)
written = 1 + 2
if err != nil {
return 0, err
@ -209,18 +239,17 @@ func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id
}
p.lastFieldId = fieldId
// p.lastField.Push(field.id);
return written, nil
}
func (p *TCompactProtocol) WriteFieldEnd() error { return nil }
func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil }
func (p *TCompactProtocol) WriteFieldStop() error {
func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error {
err := p.writeByteDirect(STOP)
return NewTProtocolException(err)
}
func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
if size == 0 {
err := p.writeByteDirect(0)
return NewTProtocolException(err)
@ -233,32 +262,32 @@ func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size in
return NewTProtocolException(err)
}
func (p *TCompactProtocol) WriteMapEnd() error { return nil }
func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil }
// Write a list header.
func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error {
func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
_, err := p.writeCollectionBegin(elemType, size)
return NewTProtocolException(err)
}
func (p *TCompactProtocol) WriteListEnd() error { return nil }
func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil }
// Write a set header.
func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error {
func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
_, err := p.writeCollectionBegin(elemType, size)
return NewTProtocolException(err)
}
func (p *TCompactProtocol) WriteSetEnd() error { return nil }
func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil }
func (p *TCompactProtocol) WriteBool(value bool) error {
func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error {
v := byte(COMPACT_BOOLEAN_FALSE)
if value {
v = byte(COMPACT_BOOLEAN_TRUE)
}
if p.booleanFieldPending {
// we haven't written the field header yet
_, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v)
_, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v)
p.booleanFieldPending = false
return NewTProtocolException(err)
}
@ -268,31 +297,31 @@ func (p *TCompactProtocol) WriteBool(value bool) error {
}
// Write a byte. Nothing to see here!
func (p *TCompactProtocol) WriteByte(value int8) error {
func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error {
err := p.writeByteDirect(byte(value))
return NewTProtocolException(err)
}
// Write an I16 as a zigzag varint.
func (p *TCompactProtocol) WriteI16(value int16) error {
func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error {
_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
return NewTProtocolException(err)
}
// Write an i32 as a zigzag varint.
func (p *TCompactProtocol) WriteI32(value int32) error {
func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error {
_, err := p.writeVarint32(p.int32ToZigzag(value))
return NewTProtocolException(err)
}
// Write an i64 as a zigzag varint.
func (p *TCompactProtocol) WriteI64(value int64) error {
func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error {
_, err := p.writeVarint64(p.int64ToZigzag(value))
return NewTProtocolException(err)
}
// Write a double to the wire as 8 bytes.
func (p *TCompactProtocol) WriteDouble(value float64) error {
func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error {
buf := p.buffer[0:8]
binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
_, err := p.trans.Write(buf)
@ -300,7 +329,7 @@ func (p *TCompactProtocol) WriteDouble(value float64) error {
}
// Write a string to the wire with a varint size preceding.
func (p *TCompactProtocol) WriteString(value string) error {
func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error {
_, e := p.writeVarint32(int32(len(value)))
if e != nil {
return NewTProtocolException(e)
@ -312,7 +341,7 @@ func (p *TCompactProtocol) WriteString(value string) error {
}
// Write a byte array, using a varint for the size.
func (p *TCompactProtocol) WriteBinary(bin []byte) error {
func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error {
_, e := p.writeVarint32(int32(len(bin)))
if e != nil {
return NewTProtocolException(e)
@ -329,9 +358,20 @@ func (p *TCompactProtocol) WriteBinary(bin []byte) error {
//
// Read a message header.
func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
var protocolId byte
protocolId, err := p.readByteDirect()
_, deadlineSet := ctx.Deadline()
for {
protocolId, err = p.readByteDirect()
if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
// keep retrying I/O timeout errors since we still have
// time left
continue
}
// For anything else, don't retry
break
}
if err != nil {
return
}
@ -358,15 +398,15 @@ func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType,
err = NewTProtocolException(e)
return
}
name, err = p.ReadString()
name, err = p.ReadString(ctx)
return
}
func (p *TCompactProtocol) ReadMessageEnd() error { return nil }
func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil }
// Read a struct begin. There's nothing on the wire for this, but it is our
// opportunity to push a new struct begin marker onto the field stack.
func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
p.lastField = append(p.lastField, p.lastFieldId)
p.lastFieldId = 0
return
@ -374,15 +414,18 @@ func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
// Doesn't actually consume any wire data, just removes the last field for
// this struct from the field stack.
func (p *TCompactProtocol) ReadStructEnd() error {
func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error {
// consume the last field we read off the wire.
if len(p.lastField) <= 0 {
return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before"))
}
p.lastFieldId = p.lastField[len(p.lastField)-1]
p.lastField = p.lastField[:len(p.lastField)-1]
return nil
}
// Read a field header off the wire.
func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
t, err := p.readByteDirect()
if err != nil {
return
@ -397,7 +440,7 @@ func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16
modifier := int16((t & 0xf0) >> 4)
if modifier == 0 {
// not a delta. look ahead for the zigzag varint field id.
id, err = p.ReadI16()
id, err = p.ReadI16(ctx)
if err != nil {
return
}
@ -423,12 +466,12 @@ func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16
return
}
func (p *TCompactProtocol) ReadFieldEnd() error { return nil }
func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil }
// Read a map header off the wire. If the size is zero, skip reading the key
// and value type. This means that 0-length maps will yield TMaps without the
// "correct" types.
func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
size32, e := p.readVarint32()
if e != nil {
err = NewTProtocolException(e)
@ -452,13 +495,13 @@ func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size
return
}
func (p *TCompactProtocol) ReadMapEnd() error { return nil }
func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil }
// Read a list header off the wire. If the list size is 0-14, the size will
// be packed into the element type header. If it's a longer list, the 4 MSB
// of the element type header will be 0xF, and a varint will follow with the
// true size.
func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) {
func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
size_and_type, err := p.readByteDirect()
if err != nil {
return
@ -484,22 +527,22 @@ func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error)
return
}
func (p *TCompactProtocol) ReadListEnd() error { return nil }
func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil }
// Read a set header off the wire. If the set size is 0-14, the size will
// be packed into the element type header. If it's a longer set, the 4 MSB
// of the element type header will be 0xF, and a varint will follow with the
// true size.
func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) {
return p.ReadListBegin()
func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
return p.ReadListBegin(ctx)
}
func (p *TCompactProtocol) ReadSetEnd() error { return nil }
func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil }
// Read a boolean off the wire. If this is a boolean field, the value should
// already have been read during readFieldBegin, so we'll just consume the
// pre-stored value. Otherwise, read a byte.
func (p *TCompactProtocol) ReadBool() (value bool, err error) {
func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) {
if p.boolValueIsNotNull {
p.boolValueIsNotNull = false
return p.boolValue, nil
@ -509,7 +552,7 @@ func (p *TCompactProtocol) ReadBool() (value bool, err error) {
}
// Read a single byte off the wire. Nothing interesting here.
func (p *TCompactProtocol) ReadByte() (int8, error) {
func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) {
v, err := p.readByteDirect()
if err != nil {
return 0, NewTProtocolException(err)
@ -518,13 +561,13 @@ func (p *TCompactProtocol) ReadByte() (int8, error) {
}
// Read an i16 from the wire as a zigzag varint.
func (p *TCompactProtocol) ReadI16() (value int16, err error) {
v, err := p.ReadI32()
func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) {
v, err := p.ReadI32(ctx)
return int16(v), err
}
// Read an i32 from the wire as a zigzag varint.
func (p *TCompactProtocol) ReadI32() (value int32, err error) {
func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) {
v, e := p.readVarint32()
if e != nil {
return 0, NewTProtocolException(e)
@ -534,7 +577,7 @@ func (p *TCompactProtocol) ReadI32() (value int32, err error) {
}
// Read an i64 from the wire as a zigzag varint.
func (p *TCompactProtocol) ReadI64() (value int64, err error) {
func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) {
v, e := p.readVarint64()
if e != nil {
return 0, NewTProtocolException(e)
@ -544,7 +587,7 @@ func (p *TCompactProtocol) ReadI64() (value int64, err error) {
}
// No magic here - just read a double off the wire.
func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
longBits := p.buffer[0:8]
_, e := io.ReadFull(p.trans, longBits)
if e != nil {
@ -554,43 +597,44 @@ func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
}
// Reads a []byte (via readBinary), and then UTF-8 decodes it.
func (p *TCompactProtocol) ReadString() (value string, err error) {
func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) {
length, e := p.readVarint32()
if e != nil {
return "", NewTProtocolException(e)
}
if length < 0 {
return "", invalidDataLength
err = checkSizeForProtocol(length, p.cfg)
if err != nil {
return
}
if length == 0 {
return "", nil
}
var buf []byte
if length <= int32(len(p.buffer)) {
buf = p.buffer[0:length]
} else {
buf = make([]byte, length)
if length < int32(len(p.buffer)) {
// Avoid allocation on small reads
buf := p.buffer[:length]
read, e := io.ReadFull(p.trans, buf)
return string(buf[:read]), NewTProtocolException(e)
}
_, e = io.ReadFull(p.trans, buf)
buf, e := safeReadBytes(length, p.trans)
return string(buf), NewTProtocolException(e)
}
// Read a []byte from the wire.
func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
length, e := p.readVarint32()
if e != nil {
return nil, NewTProtocolException(e)
}
err = checkSizeForProtocol(length, p.cfg)
if err != nil {
return
}
if length == 0 {
return []byte{}, nil
}
if length < 0 {
return nil, invalidDataLength
}
buf := make([]byte, length)
_, e = io.ReadFull(p.trans, buf)
buf, e := safeReadBytes(length, p.trans)
return buf, NewTProtocolException(e)
}
@ -598,8 +642,8 @@ func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
return NewTProtocolException(p.trans.Flush(ctx))
}
func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
return SkipDefaultDepth(p, fieldType)
func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
return SkipDefaultDepth(ctx, p, fieldType)
}
func (p *TCompactProtocol) Transport() TTransport {
@ -801,10 +845,21 @@ func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
case COMPACT_STRUCT:
return STRUCT, nil
}
return STOP, TException(fmt.Errorf("don't know what type: %v", t&0x0f))
return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f))
}
// Given a TType value, find the appropriate TCompactProtocol.Types constant.
func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
return ttypeToCompactType[t]
}
func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) {
PropagateTConfiguration(p.trans, conf)
PropagateTConfiguration(p.origTransport, conf)
p.cfg = conf
}
var (
_ TConfigurationSetter = (*TCompactProtocolFactory)(nil)
_ TConfigurationSetter = (*TCompactProtocol)(nil)
)

View File

@ -0,0 +1,378 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"crypto/tls"
"fmt"
"time"
)
// Default TConfiguration values.
const (
DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024
DEFAULT_MAX_FRAME_SIZE = 16384000
DEFAULT_TBINARY_STRICT_READ = false
DEFAULT_TBINARY_STRICT_WRITE = true
DEFAULT_CONNECT_TIMEOUT = 0
DEFAULT_SOCKET_TIMEOUT = 0
)
// TConfiguration defines some configurations shared between TTransport,
// TProtocol, TTransportFactory, TProtocolFactory, and other implementations.
//
// When constructing TConfiguration, you only need to specify the non-default
// fields. All zero values have sane default values.
//
// Not all configurations defined are applicable to all implementations.
// Implementations are free to ignore the configurations not applicable to them.
//
// All functions attached to this type are nil-safe.
//
// See [1] for spec.
//
// NOTE: When using TConfiguration, fill in all the configurations you want to
// set across the stack, not only the ones you want to set in the immediate
// TTransport/TProtocol.
//
// For example, say you want to migrate this old code into using TConfiguration:
//
// socket := thrift.NewTSocketTimeout("host:port", time.Second)
// transFactory := thrift.NewTFramedTransportFactoryMaxLength(
// thrift.NewTTransportFactory(),
// 1024 * 1024 * 256,
// )
// protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
//
// This is the wrong way to do it because in the end the TConfiguration used by
// socket and transFactory will be overwritten by the one used by protoFactory
// because of TConfiguration propagation:
//
// // bad example, DO NOT USE
// socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
// ConnectTimeout: time.Second,
// SocketTimeout: time.Second,
// })
// transFactory := thrift.NewTFramedTransportFactoryConf(
// thrift.NewTTransportFactory(),
// &thrift.TConfiguration{
// MaxFrameSize: 1024 * 1024 * 256,
// },
// )
// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
// TBinaryStrictRead: thrift.BoolPtr(true),
// TBinaryStrictWrite: thrift.BoolPtr(true),
// })
//
// This is the correct way to do it:
//
// conf := &thrift.TConfiguration{
// ConnectTimeout: time.Second,
// SocketTimeout: time.Second,
//
// MaxFrameSize: 1024 * 1024 * 256,
//
// TBinaryStrictRead: thrift.BoolPtr(true),
// TBinaryStrictWrite: thrift.BoolPtr(true),
// }
// socket := thrift.NewTSocketConf("host:port", conf)
// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf)
//
// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md
type TConfiguration struct {
// If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead.
MaxMessageSize int32
// If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead.
//
// Also if MaxMessageSize < MaxFrameSize,
// MaxMessageSize will be used instead.
MaxFrameSize int32
// Connect and socket timeouts to be used by TSocket and TSSLSocket.
//
// 0 means no timeout.
//
// If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be
// used.
ConnectTimeout time.Duration
SocketTimeout time.Duration
// TLS config to be used by TSSLSocket.
TLSConfig *tls.Config
// Strict read/write configurations for TBinaryProtocol.
//
// BoolPtr helper function is available to use literal values.
TBinaryStrictRead *bool
TBinaryStrictWrite *bool
// The wrapped protocol id to be used in THeader transport/protocol.
//
// THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions
// are provided to help filling this value.
THeaderProtocolID *THeaderProtocolID
// Used internally by deprecated constructors, to avoid overriding
// underlying TTransport/TProtocol's cfg by accidental propagations.
//
// For external users this is always false.
noPropagation bool
}
// GetMaxMessageSize returns the max message size an implementation should
// follow.
//
// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil.
func (tc *TConfiguration) GetMaxMessageSize() int32 {
if tc == nil || tc.MaxMessageSize <= 0 {
return DEFAULT_MAX_MESSAGE_SIZE
}
return tc.MaxMessageSize
}
// GetMaxFrameSize returns the max frame size an implementation should follow.
//
// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil.
//
// If the configured max message size is smaller than the configured max frame
// size, the smaller one will be returned instead.
func (tc *TConfiguration) GetMaxFrameSize() int32 {
if tc == nil {
return DEFAULT_MAX_FRAME_SIZE
}
maxFrameSize := tc.MaxFrameSize
if maxFrameSize <= 0 {
maxFrameSize = DEFAULT_MAX_FRAME_SIZE
}
if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize {
return maxMessageSize
}
return maxFrameSize
}
// GetConnectTimeout returns the connect timeout that should be used by TSocket and
// TSSLSocket.
//
// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead.
func (tc *TConfiguration) GetConnectTimeout() time.Duration {
if tc == nil || tc.ConnectTimeout < 0 {
return DEFAULT_CONNECT_TIMEOUT
}
return tc.ConnectTimeout
}
// GetSocketTimeout returns the socket timeout that should be used by TSocket and
// TSSLSocket.
//
// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead.
func (tc *TConfiguration) GetSocketTimeout() time.Duration {
if tc == nil || tc.SocketTimeout < 0 {
return DEFAULT_SOCKET_TIMEOUT
}
return tc.SocketTimeout
}
// GetTLSConfig returns the TLS config that should be used by TSSLSocket.
//
// It's nil-safe. If tc is nil, nil will be returned instead.
func (tc *TConfiguration) GetTLSConfig() *tls.Config {
if tc == nil {
return nil
}
return tc.TLSConfig
}
// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol
// should follow.
//
// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or
// tc.TBinaryStrictRead is nil.
func (tc *TConfiguration) GetTBinaryStrictRead() bool {
if tc == nil || tc.TBinaryStrictRead == nil {
return DEFAULT_TBINARY_STRICT_READ
}
return *tc.TBinaryStrictRead
}
// GetTBinaryStrictWrite returns the strict write configuration TBinaryProtocol
// should follow.
//
// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or
// tc.TBinaryStrictWrite is nil.
func (tc *TConfiguration) GetTBinaryStrictWrite() bool {
if tc == nil || tc.TBinaryStrictWrite == nil {
return DEFAULT_TBINARY_STRICT_WRITE
}
return *tc.TBinaryStrictWrite
}
// GetTHeaderProtocolID returns the THeaderProtocolID that should be used by
// THeaderProtocol clients (for servers, they always use the same one as the
// client instead).
//
// It's nil-safe. If either tc or tc.THeaderProtocolID is nil,
// THeaderProtocolDefault will be returned instead.
// THeaderProtocolDefault will also be returned if configured value is invalid.
func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID {
if tc == nil || tc.THeaderProtocolID == nil {
return THeaderProtocolDefault
}
protoID := *tc.THeaderProtocolID
if err := protoID.Validate(); err != nil {
return THeaderProtocolDefault
}
return protoID
}
// THeaderProtocolIDPtr validates and returns the pointer to id.
//
// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault
// and the validation error will be returned.
func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) {
err := id.Validate()
if err != nil {
id = THeaderProtocolDefault
}
return &id, err
}
// THeaderProtocolIDPtrMust validates and returns the pointer to id.
//
// It's similar to THeaderProtocolIDPtr, but it panics on validation errors
// instead of returning them.
func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID {
ptr, err := THeaderProtocolIDPtr(id)
if err != nil {
panic(err)
}
return ptr
}
// TConfigurationSetter is an optional interface TProtocol, TTransport,
// TProtocolFactory, TTransportFactory, and other implementations can implement.
//
// It's intended to be called during initializations.
// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the
// middle of a message is undefined:
// It may or may not change the behavior of the current processing message,
// and it may even cause the current message to fail.
//
// Note for implementations: SetTConfiguration might be called multiple times
// with the same value in quick successions due to the implementation of the
// propagation. Implementations should make SetTConfiguration as simple as
// possible (usually just overwrite the stored configuration and propagate it to
// the wrapped TTransports/TProtocols).
type TConfigurationSetter interface {
SetTConfiguration(*TConfiguration)
}
// PropagateTConfiguration propagates cfg to impl if impl implements
// TConfigurationSetter and cfg is non-nil, otherwise it does nothing.
//
// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration
// with everything being default value, use &TConfiguration{} explicitly instead.
func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) {
if cfg == nil || cfg.noPropagation {
return
}
if setter, ok := impl.(TConfigurationSetter); ok {
setter.SetTConfiguration(cfg)
}
}
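
Illustrative sketch of how an external wrapper can participate in this propagation: implementing TConfigurationSetter is all PropagateTConfiguration looks for. The wrapper type below is hypothetical.

package example

import "github.com/apache/thrift/lib/go/thrift"

// countingTransport is a hypothetical wrapper transport; it forwards any
// TConfiguration it receives to the transport it wraps and keeps a copy.
type countingTransport struct {
	thrift.TTransport
	cfg *thrift.TConfiguration
}

func (t *countingTransport) SetTConfiguration(cfg *thrift.TConfiguration) {
	thrift.PropagateTConfiguration(t.TTransport, cfg)
	t.cfg = cfg
}

var _ thrift.TConfigurationSetter = (*countingTransport)(nil)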
func checkSizeForProtocol(size int32, cfg *TConfiguration) error {
if size < 0 {
return NewTProtocolExceptionWithType(
NEGATIVE_SIZE,
fmt.Errorf("negative size: %d", size),
)
}
if size > cfg.GetMaxMessageSize() {
return NewTProtocolExceptionWithType(
SIZE_LIMIT,
fmt.Errorf("size exceeded max allowed: %d", size),
)
}
return nil
}
type tTransportFactoryConf struct {
delegate TTransportFactory
cfg *TConfiguration
}
func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) {
trans, err := f.delegate.GetTransport(orig)
if err == nil {
PropagateTConfiguration(orig, f.cfg)
PropagateTConfiguration(trans, f.cfg)
}
return trans, err
}
func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(f.delegate, f.cfg)
f.cfg = cfg
}
// TTransportFactoryConf wraps a TTransportFactory to propagate
// TConfiguration on the factory's GetTransport calls.
func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory {
return &tTransportFactoryConf{
delegate: delegate,
cfg: conf,
}
}
type tProtocolFactoryConf struct {
delegate TProtocolFactory
cfg *TConfiguration
}
func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol {
proto := f.delegate.GetProtocol(trans)
PropagateTConfiguration(trans, f.cfg)
PropagateTConfiguration(proto, f.cfg)
return proto
}
func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(f.delegate, f.cfg)
f.cfg = cfg
}
// TProtocolFactoryConf wraps a TProtocolFactory to propagate
// TConfiguration on the factory's GetProtocol calls.
func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory {
return &tProtocolFactoryConf{
delegate: delegate,
cfg: conf,
}
}
var (
_ TConfigurationSetter = (*tTransportFactoryConf)(nil)
_ TConfigurationSetter = (*tProtocolFactoryConf)(nil)
)

View File

@ -0,0 +1,447 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"fmt"
)
type TDebugProtocol struct {
// Required. The actual TProtocol to do the read/write.
Delegate TProtocol
// Optional. The logger and prefix to log all the args/return values
// from Delegate TProtocol calls.
//
// If Logger is nil, StdLogger using stdlib log package with os.Stderr
// will be used. If disable logging is desired, set Logger to NopLogger
// explicitly instead of leaving it as nil/unset.
Logger Logger
LogPrefix string
// Optional. A TProtocol to duplicate everything read/written from Delegate.
//
// A typical use case of this is to use TSimpleJSONProtocol wrapping
// TMemoryBuffer in a middleware to JSON-log requests/responses.
//
// This feature is not available from TDebugProtocolFactory. In order to
// use it you have to construct TDebugProtocol directly, or set DuplicateTo
// field after getting a TDebugProtocol from the factory.
DuplicateTo TProtocol
}
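
Illustrative sketch of the DuplicateTo use case mentioned above: mirror every read/write as JSON into a TMemoryBuffer while silencing the per-call logging with NopLogger. The constructor name below is hypothetical.

package example

import "github.com/apache/thrift/lib/go/thrift"

// newJSONMirroredDebug returns a debug protocol that duplicates all traffic
// into the returned in-memory buffer as JSON, e.g. for request/response logging.
func newJSONMirroredDebug(inner thrift.TProtocol) (*thrift.TDebugProtocol, *thrift.TMemoryBuffer) {
	buf := thrift.NewTMemoryBuffer()
	return &thrift.TDebugProtocol{
		Delegate:    inner,
		Logger:      thrift.NopLogger,
		DuplicateTo: thrift.NewTSimpleJSONProtocol(buf),
	}, buf
}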
type TDebugProtocolFactory struct {
Underlying TProtocolFactory
LogPrefix string
Logger Logger
}
// NewTDebugProtocolFactory creates a TDebugProtocolFactory.
//
// Deprecated: Please use NewTDebugProtocolFactoryWithLogger or the struct
// itself instead. This version will use the default logger from standard
// library.
func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
return &TDebugProtocolFactory{
Underlying: underlying,
LogPrefix: logPrefix,
Logger: StdLogger(nil),
}
}
// NewTDebugProtocolFactoryWithLogger creates a TDebugProtocolFactory.
func NewTDebugProtocolFactoryWithLogger(underlying TProtocolFactory, logPrefix string, logger Logger) *TDebugProtocolFactory {
return &TDebugProtocolFactory{
Underlying: underlying,
LogPrefix: logPrefix,
Logger: logger,
}
}
func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return &TDebugProtocol{
Delegate: t.Underlying.GetProtocol(trans),
LogPrefix: t.LogPrefix,
Logger: fallbackLogger(t.Logger),
}
}
func (tdp *TDebugProtocol) logf(format string, v ...interface{}) {
fallbackLogger(tdp.Logger)(fmt.Sprintf(format, v...))
}
func (tdp *TDebugProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
err := tdp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid)
tdp.logf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid)
}
return err
}
func (tdp *TDebugProtocol) WriteMessageEnd(ctx context.Context) error {
err := tdp.Delegate.WriteMessageEnd(ctx)
tdp.logf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteMessageEnd(ctx)
}
return err
}
func (tdp *TDebugProtocol) WriteStructBegin(ctx context.Context, name string) error {
err := tdp.Delegate.WriteStructBegin(ctx, name)
tdp.logf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteStructBegin(ctx, name)
}
return err
}
func (tdp *TDebugProtocol) WriteStructEnd(ctx context.Context) error {
err := tdp.Delegate.WriteStructEnd(ctx)
tdp.logf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteStructEnd(ctx)
}
return err
}
func (tdp *TDebugProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
err := tdp.Delegate.WriteFieldBegin(ctx, name, typeId, id)
tdp.logf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id)
}
return err
}
func (tdp *TDebugProtocol) WriteFieldEnd(ctx context.Context) error {
err := tdp.Delegate.WriteFieldEnd(ctx)
tdp.logf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteFieldEnd(ctx)
}
return err
}
func (tdp *TDebugProtocol) WriteFieldStop(ctx context.Context) error {
err := tdp.Delegate.WriteFieldStop(ctx)
tdp.logf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteFieldStop(ctx)
}
return err
}
func (tdp *TDebugProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
err := tdp.Delegate.WriteMapBegin(ctx, keyType, valueType, size)
tdp.logf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size)
}
return err
}
func (tdp *TDebugProtocol) WriteMapEnd(ctx context.Context) error {
err := tdp.Delegate.WriteMapEnd(ctx)
tdp.logf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteMapEnd(ctx)
}
return err
}
func (tdp *TDebugProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
err := tdp.Delegate.WriteListBegin(ctx, elemType, size)
tdp.logf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteListBegin(ctx, elemType, size)
}
return err
}
func (tdp *TDebugProtocol) WriteListEnd(ctx context.Context) error {
err := tdp.Delegate.WriteListEnd(ctx)
tdp.logf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteListEnd(ctx)
}
return err
}
func (tdp *TDebugProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
err := tdp.Delegate.WriteSetBegin(ctx, elemType, size)
tdp.logf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size)
}
return err
}
func (tdp *TDebugProtocol) WriteSetEnd(ctx context.Context) error {
err := tdp.Delegate.WriteSetEnd(ctx)
tdp.logf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteSetEnd(ctx)
}
return err
}
func (tdp *TDebugProtocol) WriteBool(ctx context.Context, value bool) error {
err := tdp.Delegate.WriteBool(ctx, value)
tdp.logf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteBool(ctx, value)
}
return err
}
func (tdp *TDebugProtocol) WriteByte(ctx context.Context, value int8) error {
err := tdp.Delegate.WriteByte(ctx, value)
tdp.logf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteByte(ctx, value)
}
return err
}
func (tdp *TDebugProtocol) WriteI16(ctx context.Context, value int16) error {
err := tdp.Delegate.WriteI16(ctx, value)
tdp.logf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteI16(ctx, value)
}
return err
}
func (tdp *TDebugProtocol) WriteI32(ctx context.Context, value int32) error {
err := tdp.Delegate.WriteI32(ctx, value)
tdp.logf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteI32(ctx, value)
}
return err
}
func (tdp *TDebugProtocol) WriteI64(ctx context.Context, value int64) error {
err := tdp.Delegate.WriteI64(ctx, value)
tdp.logf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteI64(ctx, value)
}
return err
}
func (tdp *TDebugProtocol) WriteDouble(ctx context.Context, value float64) error {
err := tdp.Delegate.WriteDouble(ctx, value)
tdp.logf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteDouble(ctx, value)
}
return err
}
func (tdp *TDebugProtocol) WriteString(ctx context.Context, value string) error {
err := tdp.Delegate.WriteString(ctx, value)
tdp.logf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteString(ctx, value)
}
return err
}
func (tdp *TDebugProtocol) WriteBinary(ctx context.Context, value []byte) error {
err := tdp.Delegate.WriteBinary(ctx, value)
tdp.logf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteBinary(ctx, value)
}
return err
}
func (tdp *TDebugProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) {
name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin(ctx)
tdp.logf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid)
}
return
}
func (tdp *TDebugProtocol) ReadMessageEnd(ctx context.Context) (err error) {
err = tdp.Delegate.ReadMessageEnd(ctx)
tdp.logf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteMessageEnd(ctx)
}
return
}
func (tdp *TDebugProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
name, err = tdp.Delegate.ReadStructBegin(ctx)
tdp.logf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteStructBegin(ctx, name)
}
return
}
func (tdp *TDebugProtocol) ReadStructEnd(ctx context.Context) (err error) {
err = tdp.Delegate.ReadStructEnd(ctx)
tdp.logf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteStructEnd(ctx)
}
return
}
func (tdp *TDebugProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
name, typeId, id, err = tdp.Delegate.ReadFieldBegin(ctx)
tdp.logf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id)
}
return
}
func (tdp *TDebugProtocol) ReadFieldEnd(ctx context.Context) (err error) {
err = tdp.Delegate.ReadFieldEnd(ctx)
tdp.logf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteFieldEnd(ctx)
}
return
}
func (tdp *TDebugProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
keyType, valueType, size, err = tdp.Delegate.ReadMapBegin(ctx)
tdp.logf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size)
}
return
}
func (tdp *TDebugProtocol) ReadMapEnd(ctx context.Context) (err error) {
err = tdp.Delegate.ReadMapEnd(ctx)
tdp.logf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteMapEnd(ctx)
}
return
}
func (tdp *TDebugProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
elemType, size, err = tdp.Delegate.ReadListBegin(ctx)
tdp.logf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteListBegin(ctx, elemType, size)
}
return
}
func (tdp *TDebugProtocol) ReadListEnd(ctx context.Context) (err error) {
err = tdp.Delegate.ReadListEnd(ctx)
tdp.logf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteListEnd(ctx)
}
return
}
func (tdp *TDebugProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
elemType, size, err = tdp.Delegate.ReadSetBegin(ctx)
tdp.logf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size)
}
return
}
func (tdp *TDebugProtocol) ReadSetEnd(ctx context.Context) (err error) {
err = tdp.Delegate.ReadSetEnd(ctx)
tdp.logf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteSetEnd(ctx)
}
return
}
func (tdp *TDebugProtocol) ReadBool(ctx context.Context) (value bool, err error) {
value, err = tdp.Delegate.ReadBool(ctx)
tdp.logf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteBool(ctx, value)
}
return
}
func (tdp *TDebugProtocol) ReadByte(ctx context.Context) (value int8, err error) {
value, err = tdp.Delegate.ReadByte(ctx)
tdp.logf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteByte(ctx, value)
}
return
}
func (tdp *TDebugProtocol) ReadI16(ctx context.Context) (value int16, err error) {
value, err = tdp.Delegate.ReadI16(ctx)
tdp.logf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteI16(ctx, value)
}
return
}
func (tdp *TDebugProtocol) ReadI32(ctx context.Context) (value int32, err error) {
value, err = tdp.Delegate.ReadI32(ctx)
tdp.logf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteI32(ctx, value)
}
return
}
func (tdp *TDebugProtocol) ReadI64(ctx context.Context) (value int64, err error) {
value, err = tdp.Delegate.ReadI64(ctx)
tdp.logf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteI64(ctx, value)
}
return
}
func (tdp *TDebugProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
value, err = tdp.Delegate.ReadDouble(ctx)
tdp.logf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteDouble(ctx, value)
}
return
}
func (tdp *TDebugProtocol) ReadString(ctx context.Context) (value string, err error) {
value, err = tdp.Delegate.ReadString(ctx)
tdp.logf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteString(ctx, value)
}
return
}
func (tdp *TDebugProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
value, err = tdp.Delegate.ReadBinary(ctx)
tdp.logf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.WriteBinary(ctx, value)
}
return
}
func (tdp *TDebugProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
err = tdp.Delegate.Skip(ctx, fieldType)
tdp.logf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.Skip(ctx, fieldType)
}
return
}
func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) {
err = tdp.Delegate.Flush(ctx)
tdp.logf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
if tdp.DuplicateTo != nil {
tdp.DuplicateTo.Flush(ctx)
}
return
}
func (tdp *TDebugProtocol) Transport() TTransport {
return tdp.Delegate.Transport()
}
// SetTConfiguration implements TConfigurationSetter for propagation.
func (tdp *TDebugProtocol) SetTConfiguration(conf *TConfiguration) {
PropagateTConfiguration(tdp.Delegate, conf)
PropagateTConfiguration(tdp.DuplicateTo, conf)
}
var _ TConfigurationSetter = (*TDebugProtocol)(nil)

View File

@ -0,0 +1,121 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"sync"
)
type TDeserializer struct {
Transport *TMemoryBuffer
Protocol TProtocol
}
func NewTDeserializer() *TDeserializer {
transport := NewTMemoryBufferLen(1024)
protocol := NewTBinaryProtocolTransport(transport)
return &TDeserializer{
Transport: transport,
Protocol: protocol,
}
}
func (t *TDeserializer) ReadString(ctx context.Context, msg TStruct, s string) (err error) {
t.Transport.Reset()
err = nil
if _, err = t.Transport.Write([]byte(s)); err != nil {
return
}
if err = msg.Read(ctx, t.Protocol); err != nil {
return
}
return
}
func (t *TDeserializer) Read(ctx context.Context, msg TStruct, b []byte) (err error) {
t.Transport.Reset()
err = nil
if _, err = t.Transport.Write(b); err != nil {
return
}
if err = msg.Read(ctx, t.Protocol); err != nil {
return
}
return
}
// TDeserializerPool is the thread-safe version of TDeserializer,
// it uses a resource pool of TDeserializer under the hood.
//
// It must be initialized with either NewTDeserializerPool or
// NewTDeserializerPoolSizeFactory.
type TDeserializerPool struct {
pool sync.Pool
}
// NewTDeserializerPool creates a new TDeserializerPool.
//
// NewTDeserializer can be used as the arg here.
func NewTDeserializerPool(f func() *TDeserializer) *TDeserializerPool {
return &TDeserializerPool{
pool: sync.Pool{
New: func() interface{} {
return f()
},
},
}
}
// NewTDeserializerPoolSizeFactory creates a new TDeserializerPool with
// the given size and protocol factory.
//
// Note that the size is not the limit. The TMemoryBuffer underneath can grow
// larger than that. It just dictates the initial size.
func NewTDeserializerPoolSizeFactory(size int, factory TProtocolFactory) *TDeserializerPool {
return &TDeserializerPool{
pool: sync.Pool{
New: func() interface{} {
transport := NewTMemoryBufferLen(size)
protocol := factory.GetProtocol(transport)
return &TDeserializer{
Transport: transport,
Protocol: protocol,
}
},
},
}
}
func (t *TDeserializerPool) ReadString(ctx context.Context, msg TStruct, s string) error {
d := t.pool.Get().(*TDeserializer)
defer t.pool.Put(d)
return d.ReadString(ctx, msg, s)
}
func (t *TDeserializerPool) Read(ctx context.Context, msg TStruct, b []byte) error {
d := t.pool.Get().(*TDeserializer)
defer t.pool.Put(d)
return d.Read(ctx, msg, b)
}
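
Illustrative sketch of using the pool: a single package-level pool shared across goroutines, built with the size/factory constructor. The buffer size and the compact protocol factory are example choices, and decode is a hypothetical helper.

package example

import (
	"context"

	"github.com/apache/thrift/lib/go/thrift"
)

// deserializerPool is safe for concurrent use; each Read borrows a
// TDeserializer and returns it to the pool when done.
var deserializerPool = thrift.NewTDeserializerPoolSizeFactory(
	1024,
	thrift.NewTCompactProtocolFactoryConf(&thrift.TConfiguration{}),
)

func decode(ctx context.Context, msg thrift.TStruct, payload []byte) error {
	return deserializerPool.Read(ctx, msg, payload)
}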

View File

@ -0,0 +1,116 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"errors"
)
// Generic Thrift exception
type TException interface {
error
TExceptionType() TExceptionType
}
// Prepends additional information to an error without losing the Thrift exception interface
func PrependError(prepend string, err error) error {
msg := prepend + err.Error()
var te TException
if errors.As(err, &te) {
switch te.TExceptionType() {
case TExceptionTypeTransport:
if t, ok := err.(TTransportException); ok {
return prependTTransportException(prepend, t)
}
case TExceptionTypeProtocol:
if t, ok := err.(TProtocolException); ok {
return prependTProtocolException(prepend, t)
}
case TExceptionTypeApplication:
var t TApplicationException
if errors.As(err, &t) {
return NewTApplicationException(t.TypeId(), msg)
}
}
return wrappedTException{
err: err,
msg: msg,
tExceptionType: te.TExceptionType(),
}
}
return errors.New(msg)
}
// TExceptionType is an enum type to categorize different "subclasses" of TExceptions.
type TExceptionType byte
// TExceptionType values
const (
TExceptionTypeUnknown TExceptionType = iota
TExceptionTypeCompiled // TExceptions defined in thrift files and generated by thrift compiler
TExceptionTypeApplication // TApplicationExceptions
TExceptionTypeProtocol // TProtocolExceptions
TExceptionTypeTransport // TTransportExceptions
)
// WrapTException wraps an error into TException.
//
// If err is nil or already TException, it's returned as-is.
// Otherwise it will be wrapped into TException with TExceptionType() returning
// TExceptionTypeUnknown, and Unwrap() returning the original error.
func WrapTException(err error) TException {
if err == nil {
return nil
}
if te, ok := err.(TException); ok {
return te
}
return wrappedTException{
err: err,
msg: err.Error(),
tExceptionType: TExceptionTypeUnknown,
}
}
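
Illustrative sketch of consuming these helpers from application code; the function names and the stage string are hypothetical.

package example

import "github.com/apache/thrift/lib/go/thrift"

// describe adds context to an error while keeping its Thrift exception
// category (transport, protocol, application, ...) intact.
func describe(stage string, err error) error {
	if err == nil {
		return nil
	}
	return thrift.PrependError(stage+": ", err)
}

// classify reports which "subclass" of TException an error belongs to,
// treating plain errors as TExceptionTypeUnknown via WrapTException.
func classify(err error) thrift.TExceptionType {
	if err == nil {
		return thrift.TExceptionTypeUnknown
	}
	return thrift.WrapTException(err).TExceptionType()
}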
type wrappedTException struct {
err error
msg string
tExceptionType TExceptionType
}
func (w wrappedTException) Error() string {
return w.msg
}
func (w wrappedTException) TExceptionType() TExceptionType {
return w.tExceptionType
}
func (w wrappedTException) Unwrap() error {
return w.err
}
var _ TException = wrappedTException{}

View File

@ -0,0 +1,223 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bufio"
"bytes"
"context"
"encoding/binary"
"fmt"
"io"
)
// Deprecated: Use DEFAULT_MAX_FRAME_SIZE instead.
const DEFAULT_MAX_LENGTH = 16384000
type TFramedTransport struct {
transport TTransport
cfg *TConfiguration
writeBuf bytes.Buffer
reader *bufio.Reader
readBuf bytes.Buffer
buffer [4]byte
}
type tFramedTransportFactory struct {
factory TTransportFactory
cfg *TConfiguration
}
// Deprecated: Use NewTFramedTransportFactoryConf instead.
func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
return NewTFramedTransportFactoryConf(factory, &TConfiguration{
MaxFrameSize: DEFAULT_MAX_LENGTH,
noPropagation: true,
})
}
// Deprecated: Use NewTFramedTransportFactoryConf instead.
func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory {
return NewTFramedTransportFactoryConf(factory, &TConfiguration{
MaxFrameSize: int32(maxLength),
noPropagation: true,
})
}
func NewTFramedTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
PropagateTConfiguration(factory, conf)
return &tFramedTransportFactory{
factory: factory,
cfg: conf,
}
}
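
Illustrative sketch of the TConfiguration-based replacement for the deprecated max-length constructors; the 64 MiB cap is only an example value.

package example

import "github.com/apache/thrift/lib/go/thrift"

// newFramedFactory builds a framed transport factory whose frame-size limit
// comes from TConfiguration instead of a dedicated constructor argument.
func newFramedFactory() thrift.TTransportFactory {
	return thrift.NewTFramedTransportFactoryConf(
		thrift.NewTTransportFactory(),
		&thrift.TConfiguration{MaxFrameSize: 64 * 1024 * 1024},
	)
}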
func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) {
PropagateTConfiguration(base, p.cfg)
tt, err := p.factory.GetTransport(base)
if err != nil {
return nil, err
}
return NewTFramedTransportConf(tt, p.cfg), nil
}
func (p *tFramedTransportFactory) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(p.factory, cfg)
p.cfg = cfg
}
// Deprecated: Use NewTFramedTransportConf instead.
func NewTFramedTransport(transport TTransport) *TFramedTransport {
return NewTFramedTransportConf(transport, &TConfiguration{
MaxFrameSize: DEFAULT_MAX_LENGTH,
noPropagation: true,
})
}
// Deprecated: Use NewTFramedTransportConf instead.
func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
return NewTFramedTransportConf(transport, &TConfiguration{
MaxFrameSize: int32(maxLength),
noPropagation: true,
})
}
func NewTFramedTransportConf(transport TTransport, conf *TConfiguration) *TFramedTransport {
PropagateTConfiguration(transport, conf)
return &TFramedTransport{
transport: transport,
reader: bufio.NewReader(transport),
cfg: conf,
}
}
func (p *TFramedTransport) Open() error {
return p.transport.Open()
}
func (p *TFramedTransport) IsOpen() bool {
return p.transport.IsOpen()
}
func (p *TFramedTransport) Close() error {
return p.transport.Close()
}
func (p *TFramedTransport) Read(buf []byte) (read int, err error) {
read, err = p.readBuf.Read(buf)
if err != io.EOF {
return
}
// For bytes.Buffer.Read, EOF would only happen when read is zero,
// but still, do a sanity check,
// in case that behavior is changed in a future version of go stdlib.
// When that happens, just return nil error,
// and let the caller call Read again to read the next frame.
if read > 0 {
return read, nil
}
// Reaching here means that the last Read finished the last frame,
// so we need to read the next frame into readBuf now.
if err = p.readFrame(); err != nil {
return read, err
}
newRead, err := p.Read(buf[read:])
return read + newRead, err
}
func (p *TFramedTransport) ReadByte() (c byte, err error) {
buf := p.buffer[:1]
_, err = p.Read(buf)
if err != nil {
return
}
c = buf[0]
return
}
func (p *TFramedTransport) Write(buf []byte) (int, error) {
n, err := p.writeBuf.Write(buf)
return n, NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) WriteByte(c byte) error {
return p.writeBuf.WriteByte(c)
}
func (p *TFramedTransport) WriteString(s string) (n int, err error) {
return p.writeBuf.WriteString(s)
}
func (p *TFramedTransport) Flush(ctx context.Context) error {
size := p.writeBuf.Len()
buf := p.buffer[:4]
binary.BigEndian.PutUint32(buf, uint32(size))
_, err := p.transport.Write(buf)
if err != nil {
p.writeBuf.Reset()
return NewTTransportExceptionFromError(err)
}
if size > 0 {
if _, err := io.Copy(p.transport, &p.writeBuf); err != nil {
p.writeBuf.Reset()
return NewTTransportExceptionFromError(err)
}
}
err = p.transport.Flush(ctx)
return NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) readFrame() error {
buf := p.buffer[:4]
if _, err := io.ReadFull(p.reader, buf); err != nil {
return err
}
size := binary.BigEndian.Uint32(buf)
if size < 0 || size > uint32(p.cfg.GetMaxFrameSize()) {
return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
}
_, err := io.CopyN(&p.readBuf, p.reader, int64(size))
return NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) {
return uint64(p.readBuf.Len())
}
// SetTConfiguration implements TConfigurationSetter.
func (p *TFramedTransport) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(p.transport, cfg)
p.cfg = cfg
}
var (
_ TConfigurationSetter = (*tFramedTransportFactory)(nil)
_ TConfigurationSetter = (*TFramedTransport)(nil)
)

View File

@ -44,6 +44,15 @@ func SetHeader(ctx context.Context, key, value string) context.Context {
)
}
// UnsetHeader unsets a previously set header in the context.
func UnsetHeader(ctx context.Context, key string) context.Context {
return context.WithValue(
ctx,
headerKey(key),
nil,
)
}
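
Illustrative sketch of the context helpers around the new UnsetHeader; the header name and helper are hypothetical.

package example

import (
	"context"

	"github.com/apache/thrift/lib/go/thrift"
)

// tagRequest stores a header on the context, reads it back, and finally
// clears it again with the new UnsetHeader helper.
func tagRequest(ctx context.Context, id string) context.Context {
	ctx = thrift.SetHeader(ctx, "request-id", id)
	if v, ok := thrift.GetHeader(ctx, "request-id"); ok {
		_ = v // the stored value round-trips through the context
	}
	return thrift.UnsetHeader(ctx, "request-id")
}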
// GetHeader returns a value of the given header from the context.
func GetHeader(ctx context.Context, key string) (value string, ok bool) {
if v := ctx.Value(headerKey(key)); v != nil {

View File

@ -0,0 +1,351 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"errors"
)
// THeaderProtocol is a thrift protocol that implements THeader:
// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
//
// It supports either binary or compact protocol as the wrapped protocol.
//
// Most of the THeader handling happens inside THeaderTransport.
type THeaderProtocol struct {
transport *THeaderTransport
// Will be initialized on first read/write.
protocol TProtocol
cfg *TConfiguration
}
// Deprecated: Use NewTHeaderProtocolConf instead.
func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
return newTHeaderProtocolConf(trans, &TConfiguration{
noPropagation: true,
})
}
// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying
// transport with given TConfiguration.
//
// The passed in transport will be wrapped with THeaderTransport.
//
// Note that THeaderTransport handles frame and zlib by itself,
// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
// instead of rich transports like TZlibTransport or TFramedTransport.
func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol {
return newTHeaderProtocolConf(trans, conf)
}
func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol {
t := NewTHeaderTransportConf(trans, cfg)
p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t)
PropagateTConfiguration(p, cfg)
return &THeaderProtocol{
transport: t,
protocol: p,
cfg: cfg,
}
}
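A hedged sketch of the wiring described above, putting THeaderProtocol directly on a raw socket; the address and message-size limit are illustrative, and the NewTSocketConf and NewTStandardClient signatures are assumed from this thrift version.
cfg := &TConfiguration{MaxMessageSize: 16 * 1024 * 1024} // illustrative limit
sock, err := NewTSocketConf("localhost:9090", cfg)       // raw socket, not framed/zlib
if err != nil {
    // handle the address resolution error
}
proto := NewTHeaderProtocolConf(sock, cfg)
client := NewTStandardClient(proto, proto) // hand to a generated service client
_ = client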
type tHeaderProtocolFactory struct {
cfg *TConfiguration
}
func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return newTHeaderProtocolConf(trans, f.cfg)
}
func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) {
f.cfg = cfg
}
// Deprecated: Use NewTHeaderProtocolFactoryConf instead.
func NewTHeaderProtocolFactory() TProtocolFactory {
return NewTHeaderProtocolFactoryConf(&TConfiguration{
noPropagation: true,
})
}
// NewTHeaderProtocolFactoryConf creates a factory for THeader with given
// TConfiguration.
func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory {
return tHeaderProtocolFactory{
cfg: conf,
}
}
// Transport returns the underlying transport.
//
// It's guaranteed to be of type *THeaderTransport.
func (p *THeaderProtocol) Transport() TTransport {
return p.transport
}
// GetReadHeaders returns the THeaderMap read from transport.
func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
return p.transport.GetReadHeaders()
}
// SetWriteHeader sets a header for write.
func (p *THeaderProtocol) SetWriteHeader(key, value string) {
p.transport.SetWriteHeader(key, value)
}
// ClearWriteHeaders clears all write headers previously set.
func (p *THeaderProtocol) ClearWriteHeaders() {
p.transport.ClearWriteHeaders()
}
// AddTransform adds a transform for writing.
func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error {
return p.transport.AddTransform(transform)
}
func (p *THeaderProtocol) Flush(ctx context.Context) error {
return p.transport.Flush(ctx)
}
func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error {
newProto, err := p.transport.Protocol().GetProtocol(p.transport)
if err != nil {
return err
}
PropagateTConfiguration(newProto, p.cfg)
p.protocol = newProto
p.transport.SequenceID = seqID
return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID)
}
func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error {
if err := p.protocol.WriteMessageEnd(ctx); err != nil {
return err
}
return p.transport.Flush(ctx)
}
func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error {
return p.protocol.WriteStructBegin(ctx, name)
}
func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error {
return p.protocol.WriteStructEnd(ctx)
}
func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error {
return p.protocol.WriteFieldBegin(ctx, name, typeID, id)
}
func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error {
return p.protocol.WriteFieldEnd(ctx)
}
func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error {
return p.protocol.WriteFieldStop(ctx)
}
func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
return p.protocol.WriteMapBegin(ctx, keyType, valueType, size)
}
func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error {
return p.protocol.WriteMapEnd(ctx)
}
func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
return p.protocol.WriteListBegin(ctx, elemType, size)
}
func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error {
return p.protocol.WriteListEnd(ctx)
}
func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
return p.protocol.WriteSetBegin(ctx, elemType, size)
}
func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error {
return p.protocol.WriteSetEnd(ctx)
}
func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error {
return p.protocol.WriteBool(ctx, value)
}
func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error {
return p.protocol.WriteByte(ctx, value)
}
func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error {
return p.protocol.WriteI16(ctx, value)
}
func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error {
return p.protocol.WriteI32(ctx, value)
}
func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error {
return p.protocol.WriteI64(ctx, value)
}
func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error {
return p.protocol.WriteDouble(ctx, value)
}
func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error {
return p.protocol.WriteString(ctx, value)
}
func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error {
return p.protocol.WriteBinary(ctx, value)
}
// ReadFrame calls underlying THeaderTransport's ReadFrame function.
func (p *THeaderProtocol) ReadFrame(ctx context.Context) error {
return p.transport.ReadFrame(ctx)
}
func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) {
if err = p.transport.ReadFrame(ctx); err != nil {
return
}
var newProto TProtocol
newProto, err = p.transport.Protocol().GetProtocol(p.transport)
if err != nil {
var tAppExc TApplicationException
if !errors.As(err, &tAppExc) {
return
}
if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil {
return
}
if e := tAppExc.Write(ctx, p.protocol); e != nil {
return
}
if e := p.protocol.WriteMessageEnd(ctx); e != nil {
return
}
if e := p.transport.Flush(ctx); e != nil {
return
}
return
}
PropagateTConfiguration(newProto, p.cfg)
p.protocol = newProto
return p.protocol.ReadMessageBegin(ctx)
}
func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error {
return p.protocol.ReadMessageEnd(ctx)
}
func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
return p.protocol.ReadStructBegin(ctx)
}
func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error {
return p.protocol.ReadStructEnd(ctx)
}
func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) {
return p.protocol.ReadFieldBegin(ctx)
}
func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error {
return p.protocol.ReadFieldEnd(ctx)
}
func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
return p.protocol.ReadMapBegin(ctx)
}
func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error {
return p.protocol.ReadMapEnd(ctx)
}
func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
return p.protocol.ReadListBegin(ctx)
}
func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error {
return p.protocol.ReadListEnd(ctx)
}
func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
return p.protocol.ReadSetBegin(ctx)
}
func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error {
return p.protocol.ReadSetEnd(ctx)
}
func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) {
return p.protocol.ReadBool(ctx)
}
func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) {
return p.protocol.ReadByte(ctx)
}
func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) {
return p.protocol.ReadI16(ctx)
}
func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) {
return p.protocol.ReadI32(ctx)
}
func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) {
return p.protocol.ReadI64(ctx)
}
func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
return p.protocol.ReadDouble(ctx)
}
func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) {
return p.protocol.ReadString(ctx)
}
func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
return p.protocol.ReadBinary(ctx)
}
func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error {
return p.protocol.Skip(ctx, fieldType)
}
// SetTConfiguration implements TConfigurationSetter.
func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(p.transport, cfg)
PropagateTConfiguration(p.protocol, cfg)
p.cfg = cfg
}
var (
_ TConfigurationSetter = (*tHeaderProtocolFactory)(nil)
_ TConfigurationSetter = (*THeaderProtocol)(nil)
)

View File

@ -75,6 +75,15 @@ const (
THeaderProtocolDefault = THeaderProtocolBinary
)
// Declared globally to avoid repetitive allocations, not really used.
var globalMemoryBuffer = NewTMemoryBuffer()
// Validate checks whether the THeaderProtocolID is a valid/supported one.
func (id THeaderProtocolID) Validate() error {
_, err := id.GetProtocol(globalMemoryBuffer)
return err
}
// GetProtocol gets the corresponding TProtocol from the wrapped protocol id.
func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
switch id {
@ -84,7 +93,7 @@ func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
fmt.Sprintf("THeader protocol id %d not supported", id),
)
case THeaderProtocolBinary:
return NewTBinaryProtocolFactoryDefault().GetProtocol(trans), nil
return NewTBinaryProtocolTransport(trans), nil
case THeaderProtocolCompact:
return NewTCompactProtocol(trans), nil
}
@ -93,11 +102,12 @@ func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
// THeaderTransformID defines the numeric id of the transform used.
type THeaderTransformID int32
// THeaderTransformID values
// THeaderTransformID values.
//
// Values not defined here are not currently supported, namely HMAC and Snappy.
const (
TransformNone THeaderTransformID = iota // 0, no special handling
TransformZlib // 1, zlib
// Rest of the values are not currently supported, namely HMAC and Snappy.
)
var supportedTransformIDs = map[THeaderTransformID]bool{
@ -255,6 +265,7 @@ type THeaderTransport struct {
clientType clientType
protocolID THeaderProtocolID
cfg *TConfiguration
// buffer is used in the following scenarios to avoid repetitive
// allocations, while 4 is big enough for all those scenarios:
@ -266,22 +277,35 @@ type THeaderTransport struct {
var _ TTransport = (*THeaderTransport)(nil)
// NewTHeaderTransport creates THeaderTransport from the underlying transport.
//
// Please note that THeaderTransport handles framing and zlib by itself,
// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
// instead of rich transports like TZlibTransport or TFramedTransport.
//
// If trans is already a *THeaderTransport, it will be returned as is.
// Deprecated: Use NewTHeaderTransportConf instead.
func NewTHeaderTransport(trans TTransport) *THeaderTransport {
return NewTHeaderTransportConf(trans, &TConfiguration{
noPropagation: true,
})
}
// NewTHeaderTransportConf creates THeaderTransport from the
// underlying transport, with given TConfiguration attached.
//
// If trans is already a *THeaderTransport, it will be returned as is,
// but with TConfiguration overridden by the value passed in.
//
// The protocol ID in TConfiguration is only useful for client transports.
// For servers,
// the protocol ID will be overridden again to the one set by the client,
// to ensure that servers always speak the same dialect as the client.
func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport {
if ht, ok := trans.(*THeaderTransport); ok {
ht.SetTConfiguration(conf)
return ht
}
PropagateTConfiguration(trans, conf)
return &THeaderTransport{
transport: trans,
reader: bufio.NewReader(trans),
writeHeaders: make(THeaderMap),
protocolID: THeaderProtocolDefault,
protocolID: conf.GetTHeaderProtocolID(),
cfg: conf,
}
}
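A hedged sketch of the client-side protocol ID override described above; THeaderProtocolCompact and Validate appear elsewhere in this package, the THeaderProtocolID field name on TConfiguration is assumed from configuration.go, and underlying stands for any raw TTransport already at hand.
protoID := THeaderProtocolCompact // ask for compact as the wrapped protocol
if err := protoID.Validate(); err != nil {
    // unsupported protocol ID
}
cfg := &TConfiguration{THeaderProtocolID: &protoID} // field name assumed
ht := NewTHeaderTransportConf(underlying, cfg)      // underlying: any raw TTransport
_ = ht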
@ -297,18 +321,34 @@ func (t *THeaderTransport) IsOpen() bool {
// ReadFrame tries to read the frame header, guess the client type, and handle
// unframed clients.
func (t *THeaderTransport) ReadFrame() error {
func (t *THeaderTransport) ReadFrame(ctx context.Context) error {
if !t.needReadFrame() {
// No need to read frame, skipping.
return nil
}
// Peek and handle the first 32 bits.
// They could either be the length field of a framed message,
// or the first bytes of an unframed message.
buf, err := t.reader.Peek(size32)
var buf []byte
var err error
// This is also usually the first read from a connection,
// so handle retries around socket timeouts.
_, deadlineSet := ctx.Deadline()
for {
buf, err = t.reader.Peek(size32)
if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
// This is I/O timeout and we still have time,
// continue trying
continue
}
// For anything else, do not retry
break
}
if err != nil {
return err
}
frameSize := binary.BigEndian.Uint32(buf)
if frameSize&VERSION_MASK == VERSION_1 {
t.clientType = clientUnframedBinary
@ -321,7 +361,7 @@ func (t *THeaderTransport) ReadFrame() error {
// At this point it should be a framed message,
// sanity check on frameSize then discard the peeked part.
if frameSize > THeaderMaxFrameSize {
if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) {
return NewTProtocolExceptionWithType(
SIZE_LIMIT,
errors.New("frame too large"),
@ -330,10 +370,7 @@ func (t *THeaderTransport) ReadFrame() error {
t.reader.Discard(size32)
// Read the frame fully into frameBuffer.
_, err = io.Copy(
&t.frameBuffer,
io.LimitReader(t.reader, int64(frameSize)),
)
_, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize))
if err != nil {
return err
}
@ -344,7 +381,7 @@ func (t *THeaderTransport) ReadFrame() error {
version := binary.BigEndian.Uint32(buf)
if version&THeaderHeaderMask == THeaderHeaderMagic {
t.clientType = clientHeaders
return t.parseHeaders(frameSize)
return t.parseHeaders(ctx, frameSize)
}
if version&VERSION_MASK == VERSION_1 {
t.clientType = clientFramedBinary
@ -374,7 +411,7 @@ func (t *THeaderTransport) endOfFrame() error {
return t.frameReader.Close()
}
func (t *THeaderTransport) parseHeaders(frameSize uint32) error {
func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error {
if t.clientType != clientHeaders {
return nil
}
@ -395,11 +432,12 @@ func (t *THeaderTransport) parseHeaders(frameSize uint32) error {
)
}
headerBuf := NewTMemoryBuffer()
_, err = io.Copy(headerBuf, io.LimitReader(&t.frameBuffer, headerLength))
_, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength)
if err != nil {
return err
}
hp := NewTCompactProtocol(headerBuf)
hp.SetTConfiguration(t.cfg)
// At this point the header is already read into headerBuf,
// and t.frameBuffer starts from the actual payload.
@ -408,6 +446,7 @@ func (t *THeaderTransport) parseHeaders(frameSize uint32) error {
return err
}
t.protocolID = THeaderProtocolID(protoID)
var transformCount int32
transformCount, err = hp.readVarint32()
if err != nil {
@ -442,7 +481,7 @@ func (t *THeaderTransport) parseHeaders(frameSize uint32) error {
headers := make(THeaderMap)
for {
infoType, err := hp.readVarint32()
if err == io.EOF {
if errors.Is(err, io.EOF) {
break
}
if err != nil {
@ -454,11 +493,11 @@ func (t *THeaderTransport) parseHeaders(frameSize uint32) error {
return err
}
for i := 0; i < int(count); i++ {
key, err := hp.ReadString()
key, err := hp.ReadString(ctx)
if err != nil {
return err
}
value, err := hp.ReadString()
value, err := hp.ReadString(ctx)
if err != nil {
return err
}
@ -488,21 +527,37 @@ func (t *THeaderTransport) needReadFrame() bool {
}
func (t *THeaderTransport) Read(p []byte) (read int, err error) {
err = t.ReadFrame()
// Using context.Background here instead of a passed-in context is safe:
// there is no way to pass a context into this function, and in the vast
// majority of cases the frame has already been read into frameReader by
// the time Read is called. Calling ReadFrame here mainly guards against
// callers that forgot to call ReadFrame before calling Read.
err = t.ReadFrame(context.Background())
if err != nil {
return
}
if t.frameReader != nil {
read, err = t.frameReader.Read(p)
if err == io.EOF {
if err == nil && t.frameBuffer.Len() <= 0 {
// the last Read finished the frame, do endOfFrame
// handling here.
err = t.endOfFrame()
} else if err == io.EOF {
err = t.endOfFrame()
if err != nil {
return
}
if read < len(p) {
var nextRead int
nextRead, err = t.Read(p[read:])
read += nextRead
if read == 0 {
// Try to read the next frame when we hit EOF
// (end of frame) immediately.
// When we get here, it means the last read
// finished the previous frame, but didn't
// do endOfFrame handling yet.
// We have to read the next frame here,
// as otherwise we would return 0 and nil,
// which is a case not handled well by most
// protocol implementations.
return t.Read(p)
}
}
return
@ -534,6 +589,7 @@ func (t *THeaderTransport) Flush(ctx context.Context) error {
case clientHeaders:
headers := NewTMemoryBuffer()
hp := NewTCompactProtocol(headers)
hp.SetTConfiguration(t.cfg)
if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil {
return NewTTransportExceptionFromError(err)
}
@ -553,10 +609,10 @@ func (t *THeaderTransport) Flush(ctx context.Context) error {
return NewTTransportExceptionFromError(err)
}
for key, value := range t.writeHeaders {
if err := hp.WriteString(key); err != nil {
if err := hp.WriteString(ctx, key); err != nil {
return NewTTransportExceptionFromError(err)
}
if err := hp.WriteString(value); err != nil {
if err := hp.WriteString(ctx, value); err != nil {
return NewTTransportExceptionFromError(err)
}
}
@ -696,17 +752,37 @@ func (t *THeaderTransport) isFramed() bool {
}
}
// SetTConfiguration implements TConfigurationSetter.
func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(t.transport, cfg)
t.cfg = cfg
}
// THeaderTransportFactory is a TTransportFactory implementation to create
// THeaderTransport.
//
// It also implements TConfigurationSetter.
type THeaderTransportFactory struct {
// The underlying factory; it can be nil.
Factory TTransportFactory
cfg *TConfiguration
}
// NewTHeaderTransportFactory creates a new *THeaderTransportFactory.
// Deprecated: Use NewTHeaderTransportFactoryConf instead.
func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory {
return NewTHeaderTransportFactoryConf(factory, &TConfiguration{
noPropagation: true,
})
}
// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with
// the given *TConfiguration.
func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
return &THeaderTransportFactory{
Factory: factory,
cfg: conf,
}
}
@ -717,7 +793,18 @@ func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, er
if err != nil {
return nil, err
}
return NewTHeaderTransport(t), nil
return NewTHeaderTransportConf(t, f.cfg), nil
}
return NewTHeaderTransport(trans), nil
return NewTHeaderTransportConf(trans, f.cfg), nil
}
// SetTConfiguration implements TConfigurationSetter.
func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) {
PropagateTConfiguration(f.Factory, f.cfg)
f.cfg = cfg
}
var (
_ TConfigurationSetter = (*THeaderTransportFactory)(nil)
_ TConfigurationSetter = (*THeaderTransport)(nil)
)

View File

@ -22,6 +22,7 @@ package thrift
import (
"bytes"
"context"
"errors"
"io"
"io/ioutil"
"net/http"
@ -159,26 +160,37 @@ func (p *THttpClient) Read(buf []byte) (int, error) {
return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
}
n, err := p.response.Body.Read(buf)
if n > 0 && (err == nil || err == io.EOF) {
if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
return n, nil
}
return n, NewTTransportExceptionFromError(err)
}
func (p *THttpClient) ReadByte() (c byte, err error) {
if p.response == nil {
return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
}
return readByte(p.response.Body)
}
func (p *THttpClient) Write(buf []byte) (int, error) {
n, err := p.requestBuffer.Write(buf)
return n, err
if p.requestBuffer == nil {
return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
}
return p.requestBuffer.Write(buf)
}
func (p *THttpClient) WriteByte(c byte) error {
if p.requestBuffer == nil {
return NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
}
return p.requestBuffer.WriteByte(c)
}
func (p *THttpClient) WriteString(s string) (n int, err error) {
if p.requestBuffer == nil {
return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
}
return p.requestBuffer.WriteString(s)
}
@ -186,7 +198,11 @@ func (p *THttpClient) Flush(ctx context.Context) error {
// Close any previous response body to avoid leaking connections.
p.closeResponse()
req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer)
// Give up the ownership of the current request buffer to http request,
// and create a new buffer for the next request.
buf := p.requestBuffer
p.requestBuffer = new(bytes.Buffer)
req, err := http.NewRequest("POST", p.url.String(), buf)
if err != nil {
return NewTTransportExceptionFromError(err)
}
@ -218,7 +234,7 @@ func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
}
const maxSize = ^uint64(0)
return maxSize // the thruth is, we just don't know unless framed is used
return maxSize // the truth is, we just don't know unless framed is used
}
// Deprecated: Use NewTHttpClientTransportFactory instead.

View File

@ -24,6 +24,7 @@ import (
"io"
"net/http"
"strings"
"sync"
)
// NewThriftHandlerFunc is a function that creates a ready-to-use Apache Thrift Handler function
@ -40,14 +41,24 @@ func NewThriftHandlerFunc(processor TProcessor,
// gz transparently compresses the HTTP response if the client supports it.
func gz(handler http.HandlerFunc) http.HandlerFunc {
sp := &sync.Pool{
New: func() interface{} {
return gzip.NewWriter(nil)
},
}
return func(w http.ResponseWriter, r *http.Request) {
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
handler(w, r)
return
}
w.Header().Set("Content-Encoding", "gzip")
gz := gzip.NewWriter(w)
defer gz.Close()
gz := sp.Get().(*gzip.Writer)
gz.Reset(w)
defer func() {
_ = gz.Close()
sp.Put(gz)
}()
gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
handler(gzw, r)
}

View File

@ -210,5 +210,13 @@ func (p *StreamTransport) WriteString(s string) (n int, err error) {
func (p *StreamTransport) RemainingBytes() (num_bytes uint64) {
const maxSize = ^uint64(0)
return maxSize // the thruth is, we just don't know unless framed is used
return maxSize // the truth is, we just don't know unless framed is used
}
// SetTConfiguration implements TConfigurationSetter for propagation.
func (p *StreamTransport) SetTConfiguration(conf *TConfiguration) {
PropagateTConfiguration(p.Reader, conf)
PropagateTConfiguration(p.Writer, conf)
}
var _ TConfigurationSetter = (*StreamTransport)(nil)

View File

@ -41,8 +41,8 @@ type TJSONProtocol struct {
// Constructor
func NewTJSONProtocol(t TTransport) *TJSONProtocol {
v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)}
v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
return v
}
@ -57,43 +57,43 @@ func NewTJSONProtocolFactory() *TJSONProtocolFactory {
return &TJSONProtocolFactory{}
}
func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
func (p *TJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
p.resetContextStack() // THRIFT-3735
if e := p.OutputListBegin(); e != nil {
return e
}
if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil {
if e := p.WriteI32(ctx, THRIFT_JSON_PROTOCOL_VERSION); e != nil {
return e
}
if e := p.WriteString(name); e != nil {
if e := p.WriteString(ctx, name); e != nil {
return e
}
if e := p.WriteByte(int8(typeId)); e != nil {
if e := p.WriteByte(ctx, int8(typeId)); e != nil {
return e
}
if e := p.WriteI32(seqId); e != nil {
if e := p.WriteI32(ctx, seqId); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) WriteMessageEnd() error {
func (p *TJSONProtocol) WriteMessageEnd(ctx context.Context) error {
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteStructBegin(name string) error {
func (p *TJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
if e := p.OutputObjectBegin(); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) WriteStructEnd() error {
func (p *TJSONProtocol) WriteStructEnd(ctx context.Context) error {
return p.OutputObjectEnd()
}
func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
if e := p.WriteI16(id); e != nil {
func (p *TJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
if e := p.WriteI16(ctx, id); e != nil {
return e
}
if e := p.OutputObjectBegin(); e != nil {
@ -103,19 +103,19 @@ func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) err
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
if e := p.WriteString(ctx, s); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) WriteFieldEnd() error {
func (p *TJSONProtocol) WriteFieldEnd(ctx context.Context) error {
return p.OutputObjectEnd()
}
func (p *TJSONProtocol) WriteFieldStop() error { return nil }
func (p *TJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
func (p *TJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
if e := p.OutputListBegin(); e != nil {
return e
}
@ -123,77 +123,77 @@ func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int)
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
if e := p.WriteString(ctx, s); e != nil {
return e
}
s, e1 = p.TypeIdToString(valueType)
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
if e := p.WriteString(ctx, s); e != nil {
return e
}
if e := p.WriteI64(int64(size)); e != nil {
if e := p.WriteI64(ctx, int64(size)); e != nil {
return e
}
return p.OutputObjectBegin()
}
func (p *TJSONProtocol) WriteMapEnd() error {
func (p *TJSONProtocol) WriteMapEnd(ctx context.Context) error {
if e := p.OutputObjectEnd(); e != nil {
return e
}
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error {
func (p *TJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
return p.OutputElemListBegin(elemType, size)
}
func (p *TJSONProtocol) WriteListEnd() error {
func (p *TJSONProtocol) WriteListEnd(ctx context.Context) error {
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error {
func (p *TJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
return p.OutputElemListBegin(elemType, size)
}
func (p *TJSONProtocol) WriteSetEnd() error {
func (p *TJSONProtocol) WriteSetEnd(ctx context.Context) error {
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteBool(b bool) error {
func (p *TJSONProtocol) WriteBool(ctx context.Context, b bool) error {
if b {
return p.WriteI32(1)
return p.WriteI32(ctx, 1)
}
return p.WriteI32(0)
return p.WriteI32(ctx, 0)
}
func (p *TJSONProtocol) WriteByte(b int8) error {
return p.WriteI32(int32(b))
func (p *TJSONProtocol) WriteByte(ctx context.Context, b int8) error {
return p.WriteI32(ctx, int32(b))
}
func (p *TJSONProtocol) WriteI16(v int16) error {
return p.WriteI32(int32(v))
func (p *TJSONProtocol) WriteI16(ctx context.Context, v int16) error {
return p.WriteI32(ctx, int32(v))
}
func (p *TJSONProtocol) WriteI32(v int32) error {
func (p *TJSONProtocol) WriteI32(ctx context.Context, v int32) error {
return p.OutputI64(int64(v))
}
func (p *TJSONProtocol) WriteI64(v int64) error {
func (p *TJSONProtocol) WriteI64(ctx context.Context, v int64) error {
return p.OutputI64(int64(v))
}
func (p *TJSONProtocol) WriteDouble(v float64) error {
func (p *TJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
return p.OutputF64(v)
}
func (p *TJSONProtocol) WriteString(v string) error {
func (p *TJSONProtocol) WriteString(ctx context.Context, v string) error {
return p.OutputString(v)
}
func (p *TJSONProtocol) WriteBinary(v []byte) error {
func (p *TJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
// JSON library only takes in a string,
// not an arbitrary byte array, to ensure bytes are transmitted
// efficiently we must convert this into a valid JSON string
@ -219,12 +219,12 @@ func (p *TJSONProtocol) WriteBinary(v []byte) error {
}
// Reading methods.
func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
func (p *TJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
p.resetContextStack() // THRIFT-3735
if isNull, err := p.ParseListBegin(); isNull || err != nil {
return name, typeId, seqId, err
}
version, err := p.ReadI32()
version, err := p.ReadI32(ctx)
if err != nil {
return name, typeId, seqId, err
}
@ -233,47 +233,47 @@ func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, se
return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e)
}
if name, err = p.ReadString(); err != nil {
if name, err = p.ReadString(ctx); err != nil {
return name, typeId, seqId, err
}
bTypeId, err := p.ReadByte()
bTypeId, err := p.ReadByte(ctx)
typeId = TMessageType(bTypeId)
if err != nil {
return name, typeId, seqId, err
}
if seqId, err = p.ReadI32(); err != nil {
if seqId, err = p.ReadI32(ctx); err != nil {
return name, typeId, seqId, err
}
return name, typeId, seqId, nil
}
func (p *TJSONProtocol) ReadMessageEnd() error {
func (p *TJSONProtocol) ReadMessageEnd(ctx context.Context) error {
err := p.ParseListEnd()
return err
}
func (p *TJSONProtocol) ReadStructBegin() (name string, err error) {
func (p *TJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
_, err = p.ParseObjectStart()
return "", err
}
func (p *TJSONProtocol) ReadStructEnd() error {
func (p *TJSONProtocol) ReadStructEnd(ctx context.Context) error {
return p.ParseObjectEnd()
}
func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
func (p *TJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
b, _ := p.reader.Peek(1)
if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] {
return "", STOP, -1, nil
}
fieldId, err := p.ReadI16()
fieldId, err := p.ReadI16(ctx)
if err != nil {
return "", STOP, fieldId, err
}
if _, err = p.ParseObjectStart(); err != nil {
return "", STOP, fieldId, err
}
sType, err := p.ReadString()
sType, err := p.ReadString(ctx)
if err != nil {
return "", STOP, fieldId, err
}
@ -281,17 +281,17 @@ func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
return "", fType, fieldId, err
}
func (p *TJSONProtocol) ReadFieldEnd() error {
func (p *TJSONProtocol) ReadFieldEnd(ctx context.Context) error {
return p.ParseObjectEnd()
}
func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
func (p *TJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
if isNull, e := p.ParseListBegin(); isNull || e != nil {
return VOID, VOID, 0, e
}
// read keyType
sKeyType, e := p.ReadString()
sKeyType, e := p.ReadString(ctx)
if e != nil {
return keyType, valueType, size, e
}
@ -301,7 +301,7 @@ func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int
}
// read valueType
sValueType, e := p.ReadString()
sValueType, e := p.ReadString(ctx)
if e != nil {
return keyType, valueType, size, e
}
@ -311,7 +311,7 @@ func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int
}
// read size
iSize, e := p.ReadI64()
iSize, e := p.ReadI64(ctx)
if e != nil {
return keyType, valueType, size, e
}
@ -321,7 +321,7 @@ func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int
return keyType, valueType, size, e
}
func (p *TJSONProtocol) ReadMapEnd() error {
func (p *TJSONProtocol) ReadMapEnd(ctx context.Context) error {
e := p.ParseObjectEnd()
if e != nil {
return e
@ -329,53 +329,53 @@ func (p *TJSONProtocol) ReadMapEnd() error {
return p.ParseListEnd()
}
func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
func (p *TJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
return p.ParseElemListBegin()
}
func (p *TJSONProtocol) ReadListEnd() error {
func (p *TJSONProtocol) ReadListEnd(ctx context.Context) error {
return p.ParseListEnd()
}
func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
func (p *TJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
return p.ParseElemListBegin()
}
func (p *TJSONProtocol) ReadSetEnd() error {
func (p *TJSONProtocol) ReadSetEnd(ctx context.Context) error {
return p.ParseListEnd()
}
func (p *TJSONProtocol) ReadBool() (bool, error) {
value, err := p.ReadI32()
func (p *TJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
value, err := p.ReadI32(ctx)
return (value != 0), err
}
func (p *TJSONProtocol) ReadByte() (int8, error) {
v, err := p.ReadI64()
func (p *TJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
v, err := p.ReadI64(ctx)
return int8(v), err
}
func (p *TJSONProtocol) ReadI16() (int16, error) {
v, err := p.ReadI64()
func (p *TJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
v, err := p.ReadI64(ctx)
return int16(v), err
}
func (p *TJSONProtocol) ReadI32() (int32, error) {
v, err := p.ReadI64()
func (p *TJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
v, err := p.ReadI64(ctx)
return int32(v), err
}
func (p *TJSONProtocol) ReadI64() (int64, error) {
func (p *TJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
v, _, err := p.ParseI64()
return v, err
}
func (p *TJSONProtocol) ReadDouble() (float64, error) {
func (p *TJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
v, _, err := p.ParseF64()
return v, err
}
func (p *TJSONProtocol) ReadString() (string, error) {
func (p *TJSONProtocol) ReadString(ctx context.Context) (string, error) {
var v string
if err := p.ParsePreValue(); err != nil {
return v, err
@ -405,7 +405,7 @@ func (p *TJSONProtocol) ReadString() (string, error) {
return v, p.ParsePostValue()
}
func (p *TJSONProtocol) ReadBinary() ([]byte, error) {
func (p *TJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
var v []byte
if err := p.ParsePreValue(); err != nil {
return nil, err
@ -444,8 +444,8 @@ func (p *TJSONProtocol) Flush(ctx context.Context) (err error) {
return NewTProtocolException(err)
}
func (p *TJSONProtocol) Skip(fieldType TType) (err error) {
return SkipDefaultDepth(p, fieldType)
func (p *TJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
return SkipDefaultDepth(ctx, p, fieldType)
}
func (p *TJSONProtocol) Transport() TTransport {
@ -460,10 +460,10 @@ func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
if e := p.OutputString(s); e != nil {
return e
}
if e := p.WriteI64(int64(size)); e != nil {
if e := p.OutputI64(int64(size)); e != nil {
return e
}
return nil
@ -473,7 +473,11 @@ func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error)
if isNull, e := p.ParseListBegin(); isNull || e != nil {
return VOID, 0, e
}
sElemType, err := p.ReadString()
// We don't really use the ctx in ReadString implementation,
// so this is safe for now.
// We might want to add context to ParseElemListBegin if we start to use
// ctx in ReadString implementation in the future.
sElemType, err := p.ReadString(context.Background())
if err != nil {
return VOID, size, err
}
@ -481,7 +485,7 @@ func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error)
if err != nil {
return elemType, size, err
}
nSize, err2 := p.ReadI64()
nSize, _, err2 := p.ParseI64()
size = int(nSize)
return elemType, size, err2
}
@ -490,7 +494,11 @@ func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error)
if isNull, e := p.ParseListBegin(); isNull || e != nil {
return VOID, 0, e
}
sElemType, err := p.ReadString()
// We don't really use the ctx in ReadString implementation,
// so this is safe for now.
// We might want to add context to ParseElemListBegin if we start to use
// ctx in ReadString implementation in the future.
sElemType, err := p.ReadString(context.Background())
if err != nil {
return VOID, size, err
}
@ -498,7 +506,7 @@ func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error)
if err != nil {
return elemType, size, err
}
nSize, err2 := p.ReadI64()
nSize, _, err2 := p.ParseI64()
size = int(nSize)
return elemType, size, err2
}
@ -579,3 +587,5 @@ func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) {
e := fmt.Errorf("Unknown type identifier: %s", fieldType)
return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e)
}
var _ TConfigurationSetter = (*TJSONProtocol)(nil)

View File

@ -0,0 +1,69 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"log"
"os"
"testing"
)
// Logger is a simple wrapper of a logging function.
//
// In reality, users might use different logging libraries, which are not
// always compatible with each other.
//
// Logger is meant to be a simple common ground that it's easy to wrap whatever
// logging library they use into.
//
// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design
// discussion behind it.
type Logger func(msg string)
// NopLogger is a Logger implementation that does nothing.
func NopLogger(msg string) {}
// StdLogger wraps stdlib log package into a Logger.
//
// If the logger passed in is nil, it falls back to stderr with default flags.
func StdLogger(logger *log.Logger) Logger {
if logger == nil {
logger = log.New(os.Stderr, "", log.LstdFlags)
}
return func(msg string) {
logger.Print(msg)
}
}
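Two small sketches of building Logger values: one backed by StdLogger above, one adapting an arbitrary function; the prefix is illustrative and only the stdlib log package already imported here is used.
var stderrLogger Logger = StdLogger(nil) // stderr with default flags

var prefixedLogger Logger = func(msg string) {
    log.Printf("thrift: %s", msg) // forward with an illustrative prefix
}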
// TestLogger is a Logger implementation that can be used in test code.
//
// It fails the test when being called.
func TestLogger(tb testing.TB) Logger {
return func(msg string) {
tb.Errorf("logger called with msg: %q", msg)
}
}
func fallbackLogger(logger Logger) Logger {
if logger == nil {
return StdLogger(nil)
}
return logger
}

View File

@ -0,0 +1,109 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import "context"
// ProcessorMiddleware is a function that can be passed to WrapProcessor to wrap the
// TProcessorFunctions for that TProcessor.
//
// Middlewares are passed in the name of the function as set in the processor
// map of the TProcessor.
type ProcessorMiddleware func(name string, next TProcessorFunction) TProcessorFunction
// WrapProcessor takes an existing TProcessor and wraps each of its inner
// TProcessorFunctions with the middlewares passed in and returns it.
//
// Middlewares will be called in the order that they are defined:
//
// 1. Middlewares[0]
// 2. Middlewares[1]
// ...
// N. Middlewares[n]
func WrapProcessor(processor TProcessor, middlewares ...ProcessorMiddleware) TProcessor {
for name, processorFunc := range processor.ProcessorMap() {
wrapped := processorFunc
// Add middlewares in reverse so the first in the list is the outermost.
for i := len(middlewares) - 1; i >= 0; i-- {
wrapped = middlewares[i](name, wrapped)
}
processor.AddToProcessorMap(name, wrapped)
}
return processor
}
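A hedged sketch of a middleware built with the WrappedTProcessorFunction helper defined just below; the log call assumes the stdlib log package (not imported in this file), and the final line shows how it would be applied.
func loggingMiddleware(name string, next TProcessorFunction) TProcessorFunction {
    return WrappedTProcessorFunction{
        Wrapped: func(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) {
            // Log the method name before delegating to the wrapped function.
            log.Printf("thrift: handling %q", name)
            return next.Process(ctx, seqID, in, out)
        },
    }
}

// wrapped := WrapProcessor(processor, loggingMiddleware)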
// WrappedTProcessorFunction is a convenience struct that implements the
// TProcessorFunction interface and can be used when implementing custom
// Middleware.
type WrappedTProcessorFunction struct {
// Wrapped is called by WrappedTProcessorFunction.Process and should be a
// "wrapped" call to a base TProcessorFunc.Process call.
Wrapped func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
}
// Process implements the TProcessorFunction interface using p.Wrapped.
func (p WrappedTProcessorFunction) Process(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) {
return p.Wrapped(ctx, seqID, in, out)
}
// verify that WrappedTProcessorFunction implements TProcessorFunction
var (
_ TProcessorFunction = WrappedTProcessorFunction{}
_ TProcessorFunction = (*WrappedTProcessorFunction)(nil)
)
// ClientMiddleware can be passed to WrapClient in order to wrap TClient calls
// with custom middleware.
type ClientMiddleware func(TClient) TClient
// WrappedTClient is a convenience struct that implements the TClient interface
// using the inner Wrapped function.
//
// This is provided to aid in developing ClientMiddleware.
type WrappedTClient struct {
Wrapped func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
}
// Call implements the TClient interface by calling and returning c.Wrapped.
func (c WrappedTClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
return c.Wrapped(ctx, method, args, result)
}
// verify that WrappedTClient implements TClient
var (
_ TClient = WrappedTClient{}
_ TClient = (*WrappedTClient)(nil)
)
// WrapClient wraps the given TClient in the given middlewares.
//
// Middlewares will be called in the order that they are defined:
//
// 1. Middlewares[0]
// 2. Middlewares[1]
// ...
// N. Middlewares[n]
func WrapClient(client TClient, middlewares ...ClientMiddleware) TClient {
// Add middlewares in reverse so the first in the list is the outermost.
for i := len(middlewares) - 1; i >= 0; i-- {
client = middlewares[i](client)
}
return client
}
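Correspondingly, a sketch of a ClientMiddleware built on WrappedTClient; it reuses the SetHeader context helper from this package, and the header key is illustrative.
func headerClientMiddleware(next TClient) TClient {
    return WrappedTClient{
        Wrapped: func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
            // Attach an outgoing header before every call.
            ctx = SetHeader(ctx, "client-method", method)
            return next.Call(ctx, method, args, result)
        },
    }
}

// client = WrapClient(client, headerClientMiddleware)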

View File

@ -68,11 +68,11 @@ func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplex
}
}
func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
func (t *TMultiplexedProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
if typeId == CALL || typeId == ONEWAY {
return t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid)
return t.TProtocol.WriteMessageBegin(ctx, t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid)
} else {
return t.TProtocol.WriteMessageBegin(name, typeId, seqid)
return t.TProtocol.WriteMessageBegin(ctx, name, typeId, seqid)
}
}
@ -117,6 +117,67 @@ func NewTMultiplexedProcessor() *TMultiplexedProcessor {
}
}
// ProcessorMap returns a mapping of "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}"
// to TProcessorFunction for any registered processors. If there is also a
// DefaultProcessor, the keys for the methods on that processor will simply be
// "{FunctionName}". If the TMultiplexedProcessor has both a DefaultProcessor and
// other registered processors, then the keys will be a mix of both formats.
//
// The implementation differs with other TProcessors in that the map returned is
// a new map, while most TProcessors just return their internal mapping directly.
// This means that edits to the map returned by this implementation of ProcessorMap
// will not affect the underlying mapping within the TMultiplexedProcessor.
func (t *TMultiplexedProcessor) ProcessorMap() map[string]TProcessorFunction {
processorFuncMap := make(map[string]TProcessorFunction)
for name, processor := range t.serviceProcessorMap {
for method, processorFunc := range processor.ProcessorMap() {
processorFuncName := name + MULTIPLEXED_SEPARATOR + method
processorFuncMap[processorFuncName] = processorFunc
}
}
if t.DefaultProcessor != nil {
for method, processorFunc := range t.DefaultProcessor.ProcessorMap() {
processorFuncMap[method] = processorFunc
}
}
return processorFuncMap
}
// AddToProcessorMap updates the underlying TProcessor ProcessorMaps depending on
// the format of "name".
//
// If "name" is in the format "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}",
// then it sets the given TProcessorFunction on the inner TProcessor with the
// ProcessorName component using the FunctionName component.
//
// If "name" is just in the format "{FunctionName}", that is to say there is no
// MULTIPLEXED_SEPARATOR, and the TMultiplexedProcessor has a DefaultProcessor
// configured, then it will set the given TProcessorFunction on the DefaultProcessor
// using the given name.
//
// If there is not a TProcessor available for the given name, then this function
// does nothing. This can happen when there is no TProcessor registered for
// the given ProcessorName or if all that is given is the FunctionName and there
// is no DefaultProcessor set.
func (t *TMultiplexedProcessor) AddToProcessorMap(name string, processorFunc TProcessorFunction) {
components := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
if len(components) != 2 {
if t.DefaultProcessor != nil && len(components) == 1 {
t.DefaultProcessor.AddToProcessorMap(components[0], processorFunc)
}
return
}
processorName := components[0]
funcName := components[1]
if processor, ok := t.serviceProcessorMap[processorName]; ok {
processor.AddToProcessorMap(funcName, processorFunc)
}
}
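For illustration, a sketch of how the two key formats coexist; the service and method names are illustrative, and MULTIPLEXED_SEPARATOR is assumed to be ":" as in the rest of this package.
mux := NewTMultiplexedProcessor()
mux.RegisterProcessor("Calculator", calculatorProcessor) // calculatorProcessor: any TProcessor
mux.RegisterDefault(healthProcessor)                     // healthProcessor: any TProcessor

// ProcessorMap now holds keys like "Calculator:add" from the named processor
// alongside plain "ping" from the default processor; both forms are accepted
// by AddToProcessorMap.
for name := range mux.ProcessorMap() {
    _ = name
}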
// verify that TMultiplexedProcessor implements TProcessor
var _ TProcessor = (*TMultiplexedProcessor)(nil)
func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) {
t.DefaultProcessor = processor
}
@ -129,12 +190,12 @@ func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProces
}
func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
name, typeId, seqid, err := in.ReadMessageBegin()
name, typeId, seqid, err := in.ReadMessageBegin(ctx)
if err != nil {
return false, err
return false, NewTProtocolException(err)
}
if typeId != CALL && typeId != ONEWAY {
return false, fmt.Errorf("Unexpected message type %v", typeId)
return false, NewTProtocolException(fmt.Errorf("Unexpected message type %v", typeId))
}
//extract the service name
v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
@ -143,11 +204,17 @@ func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol)
smb := NewStoredMessageProtocol(in, name, typeId, seqid)
return t.DefaultProcessor.Process(ctx, smb, out)
}
return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name)
return false, NewTProtocolException(fmt.Errorf(
"Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?",
name,
))
}
actualProcessor, ok := t.serviceProcessorMap[v[0]]
if !ok {
return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0])
return false, NewTProtocolException(fmt.Errorf(
"Service name not found: %s. Did you forget to call registerProcessor()?",
v[0],
))
}
smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
return actualProcessor.Process(ctx, smb, out)
@ -165,6 +232,6 @@ func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageTy
return &storedMessageProtocol{protocol, name, typeId, seqid}
}
func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
func (s *storedMessageProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) {
return s.name, s.typeId, s.seqid, nil
}

View File

@ -69,14 +69,14 @@ func NewNumericFromDouble(dValue float64) Numeric {
func NewNumericFromI64(iValue int64) Numeric {
dValue := float64(iValue)
sValue := string(iValue)
sValue := strconv.FormatInt(iValue, 10)
isNil := false
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
}
func NewNumericFromI32(iValue int32) Numeric {
dValue := float64(iValue)
sValue := string(iValue)
sValue := strconv.FormatInt(int64(iValue), 10)
isNil := false
return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
}

View File

@ -25,6 +25,16 @@ import "context"
// writes to some output stream.
type TProcessor interface {
Process(ctx context.Context, in, out TProtocol) (bool, TException)
// ProcessorMap returns a map of thrift method names to TProcessorFunctions.
ProcessorMap() map[string]TProcessorFunction
// AddToProcessorMap adds the given TProcessorFunction to the internal
// processor map at the given key.
//
// If one is already set at the given key, it will be replaced with the new
// TProcessorFunction.
AddToProcessorMap(string, TProcessorFunction)
}
type TProcessorFunction interface {

View File

@ -0,0 +1,177 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"errors"
"fmt"
)
const (
VERSION_MASK = 0xffff0000
VERSION_1 = 0x80010000
)
type TProtocol interface {
WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error
WriteMessageEnd(ctx context.Context) error
WriteStructBegin(ctx context.Context, name string) error
WriteStructEnd(ctx context.Context) error
WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error
WriteFieldEnd(ctx context.Context) error
WriteFieldStop(ctx context.Context) error
WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error
WriteMapEnd(ctx context.Context) error
WriteListBegin(ctx context.Context, elemType TType, size int) error
WriteListEnd(ctx context.Context) error
WriteSetBegin(ctx context.Context, elemType TType, size int) error
WriteSetEnd(ctx context.Context) error
WriteBool(ctx context.Context, value bool) error
WriteByte(ctx context.Context, value int8) error
WriteI16(ctx context.Context, value int16) error
WriteI32(ctx context.Context, value int32) error
WriteI64(ctx context.Context, value int64) error
WriteDouble(ctx context.Context, value float64) error
WriteString(ctx context.Context, value string) error
WriteBinary(ctx context.Context, value []byte) error
ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error)
ReadMessageEnd(ctx context.Context) error
ReadStructBegin(ctx context.Context) (name string, err error)
ReadStructEnd(ctx context.Context) error
ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error)
ReadFieldEnd(ctx context.Context) error
ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error)
ReadMapEnd(ctx context.Context) error
ReadListBegin(ctx context.Context) (elemType TType, size int, err error)
ReadListEnd(ctx context.Context) error
ReadSetBegin(ctx context.Context) (elemType TType, size int, err error)
ReadSetEnd(ctx context.Context) error
ReadBool(ctx context.Context) (value bool, err error)
ReadByte(ctx context.Context) (value int8, err error)
ReadI16(ctx context.Context) (value int16, err error)
ReadI32(ctx context.Context) (value int32, err error)
ReadI64(ctx context.Context) (value int64, err error)
ReadDouble(ctx context.Context) (value float64, err error)
ReadString(ctx context.Context) (value string, err error)
ReadBinary(ctx context.Context) (value []byte, err error)
Skip(ctx context.Context, fieldType TType) (err error)
Flush(ctx context.Context) (err error)
Transport() TTransport
}
// The maximum recursive depth the skip() function will traverse
const DEFAULT_RECURSION_DEPTH = 64
// Skips over the next data element from the provided input TProtocol object.
func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) {
return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH)
}
// Skips over the next data element from the provided input TProtocol object.
func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) {
if maxDepth <= 0 {
return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
}
switch fieldType {
case BOOL:
_, err = self.ReadBool(ctx)
return
case BYTE:
_, err = self.ReadByte(ctx)
return
case I16:
_, err = self.ReadI16(ctx)
return
case I32:
_, err = self.ReadI32(ctx)
return
case I64:
_, err = self.ReadI64(ctx)
return
case DOUBLE:
_, err = self.ReadDouble(ctx)
return
case STRING:
_, err = self.ReadString(ctx)
return
case STRUCT:
if _, err = self.ReadStructBegin(ctx); err != nil {
return err
}
for {
_, typeId, _, _ := self.ReadFieldBegin(ctx)
if typeId == STOP {
break
}
err := Skip(ctx, self, typeId, maxDepth-1)
if err != nil {
return err
}
self.ReadFieldEnd(ctx)
}
return self.ReadStructEnd(ctx)
case MAP:
keyType, valueType, size, err := self.ReadMapBegin(ctx)
if err != nil {
return err
}
for i := 0; i < size; i++ {
err := Skip(ctx, self, keyType, maxDepth-1)
if err != nil {
return err
}
self.Skip(ctx, valueType)
}
return self.ReadMapEnd(ctx)
case SET:
elemType, size, err := self.ReadSetBegin(ctx)
if err != nil {
return err
}
for i := 0; i < size; i++ {
err := Skip(ctx, self, elemType, maxDepth-1)
if err != nil {
return err
}
}
return self.ReadSetEnd(ctx)
case LIST:
elemType, size, err := self.ReadListBegin(ctx)
if err != nil {
return err
}
for i := 0; i < size; i++ {
err := Skip(ctx, self, elemType, maxDepth-1)
if err != nil {
return err
}
}
return self.ReadListEnd(ctx)
default:
return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("unknown data type %d", fieldType))
}
return nil
}
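// Illustrative sketch only (not part of the upstream file): the read loop
// that generated decoders use, falling back to SkipDefaultDepth for any
// field id they do not recognize so that unknown fields are tolerated.
// The struct layout (field 1 as an I32 "count") is invented.
func readExampleStruct(ctx context.Context, prot TProtocol) (count int32, err error) {
	if _, err = prot.ReadStructBegin(ctx); err != nil {
		return 0, err
	}
	for {
		_, fieldType, fieldID, err := prot.ReadFieldBegin(ctx)
		if err != nil {
			return 0, err
		}
		if fieldType == STOP {
			break
		}
		switch fieldID {
		case 1:
			if count, err = prot.ReadI32(ctx); err != nil {
				return 0, err
			}
		default:
			// Unknown field: skip it, bounded by DEFAULT_RECURSION_DEPTH.
			if err = SkipDefaultDepth(ctx, prot, fieldType); err != nil {
				return 0, err
			}
		}
		if err = prot.ReadFieldEnd(ctx); err != nil {
			return 0, err
		}
	}
	return count, prot.ReadStructEnd(ctx)
}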

View File

@ -21,6 +21,7 @@ package thrift
import (
"encoding/base64"
"errors"
)
// Thrift Protocol exception
@ -41,7 +42,14 @@ const (
type tProtocolException struct {
typeId int
message string
err error
msg string
}
var _ TProtocolException = (*tProtocolException)(nil)
func (tProtocolException) TExceptionType() TExceptionType {
return TExceptionTypeProtocol
}
func (p *tProtocolException) TypeId() int {
@ -49,29 +57,48 @@ func (p *tProtocolException) TypeId() int {
}
func (p *tProtocolException) String() string {
return p.message
return p.msg
}
func (p *tProtocolException) Error() string {
return p.message
return p.msg
}
func (p *tProtocolException) Unwrap() error {
return p.err
}
func NewTProtocolException(err error) TProtocolException {
if err == nil {
return nil
}
if e, ok := err.(TProtocolException); ok {
return e
}
if _, ok := err.(base64.CorruptInputError); ok {
return &tProtocolException{INVALID_DATA, err.Error()}
if errors.As(err, new(base64.CorruptInputError)) {
return NewTProtocolExceptionWithType(INVALID_DATA, err)
}
return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()}
return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err)
}
func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
if err == nil {
return nil
}
return &tProtocolException{errType, err.Error()}
return &tProtocolException{
typeId: errType,
err: err,
msg: err.Error(),
}
}
func prependTProtocolException(prepend string, err TProtocolException) TProtocolException {
return &tProtocolException{
typeId: err.TypeId(),
err: err,
msg: prepend + err.Error(),
}
}
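// Illustrative sketch only (not part of the upstream file): because the
// constructors above now keep the original error and Unwrap exposes it,
// the standard errors helpers still reach the wrapped cause.
// isCorruptBase64 is a hypothetical caller-side helper.
func isCorruptBase64(err error) bool {
	perr := NewTProtocolException(err)
	if perr == nil {
		return false
	}
	var corrupt base64.CorruptInputError
	// errors.As follows Unwrap, so a base64.CorruptInputError wrapped by
	// NewTProtocolException is still discoverable here.
	return errors.As(perr, &corrupt)
}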

View File

@ -0,0 +1,94 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
)
// See https://godoc.org/context#WithValue on why we need the unexported typedefs.
type responseHelperKey struct{}
// TResponseHelper defines an object with a set of helper functions that can be
// retrieved from the context object passed into server handler functions.
//
// Use GetResponseHelper to retrieve the injected TResponseHelper implementation
// from the context object.
//
// The zero value of TResponseHelper is valid, with all helper functions being
// no-ops.
type TResponseHelper struct {
// THeader related functions
*THeaderResponseHelper
}
// THeaderResponseHelper defines THeader related TResponseHelper functions.
//
// The zero value of *THeaderResponseHelper is valid, with all helper functions
// being no-ops.
type THeaderResponseHelper struct {
proto *THeaderProtocol
}
// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the
// underlying TProtocol.
func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper {
if hp, ok := proto.(*THeaderProtocol); ok {
return &THeaderResponseHelper{
proto: hp,
}
}
return nil
}
// SetHeader sets a response header.
//
// It's a no-op if the underlying protocol/transport does not support THeader.
func (h *THeaderResponseHelper) SetHeader(key, value string) {
if h != nil && h.proto != nil {
h.proto.SetWriteHeader(key, value)
}
}
// ClearHeaders clears all the response headers previously set.
//
// It's a no-op if the underlying protocol/transport does not support THeader.
func (h *THeaderResponseHelper) ClearHeaders() {
if h != nil && h.proto != nil {
h.proto.ClearWriteHeaders()
}
}
// GetResponseHelper retrieves the TResponseHelper implementation injected into
// the context object.
//
// If no helper was found in the context object, a no-op helper with ok == false
// will be returned.
func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) {
if v := ctx.Value(responseHelperKey{}); v != nil {
helper, ok = v.(TResponseHelper)
}
return
}
// SetResponseHelper injects TResponseHelper into the context object.
func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context {
return context.WithValue(ctx, responseHelperKey{}, helper)
}
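// Illustrative sketch only (not part of the upstream file): a server handler
// pulling the helper out of its request context. The handler name and header
// key are invented. When the transport is not THeader, the zero value returned
// by GetResponseHelper makes SetHeader a no-op, so the handler does not need
// to branch on the transport type.
func examplePingHandler(ctx context.Context) (string, error) {
	helper, _ := GetResponseHelper(ctx)
	helper.SetHeader("request-handled-by", "example-server")
	return "pong", nil
}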

Some files were not shown because too many files have changed in this diff