Upgrade go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
Signed-off-by: krynju <krystian.gulinski@juliahub.com>
This commit is contained in:
parent 3ddd142339
commit abbe03efef
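For context, a minimal sketch of how the upgraded go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp package is typically wired into an HTTP server and client. This is illustrative only, not code from this repository; the operation name "registry" and the listen address are placeholder assumptions.

```go
package main

import (
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Wrap a plain handler so every request gets a span and HTTP metrics.
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	handler := otelhttp.NewHandler(api, "registry") // "registry" is a placeholder operation name

	// Outgoing requests can be traced the same way through the transport wrapper.
	client := &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}
	_ = client

	log.Fatal(http.ListenAndServe(":5000", handler))
}
```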
go.mod  (71 changes)
@@ -1,6 +1,8 @@
 module github.com/distribution/distribution/v3
 
-go 1.22.0
+go 1.22.7
 
+toolchain go1.23.2
+
 require (
 	cloud.google.com/go/storage v1.45.0
@@ -20,7 +22,7 @@ require (
 	github.com/gorilla/handlers v1.5.2
 	github.com/gorilla/mux v1.8.1
 	github.com/hashicorp/golang-lru/arc/v2 v2.0.5
-	github.com/klauspost/compress v1.17.9
+	github.com/klauspost/compress v1.17.11
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0
@@ -29,14 +31,14 @@ require (
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.0
 	github.com/stretchr/testify v1.9.0
-	go.opentelemetry.io/contrib/exporters/autoexport v0.54.0
+	go.opentelemetry.io/contrib/exporters/autoexport v0.57.0
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0
-	go.opentelemetry.io/otel v1.29.0
+	go.opentelemetry.io/otel v1.32.0
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0
-	go.opentelemetry.io/otel/sdk v1.29.0
+	go.opentelemetry.io/otel/sdk v1.32.0
-	go.opentelemetry.io/otel/trace v1.29.0
+	go.opentelemetry.io/otel/trace v1.32.0
-	golang.org/x/crypto v0.27.0
+	golang.org/x/crypto v0.28.0
-	golang.org/x/net v0.29.0
+	golang.org/x/net v0.30.0
 	golang.org/x/oauth2 v0.23.0
 	google.golang.org/api v0.197.0
 	gopkg.in/yaml.v2 v2.4.0
@@ -73,7 +75,7 @@ require (
 	github.com/google/s2a-go v0.1.8 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
 	github.com/googleapis/gax-go/v2 v2.13.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
@@ -82,39 +84,40 @@ require (
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.20.1 // indirect; updated to latest
+	github.com/prometheus/client_golang v1.20.5 // indirect; updated to latest
 	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.55.0 // indirect
+	github.com/prometheus/common v0.60.1 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/bridges/prometheus v0.54.0 // indirect
+	go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect
 	go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.5.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
-	go.opentelemetry.io/otel/exporters/prometheus v0.51.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.5.0 // indirect
+	go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect
-	go.opentelemetry.io/otel/log v0.5.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect
-	go.opentelemetry.io/otel/metric v1.29.0 // indirect
+	go.opentelemetry.io/otel/log v0.8.0 // indirect
-	go.opentelemetry.io/otel/sdk/log v0.5.0 // indirect
+	go.opentelemetry.io/otel/metric v1.32.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect
+	go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
-	golang.org/x/sync v0.8.0
+	golang.org/x/sync v0.9.0
-	golang.org/x/sys v0.25.0 // indirect
+	golang.org/x/sys v0.27.0 // indirect
-	golang.org/x/text v0.18.0 // indirect
+	golang.org/x/text v0.20.0 // indirect
 	golang.org/x/time v0.6.0 // indirect
 	google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
-	google.golang.org/grpc v1.66.2 // indirect
+	google.golang.org/grpc v1.68.0 // indirect
 	google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect
-	google.golang.org/protobuf v1.34.2 // indirect
+	google.golang.org/protobuf v1.35.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
go.sum  (138 changes)
@@ -155,8 +155,8 @@ github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyE
 github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
 github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw=
 github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU=
 github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
@@ -170,8 +170,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -207,8 +207,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.20.1 h1:IMJXHOD6eARkQpxo8KkhgEVFlBNm+nkrFUyGlIu7Na8=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
-github.com/prometheus/client_golang v1.20.1/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -216,8 +216,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
@@ -230,8 +230,8 @@ github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnA
 github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
 github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY=
 github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
@@ -255,50 +255,52 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/bridges/prometheus v0.54.0 h1:WWL67oxtknNVMb70lJXxXruf8UyK/a9hmIE1XO3Uedg=
+go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
-go.opentelemetry.io/contrib/bridges/prometheus v0.54.0/go.mod h1:LqNcnXmyULp8ertk4hUTVtSUvKXj4h1Mx7gUCSSr/q0=
+go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
 go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ=
 go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU=
-go.opentelemetry.io/contrib/exporters/autoexport v0.54.0 h1:dTmcmVm4J54IRPGm5oVjLci1uYat4UDea84E2tyBaAk=
+go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
-go.opentelemetry.io/contrib/exporters/autoexport v0.54.0/go.mod h1:zPp5Fwpq2Hc7xMtVttg6GhZMcfTESjVbY9ONw2o/Dc4=
+go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g=
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94=
-go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
+go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
-go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
+go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.5.0 h1:4d++HQ+Ihdl+53zSjtsCUFDmNMju2FC9qFkUlTxPLqo=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.5.0/go.mod h1:mQX5dTO3Mh5ZF7bPKDkt5c/7C41u/SiDr9XgTpzXXn8=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 h1:k6fQVDQexDE+3jG2SfCQjnHS7OamcP73YMoxEVq5B6k=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0/go.mod h1:t4BrYLHU450Zo9fnydWlIuswB1bm7rM8havDpWOJeDo=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0 h1:xvhQxJ/C9+RTnAj5DpTg7LSM1vbbMTiXt7e9hsfqHNw=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0/go.mod h1:Fcvs2Bz1jkDM+Wf5/ozBGmi3tQ/c9zPKLnsipnfhGAo=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ=
-go.opentelemetry.io/otel/exporters/prometheus v0.51.0 h1:G7uexXb/K3T+T9fNLCCKncweEtNEBMTO+46hKX5EdKw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
-go.opentelemetry.io/otel/exporters/prometheus v0.51.0/go.mod h1:v0mFe5Kk7woIh938mrZBJBmENYquyA0IICrlYm4Y0t4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.5.0 h1:ThVXnEsdwNcxdBO+r96ci1xbF+PgNjwlk457VNuJODo=
+go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.5.0/go.mod h1:rHWcSmC4q2h3gje/yOq6sAOaq8+UHxN/Ru3BbmDXOfY=
+go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 h1:X3ZjNp36/WlkSYx0ul2jw4PtbNEDDeLskw3VPsrpYM0=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0/go.mod h1:2uL/xnOXh0CHOBFCWXz5u1A4GXLiW+0IQIzVbeOEQ0U=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk=
-go.opentelemetry.io/otel/log v0.5.0 h1:x1Pr6Y3gnXgl1iFBwtGy1W/mnzENoK0w0ZoaeOI3i30=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw=
-go.opentelemetry.io/otel/log v0.5.0/go.mod h1:NU/ozXeGuOR5/mjCRXYbTC00NFJ3NYuraV/7O78F0rE=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
-go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
+go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
-go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
+go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
-go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
+go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
-go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
+go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
-go.opentelemetry.io/otel/sdk/log v0.5.0 h1:A+9lSjlZGxkQOr7QSBJcuyyYBw79CufQ69saiJLey7o=
+go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
-go.opentelemetry.io/otel/sdk/log v0.5.0/go.mod h1:zjxIW7sw1IHolZL2KlSAtrUi8JHttoeiQy43Yl3WuVQ=
+go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
-go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY=
+go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
-go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ=
+go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
-go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
+go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
-go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
+go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
+go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
+go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -306,8 +308,8 @@ go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
+golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -320,8 +322,8 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
-golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
+golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
 golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
@@ -329,8 +331,8 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -342,12 +344,12 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
-golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
-golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
 golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
 golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -365,17 +367,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU=
 google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
+google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
+google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
+google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
-google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
+google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
 google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw=
 google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -387,8 +389,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go  (6 changes, generated, vendored)
@@ -94,7 +94,7 @@ func Int64(val string) (int64, error) {
 }
 
 // Int64Slice converts 'val' where individual integers are separated by
-// 'sep' into a int64 slice.
+// 'sep' into an int64 slice.
 func Int64Slice(val, sep string) ([]int64, error) {
 	s := strings.Split(val, sep)
 	values := make([]int64, len(s))
@@ -118,7 +118,7 @@ func Int32(val string) (int32, error) {
 }
 
 // Int32Slice converts 'val' where individual integers are separated by
-// 'sep' into a int32 slice.
+// 'sep' into an int32 slice.
 func Int32Slice(val, sep string) ([]int32, error) {
 	s := strings.Split(val, sep)
 	values := make([]int32, len(s))
@@ -190,7 +190,7 @@ func Bytes(val string) ([]byte, error) {
 }
 
 // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
-// base64 without padding, are separated by 'sep' into a slice of bytes slices slice.
+// base64 without padding, are separated by 'sep' into a slice of byte slices.
 func BytesSlice(val, sep string) ([][]byte, error) {
 	s := strings.Split(val, sep)
 	values := make([][]byte, len(s))
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go  (15 changes, generated, vendored)
@@ -81,6 +81,21 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R
 	mux.errorHandler(ctx, mux, marshaler, w, r, err)
 }
 
+// HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection.
+func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+	st := mux.streamErrorHandler(ctx, err)
+	msg := errorChunk(st)
+	buf, err := marshaler.Marshal(msg)
+	if err != nil {
+		grpclog.Errorf("Failed to marshal an error: %v", err)
+		return
+	}
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Errorf("Failed to notify error to client: %v", err)
+		return
+	}
+}
+
 // DefaultHTTPErrorHandler is the default error handler.
 // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
 // If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is
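The hunk above vendors the new HTTPStreamError helper from grpc-gateway v2.23.0, which writes a marshaled status chunk to the client without closing the stream. A hedged sketch of how calling code could use it; the helper below is illustrative only and not part of the gateway or of this repository:

```go
package gatewayutil

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

// flushStreamError reports a mid-stream failure to the client while keeping
// the HTTP connection open, using the HTTPStreamError helper shown above.
func flushStreamError(ctx context.Context, mux *runtime.ServeMux, w http.ResponseWriter, r *http.Request, sendErr error) {
	// Pick the outbound marshaler negotiated for this request.
	_, outbound := runtime.MarshalerForRequest(mux, r)
	runtime.HTTPStreamError(ctx, mux, outbound, w, r, sendErr)
	if f, ok := w.(http.Flusher); ok {
		f.Flush() // push the error chunk to the client immediately
	}
}
```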
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go  (2 changes, generated, vendored)
@@ -155,7 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string {
 	return paths
 }
 
-// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask
+// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
 type fieldMaskPathItem struct {
 	// the list of prior fields leading up to node connected by dots
 	path string
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go  (4 changes, generated, vendored)
@@ -86,8 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
 // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
 //
 // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
-// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
-// with a "application/json" Content-Type.
+// with an "application/json" Content-Type.
 // "*" can be used to match any Content-Type.
 // This can be attached to a ServerMux with the marshaler option.
 func makeMarshalerMIMERegistry() marshalerRegistry {
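The comment fixed above describes registering marshalers per Content-Type. For context, a hedged sketch of that kind of registration with grpc-gateway's v2 runtime package; the protojson options chosen here are arbitrary examples, not settings used by this repository:

```go
package gatewayutil

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/encoding/protojson"
)

// newMux builds a gateway mux whose wildcard ("*") Content-Type entry, as
// described in the comment above, resolves to a protojson-backed marshaler.
func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{
			MarshalOptions:   protojson.MarshalOptions{EmitUnpopulated: true},
			UnmarshalOptions: protojson.UnmarshalOptions{DiscardUnknown: true},
		}),
	)
}
```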
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go  (4 changes, generated, vendored)
@@ -40,7 +40,7 @@ func Float32P(val string) (*float32, error) {
 }
 
 // Int64P parses the given string representation of an integer
-// and returns a pointer to a int64 whose value is same as the parsed integer.
+// and returns a pointer to an int64 whose value is same as the parsed integer.
 func Int64P(val string) (*int64, error) {
 	i, err := Int64(val)
 	if err != nil {
@@ -50,7 +50,7 @@ func Int64P(val string) (*int64, error) {
 }
 
 // Int32P parses the given string representation of an integer
-// and returns a pointer to a int32 whose value is same as the parsed integer.
+// and returns a pointer to an int32 whose value is same as the parsed integer.
 func Int32P(val string) (*int32, error) {
 	i, err := Int32(val)
 	if err != nil {
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go  (2 changes, generated, vendored)
@@ -1,6 +1,6 @@
 package utilities
 
-// An OpCode is a opcode of compiled path patterns.
+// OpCode is an opcode of compiled path patterns.
 type OpCode int
 
 // These constants are the valid values of OpCode.
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go  (2 changes, generated, vendored)
@@ -5,7 +5,7 @@ import (
 	"strings"
 )
 
-// flagInterface is an cut down interface to `flag`
+// flagInterface is a cut down interface to `flag`
 type flagInterface interface {
 	Var(value flag.Value, name string, usage string)
 }
vendor/github.com/klauspost/compress/.goreleaser.yml  (6 changes, generated, vendored)
@@ -1,5 +1,5 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
 before:
   hooks:
     - ./gen.sh
@@ -99,7 +99,7 @@ archives:
 checksum:
   name_template: 'checksums.txt'
 snapshot:
-  name_template: "{{ .Tag }}-next"
+  version_template: "{{ .Tag }}-next"
 changelog:
   sort: asc
   filters:
vendor/github.com/klauspost/compress/README.md  (29 changes, generated, vendored)
@@ -16,6 +16,27 @@ This package provides various compression algorithms.
 
 # changelog
 
+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
+	* gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
+	* gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
+	* s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982
+	* zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007
+	* flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996
+
+* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
+	* s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
+	* flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
+	* Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971
+	* zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951
+
+* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)
+	* zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885
+	* zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938
+
+* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)
+	* s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927
+	* s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930
+
 * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
 	* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
 	* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
@@ -81,7 +102,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 	* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
 	* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
 	* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
-	* gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+	* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
 
 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
 	* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
@@ -136,7 +157,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 	* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
 	* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
 	* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
-	* zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
+	* zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
 	* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
 	* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
 	* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
@ -339,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when
|
|||||||
* s2: Fix binaries.
|
* s2: Fix binaries.
|
||||||
|
|
||||||
* Feb 25, 2021 (v1.11.8)
|
* Feb 25, 2021 (v1.11.8)
|
||||||
* s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
|
* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
|
||||||
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
|
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
|
||||||
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
|
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
|
||||||
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
|
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
|
||||||
@ -518,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when
|
|||||||
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
|
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
|
||||||
* Feb 19, 2016: Handle small payloads faster in level 1-3.
|
* Feb 19, 2016: Handle small payloads faster in level 1-3.
|
||||||
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
|
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
|
||||||
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
|
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
|
||||||
* Feb 14, 2016: Snappy: Merge upstream changes.
|
* Feb 14, 2016: Snappy: Merge upstream changes.
|
||||||
* Feb 14, 2016: Snappy: Fix aggressive skipping.
|
* Feb 14, 2016: Snappy: Fix aggressive skipping.
|
||||||
* Feb 14, 2016: Snappy: Update benchmark.
|
* Feb 14, 2016: Snappy: Update benchmark.
|
vendor/github.com/klauspost/compress/fse/decompress.go (generated, vendored): 2 changes
@@ -15,7 +15,7 @@ const (
// It is possible, but by no way guaranteed that corrupt data will
// return an error.
// It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scrach to set maximum acceptable output size.
+// Use a predefined Scratch to set maximum acceptable output size.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
s, err := s.prepare(b)
if err != nil {
vendor/github.com/klauspost/compress/huff0/decompress.go (generated, vendored): 4 changes
@@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 0 {
-fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
continue
}
// Ensure that all combinations are covered.
@@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 20 {
-fmt.Fprintf(w, "%d errros, stopping\n", errs)
+fmt.Fprintf(w, "%d errors, stopping\n", errs)
break
}
}
vendor/github.com/klauspost/compress/zstd/blockdec.go (generated, vendored): 2 changes
@@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
printf("RLE set to 0x%x, code: %v", symb, v)
}
case compModeFSE:
+if debugDecoder {
println("Reading table for", tableIndex(i))
+}
if seq.fse == nil || seq.fse.preDefined {
seq.fse = fseDecoderPool.Get().(*fseDecoder)
}
vendor/github.com/klauspost/compress/zstd/enc_better.go (generated, vendored): 32 changes
@@ -179,9 +179,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
-lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-seq.matchLen = uint32(lenght - zstdMinMatch)
+seq.matchLen = uint32(length - zstdMinMatch)

// We might be able to match backwards.
// Extend as long as we can.
@@ -210,12 +210,12 @@ encodeLoop:

// Index match start+1 (long) -> s - 1
index0 := s + repOff
-s += lenght + repOff
+s += length + repOff

nextEmit = s
if s >= sLimit {
if debugEncoder {
-println("repeat ended", s, lenght)
+println("repeat ended", s, length)

}
break encodeLoop
@@ -241,9 +241,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
-lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)

-seq.matchLen = uint32(lenght - zstdMinMatch)
+seq.matchLen = uint32(length - zstdMinMatch)

// We might be able to match backwards.
// Extend as long as we can.
@@ -270,11 +270,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)

-s += lenght + repOff2
+s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
-println("repeat ended", s, lenght)
+println("repeat ended", s, length)

}
break encodeLoop
@@ -708,9 +708,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
-lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-seq.matchLen = uint32(lenght - zstdMinMatch)
+seq.matchLen = uint32(length - zstdMinMatch)

// We might be able to match backwards.
// Extend as long as we can.
@@ -738,12 +738,12 @@ encodeLoop:
blk.sequences = append(blk.sequences, seq)

// Index match start+1 (long) -> s - 1
-s += lenght + repOff
+s += length + repOff

nextEmit = s
if s >= sLimit {
if debugEncoder {
-println("repeat ended", s, lenght)
+println("repeat ended", s, length)

}
break encodeLoop
@@ -772,9 +772,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
-lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)

-seq.matchLen = uint32(lenght - zstdMinMatch)
+seq.matchLen = uint32(length - zstdMinMatch)

// We might be able to match backwards.
// Extend as long as we can.
@@ -801,11 +801,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)

-s += lenght + repOff2
+s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
-println("repeat ended", s, lenght)
+println("repeat ended", s, length)

}
break encodeLoop
vendor/github.com/klauspost/compress/zstd/enc_dfast.go (generated, vendored): 16 changes
@@ -138,9 +138,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
-lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-seq.matchLen = uint32(lenght - zstdMinMatch)
+seq.matchLen = uint32(length - zstdMinMatch)

// We might be able to match backwards.
// Extend as long as we can.
@@ -166,11 +166,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
-s += lenght + repOff
+s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
-println("repeat ended", s, lenght)
+println("repeat ended", s, length)

}
break encodeLoop
@@ -798,9 +798,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
-lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

-seq.matchLen = uint32(lenght - zstdMinMatch)
+seq.matchLen = uint32(length - zstdMinMatch)

// We might be able to match backwards.
// Extend as long as we can.
@@ -826,11 +826,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
-s += lenght + repOff
+s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
-println("repeat ended", s, lenght)
+println("repeat ended", s, length)

}
break encodeLoop
vendor/github.com/klauspost/compress/zstd/encoder.go (generated, vendored): 45 changes
@@ -6,6 +6,7 @@ package zstd

import (
"crypto/rand"
+"errors"
"fmt"
"io"
"math"
@@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
// and write CRC if requested.
func (e *Encoder) Write(p []byte) (n int, err error) {
s := &e.state
+if s.eofWritten {
+return 0, ErrEncoderClosed
+}
for len(p) > 0 {
if len(p)+len(s.filling) < e.o.blockSize {
if e.o.crc {
@@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error {
return nil
}
if final && len(s.filling) > 0 {
-s.current = e.EncodeAll(s.filling, s.current[:0])
+s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
var n2 int
n2, s.err = s.w.Write(s.current)
if s.err != nil {
@@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error {
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current))
s.wg.Add(1)
+if final {
+s.eofWritten = true
+}
go func(src []byte) {
if debugEncoder {
println("Adding block,", len(src), "bytes, final:", final)
@@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error {
blk := enc.Block()
enc.Encode(blk, src)
blk.last = final
-if final {
-s.eofWritten = true
-}
// Wait for pending writes.
s.wWg.Wait()
if s.writeErr != nil {
@@ -401,12 +405,20 @@ func (e *Encoder) Flush() error {
if len(s.filling) > 0 {
err := e.nextBlock(false)
if err != nil {
+// Ignore Flush after Close.
+if errors.Is(s.err, ErrEncoderClosed) {
+return nil
+}
return err
}
}
s.wg.Wait()
s.wWg.Wait()
if s.err != nil {
+// Ignore Flush after Close.
+if errors.Is(s.err, ErrEncoderClosed) {
+return nil
+}
return s.err
}
return s.writeErr
@@ -422,6 +434,9 @@ func (e *Encoder) Close() error {
}
err := e.nextBlock(true)
if err != nil {
+if errors.Is(s.err, ErrEncoderClosed) {
+return nil
+}
return err
}
if s.frameContentSize > 0 {
@@ -459,6 +474,11 @@ func (e *Encoder) Close() error {
}
_, s.err = s.w.Write(frame)
}
+if s.err == nil {
+s.err = ErrEncoderClosed
+return nil
+}
+
return s.err
}

@@ -469,6 +489,15 @@ func (e *Encoder) Close() error {
// Data compressed with EncodeAll can be decoded with the Decoder,
// using either a stream or DecodeAll.
func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+e.init.Do(e.initialize)
+enc := <-e.encoders
+defer func() {
+e.encoders <- enc
+}()
+return e.encodeAll(enc, src, dst)
+}
+
+func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
if len(src) == 0 {
if e.o.fullZero {
// Add frame header.
@@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
return dst
}
-e.init.Do(e.initialize)
-enc := <-e.encoders
-defer func() {
-// Release encoder reference to last block.
-// If a non-single block is needed the encoder will reset again.
-e.encoders <- enc
-}()
// Use single segments when above minimum window and below window size.
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
if e.o.single != nil {
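The encoder.go hunks above give the zstd Encoder an explicit closed state: Write after Close now fails with the new ErrEncoderClosed, Flush after Close is silently ignored, and EncodeAll becomes a thin wrapper around an internal encodeAll so the stream path can reuse its own encoder. A minimal sketch of the caller-visible behaviour, assuming nothing beyond the vendored zstd package and the standard library:

package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello")); err != nil {
		panic(err)
	}
	// Close finishes the frame and, per the patch above, marks the encoder closed.
	if err := enc.Close(); err != nil {
		panic(err)
	}
	// Further writes are now rejected instead of silently extending a finished frame.
	if _, err := enc.Write([]byte("more")); errors.Is(err, zstd.ErrEncoderClosed) {
		fmt.Println("write after Close:", err)
	}
}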
vendor/github.com/klauspost/compress/zstd/framedec.go (generated, vendored): 2 changes
@@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error {
}
return err
}
+if debugDecoder {
printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+}
windowLog := 10 + (wd >> 3)
windowBase := uint64(1) << windowLog
windowAdd := (windowBase / 8) * uint64(wd&0x7)
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go (generated, vendored): 4 changes
@@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)

default:
-return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
}

s.seqSize += ctx.litRemain
@@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
return io.ErrUnexpectedEOF
}

-return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
}

if ctx.litRemain < 0 {
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s (generated, vendored): 8 changes
@@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ 40(SP), AX
ADDQ AX, 48(SP)

-// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)

// outBase += outPosition
@@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ 40(SP), CX
ADDQ CX, 48(SP)

-// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)

// outBase += outPosition
@@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ 40(SP), AX
ADDQ AX, 48(SP)

-// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)

// outBase += outPosition
@@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ 40(SP), CX
ADDQ CX, 48(SP)

-// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)

// outBase += outPosition
vendor/github.com/klauspost/compress/zstd/zstd.go (generated, vendored): 4 changes
@@ -88,6 +88,10 @@ var (
// Close has been called.
ErrDecoderClosed = errors.New("decoder used after Close")

+// ErrEncoderClosed will be returned if the Encoder was used after
+// Close has been called.
+ErrEncoderClosed = errors.New("encoder used after Close")
+
// ErrDecoderNilInput is returned when a nil Reader was provided
// and an operation other than Reset/DecodeAll/Close was attempted.
ErrDecoderNilInput = errors.New("nil input provided as reader")
vendor/github.com/prometheus/client_golang/prometheus/histogram.go (generated, vendored): 88 changes
@@ -844,9 +844,7 @@ func (h *histogram) Write(out *dto.Metric) error {
}}
}

-// If exemplars are not configured, the cap will be 0.
-// So append is not needed in this case.
-if cap(h.nativeExemplars.exemplars) > 0 {
+if h.nativeExemplars.isEnabled() {
h.nativeExemplars.Lock()
his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
h.nativeExemplars.Unlock()
@@ -1658,10 +1656,17 @@ func addAndResetCounts(hot, cold *histogramCounts) {
type nativeExemplars struct {
sync.Mutex

+// Time-to-live for exemplars, it is set to -1 if exemplars are disabled, that is NativeHistogramMaxExemplars is below 0.
+// The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
ttl time.Duration

exemplars []*dto.Exemplar
}

+func (n *nativeExemplars) isEnabled() bool {
+return n.ttl != -1
+}
+
func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
if ttl == 0 {
ttl = 5 * time.Minute
@@ -1673,6 +1678,7 @@ func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {

if maxCount < 0 {
maxCount = 0
+ttl = -1
}

return nativeExemplars{
@@ -1682,20 +1688,18 @@ func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
}

func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
-if cap(n.exemplars) == 0 {
+if !n.isEnabled() {
return
}

n.Lock()
defer n.Unlock()

-// The index where to insert the new exemplar.
-var nIdx int = -1

// When the number of exemplars has not yet exceeded or
// is equal to cap(n.exemplars), then
// insert the new exemplar directly.
if len(n.exemplars) < cap(n.exemplars) {
+var nIdx int
for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
if *e.Value < *n.exemplars[nIdx].Value {
break
@@ -1705,15 +1709,44 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
return
}

+if len(n.exemplars) == 1 {
+// When the number of exemplars is 1, then
+// replace the existing exemplar with the new exemplar.
+n.exemplars[0] = e
+return
+}
+// From this point on, the number of exemplars is greater than 1.
+
// When the number of exemplars exceeds the limit, remove one exemplar.
var (
-rIdx int // The index where to remove the old exemplar.
-
-ot = time.Now() // Oldest timestamp seen.
+ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
otIdx = -1 // Index of the exemplar with the oldest timestamp.

md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
-mdIdx = -1 // Index of the older exemplar within the closest pair.
+// The insertion point of the new exemplar in the exemplars slice after insertion.
+// This is calculated purely based on the order of the exemplars by value.
+// nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
+nIdx = -1
+
+// rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+// The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+// It is calculated in 3 steps:
+// 1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+//    That is the following will be true (on log scale):
+//    either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
+//    the closest values to each other from all pairs.
+//    For example, suppose the values are distributed like this:
+//    |-----------x-------------x----------------x----x-----|
+//                                                ^--rIdx as this is older.
+//    Or like this:
+//    |-----------x-------------x----------------x----x-----|
+//                                           ^--rIdx as this is older.
+// 2. If there is an exemplar that expired, then we simple reset rIdx to that index.
+// 3. We check if by inserting the new exemplar we would create a closer pair at
+//    (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+//    keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+rIdx = -1
cLog float64 // Logarithm of the current exemplar.
pLog float64 // Logarithm of the previous exemplar.
)
@@ -1726,7 +1759,7 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
}

// Find the index at which to insert new the exemplar.
-if *e.Value <= *exemplar.Value && nIdx == -1 {
+if nIdx == -1 && *e.Value <= *exemplar.Value {
nIdx = i
}

@@ -1738,11 +1771,13 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
}
diff := math.Abs(cLog - pLog)
if md == -1 || diff < md {
+// The closest exemplar pair is at index: i-1, i.
+// Choose the exemplar with the older timestamp for replacement.
md = diff
if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
-mdIdx = i
+rIdx = i
} else {
-mdIdx = i - 1
+rIdx = i - 1
}
}

@@ -1753,8 +1788,12 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
if nIdx == -1 {
nIdx = len(n.exemplars)
}
+// Here, we have the following relationships:
+// n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+// e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))

if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+// If the oldest exemplar has expired, then replace it with the new exemplar.
rIdx = otIdx
} else {
// In the previous for loop, when calculating the closest pair of exemplars,
@@ -1764,24 +1803,27 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
if nIdx > 0 {
diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
if diff < md {
+// The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+// v--rIdx
+// |-----------x-n-----------x----------------x----x-----|
+// nIdx-1--^ ^--new exemplar value
+// Do not make the spread worse, replace nIdx-1 and not rIdx.
md = diff
-mdIdx = nIdx
-if n.exemplars[nIdx-1].Timestamp.AsTime().Before(e.Timestamp.AsTime()) {
-mdIdx = nIdx - 1
-}
+rIdx = nIdx - 1
}
}
if nIdx < len(n.exemplars) {
diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
if diff < md {
-mdIdx = nIdx
-if n.exemplars[nIdx].Timestamp.AsTime().Before(e.Timestamp.AsTime()) {
-mdIdx = nIdx
+// The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+// v--rIdx
+// |-----------x-----------n-x----------------x----x-----|
+// new exemplar value--^ ^--nIdx
+// Do not make the spread worse, replace nIdx-1 and not rIdx.
+rIdx = nIdx
}
}
}
-rIdx = mdIdx
-}

// Adjust the slice according to rIdx and nIdx.
switch {
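The histogram.go hunks replace the cap-based exemplar check with an explicit isEnabled flag (ttl == -1 when the exemplar limit is negative) and rework addExemplar so the kept exemplars stay spread out by value. A hedged sketch of how this surfaces through client_golang's public options; the field and method names below are my reading of the HistogramOpts API, not something introduced by this diff:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A negative NativeHistogramMaxExemplars disables exemplar storage,
	// which the patched code models internally as ttl == -1.
	noExemplars := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                        "request_duration_seconds_plain",
		NativeHistogramBucketFactor: 1.1,
		NativeHistogramMaxExemplars: -1,
	})

	// A positive limit keeps up to that many exemplars; on insert the patched
	// addExemplar replaces an expired exemplar or the one whose removal best
	// preserves the spread of exemplar values.
	withExemplars := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                        "request_duration_seconds",
		NativeHistogramBucketFactor: 1.1,
		NativeHistogramMaxExemplars: 10,
		NativeHistogramExemplarTTL:  5 * time.Minute,
	})

	noExemplars.Observe(0.42)
	withExemplars.(prometheus.ExemplarObserver).ObserveWithExemplar(0.42, prometheus.Labels{"trace_id": "abc123"})
}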
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go (generated, vendored): 4 changes
@@ -203,8 +203,10 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO

defer closeWriter()

+// Set Content-Encoding only when data is compressed
+if encodingHeader != string(Identity) {
rsp.Header().Set(contentEncodingHeader, encodingHeader)
+}
enc := expfmt.NewEncoder(w, contentType)

// handleError handles the error according to opts.ErrorHandling
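The promhttp change is small: Content-Encoding is now only set when the scrape response is actually compressed, so identity responses no longer carry a misleading header. Typical wiring is unchanged; a short reference sketch (registry contents here are arbitrary, not part of the diff):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector())

	// With the patched handler, Content-Encoding appears only when gzip (or
	// another non-identity encoding) was negotiated with the scraper.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":9090", nil)
}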
vendor/github.com/prometheus/common/expfmt/decode.go (generated, vendored): 14 changes
@@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format {

mediatype, params, err := mime.ParseMediaType(ct)
if err != nil {
-return fmtUnknown
+return FmtUnknown
}

const textType = "text/plain"
@@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format {
switch mediatype {
case ProtoType:
if p, ok := params["proto"]; ok && p != ProtoProtocol {
-return fmtUnknown
+return FmtUnknown
}
if e, ok := params["encoding"]; ok && e != "delimited" {
-return fmtUnknown
+return FmtUnknown
}
-return fmtProtoDelim
+return FmtProtoDelim

case textType:
if v, ok := params["version"]; ok && v != TextVersion {
-return fmtUnknown
+return FmtUnknown
}
-return fmtText
+return FmtText
}

-return fmtUnknown
+return FmtUnknown
}

// NewDecoder returns a new decoder based on the given input format.
vendor/github.com/prometheus/common/expfmt/encode.go (generated, vendored): 24 changes
@@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format {
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
-return fmtProtoDelim + escapingScheme
+return FmtProtoDelim + escapingScheme
case "text":
-return fmtProtoText + escapingScheme
+return FmtProtoText + escapingScheme
case "compact-text":
-return fmtProtoCompact + escapingScheme
+return FmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
-return fmtText + escapingScheme
+return FmtText + escapingScheme
}
}
-return fmtText + escapingScheme
+return FmtText + escapingScheme
}

// NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
-return fmtProtoDelim + escapingScheme
+return FmtProtoDelim + escapingScheme
case "text":
-return fmtProtoText + escapingScheme
+return FmtProtoText + escapingScheme
case "compact-text":
-return fmtProtoCompact + escapingScheme
+return FmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
-return fmtText + escapingScheme
+return FmtText + escapingScheme
}
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
switch ver {
case OpenMetricsVersion_1_0_0:
-return fmtOpenMetrics_1_0_0 + escapingScheme
+return FmtOpenMetrics_1_0_0 + escapingScheme
default:
-return fmtOpenMetrics_0_0_1 + escapingScheme
+return FmtOpenMetrics_0_0_1 + escapingScheme
}
}
}
-return fmtText + escapingScheme
+return FmtText + escapingScheme
}

// NewEncoder returns a new encoder based on content type negotiation. All
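Negotiate and NegotiateIncludingOpenMetrics now build on the re-exported Fmt* values instead of the previously unexported ones, so the usual encoder pipeline keeps working unchanged. A small sketch of that pipeline, assuming an arbitrary registry and stdout as the sink:

package main

import (
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/common/expfmt"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}

	// Pick a Format from an Accept header, then encode every gathered family.
	format := expfmt.Negotiate(http.Header{"Accept": []string{"text/plain"}})
	enc := expfmt.NewEncoder(os.Stdout, format)
	for _, mf := range mfs {
		if err := enc.Encode(mf); err != nil {
			panic(err)
		}
	}
}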
vendor/github.com/prometheus/common/expfmt/expfmt.go (generated, vendored): 70 changes
@@ -35,21 +35,28 @@
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
-protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
OpenMetricsType = `application/openmetrics-text`
OpenMetricsVersion_0_0_1 = "0.0.1"
OpenMetricsVersion_1_0_0 = "1.0.0"

-// The Content-Type values for the different wire protocols. Note that these
-// values are now unexported. If code was relying on comparisons to these
-// constants, instead use FormatType().
-fmtUnknown Format = `<unknown>`
-fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
-fmtProtoDelim Format = protoFmt + ` encoding=delimited`
-fmtProtoText Format = protoFmt + ` encoding=text`
-fmtProtoCompact Format = protoFmt + ` encoding=compact-text`
-fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
-fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
+// The Content-Type values for the different wire protocols. Do not do direct
+// comparisons to these constants, instead use the comparison functions.
+// Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
+FmtUnknown Format = `<unknown>`
+// Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
+FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
+FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
+FmtProtoText Format = ProtoFmt + ` encoding=text`
+// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)

const (
@@ -79,17 +86,17 @@
func NewFormat(t FormatType) Format {
switch t {
case TypeProtoCompact:
-return fmtProtoCompact
+return FmtProtoCompact
case TypeProtoDelim:
-return fmtProtoDelim
+return FmtProtoDelim
case TypeProtoText:
-return fmtProtoText
+return FmtProtoText
case TypeTextPlain:
-return fmtText
+return FmtText
case TypeOpenMetrics:
-return fmtOpenMetrics_1_0_0
+return FmtOpenMetrics_1_0_0
default:
-return fmtUnknown
+return FmtUnknown
}
}

@@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format {
// specified version number.
func NewOpenMetricsFormat(version string) (Format, error) {
if version == OpenMetricsVersion_0_0_1 {
-return fmtOpenMetrics_0_0_1, nil
+return FmtOpenMetrics_0_0_1, nil
}
if version == OpenMetricsVersion_1_0_0 {
-return fmtOpenMetrics_1_0_0, nil
+return FmtOpenMetrics_1_0_0, nil
}
-return fmtUnknown, fmt.Errorf("unknown open metrics version string")
+return FmtUnknown, fmt.Errorf("unknown open metrics version string")
+}
+
+// WithEscapingScheme returns a copy of Format with the specified escaping
+// scheme appended to the end. If an escaping scheme already exists it is
+// removed.
+func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
+var terms []string
+for _, p := range strings.Split(string(f), ";") {
+toks := strings.Split(p, "=")
+if len(toks) != 2 {
+trimmed := strings.TrimSpace(p)
+if len(trimmed) > 0 {
+terms = append(terms, trimmed)
+}
+continue
+}
+key := strings.TrimSpace(toks[0])
+if key != model.EscapingKey {
+terms = append(terms, strings.TrimSpace(p))
+}
+}
+terms = append(terms, model.EscapingKey+"="+s.String())
+return Format(strings.Join(terms, "; "))
}

// FormatType deduces an overall FormatType for the given format.
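expfmt re-exports the Fmt* constants (now marked deprecated) and gains WithEscapingScheme; new code is steered toward the NewFormat constructor. A short sketch of both, with the printed value only indicative of the shape of the result:

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Preferred over the deprecated FmtText constant.
	f := expfmt.NewFormat(expfmt.TypeTextPlain)

	// WithEscapingScheme drops any existing escaping term and appends the new one.
	f = f.WithEscapingScheme(model.UnderscoreEscaping)

	fmt.Println(f) // e.g. "text/plain; version=0.0.4; charset=utf-8; escaping=underscores"
}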
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go (generated, vendored): 2 changes
@@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs(
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces, quoted.
-if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+if !model.IsValidLegacyMetricName(name) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
vendor/github.com/prometheus/common/expfmt/text_create.go (generated, vendored): 4 changes
@@ -354,7 +354,7 @@ func writeNameAndLabelPairs(
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces.
-if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+if !model.IsValidLegacyMetricName(name) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
@@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
// writeName writes a string as-is if it complies with the legacy naming
// scheme, or escapes it in double quotes if not.
func writeName(w enhancedWriter, name string) (int, error) {
-if model.IsValidLegacyMetricName(model.LabelValue(name)) {
+if model.IsValidLegacyMetricName(name) {
return w.WriteString(name)
}
var written int
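Both writers now pass the metric name to model.IsValidLegacyMetricName as a plain string; the model.LabelValue conversion disappears because the prometheus/common signature changed. A trivial sketch of the new call shape:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// The updated API takes the metric name directly as a string.
	for _, name := range []string{"http_requests_total", "http.requests.total"} {
		fmt.Printf("%q is a valid legacy metric name: %v\n", name, model.IsValidLegacyMetricName(name))
	}
}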
vendor/github.com/prometheus/common/expfmt/text_parse.go (generated, vendored): 158 changes
@@ -22,9 +22,9 @@ import (
"math"
"strconv"
"strings"
+"unicode/utf8"

dto "github.com/prometheus/client_model/go"

"google.golang.org/protobuf/proto"

"github.com/prometheus/common/model"
@@ -60,6 +60,7 @@ type TextParser struct {
currentMF *dto.MetricFamily
currentMetric *dto.Metric
currentLabelPair *dto.LabelPair
+currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.

// The remaining member variables are only used for summaries/histograms.
currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
@@ -74,6 +75,9 @@ type TextParser struct {
// count and sum of that summary/histogram.
currentIsSummaryCount, currentIsSummarySum bool
currentIsHistogramCount, currentIsHistogramSum bool
+// These indicate if the metric name from the current line being parsed is inside
+// braces and if that metric name was found respectively.
+currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
}

// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) {
}
p.currentQuantile = math.NaN()
p.currentBucket = math.NaN()
+p.currentMF = nil
}

// startOfLine represents the state where the next byte read from p.buf is the
// start of a line (or whitespace leading up to it).
func (p *TextParser) startOfLine() stateFn {
p.lineCount++
+p.currentMetricIsInsideBraces = false
+p.currentMetricInsideBracesIsPresent = false
if p.skipBlankTab(); p.err != nil {
// This is the only place that we expect to see io.EOF,
// which is not an error but the signal that we are done.
@@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn {
return p.startComment
case '\n':
return p.startOfLine // Empty line, start the next one.
+case '{':
+p.currentMetricIsInsideBraces = true
+return p.readingLabels
}
return p.readingMetricName
}
@@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn {
return nil // Unexpected end of input.
}
if p.currentByte == '}' {
+p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
@@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn {
p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
return nil
}
+if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+return nil // Unexpected end of input.
+}
+if p.currentByte != '=' {
+if p.currentMetricIsInsideBraces {
+if p.currentMetricInsideBracesIsPresent {
+p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
+return nil
+}
+switch p.currentByte {
+case ',':
+p.setOrCreateCurrentMF()
+if p.currentMF.Type == nil {
+p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+}
+p.currentMetric = &dto.Metric{}
+p.currentMetricInsideBracesIsPresent = true
+return p.startLabelName
+case '}':
+p.setOrCreateCurrentMF()
+if p.currentMF.Type == nil {
+p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+}
+p.currentMetric = &dto.Metric{}
+p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+p.currentLabelPairs = nil
+if p.skipBlankTab(); p.err != nil {
+return nil // Unexpected end of input.
+}
+return p.readingValue
+default:
+p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
+return nil
+}
+}
+p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+p.currentLabelPairs = nil
+return nil
+}
p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
@@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn {
// labels to 'real' labels.
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
-p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
-}
-if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
-return nil // Unexpected end of input.
-}
-if p.currentByte != '=' {
-p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
-return nil
+p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
}
// Check for duplicate label names.
labels := make(map[string]struct{})
-for _, l := range p.currentMetric.Label {
+for _, l := range p.currentLabelPairs {
lName := l.GetName()
if _, exists := labels[lName]; !exists {
labels[lName] = struct{}{}
} else {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
+p.currentLabelPairs = nil
return nil
}
}
@@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn {
if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+p.currentLabelPairs = nil
return nil
}
} else {
@@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn {
return p.startLabelName

case '}':
+if p.currentMF == nil {
+p.parseError("invalid metric name")
+return nil
+}
+p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
return p.readingValue
default:
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+p.currentLabelPairs = nil
return nil
}
}
@@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
p.currentToken.WriteByte(p.currentByte)
case 'n':
p.currentToken.WriteByte('\n')
+case '"':
+p.currentToken.WriteByte('"')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return
@@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
// but not into p.currentToken.
func (p *TextParser) readTokenAsMetricName() {
p.currentToken.Reset()
+// A UTF-8 metric name must be quoted and may have escaped characters.
+quoted := false
+escaped := false
if !isValidMetricNameStart(p.currentByte) {
return
}
-for {
+for p.err == nil {
+if escaped {
+switch p.currentByte {
+case '\\':
p.currentToken.WriteByte(p.currentByte)
+case 'n':
+p.currentToken.WriteByte('\n')
+case '"':
+p.currentToken.WriteByte('"')
+default:
+p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+return
+}
+escaped = false
+} else {
+switch p.currentByte {
+case '"':
+quoted = !quoted
+if !quoted {
p.currentByte, p.err = p.buf.ReadByte()
-if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+return
+}
+case '\n':
+p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
+return
+case '\\':
+escaped = true
+default:
+p.currentToken.WriteByte(p.currentByte)
+}
+}
+p.currentByte, p.err = p.buf.ReadByte()
+if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
return
}
}
@@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() {
// but not into p.currentToken.
func (p *TextParser) readTokenAsLabelName() {
|
func (p *TextParser) readTokenAsLabelName() {
|
||||||
p.currentToken.Reset()
|
p.currentToken.Reset()
|
||||||
|
// A UTF-8 label name must be quoted and may have escaped characters.
|
||||||
|
quoted := false
|
||||||
|
escaped := false
|
||||||
if !isValidLabelNameStart(p.currentByte) {
|
if !isValidLabelNameStart(p.currentByte) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
for {
|
for p.err == nil {
|
||||||
|
if escaped {
|
||||||
|
switch p.currentByte {
|
||||||
|
case '\\':
|
||||||
p.currentToken.WriteByte(p.currentByte)
|
p.currentToken.WriteByte(p.currentByte)
|
||||||
|
case 'n':
|
||||||
|
p.currentToken.WriteByte('\n')
|
||||||
|
case '"':
|
||||||
|
p.currentToken.WriteByte('"')
|
||||||
|
default:
|
||||||
|
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
escaped = false
|
||||||
|
} else {
|
||||||
|
switch p.currentByte {
|
||||||
|
case '"':
|
||||||
|
quoted = !quoted
|
||||||
|
if !quoted {
|
||||||
p.currentByte, p.err = p.buf.ReadByte()
|
p.currentByte, p.err = p.buf.ReadByte()
|
||||||
if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
|
return
|
||||||
|
}
|
||||||
|
case '\n':
|
||||||
|
p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
|
||||||
|
return
|
||||||
|
case '\\':
|
||||||
|
escaped = true
|
||||||
|
default:
|
||||||
|
p.currentToken.WriteByte(p.currentByte)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.currentByte, p.err = p.buf.ReadByte()
|
||||||
|
if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() {
|
|||||||
p.currentToken.WriteByte('\n')
|
p.currentToken.WriteByte('\n')
|
||||||
default:
|
default:
|
||||||
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
|
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
|
||||||
|
p.currentLabelPairs = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
escaped = false
|
escaped = false
|
||||||
@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func isValidLabelNameStart(b byte) bool {
|
func isValidLabelNameStart(b byte) bool {
|
||||||
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
|
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
|
||||||
}
|
}
|
||||||
|
|
||||||
func isValidLabelNameContinuation(b byte) bool {
|
func isValidLabelNameContinuation(b byte, quoted bool) bool {
|
||||||
return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
|
return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
|
||||||
}
|
}
|
||||||
|
|
||||||
func isValidMetricNameStart(b byte) bool {
|
func isValidMetricNameStart(b byte) bool {
|
||||||
return isValidLabelNameStart(b) || b == ':'
|
return isValidLabelNameStart(b) || b == ':'
|
||||||
}
|
}
|
||||||
|
|
||||||
func isValidMetricNameContinuation(b byte) bool {
|
func isValidMetricNameContinuation(b byte, quoted bool) bool {
|
||||||
return isValidLabelNameContinuation(b) || b == ':'
|
return isValidLabelNameContinuation(b, quoted) || b == ':'
|
||||||
}
|
}
|
||||||
|
|
||||||
func isBlankOrTab(b byte) bool {
|
func isBlankOrTab(b byte) bool {
|
||||||
|
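The hunks above extend the vendored prometheus/common text parser with quoted, escape-aware (UTF-8) metric- and label-name handling and with cleanup of `p.currentLabelPairs` on every error path. A minimal sketch of the public entry point that drives this state machine is shown below; the sample exposition input is illustrative only, and whether quoted UTF-8 names are accepted depends on the vendored prometheus/common version and the configured validation scheme.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := "# TYPE http_requests_total counter\nhttp_requests_total{method=\"post\",code=\"200\"} 1027\n"

	// TextToMetricFamilies runs the startLabelName/startLabelValue/readToken* state machine patched above.
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Println(name, len(mf.GetMetric()))
	}
}
```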
27  vendor/github.com/prometheus/common/model/labels.go  (generated, vendored)

@@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
 // therewith.
 type LabelName string

-// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
-// names, and iff it's valid UTF-8 if NameValidationScheme is set to
-// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
-// check but a much faster hardcoded implementation.
+// IsValid returns true iff the name matches the pattern of LabelNameRE when
+// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
+// NameValidationScheme is set to UTF8Validation.
 func (ln LabelName) IsValid() bool {
 	if len(ln) == 0 {
 		return false
 	}
 	switch NameValidationScheme {
 	case LegacyValidation:
-		for i, b := range ln {
-			if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
-				return false
-			}
-		}
+		return ln.IsValidLegacy()
 	case UTF8Validation:
 		return utf8.ValidString(string(ln))
 	default:
 		panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
 	}
+}
+
+// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
+// legacy names. It does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValidLegacy() bool {
+	if len(ln) == 0 {
+		return false
+	}
+	for i, b := range ln {
+		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+			return false
+		}
+	}
 	return true
 }
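A hedged sketch of the split introduced above: `IsValid` now dispatches on the global `NameValidationScheme`, while the new `IsValidLegacy` always applies the old hardcoded rule. Names and behavior are taken from the vendored diff; the dotted example name is illustrative.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := model.LabelName("http.method") // valid UTF-8, but not a legacy [a-zA-Z_][a-zA-Z0-9_]* name

	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(name.IsValid())       // true under UTF-8 validation
	fmt.Println(name.IsValidLegacy()) // false regardless of the scheme
}
```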
2  vendor/github.com/prometheus/common/model/labelset_string.go  (generated, vendored)

@@ -11,8 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//go:build go1.21
-
 package model

 import (
39  vendor/github.com/prometheus/common/model/labelset_string_go120.go  (generated, vendored, deleted)

@@ -1,39 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.21
-
-package model
-
-import (
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// String was optimized using functions not available for go 1.20
-// or lower. We keep the old implementation for compatibility with client_golang.
-// Once client golang drops support for go 1.20 (scheduled for August 2024), this
-// file can be removed.
-func (l LabelSet) String() string {
-	labelNames := make([]string, 0, len(l))
-	for name := range l {
-		labelNames = append(labelNames, string(name))
-	}
-	sort.Strings(labelNames)
-	lstrs := make([]string, 0, len(l))
-	for _, name := range labelNames {
-		lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
-	}
-	return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
31  vendor/github.com/prometheus/common/model/metric.go  (generated, vendored)

@@ -34,10 +34,13 @@ var (
 	// goroutines are started.
 	NameValidationScheme = LegacyValidation

-	// NameEscapingScheme defines the default way that names will be
-	// escaped when presented to systems that do not support UTF-8 names. If the
-	// Content-Type "escaping" term is specified, that will override this value.
-	NameEscapingScheme = ValueEncodingEscaping
+	// NameEscapingScheme defines the default way that names will be escaped when
+	// presented to systems that do not support UTF-8 names. If the Content-Type
+	// "escaping" term is specified, that will override this value.
+	// NameEscapingScheme should not be set to the NoEscaping value. That string
+	// is used in content negotiation to indicate that a system supports UTF-8 and
+	// has that feature enabled.
+	NameEscapingScheme = UnderscoreEscaping
 )

 // ValidationScheme is a Go enum for determining how metric and label names will
@@ -161,7 +164,7 @@ func (m Metric) FastFingerprint() Fingerprint {
 func IsValidMetricName(n LabelValue) bool {
 	switch NameValidationScheme {
 	case LegacyValidation:
-		return IsValidLegacyMetricName(n)
+		return IsValidLegacyMetricName(string(n))
 	case UTF8Validation:
 		if len(n) == 0 {
 			return false
@@ -176,7 +179,7 @@ func IsValidMetricName(n LabelValue) bool {
 // legacy validation scheme regardless of the value of NameValidationScheme.
 // This function, however, does not use MetricNameRE for the check but a much
 // faster hardcoded implementation.
-func IsValidLegacyMetricName(n LabelValue) bool {
+func IsValidLegacyMetricName(n string) bool {
 	if len(n) == 0 {
 		return false
 	}
@@ -208,7 +211,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 	}

 	// If the name is nil, copy as-is, don't try to escape.
-	if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
+	if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
 		out.Name = v.Name
 	} else {
 		out.Name = proto.String(EscapeName(v.GetName(), scheme))
@@ -230,7 +233,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 		for _, l := range m.Label {
 			if l.GetName() == MetricNameLabel {
-				if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+				if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
 					escaped.Label = append(escaped.Label, l)
 					continue
 				}
@@ -240,7 +243,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 				})
 				continue
 			}
-			if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
+			if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
 				escaped.Label = append(escaped.Label, l)
 				continue
 			}
@@ -256,10 +259,10 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 func metricNeedsEscaping(m *dto.Metric) bool {
 	for _, l := range m.Label {
-		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
 			return true
 		}
-		if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
+		if !IsValidLegacyMetricName(l.GetName()) {
 			return true
 		}
 	}
@@ -283,7 +286,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
 	case NoEscaping:
 		return name
 	case UnderscoreEscaping:
-		if IsValidLegacyMetricName(LabelValue(name)) {
+		if IsValidLegacyMetricName(name) {
 			return name
 		}
 		for i, b := range name {
@@ -309,7 +312,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
 		}
 		return escaped.String()
 	case ValueEncodingEscaping:
-		if IsValidLegacyMetricName(LabelValue(name)) {
+		if IsValidLegacyMetricName(name) {
 			return name
 		}
 		escaped.WriteString("U__")
@@ -452,6 +455,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) {
 	case EscapeValues:
 		return ValueEncodingEscaping, nil
 	default:
-		return NoEscaping, fmt.Errorf("unknown format scheme " + s)
+		return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
 	}
 }
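A hedged sketch of the escaping entry points touched above: `IsValidLegacyMetricName` now takes a plain `string`, the default `NameEscapingScheme` is `UnderscoreEscaping`, and `ToEscapingScheme` formats unknown schemes with `%s`. The printed outputs are what the shown implementation should produce, not verified against a run.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Underscore escaping replaces legacy-invalid characters with '_'.
	fmt.Println(model.EscapeName("my.metric", model.UnderscoreEscaping))

	// Value-encoding escaping keeps the name reversible behind a U__ prefix.
	fmt.Println(model.EscapeName("my.metric", model.ValueEncodingEscaping))

	// Unknown schemes are reported with the fixed fmt.Errorf("... %s", s) form.
	if _, err := model.ToEscapingScheme("bogus"); err != nil {
		fmt.Println(err)
	}
}
```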
8  vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go  (generated, vendored)

@@ -7,6 +7,7 @@ import (
 	"context"
 	"os"

+	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
 	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
 	"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
 	"go.opentelemetry.io/otel/sdk/log"
@@ -31,6 +32,8 @@ var logsSignal = newSignal[log.Exporter]("OTEL_LOGS_EXPORTER")
 // supported values:
 //   - "http/protobuf" (default) - protobuf-encoded data over HTTP connection;
 //     see: [go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp]
+//   - "grpc" - gRPC with protobuf-encoded data over HTTP/2 connection;
+//     see: [go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc]
 //
 // OTEL_EXPORTER_OTLP_LOGS_PROTOCOL defines OTLP exporter's transport protocol for the logs signal;
 // supported values are the same as OTEL_EXPORTER_OTLP_PROTOCOL.
@@ -67,9 +70,8 @@ func init() {
 		switch proto {
-		// grpc is not supported yet, should comment out when it is supported
-		// case "grpc":
-		// 	return otlploggrpc.New(ctx)
+		case "grpc":
+			return otlploggrpc.New(ctx)
 		case "http/protobuf":
 			return otlploghttp.New(ctx)
 		default:
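With the hunk above, the autoexport package accepts `grpc` as a logs protocol instead of rejecting it. A hedged usage sketch follows; it assumes autoexport's `NewLogExporter` constructor as shipped in the v0.57.x contrib module (check the package docs for the exact option set), and the endpoint comes from the usual `OTEL_EXPORTER_OTLP_*` environment variables.

```go
package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/contrib/exporters/autoexport"
)

func main() {
	// Previously only "http/protobuf" (and console/none) were accepted here.
	os.Setenv("OTEL_LOGS_EXPORTER", "otlp")
	os.Setenv("OTEL_EXPORTER_OTLP_LOGS_PROTOCOL", "grpc")

	exp, err := autoexport.NewLogExporter(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer exp.Shutdown(context.Background())
}
```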
7  vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go  (generated, vendored)

@@ -18,13 +18,6 @@ const (
 	WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
 )
-
-// Client HTTP metrics.
-const (
-	clientRequestSize  = "http.client.request.size"  // Outgoing request bytes total
-	clientResponseSize = "http.client.response.size" // Outgoing response bytes total
-	clientDuration     = "http.client.duration"      // Outgoing end to end duration, milliseconds
-)

 // Filter is a predicate used to determine whether a given http.request should
 // be traced. A Filter must return true if the request should be traced.
 type Filter func(*http.Request) bool
19  vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go  (generated, vendored)

@@ -81,12 +81,6 @@ func (h *middleware) configure(c *config) {
 	h.semconv = semconv.NewHTTPServer(c.Meter)
 }
-
-func handleErr(err error) {
-	if err != nil {
-		otel.Handle(err)
-	}
-}

 // serveHTTP sets up tracing and calls the given next http.Handler with the span
 // context injected into the request context.
 func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
@@ -123,6 +117,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
 		}
 	}

+	if startTime := StartTimeFromContext(ctx); !startTime.IsZero() {
+		opts = append(opts, trace.WithTimestamp(startTime))
+		requestStartTime = startTime
+	}
+
 	ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
 	defer span.End()
@@ -190,14 +189,18 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
 	// Use floating point division here for higher precision (instead of Millisecond method).
 	elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)

-	h.semconv.RecordMetrics(ctx, semconv.MetricData{
+	h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{
 		ServerName:   h.server,
+		ResponseSize: bytesWritten,
+		MetricAttributes: semconv.MetricAttributes{
 			Req:                  r,
 			StatusCode:           statusCode,
 			AdditionalAttributes: labeler.Get(),
+		},
+		MetricData: semconv.MetricData{
 			RequestSize: bw.BytesRead(),
-		ResponseSize: bytesWritten,
 			ElapsedTime: elapsedTime,
+		},
 	})
 }
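The middleware still exposes the same public surface; what changed is that the labeler attributes now travel to `RecordMetrics` inside `MetricAttributes`. A minimal, hedged sketch of that surface (the `tenant` attribute is purely illustrative):

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		// Per-request labels added here end up on the server metrics recorded above.
		if labeler, ok := otelhttp.LabelerFromContext(r.Context()); ok {
			labeler.Add(attribute.String("tenant", "demo"))
		}
		_, _ = w.Write([]byte("ok"))
	})

	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(mux, "server"))
}
```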
@@ -44,7 +44,9 @@ func (w *RespWriterWrapper) Write(p []byte) (int, error) {
 	w.mu.Lock()
 	defer w.mu.Unlock()

+	if !w.wroteHeader {
 		w.writeHeader(http.StatusOK)
+	}
+
 	n, err := w.ResponseWriter.Write(p)
 	n1 := int64(n)
@@ -80,7 +82,12 @@ func (w *RespWriterWrapper) writeHeader(statusCode int) {

 // Flush implements [http.Flusher].
 func (w *RespWriterWrapper) Flush() {
-	w.WriteHeader(http.StatusOK)
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if !w.wroteHeader {
+		w.writeHeader(http.StatusOK)
+	}

 	if f, ok := w.ResponseWriter.(http.Flusher); ok {
 		f.Flush()
|
|||||||
return codes.Unset, ""
|
return codes.Unset, ""
|
||||||
}
|
}
|
||||||
|
|
||||||
type MetricData struct {
|
type ServerMetricData struct {
|
||||||
ServerName string
|
ServerName string
|
||||||
|
ResponseSize int64
|
||||||
|
|
||||||
|
MetricData
|
||||||
|
MetricAttributes
|
||||||
|
}
|
||||||
|
|
||||||
|
type MetricAttributes struct {
|
||||||
Req *http.Request
|
Req *http.Request
|
||||||
StatusCode int
|
StatusCode int
|
||||||
AdditionalAttributes []attribute.KeyValue
|
AdditionalAttributes []attribute.KeyValue
|
||||||
|
}
|
||||||
|
|
||||||
|
type MetricData struct {
|
||||||
RequestSize int64
|
RequestSize int64
|
||||||
ResponseSize int64
|
|
||||||
ElapsedTime float64
|
ElapsedTime float64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) {
|
func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
|
||||||
if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
|
if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
|
||||||
// This will happen if an HTTPServer{} is used insted of NewHTTPServer.
|
// This will happen if an HTTPServer{} is used insted of NewHTTPServer.
|
||||||
return
|
return
|
||||||
@ -102,7 +110,7 @@ func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) {
|
|||||||
|
|
||||||
attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
|
attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
|
||||||
o := metric.WithAttributeSet(attribute.NewSet(attributes...))
|
o := metric.WithAttributeSet(attribute.NewSet(attributes...))
|
||||||
addOpts := []metric.AddOption{o} // Allocate vararg slice once.
|
addOpts := []metric.AddOption{o}
|
||||||
s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
|
s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
|
||||||
s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
|
s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
|
||||||
s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
|
s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
|
||||||
@ -122,11 +130,20 @@ func NewHTTPServer(meter metric.Meter) HTTPServer {
|
|||||||
|
|
||||||
type HTTPClient struct {
|
type HTTPClient struct {
|
||||||
duplicate bool
|
duplicate bool
|
||||||
|
|
||||||
|
// old metrics
|
||||||
|
requestBytesCounter metric.Int64Counter
|
||||||
|
responseBytesCounter metric.Int64Counter
|
||||||
|
latencyMeasure metric.Float64Histogram
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewHTTPClient() HTTPClient {
|
func NewHTTPClient(meter metric.Meter) HTTPClient {
|
||||||
env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
|
env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
|
||||||
return HTTPClient{duplicate: env == "http/dup"}
|
client := HTTPClient{
|
||||||
|
duplicate: env == "http/dup",
|
||||||
|
}
|
||||||
|
client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter)
|
||||||
|
return client
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestTraceAttrs returns attributes for an HTTP request made by a client.
|
// RequestTraceAttrs returns attributes for an HTTP request made by a client.
|
||||||
@ -163,3 +180,48 @@ func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
|
|||||||
|
|
||||||
return attribute.KeyValue{}
|
return attribute.KeyValue{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type MetricOpts struct {
|
||||||
|
measurement metric.MeasurementOption
|
||||||
|
addOptions metric.AddOption
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o MetricOpts) MeasurementOption() metric.MeasurementOption {
|
||||||
|
return o.measurement
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o MetricOpts) AddOptions() metric.AddOption {
|
||||||
|
return o.addOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts {
|
||||||
|
attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
|
||||||
|
// TODO: Duplicate Metrics
|
||||||
|
set := metric.WithAttributeSet(attribute.NewSet(attributes...))
|
||||||
|
return MetricOpts{
|
||||||
|
measurement: set,
|
||||||
|
addOptions: set,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) {
|
||||||
|
if s.requestBytesCounter == nil || s.latencyMeasure == nil {
|
||||||
|
// This will happen if an HTTPClient{} is used insted of NewHTTPClient().
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions())
|
||||||
|
s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption())
|
||||||
|
|
||||||
|
// TODO: Duplicate Metrics
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) {
|
||||||
|
if s.responseBytesCounter == nil {
|
||||||
|
// This will happen if an HTTPClient{} is used insted of NewHTTPClient().
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s.responseBytesCounter.Add(ctx, responseData, opts)
|
||||||
|
// TODO: Duplicate Metrics
|
||||||
|
}
|
||||||
|
@@ -144,7 +144,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status
 	attributes := slices.Grow(additionalAttributes, n)
 	attributes = append(attributes,
-		o.methodMetric(req.Method),
+		standardizeHTTPMethodMetric(req.Method),
 		o.scheme(req.TLS != nil),
 		semconv.NetHostName(host))
@@ -164,16 +164,6 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status
 	return attributes
 }
-
-func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue {
-	method = strings.ToUpper(method)
-	switch method {
-	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
-	default:
-		method = "_OTHER"
-	}
-	return semconv.HTTPMethod(method)
-}

 func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
 	if https {
 		return semconv.HTTPSchemeHTTPS
@@ -190,3 +180,95 @@ func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue
 func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
 	return semconvutil.HTTPClientResponse(resp)
 }
+
+func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.method             string
+	http.status_code        int
+	net.peer.name           string
+	net.peer.port           int
+	*/
+
+	n := 2 // method, peer name.
+	var h string
+	if req.URL != nil {
+		h = req.URL.Host
+	}
+	var requestHost string
+	var requestPort int
+	for _, hostport := range []string{h, req.Header.Get("Host")} {
+		requestHost, requestPort = splitHostPort(hostport)
+		if requestHost != "" || requestPort > 0 {
+			break
+		}
+	}
+
+	port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+	if port > 0 {
+		n++
+	}
+
+	if statusCode > 0 {
+		n++
+	}
+
+	attributes := slices.Grow(additionalAttributes, n)
+	attributes = append(attributes,
+		standardizeHTTPMethodMetric(req.Method),
+		semconv.NetPeerName(requestHost),
+	)
+
+	if port > 0 {
+		attributes = append(attributes, semconv.NetPeerPort(port))
+	}
+
+	if statusCode > 0 {
+		attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
+	}
+	return attributes
+}
+
+// Client HTTP metrics.
+const (
+	clientRequestSize  = "http.client.request.size"  // Incoming request bytes total
+	clientResponseSize = "http.client.response.size" // Incoming response bytes total
+	clientDuration     = "http.client.duration"      // Incoming end to end duration, milliseconds
+)
+
+func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
+	if meter == nil {
+		return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
+	}
+	requestBytesCounter, err := meter.Int64Counter(
+		clientRequestSize,
+		metric.WithUnit("By"),
+		metric.WithDescription("Measures the size of HTTP request messages."),
+	)
+	handleErr(err)
+
+	responseBytesCounter, err := meter.Int64Counter(
+		clientResponseSize,
+		metric.WithUnit("By"),
+		metric.WithDescription("Measures the size of HTTP response messages."),
+	)
+	handleErr(err)
+
+	latencyMeasure, err := meter.Float64Histogram(
+		clientDuration,
+		metric.WithUnit("ms"),
+		metric.WithDescription("Measures the duration of outbound HTTP requests."),
+	)
+	handleErr(err)
+
+	return requestBytesCounter, responseBytesCounter, latencyMeasure
+}
+
+func standardizeHTTPMethodMetric(method string) attribute.KeyValue {
+	method = strings.ToUpper(method)
+	switch method {
+	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
+	default:
+		method = "_OTHER"
+	}
+	return semconv.HTTPMethod(method)
+}
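The server-only `methodMetric` helper is replaced by the shared `standardizeHTTPMethodMetric`, which collapses unknown verbs to `_OTHER` to keep metric cardinality bounded. The helper lives in an internal package, so the sketch below restates the same logic as standalone, hedged example code rather than importing it.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// standardizeMethod mirrors the normalization shown in the hunk above.
func standardizeMethod(method string) string {
	method = strings.ToUpper(method)
	switch method {
	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead,
		http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
		return method
	default:
		return "_OTHER" // unknown verbs are collapsed to avoid unbounded cardinality
	}
}

func main() {
	fmt.Println(standardizeMethod("get"))   // GET
	fmt.Println(standardizeMethod("PURGE")) // _OTHER
}
```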
29  vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go  (generated, vendored, new file)

@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"time"
+)
+
+type startTimeContextKeyType int
+
+const startTimeContextKey startTimeContextKeyType = 0
+
+// ContextWithStartTime returns a new context with the provided start time. The
+// start time will be used for metrics and traces emitted by the
+// instrumentation. Only one labeller can be injected into the context.
+// Injecting it multiple times will override the previous calls.
+func ContextWithStartTime(parent context.Context, start time.Time) context.Context {
+	return context.WithValue(parent, startTimeContextKey, start)
+}
+
+// StartTimeFromContext retrieves a time.Time from the provided context if one
+// is available. If no start time was found in the provided context, a new,
+// zero start time is returned and the second return value is false.
+func StartTimeFromContext(ctx context.Context) time.Time {
+	t, _ := ctx.Value(startTimeContextKey).(time.Time)
+	return t
+}
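A hedged sketch of how the two exported helpers above can be used: an outer middleware that captured an earlier receive time (for example in front of a queue) injects it, and the otelhttp handler picks it up via `StartTimeFromContext` to backdate the span and latency. Handler names and ports are illustrative.

```go
package main

import (
	"net/http"
	"time"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// withStartTime stamps the moment the request was first seen into the context.
func withStartTime(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := otelhttp.ContextWithStartTime(r.Context(), time.Now())
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

func main() {
	inner := otelhttp.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	}), "server")

	_ = http.ListenAndServe(":8080", withStartTime(inner))
}
```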
54  vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go  (generated, vendored)

@@ -13,11 +13,9 @@ import (
 	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
 	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
-	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
-	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/propagation"
 	"go.opentelemetry.io/otel/trace"
@@ -29,7 +27,6 @@ type Transport struct {
 	rt http.RoundTripper

 	tracer            trace.Tracer
-	meter             metric.Meter
 	propagators       propagation.TextMapPropagator
 	spanStartOptions  []trace.SpanStartOption
 	filters           []Filter
@@ -38,9 +35,6 @@ type Transport struct {
 	metricAttributesFn func(*http.Request) []attribute.KeyValue

 	semconv semconv.HTTPClient
-	requestBytesCounter  metric.Int64Counter
-	responseBytesCounter metric.Int64Counter
-	latencyMeasure       metric.Float64Histogram
 }

 var _ http.RoundTripper = &Transport{}
@@ -58,7 +52,6 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
 	t := Transport{
 		rt: base,
-		semconv: semconv.NewHTTPClient(),
 	}

 	defaultOpts := []Option{
@@ -68,46 +61,21 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
 	c := newConfig(append(defaultOpts, opts...)...)
 	t.applyConfig(c)
-	t.createMeasures()

 	return &t
 }

 func (t *Transport) applyConfig(c *config) {
 	t.tracer = c.Tracer
-	t.meter = c.Meter
 	t.propagators = c.Propagators
 	t.spanStartOptions = c.SpanStartOptions
 	t.filters = c.Filters
 	t.spanNameFormatter = c.SpanNameFormatter
 	t.clientTrace = c.ClientTrace
+	t.semconv = semconv.NewHTTPClient(c.Meter)
 	t.metricAttributesFn = c.MetricAttributesFn
 }

-func (t *Transport) createMeasures() {
-	var err error
-	t.requestBytesCounter, err = t.meter.Int64Counter(
-		clientRequestSize,
-		metric.WithUnit("By"),
-		metric.WithDescription("Measures the size of HTTP request messages."),
-	)
-	handleErr(err)
-
-	t.responseBytesCounter, err = t.meter.Int64Counter(
-		clientResponseSize,
-		metric.WithUnit("By"),
-		metric.WithDescription("Measures the size of HTTP response messages."),
-	)
-	handleErr(err)
-
-	t.latencyMeasure, err = t.meter.Float64Histogram(
-		clientDuration,
-		metric.WithUnit("ms"),
-		metric.WithDescription("Measures the duration of outbound HTTP requests."),
-	)
-	handleErr(err)
-}
-
 func defaultTransportFormatter(_ string, r *http.Request) string {
 	return "HTTP " + r.Method
 }
@@ -177,16 +145,15 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
 	}

 	// metrics
-	metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...)
-	if res.StatusCode > 0 {
-		metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode))
-	}
-	o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...))
-
-	t.requestBytesCounter.Add(ctx, bw.BytesRead(), o)
+	metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{
+		Req:                  r,
+		StatusCode:           res.StatusCode,
+		AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...),
+	})
+
 	// For handling response bytes we leverage a callback when the client reads the http response
 	readRecordFunc := func(n int64) {
-		t.responseBytesCounter.Add(ctx, n, o)
+		t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions())
 	}

 	// traces
@@ -198,9 +165,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
 	// Use floating point division here for higher precision (instead of Millisecond method).
 	elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)

-	t.latencyMeasure.Record(ctx, elapsedTime, o)
+	t.semconv.RecordMetrics(ctx, semconv.MetricData{
+		RequestSize: bw.BytesRead(),
+		ElapsedTime: elapsedTime,
+	}, metricOpts)

-	return res, err
+	return res, nil
 }

 func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
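From the caller's point of view nothing changes: the Transport now builds its client metrics through the internal `semconv.HTTPClient`, configured from the meter provider passed via options (or the global provider when omitted). A hedged usage sketch:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel"
)

func main() {
	client := &http.Client{
		Transport: otelhttp.NewTransport(
			http.DefaultTransport,
			otelhttp.WithMeterProvider(otel.GetMeterProvider()),
		),
	}

	resp, err := client.Get("http://localhost:8080/hello") // illustrative endpoint
	if err == nil {
		resp.Body.Close()
	}
}
```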
2  vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go  (generated, vendored)

@@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http
 // Version is the current release version of the otelhttp instrumentation.
 func Version() string {
-	return "0.54.0"
+	return "0.57.0"
 	// This string is updated by the pre_release.sh script during release
 }
8  vendor/go.opentelemetry.io/otel/.gitignore  (generated, vendored)

@@ -12,11 +12,3 @@ go.work
 go.work.sum

 gen/
-
-/example/dice/dice
-/example/namedtracer/namedtracer
-/example/otel-collector/otel-collector
-/example/opencensus/opencensus
-/example/passthrough/passthrough
-/example/prometheus/prometheus
-/example/zipkin/zipkin
13  vendor/go.opentelemetry.io/otel/.golangci.yml  (generated, vendored)

@@ -25,6 +25,7 @@ linters:
    - revive
    - staticcheck
    - tenv
+   - testifylint
    - typecheck
    - unconvert
    - unused
@@ -64,12 +65,12 @@ issues:
     - path: _test\.go
       linters:
         - gosec
-    # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
+    # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
     # as we commonly use it in tests and examples.
     - text: "G404:"
       linters:
         - gosec
-    # Igonoring gosec G402: TLS MinVersion too low
+    # Ignoring gosec G402: TLS MinVersion too low
     # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
     - text: "G402: TLS MinVersion too low."
       linters:
@@ -126,8 +127,6 @@ linters-settings:
       - "**/metric/**/*.go"
       - "**/bridge/*.go"
       - "**/bridge/**/*.go"
-      - "**/example/*.go"
-      - "**/example/**/*.go"
       - "**/trace/*.go"
       - "**/trace/**/*.go"
       - "**/log/*.go"
@@ -302,3 +301,9 @@ linters-settings:
       # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
       - name: waitgroup-by-value
         disabled: false
+  testifylint:
+    enable-all: true
+    disable:
+      - float-compare
+      - go-require
+      - require-error
103
vendor/go.opentelemetry.io/otel/CHANGELOG.md
generated
vendored
103
vendor/go.opentelemetry.io/otel/CHANGELOG.md
generated
vendored
@ -11,6 +11,100 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
|
|||||||
<!-- Released section -->
|
<!-- Released section -->
|
||||||
<!-- Don't change this section unless doing release -->
|
<!-- Don't change this section unless doing release -->
|
||||||
|
|
||||||
|
## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850)
|
||||||
|
- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850)
|
||||||
|
- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861)
|
||||||
|
- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861)
|
||||||
|
- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861)
|
||||||
|
- The `go.opentelemetry.io/otel/semconv/v1.27.0` package.
|
||||||
|
The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894)
|
||||||
|
- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903)
|
||||||
|
- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927)
|
||||||
|
- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934)
|
||||||
|
- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934)
|
||||||
|
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935)
|
||||||
|
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935)
|
||||||
|
- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933)
|
||||||
|
- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933)
|
||||||
|
- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924)
|
||||||
|
- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926)
|
||||||
|
- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925)
|
||||||
|
- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931)
|
||||||
|
- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881)
|
||||||
|
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892)
|
||||||
|
- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911)
|
||||||
|
- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915)
|
||||||
|
- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
|
||||||
|
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944)
|
||||||
|
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944)
|
||||||
|
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944)
|
||||||
|
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944)
|
||||||
|
- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900)
|
||||||
|
|
||||||
|
### Removed
|
||||||
|
|
||||||
|
- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930)
|
||||||
|
|
||||||
|
## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
|
||||||
|
- Add `WithExportBufferSize` option to log batch processor.(#5877)
|
||||||
|
|
||||||
|
### Changed

- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778)
- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)

### Deprecated

- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)

### Fixed

- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803)
- Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827)
- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827)

## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09

### Added

- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739)
- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773)
- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773)
- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755)

### Fixed

- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754)
- Fix panic on instruments creation when setting meter provider. (#5758)
- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780)

### Removed

- Drop support for [Go 1.21]. (#5736, #5740, #5800)

## [1.29.0/0.51.0/0.5.0] 2024-08-23

This release is the last to support [Go 1.21].

@ -1895,7 +1989,7 @@ with major version 0.

- Setting error status while recording error with Span from oteltest package. (#1729)
- The concept of a remote and local Span stored in a context is unified to just the current Span.
  Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span.
  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
  If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
  This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)

@ -2469,7 +2563,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco

- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
- Update otel-colector example to use the v0.5.0 collector. (#915)
- Update otel-collector example to use the v0.5.0 collector. (#915)
- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.

@ -3062,7 +3156,10 @@ It contains api and sdk for trace and meter.

- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.

[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...HEAD
[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0
[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
4 vendor/go.opentelemetry.io/otel/CODEOWNERS generated vendored

@ -12,6 +12,6 @@

# https://help.github.com/en/articles/about-code-owners
#

* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
* @MrAlias @XSAM @dashpole @pellared @dmathieu

CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu
CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu
24 vendor/go.opentelemetry.io/otel/CONTRIBUTING.md generated vendored

@ -578,7 +578,10 @@ See also:

The tests should never leak goroutines.

Use the term `ConcurrentSafe` in the test name when it aims to verify the
absence of race conditions.
absence of race conditions. The top-level tests with this term will be run
many times in the `test-concurrent-safe` CI job to increase the chance of
catching concurrency issues. This does not apply to subtests when this term
is not in their root name.

### Internal packages

@ -626,13 +629,14 @@ should be canceled.

## Approvers and Maintainers

### Approvers
### Triagers

- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent

### Approvers

### Maintainers

- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Damien Mathieu](https://github.com/dmathieu), Elastic
- [David Ashpole](https://github.com/dashpole), Google
- [Robert Pająk](https://github.com/pellared), Splunk

@ -641,11 +645,13 @@ should be canceled.

### Emeritus

- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Aaron Clawson](https://github.com/MadVikingGod)
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Anthony Mirabella](https://github.com/Aneurysm9)
- [Josh MacDonald](https://github.com/jmacd), LightStep
- [Chester Cheung](https://github.com/hanyuancheung)
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
- [Evan Torrie](https://github.com/evantorrie)
- [Evan Torrie](https://github.com/evantorrie), Yahoo
- [Gustavo Silva Paiva](https://github.com/paivagustavo)
- [Josh MacDonald](https://github.com/jmacd)
- [Liz Fong-Jones](https://github.com/lizthegrey)

### Become an Approver or a Maintainer
11 vendor/go.opentelemetry.io/otel/Makefile generated vendored

@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer

PORTO = $(TOOLS)/porto
$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto

GOJQ = $(TOOLS)/gojq
$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq

GOTMPL = $(TOOLS)/gotmpl
$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl

@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck

$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck

.PHONY: tools
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)

# Virtualized python tools via docker

@ -145,12 +142,14 @@ build-tests/%:

# Tests

TEST_TARGETS := test-default test-bench test-short test-verbose test-race
TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
.PHONY: $(TEST_TARGETS) test
test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short
test-verbose: ARGS=-v -race
test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
test-concurrent-safe: TIMEOUT=120
$(TEST_TARGETS): test
test: $(OTEL_GO_MOD_DIRS:%=test/%)
test/%: DIR=$*
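The new `test-concurrent-safe` target picks up tests whose top-level name contains `ConcurrentSafe` (per the CONTRIBUTING.md guidance above) and runs them repeatedly under the race detector (`-run=ConcurrentSafe -count=100 -race`). A minimal sketch of the kind of test that target is meant to exercise; the `counter` type and test name here are hypothetical, not taken from the vendored module:

```go
package counter_test

import (
	"sync"
	"sync/atomic"
	"testing"
)

// counter is a stand-in for any type whose concurrency safety is under test.
type counter struct{ n atomic.Int64 }

func (c *counter) Inc() { c.n.Add(1) }

// TestCounterConcurrentSafe matches -run=ConcurrentSafe, so the
// test-concurrent-safe target runs it 100 times with -race enabled.
func TestCounterConcurrentSafe(t *testing.T) {
	var c counter
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Inc()
		}()
	}
	wg.Wait()
	if got := c.n.Load(); got != 10 {
		t.Fatalf("got %d increments, want 10", got)
	}
}
```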
@ -261,7 +260,7 @@ SEMCONVPKG ?= "semconv/"

semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT)
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
[ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)"
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
11 vendor/go.opentelemetry.io/otel/README.md generated vendored

@ -51,25 +51,18 @@ Currently, this project supports the following environments.

|----------|------------|--------------|
| Ubuntu | 1.23 | amd64 |
| Ubuntu | 1.22 | amd64 |
| Ubuntu | 1.21 | amd64 |
| Ubuntu | 1.23 | 386 |
| Ubuntu | 1.22 | 386 |
| Ubuntu | 1.21 | 386 |
| Linux | 1.23 | arm64 |
| Linux | 1.22 | arm64 |
| Linux | 1.21 | arm64 |
| macOS 13 | 1.23 | amd64 |
| macOS 13 | 1.22 | amd64 |
| macOS 13 | 1.21 | amd64 |
| macOS | 1.23 | arm64 |
| macOS | 1.22 | arm64 |
| macOS | 1.21 | arm64 |
| Windows | 1.23 | amd64 |
| Windows | 1.22 | amd64 |
| Windows | 1.21 | amd64 |
| Windows | 1.23 | 386 |
| Windows | 1.22 | 386 |
| Windows | 1.21 | 386 |

While this project should work for other systems, no compatibility guarantees
are made for those systems currently.

@ -96,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want

to build your own instrumentation for your application directly you will need
to use the
[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
package. The included [examples](./example/) are a good way to see some
package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples)
practical uses of this process.
are a good way to see some practical uses of this process.

### Export
11 vendor/go.opentelemetry.io/otel/RELEASING.md generated vendored

@ -111,17 +111,6 @@ It is critical you make sure the version you push upstream is correct.

Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release.

## Verify Examples

After releasing verify that examples build outside of the repository.

```
./verify_examples.sh
```

The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them.
This ensures they build with the published release, not the local copy.

## Post-Release

### Contrib Repository
40 vendor/go.opentelemetry.io/otel/attribute/set.go generated vendored

@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct {

func computeDistinctFixed(kvs []KeyValue) interface{} {
	switch len(kvs) {
	case 1:
		ptr := new([1]KeyValue)
		return [1]KeyValue(kvs)
		copy((*ptr)[:], kvs)
		return *ptr
	case 2:
		ptr := new([2]KeyValue)
		return [2]KeyValue(kvs)
		copy((*ptr)[:], kvs)
		return *ptr
	case 3:
		ptr := new([3]KeyValue)
		return [3]KeyValue(kvs)
		copy((*ptr)[:], kvs)
		return *ptr
	case 4:
		ptr := new([4]KeyValue)
		return [4]KeyValue(kvs)
		copy((*ptr)[:], kvs)
		return *ptr
	case 5:
		ptr := new([5]KeyValue)
		return [5]KeyValue(kvs)
		copy((*ptr)[:], kvs)
		return *ptr
	case 6:
		ptr := new([6]KeyValue)
		return [6]KeyValue(kvs)
		copy((*ptr)[:], kvs)
		return *ptr
	case 7:
		ptr := new([7]KeyValue)
		return [7]KeyValue(kvs)
		copy((*ptr)[:], kvs)
		return *ptr
	case 8:
		ptr := new([8]KeyValue)
		return [8]KeyValue(kvs)
		copy((*ptr)[:], kvs)
		return *ptr
	case 9:
		ptr := new([9]KeyValue)
		return [9]KeyValue(kvs)
		copy((*ptr)[:], kvs)
		return *ptr
	case 10:
		ptr := new([10]KeyValue)
		return [10]KeyValue(kvs)
		copy((*ptr)[:], kvs)
		return *ptr
	default:
		return nil
	}
}
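In the `computeDistinctFixed` hunk above, each `ptr := new([N]KeyValue)` / `copy` / `return *ptr` sequence is the old implementation and the single `return [N]KeyValue(kvs)` line is its replacement: since Go 1.20 a slice can be converted directly to an array value, which copies the elements and panics only if the slice is shorter than the array. A small self-contained sketch of the conversion (the `KeyValue` type here is illustrative, not the vendored one):

```go
package main

import "fmt"

type KeyValue struct{ Key, Value string }

func main() {
	kvs := []KeyValue{{"service.name", "api"}, {"host.name", "db-1"}}

	// Go 1.20+: convert a slice to an array value; this copies the first
	// len(arr) elements and panics if len(kvs) < 2.
	arr := [2]KeyValue(kvs)

	fmt.Println(arr[0].Key, arr[1].Key) // service.name host.name
}
```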
6 vendor/go.opentelemetry.io/otel/baggage/baggage.go generated vendored

@ -50,7 +50,7 @@ type Property struct {

// component boundaries may impose their own restrictions on Property key.
// For example, the W3C Baggage specification restricts the Property keys to strings that
// satisfy the token definition from RFC7230, Section 3.2.6.
// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key.
// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key.
func NewKeyProperty(key string) (Property, error) {
	if !validateBaggageName(key) {
		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)

@ -90,7 +90,7 @@ func NewKeyValueProperty(key, value string) (Property, error) {

// component boundaries may impose their own restrictions on Property key.
// For example, the W3C Baggage specification restricts the Property keys to strings that
// satisfy the token definition from RFC7230, Section 3.2.6.
// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key.
// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key.
func NewKeyValuePropertyRaw(key, value string) (Property, error) {
	if !validateBaggageName(key) {
		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)

@ -287,7 +287,7 @@ func NewMember(key, value string, props ...Property) (Member, error) {

// component boundaries may impose their own restrictions on baggage key.
// For example, the W3C Baggage specification restricts the baggage keys to strings that
// satisfy the token definition from RFC7230, Section 3.2.6.
// For maximum compatibility, alpha-numeric value are strongly recommended to be used as baggage key.
// For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key.
func NewMemberRaw(key, value string, props ...Property) (Member, error) {
	m := Member{
		key: key,
|
201
vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE
generated
vendored
Normal file
201
vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
3 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md generated vendored Normal file

@ -0,0 +1,3 @@

# OTLP Log gRPC Exporter

[PkgGoDev](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc)
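For context on how the newly vendored package is used (a hedged sketch, not part of this diff; the option names appear in the vendored `config.go` below, while `otlploggrpc.New` is assumed to be the exporter constructor documented at the pkg.go.dev link above, and the endpoint and header values are hypothetical):

```go
package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
)

func main() {
	ctx := context.Background()

	// Explicit options override the OTEL_EXPORTER_OTLP_LOGS_* /
	// OTEL_EXPORTER_OTLP_* environment variables; unset values fall back
	// to the defaults (localhost:4317, 10s timeout).
	exp, err := otlploggrpc.New(ctx,
		otlploggrpc.WithEndpoint("collector.internal:4317"), // hypothetical endpoint
		otlploggrpc.WithInsecure(),
		otlploggrpc.WithTimeout(5*time.Second),
		otlploggrpc.WithHeaders(map[string]string{"x-tenant": "demo"}), // hypothetical header
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```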
258 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go generated vendored Normal file

@ -0,0 +1,258 @@
|
|||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/backoff"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
|
"google.golang.org/grpc/encoding/gzip"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry"
|
||||||
|
collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1"
|
||||||
|
logpb "go.opentelemetry.io/proto/otlp/logs/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The methods of this type are not expected to be called concurrently.
|
||||||
|
type client struct {
|
||||||
|
metadata metadata.MD
|
||||||
|
exportTimeout time.Duration
|
||||||
|
requestFunc retry.RequestFunc
|
||||||
|
|
||||||
|
// ourConn keeps track of where conn was created: true if created here in
|
||||||
|
// NewClient, or false if passed with an option. This is important on
|
||||||
|
// Shutdown as conn should only be closed if we created it. Otherwise,
|
||||||
|
// it is up to the processes that passed conn to close it.
|
||||||
|
ourConn bool
|
||||||
|
conn *grpc.ClientConn
|
||||||
|
lsc collogpb.LogsServiceClient
|
||||||
|
}
|
||||||
|
|
||||||
|
// Used for testing.
|
||||||
|
var newGRPCClientFn = grpc.NewClient
|
||||||
|
|
||||||
|
// newClient creates a new gRPC log client.
|
||||||
|
func newClient(cfg config) (*client, error) {
|
||||||
|
c := &client{
|
||||||
|
exportTimeout: cfg.timeout.Value,
|
||||||
|
requestFunc: cfg.retryCfg.Value.RequestFunc(retryable),
|
||||||
|
conn: cfg.gRPCConn.Value,
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(cfg.headers.Value) > 0 {
|
||||||
|
c.metadata = metadata.New(cfg.headers.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.conn == nil {
|
||||||
|
// If the caller did not provide a ClientConn when the client was
|
||||||
|
// created, create one using the configuration they did provide.
|
||||||
|
dialOpts := newGRPCDialOptions(cfg)
|
||||||
|
|
||||||
|
conn, err := newGRPCClientFn(cfg.endpoint.Value, dialOpts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Keep track that we own the lifecycle of this conn and need to close
|
||||||
|
// it on Shutdown.
|
||||||
|
c.ourConn = true
|
||||||
|
c.conn = conn
|
||||||
|
}
|
||||||
|
|
||||||
|
c.lsc = collogpb.NewLogsServiceClient(c.conn)
|
||||||
|
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newGRPCDialOptions(cfg config) []grpc.DialOption {
|
||||||
|
userAgent := "OTel Go OTLP over gRPC logs exporter/" + Version()
|
||||||
|
dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)}
|
||||||
|
dialOpts = append(dialOpts, cfg.dialOptions.Value...)
|
||||||
|
|
||||||
|
// Convert other grpc configs to the dial options.
|
||||||
|
// Service config
|
||||||
|
if cfg.serviceConfig.Value != "" {
|
||||||
|
dialOpts = append(dialOpts, grpc.WithDefaultServiceConfig(cfg.serviceConfig.Value))
|
||||||
|
}
|
||||||
|
// Prioritize GRPCCredentials over Insecure (passing both is an error).
|
||||||
|
if cfg.gRPCCredentials.Value != nil {
|
||||||
|
dialOpts = append(dialOpts, grpc.WithTransportCredentials(cfg.gRPCCredentials.Value))
|
||||||
|
} else if cfg.insecure.Value {
|
||||||
|
dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||||
|
} else {
|
||||||
|
// Default to using the host's root CA.
|
||||||
|
dialOpts = append(dialOpts, grpc.WithTransportCredentials(
|
||||||
|
credentials.NewTLS(nil),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
// Compression
|
||||||
|
if cfg.compression.Value == GzipCompression {
|
||||||
|
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
|
||||||
|
}
|
||||||
|
// Reconnection period
|
||||||
|
if cfg.reconnectionPeriod.Value != 0 {
|
||||||
|
p := grpc.ConnectParams{
|
||||||
|
Backoff: backoff.DefaultConfig,
|
||||||
|
MinConnectTimeout: cfg.reconnectionPeriod.Value,
|
||||||
|
}
|
||||||
|
dialOpts = append(dialOpts, grpc.WithConnectParams(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
return dialOpts
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadLogs sends proto logs to connected endpoint.
|
||||||
|
//
|
||||||
|
// Retryable errors from the server will be handled according to any
|
||||||
|
// RetryConfig the client was created with.
|
||||||
|
//
|
||||||
|
// The otlplog.Exporter synchronizes access to client methods, and
|
||||||
|
// ensures this is not called after the Exporter is shutdown. Only thing
|
||||||
|
// to do here is send data.
|
||||||
|
func (c *client) UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
// Do not upload if the context is already expired.
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := c.exportContext(ctx)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
return c.requestFunc(ctx, func(ctx context.Context) error {
|
||||||
|
resp, err := c.lsc.Export(ctx, &collogpb.ExportLogsServiceRequest{
|
||||||
|
ResourceLogs: rl,
|
||||||
|
})
|
||||||
|
if resp != nil && resp.PartialSuccess != nil {
|
||||||
|
msg := resp.PartialSuccess.GetErrorMessage()
|
||||||
|
n := resp.PartialSuccess.GetRejectedLogRecords()
|
||||||
|
if n != 0 || msg != "" {
|
||||||
|
err := fmt.Errorf("OTLP partial success: %s (%d log records rejected)", msg, n)
|
||||||
|
otel.Handle(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// nil is converted to OK.
|
||||||
|
if status.Code(err) == codes.OK {
|
||||||
|
// Success.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown shuts down the client, freeing all resources.
|
||||||
|
//
|
||||||
|
// Any active connections to a remote endpoint are closed if they were created
|
||||||
|
// by the client. Any gRPC connection passed during creation using
|
||||||
|
// WithGRPCConn will not be closed. It is the caller's responsibility to
|
||||||
|
// handle cleanup of that resource.
|
||||||
|
//
|
||||||
|
// The otlplog.Exporter synchronizes access to client methods and
|
||||||
|
// ensures this is called only once. The only thing that needs to be done
|
||||||
|
// here is to release any computational resources the client holds.
|
||||||
|
func (c *client) Shutdown(ctx context.Context) error {
|
||||||
|
c.metadata = nil
|
||||||
|
c.requestFunc = nil
|
||||||
|
c.lsc = nil
|
||||||
|
|
||||||
|
// Release the connection if we created it.
|
||||||
|
err := ctx.Err()
|
||||||
|
if c.ourConn {
|
||||||
|
closeErr := c.conn.Close()
|
||||||
|
// A context timeout error takes precedence over this error.
|
||||||
|
if err == nil && closeErr != nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.conn = nil
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// exportContext returns a copy of parent with an appropriate deadline and
|
||||||
|
// cancellation function based on the clients configured export timeout.
|
||||||
|
//
|
||||||
|
// It is the callers responsibility to cancel the returned context once its
|
||||||
|
// use is complete, via the parent or directly with the returned CancelFunc, to
|
||||||
|
// ensure all resources are correctly released.
|
||||||
|
func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
|
||||||
|
var (
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
)
|
||||||
|
|
||||||
|
if c.exportTimeout > 0 {
|
||||||
|
ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
|
||||||
|
} else {
|
||||||
|
ctx, cancel = context.WithCancel(parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.metadata.Len() > 0 {
|
||||||
|
md := c.metadata
|
||||||
|
if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||||
|
md = metadata.Join(md, outMD)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx = metadata.NewOutgoingContext(ctx, md)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctx, cancel
|
||||||
|
}
|
||||||
|
|
||||||
|
type noopClient struct{}
|
||||||
|
|
||||||
|
func newNoopClient() *noopClient {
|
||||||
|
return &noopClient{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *noopClient) UploadLogs(context.Context, []*logpb.ResourceLogs) error { return nil }
|
||||||
|
|
||||||
|
func (c *noopClient) Shutdown(context.Context) error { return nil }
|
||||||
|
|
||||||
|
// retryable returns if err identifies a request that can be retried and a
|
||||||
|
// duration to wait for if an explicit throttle time is included in err.
|
||||||
|
func retryable(err error) (bool, time.Duration) {
|
||||||
|
s := status.Convert(err)
|
||||||
|
return retryableGRPCStatus(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
|
||||||
|
switch s.Code() {
|
||||||
|
case codes.Canceled,
|
||||||
|
codes.DeadlineExceeded,
|
||||||
|
codes.Aborted,
|
||||||
|
codes.OutOfRange,
|
||||||
|
codes.Unavailable,
|
||||||
|
codes.DataLoss:
|
||||||
|
// Additionally, handle RetryInfo.
|
||||||
|
_, d := throttleDelay(s)
|
||||||
|
return true, d
|
||||||
|
case codes.ResourceExhausted:
|
||||||
|
// Retry only if the server signals that the recovery from resource exhaustion is possible.
|
||||||
|
return throttleDelay(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not a retry-able error.
|
||||||
|
return false, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// throttleDelay returns if the status is RetryInfo
|
||||||
|
// and the duration to wait for if an explicit throttle time is included.
|
||||||
|
func throttleDelay(s *status.Status) (bool, time.Duration) {
|
||||||
|
for _, detail := range s.Details() {
|
||||||
|
if t, ok := detail.(*errdetails.RetryInfo); ok {
|
||||||
|
return true, t.RetryDelay.AsDuration()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, 0
|
||||||
|
}
|
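The `retryable` and `throttleDelay` helpers at the end of `client.go` above treat a handful of gRPC codes as retryable and honor a server-supplied `RetryInfo` detail. A standalone sketch of how such a status is built and inspected, using the same `status` and `errdetails` packages the client imports (the message and delay are hypothetical):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// A server signalling "back off for 3s" attaches RetryInfo to the status.
	s, err := status.New(codes.ResourceExhausted, "quota exceeded").
		WithDetails(&errdetails.RetryInfo{RetryDelay: durationpb.New(3 * time.Second)})
	if err != nil {
		panic(err)
	}

	// Mirrors what throttleDelay does: scan the details for a RetryInfo entry.
	for _, detail := range s.Details() {
		if t, ok := detail.(*errdetails.RetryInfo); ok {
			fmt.Println("retry after", t.RetryDelay.AsDuration())
		}
	}
}
```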
653 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go generated vendored Normal file

@ -0,0 +1,653 @@
|
|||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry"
|
||||||
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Default values.
|
||||||
|
var (
|
||||||
|
defaultEndpoint = "localhost:4317"
|
||||||
|
defaultTimeout = 10 * time.Second
|
||||||
|
defaultRetryCfg = retry.DefaultConfig
|
||||||
|
)
|
||||||
|
|
||||||
|
// Environment variable keys.
|
||||||
|
var (
|
||||||
|
envEndpoint = []string{
|
||||||
|
"OTEL_EXPORTER_OTLP_LOGS_ENDPOINT",
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT",
|
||||||
|
}
|
||||||
|
envInsecure = []string{
|
||||||
|
"OTEL_EXPORTER_OTLP_LOGS_INSECURE",
|
||||||
|
"OTEL_EXPORTER_OTLP_INSECURE",
|
||||||
|
}
|
||||||
|
|
||||||
|
envHeaders = []string{
|
||||||
|
"OTEL_EXPORTER_OTLP_LOGS_HEADERS",
|
||||||
|
"OTEL_EXPORTER_OTLP_HEADERS",
|
||||||
|
}
|
||||||
|
|
||||||
|
envCompression = []string{
|
||||||
|
"OTEL_EXPORTER_OTLP_LOGS_COMPRESSION",
|
||||||
|
"OTEL_EXPORTER_OTLP_COMPRESSION",
|
||||||
|
}
|
||||||
|
|
||||||
|
envTimeout = []string{
|
||||||
|
"OTEL_EXPORTER_OTLP_LOGS_TIMEOUT",
|
||||||
|
"OTEL_EXPORTER_OTLP_TIMEOUT",
|
||||||
|
}
|
||||||
|
|
||||||
|
envTLSCert = []string{
|
||||||
|
"OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE",
|
||||||
|
"OTEL_EXPORTER_OTLP_CERTIFICATE",
|
||||||
|
}
|
||||||
|
envTLSClient = []struct {
|
||||||
|
Certificate string
|
||||||
|
Key string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE",
|
||||||
|
"OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE",
|
||||||
|
"OTEL_EXPORTER_OTLP_CLIENT_KEY",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
type fnOpt func(config) config
|
||||||
|
|
||||||
|
func (f fnOpt) applyOption(c config) config { return f(c) }
|
||||||
|
|
||||||
|
// Option applies an option to the Exporter.
|
||||||
|
type Option interface {
|
||||||
|
applyOption(config) config
|
||||||
|
}
|
||||||
|
|
||||||
|
type config struct {
|
||||||
|
endpoint setting[string]
|
||||||
|
insecure setting[bool]
|
||||||
|
tlsCfg setting[*tls.Config]
|
||||||
|
headers setting[map[string]string]
|
||||||
|
compression setting[Compression]
|
||||||
|
timeout setting[time.Duration]
|
||||||
|
retryCfg setting[retry.Config]
|
||||||
|
|
||||||
|
// gRPC configurations
|
||||||
|
gRPCCredentials setting[credentials.TransportCredentials]
|
||||||
|
serviceConfig setting[string]
|
||||||
|
reconnectionPeriod setting[time.Duration]
|
||||||
|
dialOptions setting[[]grpc.DialOption]
|
||||||
|
gRPCConn setting[*grpc.ClientConn]
|
||||||
|
}
|
||||||
|
|
||||||
|
func newConfig(options []Option) config {
|
||||||
|
var c config
|
||||||
|
for _, opt := range options {
|
||||||
|
c = opt.applyOption(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply environment value and default value
|
||||||
|
c.endpoint = c.endpoint.Resolve(
|
||||||
|
getEnv[string](envEndpoint, convEndpoint),
|
||||||
|
fallback[string](defaultEndpoint),
|
||||||
|
)
|
||||||
|
c.insecure = c.insecure.Resolve(
|
||||||
|
loadInsecureFromEnvEndpoint(envEndpoint),
|
||||||
|
getEnv[bool](envInsecure, convInsecure),
|
||||||
|
)
|
||||||
|
c.tlsCfg = c.tlsCfg.Resolve(
|
||||||
|
loadEnvTLS[*tls.Config](),
|
||||||
|
)
|
||||||
|
c.headers = c.headers.Resolve(
|
||||||
|
getEnv[map[string]string](envHeaders, convHeaders),
|
||||||
|
)
|
||||||
|
c.compression = c.compression.Resolve(
|
||||||
|
getEnv[Compression](envCompression, convCompression),
|
||||||
|
)
|
||||||
|
c.timeout = c.timeout.Resolve(
|
||||||
|
getEnv[time.Duration](envTimeout, convDuration),
|
||||||
|
fallback[time.Duration](defaultTimeout),
|
||||||
|
)
|
||||||
|
c.retryCfg = c.retryCfg.Resolve(
|
||||||
|
fallback[retry.Config](defaultRetryCfg),
|
||||||
|
)
|
||||||
|
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryConfig defines configuration for retrying the export of log data
|
||||||
|
// that failed.
|
||||||
|
//
|
||||||
|
// This configuration does not define any network retry strategy. That is
|
||||||
|
// entirely handled by the gRPC ClientConn.
|
||||||
|
type RetryConfig retry.Config
|
||||||
|
|
||||||
|
// WithInsecure disables client transport security for the Exporter's gRPC
|
||||||
|
// connection, just like grpc.WithInsecure()
|
||||||
|
// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does.
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
|
||||||
|
// environment variable is set, and this option is not passed, that variable
|
||||||
|
// value will be used to determine client security. If the endpoint has a
|
||||||
|
// scheme of "http" or "unix" client security will be disabled. If both are
|
||||||
|
// set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, client security will be used.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithInsecure() Option {
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.insecure = newSetting(true)
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithEndpoint sets the target endpoint the Exporter will connect to.
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
|
||||||
|
// environment variable is set, and this option is not passed, that variable
|
||||||
|
// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
|
||||||
|
// will take precedence.
|
||||||
|
//
|
||||||
|
// If both this option and WithEndpointURL are used, the last used option will
|
||||||
|
// take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, "localhost:4317" will be used.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithEndpoint(endpoint string) Option {
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.endpoint = newSetting(endpoint)
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
|
||||||
|
// environment variable is set, and this option is not passed, that variable
|
||||||
|
// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
|
||||||
|
// will take precedence.
|
||||||
|
//
|
||||||
|
// If both this option and WithEndpoint are used, the last used option will
|
||||||
|
// take precedence.
|
||||||
|
//
|
||||||
|
// If an invalid URL is provided, the default value will be kept.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, "localhost:4317" will be used.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithEndpointURL(rawURL string) Option {
|
||||||
|
u, err := url.Parse(rawURL)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "otlplog: parse endpoint url", "url", rawURL)
|
||||||
|
return fnOpt(func(c config) config { return c })
|
||||||
|
}
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.endpoint = newSetting(u.Host)
|
||||||
|
c.insecure = insecureFromScheme(c.insecure, u.Scheme)
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithReconnectionPeriod set the minimum amount of time between connection
|
||||||
|
// attempts to the target endpoint.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithReconnectionPeriod(rp time.Duration) Option {
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.reconnectionPeriod = newSetting(rp)
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compression describes the compression used for exported payloads.
|
||||||
|
type Compression int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// NoCompression represents that no compression should be used.
|
||||||
|
NoCompression Compression = iota
|
||||||
|
// GzipCompression represents that gzip compression should be used.
|
||||||
|
GzipCompression
|
||||||
|
)
|
||||||
|
|
||||||
|
// WithCompressor sets the compressor the gRPC client uses.
|
||||||
|
// Supported compressor values: "gzip".
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_COMPRESSION or
|
||||||
|
// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION environment variable is set, and
|
||||||
|
// this option is not passed, that variable value will be used. That value can
|
||||||
|
// be either "none" or "gzip". If both are set,
|
||||||
|
// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION will take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, no compression strategy will be used.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithCompressor(compressor string) Option {
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.compression = newSetting(compressorToCompression(compressor))
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHeaders will send the provided headers with each gRPC requests.
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_LOGS_HEADERS
|
||||||
|
// environment variable is set, and this option is not passed, that variable
|
||||||
|
// value will be used. The value will be parsed as a list of key value pairs.
|
||||||
|
// These pairs are expected to be in the W3C Correlation-Context format
|
||||||
|
// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
|
||||||
|
// both are set, OTEL_EXPORTER_OTLP_LOGS_HEADERS will take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, no user headers will be set.
|
||||||
|
func WithHeaders(headers map[string]string) Option {
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.headers = newSetting(headers)
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTLSCredentials sets the gRPC connection to use creds.
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
|
||||||
|
// OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE environment variable is set, and
|
||||||
|
// this option is not passed, that variable value will be used. The value will
|
||||||
|
// be parsed the filepath of the TLS certificate chain to use. If both are
|
||||||
|
// set, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE will take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, no TLS credentials will be used.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithTLSCredentials(credential credentials.TransportCredentials) Option {
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.gRPCCredentials = newSetting(credential)
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithServiceConfig defines the default gRPC service config used.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithServiceConfig(serviceConfig string) Option {
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.serviceConfig = newSetting(serviceConfig)
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDialOption sets explicit grpc.DialOptions to use when establishing a
|
||||||
|
// gRPC connection. The options here are appended to the internal grpc.DialOptions
|
||||||
|
// used so they will take precedence over any other internal grpc.DialOptions
|
||||||
|
// they might conflict with.
|
||||||
|
// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError]
|
||||||
|
// grpc.DialOptions are ignored.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithDialOption(opts ...grpc.DialOption) Option {
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.dialOptions = newSetting(opts)
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
|
||||||
|
//
|
||||||
|
// This option takes precedence over any other option that relates to
|
||||||
|
// establishing or persisting a gRPC connection to a target endpoint. Any
|
||||||
|
// other option of those types passed will be ignored.
|
||||||
|
//
|
||||||
|
// It is the caller's responsibility to close the passed conn. The Exporter
|
||||||
|
// Shutdown method will not close this connection.
|
||||||
|
func WithGRPCConn(conn *grpc.ClientConn) Option {
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.gRPCConn = newSetting(conn)
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTimeout sets the max amount of time an Exporter will attempt an export.
|
||||||
|
//
|
||||||
|
// This takes precedence over any retry settings defined by WithRetry. Once
|
||||||
|
// this time limit has been reached the export is abandoned and the log
|
||||||
|
// data is dropped.
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_LOGS_TIMEOUT
|
||||||
|
// environment variable is set, and this option is not passed, that variable
|
||||||
|
// value will be used. The value will be parsed as an integer representing the
|
||||||
|
// timeout in milliseconds. If both are set,
|
||||||
|
// OTEL_EXPORTER_OTLP_LOGS_TIMEOUT will take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, a timeout of 10 seconds will be used.
|
||||||
|
func WithTimeout(duration time.Duration) Option {
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.timeout = newSetting(duration)
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRetry sets the retry policy for transient retryable errors that are
|
||||||
|
// returned by the target endpoint.
|
||||||
|
//
|
||||||
|
// If the target endpoint responds with not only a retryable error, but
|
||||||
|
// explicitly returns a backoff time in the response, that time will take
|
||||||
|
// precedence over these settings.
|
||||||
|
//
|
||||||
|
// These settings do not define any network retry strategy. That is entirely
|
||||||
|
// handled by the gRPC ClientConn.
|
||||||
|
//
|
||||||
|
// If unset, the default retry policy will be used. It will retry the export
|
||||||
|
// 5 seconds after receiving a retryable error and increase exponentially
|
||||||
|
// after each error for no more than a total time of 1 minute.
|
||||||
|
func WithRetry(rc RetryConfig) Option {
|
||||||
|
return fnOpt(func(c config) config {
|
||||||
|
c.retryCfg = newSetting(retry.Config(rc))
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
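
A minimal sketch of how the options above might be combined when constructing the exporter; the endpoint, header value, and retry numbers are illustrative choices, not authoritative defaults.

package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
)

func main() {
	ctx := context.Background()
	// Endpoint, header value, and retry numbers below are example choices.
	exp, err := otlploggrpc.New(ctx,
		otlploggrpc.WithEndpoint("localhost:4317"),
		otlploggrpc.WithInsecure(),
		otlploggrpc.WithCompressor("gzip"),
		otlploggrpc.WithHeaders(map[string]string{"api-key": "example"}),
		otlploggrpc.WithTimeout(10 * time.Second),
		otlploggrpc.WithRetry(otlploggrpc.RetryConfig{
			Enabled:         true,
			InitialInterval: 5 * time.Second,
			MaxInterval:     30 * time.Second,
			MaxElapsedTime:  time.Minute,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}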
|
||||||
|
|
||||||
|
// convCompression returns the parsed compression encoded in s. NoCompression
|
||||||
|
// and an error are returned if s is unknown.
|
||||||
|
func convCompression(s string) (Compression, error) {
|
||||||
|
switch s {
|
||||||
|
case "gzip":
|
||||||
|
return GzipCompression, nil
|
||||||
|
case "none", "":
|
||||||
|
return NoCompression, nil
|
||||||
|
}
|
||||||
|
return NoCompression, fmt.Errorf("unknown compression: %s", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// convEndpoint converts s from a URL string to an endpoint if s is a valid
|
||||||
|
// URL. Otherwise, "" and an error are returned.
|
||||||
|
func convEndpoint(s string) (string, error) {
|
||||||
|
u, err := url.Parse(s)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return u.Host, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// convInsecure converts s from string to bool without case sensitivity.
|
||||||
|
// If s is not valid returns error.
|
||||||
|
func convInsecure(s string) (bool, error) {
|
||||||
|
s = strings.ToLower(s)
|
||||||
|
if s != "true" && s != "false" {
|
||||||
|
return false, fmt.Errorf("can't convert %q to bool", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return s == "true", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadInsecureFromEnvEndpoint returns a resolver that fetches
|
||||||
|
// insecure setting from envEndpoint if it is possible.
|
||||||
|
func loadInsecureFromEnvEndpoint(envEndpoint []string) resolver[bool] {
|
||||||
|
return func(s setting[bool]) setting[bool] {
|
||||||
|
if s.Set {
|
||||||
|
// Passed, valid, options have precedence.
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, key := range envEndpoint {
|
||||||
|
if vStr := os.Getenv(key); vStr != "" {
|
||||||
|
u, err := url.Parse(vStr)
|
||||||
|
if err != nil {
|
||||||
|
otel.Handle(fmt.Errorf("invalid %s value %s: %w", key, vStr, err))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return insecureFromScheme(s, u.Scheme)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// convHeaders converts the OTel environment variable header value s into a
|
||||||
|
// mapping of header key to value. If s is invalid a partial result and error
|
||||||
|
// are returned.
|
||||||
|
func convHeaders(s string) (map[string]string, error) {
|
||||||
|
out := make(map[string]string)
|
||||||
|
var err error
|
||||||
|
for _, header := range strings.Split(s, ",") {
|
||||||
|
rawKey, rawVal, found := strings.Cut(header, "=")
|
||||||
|
if !found {
|
||||||
|
err = errors.Join(err, fmt.Errorf("invalid header: %s", header))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
escKey, e := url.PathUnescape(rawKey)
|
||||||
|
if e != nil {
|
||||||
|
err = errors.Join(err, fmt.Errorf("invalid header key: %s", rawKey))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
key := strings.TrimSpace(escKey)
|
||||||
|
|
||||||
|
escVal, e := url.PathUnescape(rawVal)
|
||||||
|
if e != nil {
|
||||||
|
err = errors.Join(err, fmt.Errorf("invalid header value: %s", rawVal))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
val := strings.TrimSpace(escVal)
|
||||||
|
|
||||||
|
out[key] = val
|
||||||
|
}
|
||||||
|
return out, err
|
||||||
|
}
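
For illustration, the sketch below reimplements the same parsing rules as convHeaders (which is unexported) and shows what an example header string resolves to; the header names and values are made up.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseHeaders mirrors convHeaders above: comma-separated pairs, '='
// separated key/value, percent-decoded, and whitespace-trimmed.
func parseHeaders(s string) map[string]string {
	out := make(map[string]string)
	for _, header := range strings.Split(s, ",") {
		rawKey, rawVal, found := strings.Cut(header, "=")
		if !found {
			continue // invalid pairs are skipped in this sketch
		}
		key, _ := url.PathUnescape(rawKey)
		val, _ := url.PathUnescape(rawVal)
		out[strings.TrimSpace(key)] = strings.TrimSpace(val)
	}
	return out
}

func main() {
	// Example value only, e.g. from OTEL_EXPORTER_OTLP_LOGS_HEADERS.
	fmt.Println(parseHeaders("api-key=secret%20token, team =observability"))
	// Output: map[api-key:secret token team:observability]
}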
|
||||||
|
|
||||||
|
// convDuration converts s into a duration of milliseconds. If s does not
|
||||||
|
// contain an integer, 0 and an error are returned.
|
||||||
|
func convDuration(s string) (time.Duration, error) {
|
||||||
|
d, err := strconv.Atoi(s)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
// OTel durations are defined in milliseconds.
|
||||||
|
return time.Duration(d) * time.Millisecond, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadEnvTLS returns a resolver that loads a *tls.Config from files defined by
|
||||||
|
// the OTLP TLS environment variables. This will load both the rootCAs and
|
||||||
|
// certificates used for mTLS.
|
||||||
|
//
|
||||||
|
// If the filepath defined is invalid or does not contain valid TLS files, an
|
||||||
|
// error is passed to the OTel ErrorHandler and no TLS configuration is
|
||||||
|
// provided.
|
||||||
|
func loadEnvTLS[T *tls.Config]() resolver[T] {
|
||||||
|
return func(s setting[T]) setting[T] {
|
||||||
|
if s.Set {
|
||||||
|
// Passed, valid, options have precedence.
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
var rootCAs *x509.CertPool
|
||||||
|
var err error
|
||||||
|
for _, key := range envTLSCert {
|
||||||
|
if v := os.Getenv(key); v != "" {
|
||||||
|
rootCAs, err = loadCertPool(v)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var certs []tls.Certificate
|
||||||
|
for _, pair := range envTLSClient {
|
||||||
|
cert := os.Getenv(pair.Certificate)
|
||||||
|
key := os.Getenv(pair.Key)
|
||||||
|
if cert != "" && key != "" {
|
||||||
|
var e error
|
||||||
|
certs, e = loadCertificates(cert, key)
|
||||||
|
err = errors.Join(err, e)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
err = fmt.Errorf("failed to load TLS: %w", err)
|
||||||
|
otel.Handle(err)
|
||||||
|
} else if rootCAs != nil || certs != nil {
|
||||||
|
s.Set = true
|
||||||
|
s.Value = &tls.Config{RootCAs: rootCAs, Certificates: certs}
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// readFile is used for testing.
|
||||||
|
var readFile = os.ReadFile
|
||||||
|
|
||||||
|
// loadCertPool loads and returns the *x509.CertPool found at path if it exists
|
||||||
|
// and is valid. Otherwise, nil and an error is returned.
|
||||||
|
func loadCertPool(path string) (*x509.CertPool, error) {
|
||||||
|
b, err := readFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
cp := x509.NewCertPool()
|
||||||
|
if ok := cp.AppendCertsFromPEM(b); !ok {
|
||||||
|
return nil, errors.New("certificate not added")
|
||||||
|
}
|
||||||
|
return cp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadCertificates loads and returns the tls.Certificate found at path if it
|
||||||
|
// exists and is valid. Otherwise, nil and an error is returned.
|
||||||
|
func loadCertificates(certPath, keyPath string) ([]tls.Certificate, error) {
|
||||||
|
cert, err := readFile(certPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
key, err := readFile(keyPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
crt, err := tls.X509KeyPair(cert, key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return []tls.Certificate{crt}, nil
|
||||||
|
}
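
For completeness, the same PEM material these helpers read from the environment can be supplied programmatically through WithTLSCredentials. A hedged sketch, with made-up file paths:

package main

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// File paths are illustrative.
	caPEM, err := os.ReadFile("/etc/otel/ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatal("certificate not added")
	}
	clientCert, err := tls.LoadX509KeyPair("/etc/otel/client.pem", "/etc/otel/client-key.pem")
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent of what loadEnvTLS assembles from the environment.
	creds := credentials.NewTLS(&tls.Config{
		RootCAs:      pool,
		Certificates: []tls.Certificate{clientCert},
	})
	exp, err := otlploggrpc.New(context.Background(), otlploggrpc.WithTLSCredentials(creds))
	if err != nil {
		log.Fatal(err)
	}
	_ = exp
}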
|
||||||
|
|
||||||
|
// insecureFromScheme returns a setting indicating whether the connection should
|
||||||
|
// use client transport security or not.
|
||||||
|
// An empty scheme doesn't force the insecure setting.
|
||||||
|
func insecureFromScheme(prev setting[bool], scheme string) setting[bool] {
|
||||||
|
if scheme == "https" {
|
||||||
|
return newSetting(false)
|
||||||
|
} else if len(scheme) > 0 {
|
||||||
|
return newSetting(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
return prev
|
||||||
|
}
|
||||||
|
|
||||||
|
func compressorToCompression(compressor string) Compression {
|
||||||
|
c, err := convCompression(compressor)
|
||||||
|
if err != nil {
|
||||||
|
otel.Handle(fmt.Errorf("%w, using no compression as default", err))
|
||||||
|
return NoCompression
|
||||||
|
}
|
||||||
|
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// setting is a configuration setting value.
|
||||||
|
type setting[T any] struct {
|
||||||
|
Value T
|
||||||
|
Set bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// newSetting returns a new setting with the value set.
|
||||||
|
func newSetting[T any](value T) setting[T] {
|
||||||
|
return setting[T]{Value: value, Set: true}
|
||||||
|
}
|
||||||
|
|
||||||
|
// resolver returns an updated setting after applying a resolution operation.
|
||||||
|
type resolver[T any] func(setting[T]) setting[T]
|
||||||
|
|
||||||
|
// Resolve returns a resolved version of s.
|
||||||
|
//
|
||||||
|
// It will apply all the passed fn in the order provided, chaining together the
|
||||||
|
// return setting to the next input. The setting s is used as the initial
|
||||||
|
// argument to the first fn.
|
||||||
|
//
|
||||||
|
// Each fn needs to validate if it should apply given the Set state of the
|
||||||
|
// setting. This will not perform any checks on the set state when chaining
|
||||||
|
// functions.
|
||||||
|
func (s setting[T]) Resolve(fn ...resolver[T]) setting[T] {
|
||||||
|
for _, f := range fn {
|
||||||
|
s = f(s)
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// getEnv returns a resolver that will apply an environment variable value
|
||||||
|
// associated with the first set key to a setting value. The conv function is
|
||||||
|
// used to convert between the environment variable value and the setting type.
|
||||||
|
//
|
||||||
|
// If the input setting to the resolver is set, the environment variable will
|
||||||
|
// not be applied.
|
||||||
|
//
|
||||||
|
// Any error returned from conv is sent to the OTel ErrorHandler and the
|
||||||
|
// setting will not be updated.
|
||||||
|
func getEnv[T any](keys []string, conv func(string) (T, error)) resolver[T] {
|
||||||
|
return func(s setting[T]) setting[T] {
|
||||||
|
if s.Set {
|
||||||
|
// Passed, valid, options have precedence.
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, key := range keys {
|
||||||
|
if vStr := os.Getenv(key); vStr != "" {
|
||||||
|
v, err := conv(vStr)
|
||||||
|
if err == nil {
|
||||||
|
s.Value = v
|
||||||
|
s.Set = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
otel.Handle(fmt.Errorf("invalid %s value %s: %w", key, vStr, err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// fallback returns a resolver that will set a setting value to val if it is not
|
||||||
|
// already set.
|
||||||
|
//
|
||||||
|
// This is usually passed at the end of a resolver chain to ensure a default is
|
||||||
|
// applied if the setting has not already been set.
|
||||||
|
func fallback[T any](val T) resolver[T] {
|
||||||
|
return func(s setting[T]) setting[T] {
|
||||||
|
if !s.Set {
|
||||||
|
s.Value = val
|
||||||
|
s.Set = true
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
}
|
63
vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

/*
Package otlploggrpc provides an OTLP log exporter using gRPC. The exporter uses gRPC to
transport OTLP protobuf payloads.

All Exporters must be created with [New].

The environment variables described below can be used for configuration.

OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT (default: "https://localhost:4317") -
target to which the exporter sends telemetry.
The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
The value must contain a scheme ("http" or "https") and host.
The value may additionally contain a port, and a path.
The value should not contain a query string or fragment.
OTEL_EXPORTER_OTLP_LOGS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.

OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_LOGS_INSECURE (default: "false") -
setting "true" disables client transport security for the exporter's gRPC connection.
You can use this only when an endpoint is provided without scheme.
OTEL_EXPORTER_OTLP_LOGS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.

OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_HEADERS (default: none) -
key-value pairs used as gRPC metadata associated with gRPC requests.
The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
except that additional semi-colon delimited metadata is not supported.
Example value: "key1=value1,key2=value2".
OTEL_EXPORTER_OTLP_LOGS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
The configuration can be overridden by [WithHeaders] option.

OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT (default: "10000") -
maximum time in milliseconds the OTLP exporter waits for each batch export.
OTEL_EXPORTER_OTLP_LOGS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
The configuration can be overridden by [WithTimeout] option.

OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION (default: none) -
the gRPC compressor the exporter uses.
Supported value: "gzip".
OTEL_EXPORTER_OTLP_LOGS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.

OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE (default: none) -
the filepath to the trusted certificate to use when verifying a server's TLS credentials.
OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.

OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE (default: none) -
the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.

OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY (default: none) -
the filepath to the client's private key to use in mTLS communication in PEM format.
OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.

[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
*/
package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
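
A short usage sketch of the environment-driven configuration documented above, assuming the relevant OTEL_EXPORTER_OTLP_LOGS_* variables are already exported; the endpoint and logger name are placeholders, and the wiring reflects the BatchProcessor pairing recommended by the exporter's New documentation.

package main

// Assumes e.g. OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=https://collector.example:4317
// is exported before the process starts.

import (
	"context"
	stdlog "log"

	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

func main() {
	ctx := context.Background()

	exp, err := otlploggrpc.New(ctx) // configuration read from the environment
	if err != nil {
		stdlog.Fatal(err)
	}

	// Pair the exporter with a batching processor for asynchronous export.
	provider := sdklog.NewLoggerProvider(
		sdklog.WithProcessor(sdklog.NewBatchProcessor(exp)),
	)
	defer func() { _ = provider.Shutdown(ctx) }()

	logger := provider.Logger("example/app") // placeholder instrumentation name
	_ = logger
}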
93
vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go
generated
vendored
Normal file
@ -0,0 +1,93 @@
|
|||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform"
|
||||||
|
"go.opentelemetry.io/otel/sdk/log"
|
||||||
|
logpb "go.opentelemetry.io/proto/otlp/logs/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type logClient interface {
|
||||||
|
UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error
|
||||||
|
Shutdown(context.Context) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exporter is an OpenTelemetry log Exporter. It transports log data encoded as
|
||||||
|
// OTLP protobufs using gRPC.
|
||||||
|
// All Exporters must be created with [New].
|
||||||
|
type Exporter struct {
|
||||||
|
// Ensure synchronous access to the client across all functionality.
|
||||||
|
clientMu sync.Mutex
|
||||||
|
client logClient
|
||||||
|
|
||||||
|
stopped atomic.Bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compile-time check Exporter implements [log.Exporter].
|
||||||
|
var _ log.Exporter = (*Exporter)(nil)
|
||||||
|
|
||||||
|
// New returns a new [Exporter].
|
||||||
|
//
|
||||||
|
// It is recommended to use it with a [BatchProcessor]
|
||||||
|
// or other processor exporting records asynchronously.
|
||||||
|
func New(_ context.Context, options ...Option) (*Exporter, error) {
|
||||||
|
cfg := newConfig(options)
|
||||||
|
c, err := newClient(cfg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newExporter(c), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newExporter(c logClient) *Exporter {
|
||||||
|
var e Exporter
|
||||||
|
e.client = c
|
||||||
|
return &e
|
||||||
|
}
|
||||||
|
|
||||||
|
var transformResourceLogs = transform.ResourceLogs
|
||||||
|
|
||||||
|
// Export transforms and transmits log records to an OTLP receiver.
|
||||||
|
//
|
||||||
|
// This method returns nil and drops records if called after Shutdown.
|
||||||
|
// This method returns an error if the method is canceled by the passed context.
|
||||||
|
func (e *Exporter) Export(ctx context.Context, records []log.Record) error {
|
||||||
|
if e.stopped.Load() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
otlp := transformResourceLogs(records)
|
||||||
|
if otlp == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
e.clientMu.Lock()
|
||||||
|
defer e.clientMu.Unlock()
|
||||||
|
return e.client.UploadLogs(ctx, otlp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown shuts down the Exporter. Calls to Export or ForceFlush will perform
|
||||||
|
// no operation after this is called.
|
||||||
|
func (e *Exporter) Shutdown(ctx context.Context) error {
|
||||||
|
if e.stopped.Swap(true) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
e.clientMu.Lock()
|
||||||
|
defer e.clientMu.Unlock()
|
||||||
|
|
||||||
|
err := e.client.Shutdown(ctx)
|
||||||
|
e.client = newNoopClient()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForceFlush does nothing. The Exporter holds no state.
|
||||||
|
func (e *Exporter) ForceFlush(ctx context.Context) error {
|
||||||
|
return nil
|
||||||
|
}
|
145
vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go
generated
vendored
Normal file
@ -0,0 +1,145 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/retry/retry.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
// Package retry provides request retry functionality that can perform
|
||||||
|
// configurable exponential backoff for transient errors and honor any
|
||||||
|
// explicit throttle responses received.
|
||||||
|
package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/cenkalti/backoff/v4"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultConfig are the recommended defaults to use.
|
||||||
|
var DefaultConfig = Config{
|
||||||
|
Enabled: true,
|
||||||
|
InitialInterval: 5 * time.Second,
|
||||||
|
MaxInterval: 30 * time.Second,
|
||||||
|
MaxElapsedTime: time.Minute,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config defines configuration for retrying batches in case of export failure
|
||||||
|
// using an exponential backoff.
|
||||||
|
type Config struct {
|
||||||
|
// Enabled indicates whether or not to retry sending batches in case of
|
||||||
|
// export failure.
|
||||||
|
Enabled bool
|
||||||
|
// InitialInterval the time to wait after the first failure before
|
||||||
|
// retrying.
|
||||||
|
InitialInterval time.Duration
|
||||||
|
// MaxInterval is the upper bound on backoff interval. Once this value is
|
||||||
|
// reached the delay between consecutive retries will always be
|
||||||
|
// `MaxInterval`.
|
||||||
|
MaxInterval time.Duration
|
||||||
|
// MaxElapsedTime is the maximum amount of time (including retries) spent
|
||||||
|
// trying to send a request/batch. Once this value is reached, the data
|
||||||
|
// is discarded.
|
||||||
|
MaxElapsedTime time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFunc wraps a request with retry logic.
|
||||||
|
type RequestFunc func(context.Context, func(context.Context) error) error
|
||||||
|
|
||||||
|
// EvaluateFunc returns whether an error is retry-able and if an explicit throttle
|
||||||
|
// duration should be honored that was included in the error.
|
||||||
|
//
|
||||||
|
// The function must return true if the error argument is retry-able,
|
||||||
|
// otherwise it must return false for the first return parameter.
|
||||||
|
//
|
||||||
|
// The function must return a non-zero time.Duration if the error contains
|
||||||
|
// explicit throttle duration that should be honored, otherwise it must return
|
||||||
|
// a zero valued time.Duration.
|
||||||
|
type EvaluateFunc func(error) (bool, time.Duration)
|
||||||
|
|
||||||
|
// RequestFunc returns a RequestFunc using the evaluate function to determine
|
||||||
|
// if requests can be retried and based on the exponential backoff
|
||||||
|
// configuration of c.
|
||||||
|
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
|
||||||
|
if !c.Enabled {
|
||||||
|
return func(ctx context.Context, fn func(context.Context) error) error {
|
||||||
|
return fn(ctx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return func(ctx context.Context, fn func(context.Context) error) error {
|
||||||
|
// Do not use NewExponentialBackOff since it calls Reset and the code here
|
||||||
|
// must call Reset after changing the InitialInterval (this saves an
|
||||||
|
// unnecessary call to Now).
|
||||||
|
b := &backoff.ExponentialBackOff{
|
||||||
|
InitialInterval: c.InitialInterval,
|
||||||
|
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
||||||
|
Multiplier: backoff.DefaultMultiplier,
|
||||||
|
MaxInterval: c.MaxInterval,
|
||||||
|
MaxElapsedTime: c.MaxElapsedTime,
|
||||||
|
Stop: backoff.Stop,
|
||||||
|
Clock: backoff.SystemClock,
|
||||||
|
}
|
||||||
|
b.Reset()
|
||||||
|
|
||||||
|
for {
|
||||||
|
err := fn(ctx)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
retryable, throttle := evaluate(err)
|
||||||
|
if !retryable {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
bOff := b.NextBackOff()
|
||||||
|
if bOff == backoff.Stop {
|
||||||
|
return fmt.Errorf("max retry time elapsed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for the greater of the backoff or throttle delay.
|
||||||
|
var delay time.Duration
|
||||||
|
if bOff > throttle {
|
||||||
|
delay = bOff
|
||||||
|
} else {
|
||||||
|
elapsed := b.GetElapsedTime()
|
||||||
|
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
|
||||||
|
return fmt.Errorf("max retry time would elapse: %w", err)
|
||||||
|
}
|
||||||
|
delay = throttle
|
||||||
|
}
|
||||||
|
|
||||||
|
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
|
||||||
|
return fmt.Errorf("%w: %w", ctxErr, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
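
To show how this policy is intended to be driven, the sketch below (written as an extra file in this package, since the package is internal) wraps a single-attempt upload with DefaultConfig.RequestFunc; the gRPC-code based evaluate function is a simplified stand-in for the one the exporter client actually supplies.

package retry

// Example sketch, not part of the vendored file.

import (
	"context"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func exampleRequest(ctx context.Context, uploadOnce func(context.Context) error) error {
	evaluate := func(err error) (bool, time.Duration) {
		s, ok := status.FromError(err)
		if !ok {
			return false, 0
		}
		switch s.Code() {
		case codes.Unavailable, codes.ResourceExhausted:
			return true, 0 // retryable; no explicit server throttle in this sketch
		default:
			return false, 0
		}
	}
	// RequestFunc retries uploadOnce with exponential backoff while evaluate
	// reports the error as retryable, up to MaxElapsedTime.
	return DefaultConfig.RequestFunc(evaluate)(ctx, uploadOnce)
}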
|
||||||
|
|
||||||
|
// Allow override for testing.
|
||||||
|
var waitFunc = wait
|
||||||
|
|
||||||
|
// wait takes the caller's context, and the amount of time to wait. It will
|
||||||
|
// return nil if the timer fires before or at the same time as the context's
|
||||||
|
// deadline. This indicates that the call can be retried.
|
||||||
|
func wait(ctx context.Context, delay time.Duration) error {
|
||||||
|
timer := time.NewTimer(delay)
|
||||||
|
defer timer.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
// Handle the case where the timer and context deadline end
|
||||||
|
// simultaneously by prioritizing the timer expiration nil value
|
||||||
|
// response.
|
||||||
|
select {
|
||||||
|
case <-timer.C:
|
||||||
|
default:
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
case <-timer.C:
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
390
vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go
generated
vendored
Normal file
@ -0,0 +1,390 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlplog/transform/log.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
// Package transform provides transformation functionality from the
|
||||||
|
// sdk/log data-types into OTLP data-types.
|
||||||
|
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
lpb "go.opentelemetry.io/proto/otlp/logs/v1"
|
||||||
|
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
api "go.opentelemetry.io/otel/log"
|
||||||
|
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||||
|
"go.opentelemetry.io/otel/sdk/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ResourceLogs returns a slice of OTLP ResourceLogs generated from records.
|
||||||
|
func ResourceLogs(records []log.Record) []*lpb.ResourceLogs {
|
||||||
|
if len(records) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
resMap := make(map[attribute.Distinct]*lpb.ResourceLogs)
|
||||||
|
|
||||||
|
type key struct {
|
||||||
|
r attribute.Distinct
|
||||||
|
is instrumentation.Scope
|
||||||
|
}
|
||||||
|
scopeMap := make(map[key]*lpb.ScopeLogs)
|
||||||
|
|
||||||
|
var resources int
|
||||||
|
for _, r := range records {
|
||||||
|
res := r.Resource()
|
||||||
|
rKey := res.Equivalent()
|
||||||
|
scope := r.InstrumentationScope()
|
||||||
|
k := key{
|
||||||
|
r: rKey,
|
||||||
|
is: scope,
|
||||||
|
}
|
||||||
|
sl, iOk := scopeMap[k]
|
||||||
|
if !iOk {
|
||||||
|
sl = new(lpb.ScopeLogs)
|
||||||
|
var emptyScope instrumentation.Scope
|
||||||
|
if scope != emptyScope {
|
||||||
|
sl.Scope = &cpb.InstrumentationScope{
|
||||||
|
Name: scope.Name,
|
||||||
|
Version: scope.Version,
|
||||||
|
Attributes: AttrIter(scope.Attributes.Iter()),
|
||||||
|
}
|
||||||
|
sl.SchemaUrl = scope.SchemaURL
|
||||||
|
}
|
||||||
|
scopeMap[k] = sl
|
||||||
|
}
|
||||||
|
|
||||||
|
sl.LogRecords = append(sl.LogRecords, LogRecord(r))
|
||||||
|
rl, rOk := resMap[rKey]
|
||||||
|
if !rOk {
|
||||||
|
resources++
|
||||||
|
rl = new(lpb.ResourceLogs)
|
||||||
|
if res.Len() > 0 {
|
||||||
|
rl.Resource = &rpb.Resource{
|
||||||
|
Attributes: AttrIter(res.Iter()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rl.SchemaUrl = res.SchemaURL()
|
||||||
|
resMap[rKey] = rl
|
||||||
|
}
|
||||||
|
if !iOk {
|
||||||
|
rl.ScopeLogs = append(rl.ScopeLogs, sl)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transform the categorized map into a slice
|
||||||
|
resLogs := make([]*lpb.ResourceLogs, 0, resources)
|
||||||
|
for _, rl := range resMap {
|
||||||
|
resLogs = append(resLogs, rl)
|
||||||
|
}
|
||||||
|
|
||||||
|
return resLogs
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogRecord returns an OTLP LogRecord generated from record.
|
||||||
|
func LogRecord(record log.Record) *lpb.LogRecord {
|
||||||
|
r := &lpb.LogRecord{
|
||||||
|
TimeUnixNano: timeUnixNano(record.Timestamp()),
|
||||||
|
ObservedTimeUnixNano: timeUnixNano(record.ObservedTimestamp()),
|
||||||
|
SeverityNumber: SeverityNumber(record.Severity()),
|
||||||
|
SeverityText: record.SeverityText(),
|
||||||
|
Body: LogAttrValue(record.Body()),
|
||||||
|
Attributes: make([]*cpb.KeyValue, 0, record.AttributesLen()),
|
||||||
|
Flags: uint32(record.TraceFlags()),
|
||||||
|
// TODO: DroppedAttributesCount: /* ... */,
|
||||||
|
}
|
||||||
|
record.WalkAttributes(func(kv api.KeyValue) bool {
|
||||||
|
r.Attributes = append(r.Attributes, LogAttr(kv))
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
if tID := record.TraceID(); tID.IsValid() {
|
||||||
|
r.TraceId = tID[:]
|
||||||
|
}
|
||||||
|
if sID := record.SpanID(); sID.IsValid() {
|
||||||
|
r.SpanId = sID[:]
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
|
||||||
|
// since January 1, 1970 UTC as uint64. The result is undefined if the Unix
|
||||||
|
// time in nanoseconds cannot be represented by an int64 (a date before the
|
||||||
|
// year 1678 or after 2262). timeUnixNano on the zero Time returns 0. The
|
||||||
|
// result does not depend on the location associated with t.
|
||||||
|
func timeUnixNano(t time.Time) uint64 {
|
||||||
|
nano := t.UnixNano()
|
||||||
|
if nano < 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return uint64(nano) // nolint:gosec // Overflow checked.
|
||||||
|
}
|
||||||
|
|
||||||
|
// AttrIter transforms an [attribute.Iterator] into OTLP key-values.
|
||||||
|
func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
|
||||||
|
l := iter.Len()
|
||||||
|
if l == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]*cpb.KeyValue, 0, l)
|
||||||
|
for iter.Next() {
|
||||||
|
out = append(out, Attr(iter.Attribute()))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attrs transforms a slice of [attribute.KeyValue] into OTLP key-values.
|
||||||
|
func Attrs(attrs []attribute.KeyValue) []*cpb.KeyValue {
|
||||||
|
if len(attrs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]*cpb.KeyValue, 0, len(attrs))
|
||||||
|
for _, kv := range attrs {
|
||||||
|
out = append(out, Attr(kv))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attr transforms an [attribute.KeyValue] into an OTLP key-value.
|
||||||
|
func Attr(kv attribute.KeyValue) *cpb.KeyValue {
|
||||||
|
return &cpb.KeyValue{Key: string(kv.Key), Value: AttrValue(kv.Value)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AttrValue transforms an [attribute.Value] into an OTLP AnyValue.
|
||||||
|
func AttrValue(v attribute.Value) *cpb.AnyValue {
|
||||||
|
av := new(cpb.AnyValue)
|
||||||
|
switch v.Type() {
|
||||||
|
case attribute.BOOL:
|
||||||
|
av.Value = &cpb.AnyValue_BoolValue{
|
||||||
|
BoolValue: v.AsBool(),
|
||||||
|
}
|
||||||
|
case attribute.BOOLSLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: boolSliceValues(v.AsBoolSlice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.INT64:
|
||||||
|
av.Value = &cpb.AnyValue_IntValue{
|
||||||
|
IntValue: v.AsInt64(),
|
||||||
|
}
|
||||||
|
case attribute.INT64SLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: int64SliceValues(v.AsInt64Slice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.FLOAT64:
|
||||||
|
av.Value = &cpb.AnyValue_DoubleValue{
|
||||||
|
DoubleValue: v.AsFloat64(),
|
||||||
|
}
|
||||||
|
case attribute.FLOAT64SLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: float64SliceValues(v.AsFloat64Slice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.STRING:
|
||||||
|
av.Value = &cpb.AnyValue_StringValue{
|
||||||
|
StringValue: v.AsString(),
|
||||||
|
}
|
||||||
|
case attribute.STRINGSLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: stringSliceValues(v.AsStringSlice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
av.Value = &cpb.AnyValue_StringValue{
|
||||||
|
StringValue: "INVALID",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return av
|
||||||
|
}
|
||||||
|
|
||||||
|
func boolSliceValues(vals []bool) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_BoolValue{
|
||||||
|
BoolValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func int64SliceValues(vals []int64) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_IntValue{
|
||||||
|
IntValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func float64SliceValues(vals []float64) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_DoubleValue{
|
||||||
|
DoubleValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringSliceValues(vals []string) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{
|
||||||
|
StringValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogAttrs transforms a slice of [api.KeyValue] into OTLP key-values.
|
||||||
|
func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue {
|
||||||
|
if len(attrs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]*cpb.KeyValue, 0, len(attrs))
|
||||||
|
for _, kv := range attrs {
|
||||||
|
out = append(out, LogAttr(kv))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogAttr transforms an [api.KeyValue] into an OTLP key-value.
|
||||||
|
func LogAttr(attr api.KeyValue) *cpb.KeyValue {
|
||||||
|
return &cpb.KeyValue{
|
||||||
|
Key: attr.Key,
|
||||||
|
Value: LogAttrValue(attr.Value),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogAttrValues transforms a slice of [api.Value] into an OTLP []AnyValue.
|
||||||
|
func LogAttrValues(vals []api.Value) []*cpb.AnyValue {
|
||||||
|
if len(vals) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]*cpb.AnyValue, 0, len(vals))
|
||||||
|
for _, v := range vals {
|
||||||
|
out = append(out, LogAttrValue(v))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogAttrValue transforms an [api.Value] into an OTLP AnyValue.
|
||||||
|
func LogAttrValue(v api.Value) *cpb.AnyValue {
|
||||||
|
av := new(cpb.AnyValue)
|
||||||
|
switch v.Kind() {
|
||||||
|
case api.KindBool:
|
||||||
|
av.Value = &cpb.AnyValue_BoolValue{
|
||||||
|
BoolValue: v.AsBool(),
|
||||||
|
}
|
||||||
|
case api.KindInt64:
|
||||||
|
av.Value = &cpb.AnyValue_IntValue{
|
||||||
|
IntValue: v.AsInt64(),
|
||||||
|
}
|
||||||
|
case api.KindFloat64:
|
||||||
|
av.Value = &cpb.AnyValue_DoubleValue{
|
||||||
|
DoubleValue: v.AsFloat64(),
|
||||||
|
}
|
||||||
|
case api.KindString:
|
||||||
|
av.Value = &cpb.AnyValue_StringValue{
|
||||||
|
StringValue: v.AsString(),
|
||||||
|
}
|
||||||
|
case api.KindBytes:
|
||||||
|
av.Value = &cpb.AnyValue_BytesValue{
|
||||||
|
BytesValue: v.AsBytes(),
|
||||||
|
}
|
||||||
|
case api.KindSlice:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: LogAttrValues(v.AsSlice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case api.KindMap:
|
||||||
|
av.Value = &cpb.AnyValue_KvlistValue{
|
||||||
|
KvlistValue: &cpb.KeyValueList{
|
||||||
|
Values: LogAttrs(v.AsMap()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
av.Value = &cpb.AnyValue_StringValue{
|
||||||
|
StringValue: "INVALID",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return av
|
||||||
|
}
|
||||||
|
|
||||||
|
// SeverityNumber transforms a [log.Severity] into an OTLP SeverityNumber.
|
||||||
|
func SeverityNumber(s api.Severity) lpb.SeverityNumber {
|
||||||
|
switch s {
|
||||||
|
case api.SeverityTrace:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE
|
||||||
|
case api.SeverityTrace2:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE2
|
||||||
|
case api.SeverityTrace3:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE3
|
||||||
|
case api.SeverityTrace4:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE4
|
||||||
|
case api.SeverityDebug:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG
|
||||||
|
case api.SeverityDebug2:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG2
|
||||||
|
case api.SeverityDebug3:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG3
|
||||||
|
case api.SeverityDebug4:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG4
|
||||||
|
case api.SeverityInfo:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_INFO
|
||||||
|
case api.SeverityInfo2:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_INFO2
|
||||||
|
case api.SeverityInfo3:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_INFO3
|
||||||
|
case api.SeverityInfo4:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_INFO4
|
||||||
|
case api.SeverityWarn:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_WARN
|
||||||
|
case api.SeverityWarn2:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_WARN2
|
||||||
|
case api.SeverityWarn3:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_WARN3
|
||||||
|
case api.SeverityWarn4:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_WARN4
|
||||||
|
case api.SeverityError:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR
|
||||||
|
case api.SeverityError2:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR2
|
||||||
|
case api.SeverityError3:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR3
|
||||||
|
case api.SeverityError4:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR4
|
||||||
|
case api.SeverityFatal:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL
|
||||||
|
case api.SeverityFatal2:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL2
|
||||||
|
case api.SeverityFatal3:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL3
|
||||||
|
case api.SeverityFatal4:
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL4
|
||||||
|
}
|
||||||
|
return lpb.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED
|
||||||
|
}
|
9
vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"

// Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use.
func Version() string {
	return "0.8.0"
}
8
vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go
generated
vendored
@ -183,11 +183,7 @@ func WithEndpointURL(rawURL string) Option {
|
|||||||
return fnOpt(func(c config) config {
|
return fnOpt(func(c config) config {
|
||||||
c.endpoint = newSetting(u.Host)
|
c.endpoint = newSetting(u.Host)
|
||||||
c.path = newSetting(u.Path)
|
c.path = newSetting(u.Path)
|
||||||
if u.Scheme != "https" {
|
c.insecure = newSetting(u.Scheme != "https")
|
||||||
c.insecure = newSetting(true)
|
|
||||||
} else {
|
|
||||||
c.insecure = newSetting(false)
|
|
||||||
}
|
|
||||||
return c
|
return c
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -377,7 +373,7 @@ func (s setting[T]) Resolve(fn ...resolver[T]) setting[T] {
|
|||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
// loadEnvTLS returns a resolver that loads a *tls.Config from files defeind by
|
// loadEnvTLS returns a resolver that loads a *tls.Config from files defined by
|
||||||
// the OTLP TLS environment variables. This will load both the rootCAs and
|
// the OTLP TLS environment variables. This will load both the rootCAs and
|
||||||
// certificates used for mTLS.
|
// certificates used for mTLS.
|
||||||
//
|
//
|
||||||
|
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go
generated
vendored
@ -22,7 +22,7 @@ target URL to which the exporter sends telemetry.
|
|||||||
The value must contain a scheme ("http" or "https") and host.
|
The value must contain a scheme ("http" or "https") and host.
|
||||||
The value may additionally contain a port and a path.
|
The value may additionally contain a port and a path.
|
||||||
The value should not contain a query string or fragment.
|
The value should not contain a query string or fragment.
|
||||||
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WitnInsecure], and [WithURLPath] options.
|
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options.
|
||||||
|
|
||||||
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_HEADERS (default: none) -
|
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_HEADERS (default: none) -
|
||||||
key-value pairs used as headers associated with HTTP requests.
|
key-value pairs used as headers associated with HTTP requests.
|
||||||
|
109
vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go
generated
vendored
@ -9,7 +9,6 @@
|
|||||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform"
|
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
@ -28,31 +27,42 @@ func ResourceLogs(records []log.Record) []*lpb.ResourceLogs {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
resMap := resourceLogsMapPool.Get().(map[attribute.Distinct]*lpb.ResourceLogs)
|
resMap := make(map[attribute.Distinct]*lpb.ResourceLogs)
|
||||||
defer func() {
|
|
||||||
clear(resMap)
|
|
||||||
resourceLogsMapPool.Put(resMap)
|
|
||||||
}()
|
|
||||||
resourceLogsMap(&resMap, records)
|
|
||||||
|
|
||||||
out := make([]*lpb.ResourceLogs, 0, len(resMap))
|
type key struct {
|
||||||
for _, rl := range resMap {
|
r attribute.Distinct
|
||||||
out = append(out, rl)
|
is instrumentation.Scope
|
||||||
}
|
}
|
||||||
return out
|
scopeMap := make(map[key]*lpb.ScopeLogs)
|
||||||
}
|
|
||||||
|
|
||||||
var resourceLogsMapPool = sync.Pool{
|
var resources int
|
||||||
New: func() any {
|
|
||||||
return make(map[attribute.Distinct]*lpb.ResourceLogs)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceLogsMap(dst *map[attribute.Distinct]*lpb.ResourceLogs, records []log.Record) {
|
|
||||||
for _, r := range records {
|
for _, r := range records {
|
||||||
res := r.Resource()
|
res := r.Resource()
|
||||||
rl, ok := (*dst)[res.Equivalent()]
|
rKey := res.Equivalent()
|
||||||
if !ok {
|
scope := r.InstrumentationScope()
|
||||||
|
k := key{
|
||||||
|
r: rKey,
|
||||||
|
is: scope,
|
||||||
|
}
|
||||||
|
sl, iOk := scopeMap[k]
|
||||||
|
if !iOk {
|
||||||
|
sl = new(lpb.ScopeLogs)
|
||||||
|
var emptyScope instrumentation.Scope
|
||||||
|
if scope != emptyScope {
|
||||||
|
sl.Scope = &cpb.InstrumentationScope{
|
||||||
|
Name: scope.Name,
|
||||||
|
Version: scope.Version,
|
||||||
|
Attributes: AttrIter(scope.Attributes.Iter()),
|
||||||
|
}
|
||||||
|
sl.SchemaUrl = scope.SchemaURL
|
||||||
|
}
|
||||||
|
scopeMap[k] = sl
|
||||||
|
}
|
||||||
|
|
||||||
|
sl.LogRecords = append(sl.LogRecords, LogRecord(r))
|
||||||
|
rl, rOk := resMap[rKey]
|
||||||
|
if !rOk {
|
||||||
|
resources++
|
||||||
rl = new(lpb.ResourceLogs)
|
rl = new(lpb.ResourceLogs)
|
||||||
if res.Len() > 0 {
|
if res.Len() > 0 {
|
||||||
rl.Resource = &rpb.Resource{
|
rl.Resource = &rpb.Resource{
|
||||||
@ -60,52 +70,20 @@ func resourceLogsMap(dst *map[attribute.Distinct]*lpb.ResourceLogs, records []lo
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
rl.SchemaUrl = res.SchemaURL()
|
rl.SchemaUrl = res.SchemaURL()
|
||||||
(*dst)[res.Equivalent()] = rl
|
resMap[rKey] = rl
|
||||||
|
}
|
||||||
|
if !iOk {
|
||||||
|
rl.ScopeLogs = append(rl.ScopeLogs, sl)
|
||||||
}
|
}
|
||||||
rl.ScopeLogs = ScopeLogs(records)
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// ScopeLogs returns a slice of OTLP ScopeLogs generated from recoreds.
|
// Transform the categorized map into a slice
|
||||||
func ScopeLogs(records []log.Record) []*lpb.ScopeLogs {
|
resLogs := make([]*lpb.ResourceLogs, 0, resources)
|
||||||
scopeMap := scopeLogsMapPool.Get().(map[instrumentation.Scope]*lpb.ScopeLogs)
|
for _, rl := range resMap {
|
||||||
defer func() {
|
resLogs = append(resLogs, rl)
|
||||||
clear(scopeMap)
|
}
|
||||||
scopeLogsMapPool.Put(scopeMap)
|
|
||||||
}()
|
|
||||||
scopeLogsMap(&scopeMap, records)
|
|
||||||
|
|
||||||
out := make([]*lpb.ScopeLogs, 0, len(scopeMap))
|
return resLogs
|
||||||
for _, sl := range scopeMap {
|
|
||||||
out = append(out, sl)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
var scopeLogsMapPool = sync.Pool{
|
|
||||||
New: func() any {
|
|
||||||
return make(map[instrumentation.Scope]*lpb.ScopeLogs)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func scopeLogsMap(dst *map[instrumentation.Scope]*lpb.ScopeLogs, records []log.Record) {
|
|
||||||
for _, r := range records {
|
|
||||||
scope := r.InstrumentationScope()
|
|
||||||
sl, ok := (*dst)[scope]
|
|
||||||
if !ok {
|
|
||||||
sl = new(lpb.ScopeLogs)
|
|
||||||
var emptyScope instrumentation.Scope
|
|
||||||
if scope != emptyScope {
|
|
||||||
sl.Scope = &cpb.InstrumentationScope{
|
|
||||||
Name: scope.Name,
|
|
||||||
Version: scope.Version,
|
|
||||||
}
|
|
||||||
sl.SchemaUrl = scope.SchemaURL
|
|
||||||
}
|
|
||||||
(*dst)[scope] = sl
|
|
||||||
}
|
|
||||||
sl.LogRecords = append(sl.LogRecords, LogRecord(r))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// LogRecord returns an OTLP LogRecord generated from record.
|
// LogRecord returns an OTLP LogRecord generated from record.
|
||||||
@ -139,10 +117,11 @@ func LogRecord(record log.Record) *lpb.LogRecord {
|
|||||||
// year 1678 or after 2262). timeUnixNano on the zero Time returns 0. The
|
// year 1678 or after 2262). timeUnixNano on the zero Time returns 0. The
|
||||||
// result does not depend on the location associated with t.
|
// result does not depend on the location associated with t.
|
||||||
func timeUnixNano(t time.Time) uint64 {
|
func timeUnixNano(t time.Time) uint64 {
|
||||||
if t.IsZero() {
|
nano := t.UnixNano()
|
||||||
|
if nano < 0 {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
return uint64(t.UnixNano())
|
return uint64(nano) // nolint:gosec // Overflow checked.
|
||||||
}
|
}
|
||||||
|
|
||||||
// AttrIter transforms an [attribute.Iterator] into OTLP key-values.
|
// AttrIter transforms an [attribute.Iterator] into OTLP key-values.
|
||||||
|
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go
generated
vendored
@ -5,5 +5,5 @@ package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o
|
|||||||
|
|
||||||
// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use.
|
// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use.
|
||||||
func Version() string {
|
func Version() string {
|
||||||
return "0.5.0"
|
return "0.8.0"
|
||||||
}
|
}
|
||||||
|
7
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
generated
vendored
@ -155,7 +155,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
|
|||||||
}
|
}
|
||||||
|
|
||||||
if c.metadata.Len() > 0 {
|
if c.metadata.Len() > 0 {
|
||||||
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
|
md := c.metadata
|
||||||
|
if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||||
|
md = metadata.Join(md, outMD)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx = metadata.NewOutgoingContext(ctx, md)
|
||||||
}
|
}
|
||||||
|
|
||||||
return ctx, cancel
|
return ctx, cancel
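
The change above makes the exporter join its configured headers with any metadata the caller already attached to the outgoing context, instead of replacing it. A small self-contained sketch of that merge semantics; the header names are illustrative.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Metadata a caller attached before invoking the exporter.
	ctx := metadata.NewOutgoingContext(context.Background(),
		metadata.Pairs("tenant-id", "42"))

	// Metadata the exporter was configured with (e.g. via WithHeaders).
	exporterMD := metadata.Pairs("api-key", "example")

	// Mirror of the patched exportContext logic: join rather than replace.
	if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
		exporterMD = metadata.Join(exporterMD, outMD)
	}
	ctx = metadata.NewOutgoingContext(ctx, exporterMD)

	final, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(final) // map[api-key:[example] tenant-id:[42]]
}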
|
||||||
|
@ -139,7 +139,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
|
|||||||
if cfg.ServiceConfig != "" {
|
if cfg.ServiceConfig != "" {
|
||||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
||||||
}
|
}
|
||||||
// Priroritize GRPCCredentials over Insecure (passing both is an error).
|
// Prioritize GRPCCredentials over Insecure (passing both is an error).
|
||||||
if cfg.Metrics.GRPCCredentials != nil {
|
if cfg.Metrics.GRPCCredentials != nil {
|
||||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
|
||||||
} else if cfg.Metrics.Insecure {
|
} else if cfg.Metrics.Insecure {
|
||||||
@ -287,9 +287,7 @@ func WithEndpointURL(v string) GenericOption {
|
|||||||
|
|
||||||
cfg.Metrics.Endpoint = u.Host
|
cfg.Metrics.Endpoint = u.Host
|
||||||
cfg.Metrics.URLPath = u.Path
|
cfg.Metrics.URLPath = u.Path
|
||||||
if u.Scheme != "https" {
|
cfg.Metrics.Insecure = u.Scheme != "https"
|
||||||
cfg.Metrics.Insecure = true
|
|
||||||
}
|
|
||||||
|
|
||||||
return cfg
|
return cfg
|
||||||
})
|
})
|
||||||
|
@ -14,7 +14,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// ReadTLSConfigFromFile reads a PEM certificate file and creates
|
// ReadTLSConfigFromFile reads a PEM certificate file and creates
|
||||||
// a tls.Config that will use this certifate to verify a server certificate.
|
// a tls.Config that will use this certificate to verify a server certificate.
|
||||||
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
|
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
|
||||||
b, err := os.ReadFile(path)
|
b, err := os.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -48,6 +48,7 @@ func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
|
|||||||
Scope: &cpb.InstrumentationScope{
|
Scope: &cpb.InstrumentationScope{
|
||||||
Name: sm.Scope.Name,
|
Name: sm.Scope.Name,
|
||||||
Version: sm.Scope.Version,
|
Version: sm.Scope.Version,
|
||||||
|
Attributes: AttrIter(sm.Scope.Attributes.Iter()),
|
||||||
},
|
},
|
||||||
Metrics: ms,
|
Metrics: ms,
|
||||||
SchemaUrl: sm.Scope.SchemaURL,
|
SchemaUrl: sm.Scope.SchemaURL,
|
||||||
@ -83,13 +84,13 @@ func metric(m metricdata.Metrics) (*mpb.Metric, error) {
|
|||||||
}
|
}
|
||||||
switch a := m.Data.(type) {
|
switch a := m.Data.(type) {
|
||||||
case metricdata.Gauge[int64]:
|
case metricdata.Gauge[int64]:
|
||||||
out.Data = Gauge[int64](a)
|
out.Data = Gauge(a)
|
||||||
case metricdata.Gauge[float64]:
|
case metricdata.Gauge[float64]:
|
||||||
out.Data = Gauge[float64](a)
|
out.Data = Gauge(a)
|
||||||
case metricdata.Sum[int64]:
|
case metricdata.Sum[int64]:
|
||||||
out.Data, err = Sum[int64](a)
|
out.Data, err = Sum(a)
|
||||||
case metricdata.Sum[float64]:
|
case metricdata.Sum[float64]:
|
||||||
out.Data, err = Sum[float64](a)
|
out.Data, err = Sum(a)
|
||||||
case metricdata.Histogram[int64]:
|
case metricdata.Histogram[int64]:
|
||||||
out.Data, err = Histogram(a)
|
out.Data, err = Histogram(a)
|
||||||
case metricdata.Histogram[float64]:
|
case metricdata.Histogram[float64]:
|
||||||
@ -279,10 +280,7 @@ func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
|
|||||||
// timeUnixNano on the zero Time returns 0.
|
// timeUnixNano on the zero Time returns 0.
|
||||||
// The result does not depend on the location associated with t.
|
// The result does not depend on the location associated with t.
|
||||||
func timeUnixNano(t time.Time) uint64 {
|
func timeUnixNano(t time.Time) uint64 {
|
||||||
if t.IsZero() {
|
return uint64(max(0, t.UnixNano())) // nolint:gosec // Overflow checked.
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return uint64(t.UnixNano())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Exemplars returns a slice of OTLP Exemplars generated from exemplars.
|
// Exemplars returns a slice of OTLP Exemplars generated from exemplars.
|
||||||
|
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
generated
vendored
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
generated
vendored
@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
|
|||||||
|
|
||||||
// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
|
// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
|
||||||
func Version() string {
|
func Version() string {
|
||||||
return "1.29.0"
|
return "1.32.0"
|
||||||
}
|
}
|
||||||
|
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
generated
vendored
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
generated
vendored
@ -22,7 +22,7 @@ target URL to which the exporter sends telemetry.
|
|||||||
The value must contain a scheme ("http" or "https") and host.
|
The value must contain a scheme ("http" or "https") and host.
|
||||||
The value may additionally contain a port and a path.
|
The value may additionally contain a port and a path.
|
||||||
The value should not contain a query string or fragment.
|
The value should not contain a query string or fragment.
|
||||||
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WitnInsecure], and [WithURLPath] options.
|
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options.
|
||||||
|
|
||||||
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
|
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
|
||||||
key-value pairs used as headers associated with HTTP requests.
|
key-value pairs used as headers associated with HTTP requests.
|
||||||
|
@ -139,7 +139,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
|
|||||||
if cfg.ServiceConfig != "" {
|
if cfg.ServiceConfig != "" {
|
||||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
||||||
}
|
}
|
||||||
// Priroritize GRPCCredentials over Insecure (passing both is an error).
|
// Prioritize GRPCCredentials over Insecure (passing both is an error).
|
||||||
if cfg.Metrics.GRPCCredentials != nil {
|
if cfg.Metrics.GRPCCredentials != nil {
|
||||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
|
||||||
} else if cfg.Metrics.Insecure {
|
} else if cfg.Metrics.Insecure {
|
||||||
@ -287,9 +287,7 @@ func WithEndpointURL(v string) GenericOption {
|
|||||||
|
|
||||||
cfg.Metrics.Endpoint = u.Host
|
cfg.Metrics.Endpoint = u.Host
|
||||||
cfg.Metrics.URLPath = u.Path
|
cfg.Metrics.URLPath = u.Path
|
||||||
if u.Scheme != "https" {
|
cfg.Metrics.Insecure = u.Scheme != "https"
|
||||||
cfg.Metrics.Insecure = true
|
|
||||||
}
|
|
||||||
|
|
||||||
return cfg
|
return cfg
|
||||||
})
|
})
|
||||||
|
@ -14,7 +14,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// ReadTLSConfigFromFile reads a PEM certificate file and creates
|
// ReadTLSConfigFromFile reads a PEM certificate file and creates
|
||||||
// a tls.Config that will use this certifate to verify a server certificate.
|
// a tls.Config that will use this certificate to verify a server certificate.
|
||||||
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
|
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
|
||||||
b, err := os.ReadFile(path)
|
b, err := os.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -48,6 +48,7 @@ func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
|
|||||||
Scope: &cpb.InstrumentationScope{
|
Scope: &cpb.InstrumentationScope{
|
||||||
Name: sm.Scope.Name,
|
Name: sm.Scope.Name,
|
||||||
Version: sm.Scope.Version,
|
Version: sm.Scope.Version,
|
||||||
|
Attributes: AttrIter(sm.Scope.Attributes.Iter()),
|
||||||
},
|
},
|
||||||
Metrics: ms,
|
Metrics: ms,
|
||||||
SchemaUrl: sm.Scope.SchemaURL,
|
SchemaUrl: sm.Scope.SchemaURL,
|
||||||
@ -83,13 +84,13 @@ func metric(m metricdata.Metrics) (*mpb.Metric, error) {
|
|||||||
}
|
}
|
||||||
switch a := m.Data.(type) {
|
switch a := m.Data.(type) {
|
||||||
case metricdata.Gauge[int64]:
|
case metricdata.Gauge[int64]:
|
||||||
out.Data = Gauge[int64](a)
|
out.Data = Gauge(a)
|
||||||
case metricdata.Gauge[float64]:
|
case metricdata.Gauge[float64]:
|
||||||
out.Data = Gauge[float64](a)
|
out.Data = Gauge(a)
|
||||||
case metricdata.Sum[int64]:
|
case metricdata.Sum[int64]:
|
||||||
out.Data, err = Sum[int64](a)
|
out.Data, err = Sum(a)
|
||||||
case metricdata.Sum[float64]:
|
case metricdata.Sum[float64]:
|
||||||
out.Data, err = Sum[float64](a)
|
out.Data, err = Sum(a)
|
||||||
case metricdata.Histogram[int64]:
|
case metricdata.Histogram[int64]:
|
||||||
out.Data, err = Histogram(a)
|
out.Data, err = Histogram(a)
|
||||||
case metricdata.Histogram[float64]:
|
case metricdata.Histogram[float64]:
|
||||||
@ -279,10 +280,7 @@ func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
|
|||||||
// timeUnixNano on the zero Time returns 0.
|
// timeUnixNano on the zero Time returns 0.
|
||||||
// The result does not depend on the location associated with t.
|
// The result does not depend on the location associated with t.
|
||||||
func timeUnixNano(t time.Time) uint64 {
|
func timeUnixNano(t time.Time) uint64 {
|
||||||
if t.IsZero() {
|
return uint64(max(0, t.UnixNano())) // nolint:gosec // Overflow checked.
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return uint64(t.UnixNano())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Exemplars returns a slice of OTLP Exemplars generated from exemplars.
|
// Exemplars returns a slice of OTLP Exemplars generated from exemplars.
|
||||||
|
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
generated
vendored
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
generated
vendored
@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
|
|||||||
|
|
||||||
// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use.
|
// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use.
|
||||||
func Version() string {
|
func Version() string {
|
||||||
return "1.29.0"
|
return "1.32.0"
|
||||||
}
|
}
|
||||||
|
@ -15,5 +15,6 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco
|
|||||||
return &commonpb.InstrumentationScope{
|
return &commonpb.InstrumentationScope{
|
||||||
Name: il.Name,
|
Name: il.Name,
|
||||||
Version: il.Version,
|
Version: il.Version,
|
||||||
|
Attributes: Iterator(il.Attributes.Iter()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -97,8 +97,8 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
|
|||||||
SpanId: sid[:],
|
SpanId: sid[:],
|
||||||
TraceState: sd.SpanContext().TraceState().String(),
|
TraceState: sd.SpanContext().TraceState().String(),
|
||||||
Status: status(sd.Status().Code, sd.Status().Description),
|
Status: status(sd.Status().Code, sd.Status().Description),
|
||||||
StartTimeUnixNano: uint64(sd.StartTime().UnixNano()),
|
StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked.
|
||||||
EndTimeUnixNano: uint64(sd.EndTime().UnixNano()),
|
EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked.
|
||||||
Links: links(sd.Links()),
|
Links: links(sd.Links()),
|
||||||
Kind: spanKind(sd.SpanKind()),
|
Kind: spanKind(sd.SpanKind()),
|
||||||
Name: sd.Name(),
|
Name: sd.Name(),
|
||||||
@ -178,7 +178,7 @@ func buildSpanFlags(sc trace.SpanContext) uint32 {
|
|||||||
flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
|
flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
|
||||||
}
|
}
|
||||||
|
|
||||||
return uint32(flags)
|
return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative
|
||||||
}
|
}
|
||||||
|
|
||||||
// spanEvents transforms span Events to an OTLP span events.
|
// spanEvents transforms span Events to an OTLP span events.
|
||||||
@ -192,7 +192,7 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
|
|||||||
for i := 0; i < len(es); i++ {
|
for i := 0; i < len(es); i++ {
|
||||||
events[i] = &tracepb.Span_Event{
|
events[i] = &tracepb.Span_Event{
|
||||||
Name: es[i].Name,
|
Name: es[i].Name,
|
||||||
TimeUnixNano: uint64(es[i].Time.UnixNano()),
|
TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked.
|
||||||
Attributes: KeyValues(es[i].Attributes),
|
Attributes: KeyValues(es[i].Attributes),
|
||||||
DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount),
|
DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount),
|
||||||
}
|
}
|
||||||
|
7
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
generated
vendored
7
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
generated
vendored
@ -229,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
|
|||||||
}
|
}
|
||||||
|
|
||||||
if c.metadata.Len() > 0 {
|
if c.metadata.Len() > 0 {
|
||||||
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
|
md := c.metadata
|
||||||
|
if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||||
|
md = metadata.Join(md, outMD)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx = metadata.NewOutgoingContext(ctx, md)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unify the client stopCtx with the parent.
|
// Unify the client stopCtx with the parent.
|
||||||
|
@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
|
|||||||
if cfg.ServiceConfig != "" {
|
if cfg.ServiceConfig != "" {
|
||||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
||||||
}
|
}
|
||||||
// Priroritize GRPCCredentials over Insecure (passing both is an error).
|
// Prioritize GRPCCredentials over Insecure (passing both is an error).
|
||||||
if cfg.Traces.GRPCCredentials != nil {
|
if cfg.Traces.GRPCCredentials != nil {
|
||||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
|
||||||
} else if cfg.Traces.Insecure {
|
} else if cfg.Traces.Insecure {
|
||||||
@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption {
|
|||||||
|
|
||||||
cfg.Traces.Endpoint = u.Host
|
cfg.Traces.Endpoint = u.Host
|
||||||
cfg.Traces.URLPath = u.Path
|
cfg.Traces.URLPath = u.Path
|
||||||
if u.Scheme != "https" {
|
cfg.Traces.Insecure = u.Scheme != "https"
|
||||||
cfg.Traces.Insecure = true
|
|
||||||
}
|
|
||||||
|
|
||||||
return cfg
|
return cfg
|
||||||
})
|
})
|
||||||
|
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
generated
vendored
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
generated
vendored
@ -22,7 +22,7 @@ target URL to which the exporter sends telemetry.
|
|||||||
The value must contain a scheme ("http" or "https") and host.
|
The value must contain a scheme ("http" or "https") and host.
|
||||||
The value may additionally contain a port and a path.
|
The value may additionally contain a port and a path.
|
||||||
The value should not contain a query string or fragment.
|
The value should not contain a query string or fragment.
|
||||||
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WitnInsecure], and [WithURLPath] options.
|
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options.
|
||||||
|
|
||||||
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
|
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
|
||||||
key-value pairs used as headers associated with HTTP requests.
|
key-value pairs used as headers associated with HTTP requests.
|
||||||
|
@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
|
|||||||
if cfg.ServiceConfig != "" {
|
if cfg.ServiceConfig != "" {
|
||||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
||||||
}
|
}
|
||||||
// Priroritize GRPCCredentials over Insecure (passing both is an error).
|
// Prioritize GRPCCredentials over Insecure (passing both is an error).
|
||||||
if cfg.Traces.GRPCCredentials != nil {
|
if cfg.Traces.GRPCCredentials != nil {
|
||||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
|
||||||
} else if cfg.Traces.Insecure {
|
} else if cfg.Traces.Insecure {
|
||||||
@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption {
|
|||||||
|
|
||||||
cfg.Traces.Endpoint = u.Host
|
cfg.Traces.Endpoint = u.Host
|
||||||
cfg.Traces.URLPath = u.Path
|
cfg.Traces.URLPath = u.Path
|
||||||
if u.Scheme != "https" {
|
cfg.Traces.Insecure = u.Scheme != "https"
|
||||||
cfg.Traces.Insecure = true
|
|
||||||
}
|
|
||||||
|
|
||||||
return cfg
|
return cfg
|
||||||
})
|
})
|
||||||
|
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
generated
vendored
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
generated
vendored
@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
|||||||
|
|
||||||
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
|
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
|
||||||
func Version() string {
|
func Version() string {
|
||||||
return "1.29.0"
|
return "1.32.0"
|
||||||
}
|
}
|
||||||
|
6
vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
generated
vendored
6
vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
generated
vendored
@ -7,6 +7,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
|
||||||
"go.opentelemetry.io/otel/attribute"
|
"go.opentelemetry.io/otel/attribute"
|
||||||
"go.opentelemetry.io/otel/sdk/metric"
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
@ -131,7 +132,10 @@ func WithoutScopeInfo() Option {
|
|||||||
// have special behavior based on their name.
|
// have special behavior based on their name.
|
||||||
func WithNamespace(ns string) Option {
|
func WithNamespace(ns string) Option {
|
||||||
return optionFunc(func(cfg config) config {
|
return optionFunc(func(cfg config) config {
|
||||||
ns = sanitizeName(ns)
|
if model.NameValidationScheme != model.UTF8Validation {
|
||||||
|
// Only sanitize if prometheus does not support UTF-8.
|
||||||
|
ns = model.EscapeName(ns, model.NameEscapingScheme)
|
||||||
|
}
|
||||||
if !strings.HasSuffix(ns, "_") {
|
if !strings.HasSuffix(ns, "_") {
|
||||||
// namespace and metric names should be separated with an underscore,
|
// namespace and metric names should be separated with an underscore,
|
||||||
// adds a trailing underscore if there is not one already.
|
// adds a trailing underscore if there is not one already.
|
||||||
|
175
vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
generated
vendored
175
vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
generated
vendored
@ -11,11 +11,10 @@ import (
|
|||||||
"slices"
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
"google.golang.org/protobuf/proto"
|
"google.golang.org/protobuf/proto"
|
||||||
|
|
||||||
"go.opentelemetry.io/otel"
|
"go.opentelemetry.io/otel"
|
||||||
@ -34,15 +33,14 @@ const (
|
|||||||
scopeInfoMetricName = "otel_scope_info"
|
scopeInfoMetricName = "otel_scope_info"
|
||||||
scopeInfoDescription = "Instrumentation Scope metadata"
|
scopeInfoDescription = "Instrumentation Scope metadata"
|
||||||
|
|
||||||
|
scopeNameLabel = "otel_scope_name"
|
||||||
|
scopeVersionLabel = "otel_scope_version"
|
||||||
|
|
||||||
traceIDExemplarKey = "trace_id"
|
traceIDExemplarKey = "trace_id"
|
||||||
spanIDExemplarKey = "span_id"
|
spanIDExemplarKey = "span_id"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var errScopeInvalid = errors.New("invalid scope")
|
||||||
scopeInfoKeys = [2]string{"otel_scope_name", "otel_scope_version"}
|
|
||||||
|
|
||||||
errScopeInvalid = errors.New("invalid scope")
|
|
||||||
)
|
|
||||||
|
|
||||||
// Exporter is a Prometheus Exporter that embeds the OTel metric.Reader
|
// Exporter is a Prometheus Exporter that embeds the OTel metric.Reader
|
||||||
// interface for easy instantiation with a MeterProvider.
|
// interface for easy instantiation with a MeterProvider.
|
||||||
@ -188,7 +186,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, scopeMetrics := range metrics.ScopeMetrics {
|
for _, scopeMetrics := range metrics.ScopeMetrics {
|
||||||
var keys, values [2]string
|
n := len(c.resourceKeyVals.keys) + 2 // resource attrs + scope name + scope version
|
||||||
|
kv := keyVals{
|
||||||
|
keys: make([]string, 0, n),
|
||||||
|
vals: make([]string, 0, n),
|
||||||
|
}
|
||||||
|
|
||||||
if !c.disableScopeInfo {
|
if !c.disableScopeInfo {
|
||||||
scopeInfo, err := c.scopeInfo(scopeMetrics.Scope)
|
scopeInfo, err := c.scopeInfo(scopeMetrics.Scope)
|
||||||
@ -203,10 +205,13 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
|
|||||||
|
|
||||||
ch <- scopeInfo
|
ch <- scopeInfo
|
||||||
|
|
||||||
keys = scopeInfoKeys
|
kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel)
|
||||||
values = [2]string{scopeMetrics.Scope.Name, scopeMetrics.Scope.Version}
|
kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
kv.keys = append(kv.keys, c.resourceKeyVals.keys...)
|
||||||
|
kv.vals = append(kv.vals, c.resourceKeyVals.vals...)
|
||||||
|
|
||||||
for _, m := range scopeMetrics.Metrics {
|
for _, m := range scopeMetrics.Metrics {
|
||||||
typ := c.metricType(m)
|
typ := c.metricType(m)
|
||||||
if typ == nil {
|
if typ == nil {
|
||||||
@ -225,25 +230,27 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
|
|||||||
|
|
||||||
switch v := m.Data.(type) {
|
switch v := m.Data.(type) {
|
||||||
case metricdata.Histogram[int64]:
|
case metricdata.Histogram[int64]:
|
||||||
addHistogramMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
|
addHistogramMetric(ch, v, m, name, kv)
|
||||||
case metricdata.Histogram[float64]:
|
case metricdata.Histogram[float64]:
|
||||||
addHistogramMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
|
addHistogramMetric(ch, v, m, name, kv)
|
||||||
case metricdata.Sum[int64]:
|
case metricdata.Sum[int64]:
|
||||||
addSumMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
|
addSumMetric(ch, v, m, name, kv)
|
||||||
case metricdata.Sum[float64]:
|
case metricdata.Sum[float64]:
|
||||||
addSumMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
|
addSumMetric(ch, v, m, name, kv)
|
||||||
case metricdata.Gauge[int64]:
|
case metricdata.Gauge[int64]:
|
||||||
addGaugeMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
|
addGaugeMetric(ch, v, m, name, kv)
|
||||||
case metricdata.Gauge[float64]:
|
case metricdata.Gauge[float64]:
|
||||||
addGaugeMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
|
addGaugeMetric(ch, v, m, name, kv)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) {
|
func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, name string, kv keyVals) {
|
||||||
for _, dp := range histogram.DataPoints {
|
for _, dp := range histogram.DataPoints {
|
||||||
keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV)
|
keys, values := getAttrs(dp.Attributes)
|
||||||
|
keys = append(keys, kv.keys...)
|
||||||
|
values = append(values, kv.vals...)
|
||||||
|
|
||||||
desc := prometheus.NewDesc(name, m.Description, keys, nil)
|
desc := prometheus.NewDesc(name, m.Description, keys, nil)
|
||||||
buckets := make(map[float64]uint64, len(dp.Bounds))
|
buckets := make(map[float64]uint64, len(dp.Bounds))
|
||||||
@ -263,14 +270,16 @@ func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogra
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) {
|
func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, name string, kv keyVals) {
|
||||||
valueType := prometheus.CounterValue
|
valueType := prometheus.CounterValue
|
||||||
if !sum.IsMonotonic {
|
if !sum.IsMonotonic {
|
||||||
valueType = prometheus.GaugeValue
|
valueType = prometheus.GaugeValue
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, dp := range sum.DataPoints {
|
for _, dp := range sum.DataPoints {
|
||||||
keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV)
|
keys, values := getAttrs(dp.Attributes)
|
||||||
|
keys = append(keys, kv.keys...)
|
||||||
|
values = append(values, kv.vals...)
|
||||||
|
|
||||||
desc := prometheus.NewDesc(name, m.Description, keys, nil)
|
desc := prometheus.NewDesc(name, m.Description, keys, nil)
|
||||||
m, err := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...)
|
m, err := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...)
|
||||||
@ -278,14 +287,20 @@ func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata
|
|||||||
otel.Handle(err)
|
otel.Handle(err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
// GaugeValues don't support Exemplars at this time
|
||||||
|
// https://github.com/prometheus/client_golang/blob/aef8aedb4b6e1fb8ac1c90790645169125594096/prometheus/metric.go#L199
|
||||||
|
if valueType != prometheus.GaugeValue {
|
||||||
m = addExemplars(m, dp.Exemplars)
|
m = addExemplars(m, dp.Exemplars)
|
||||||
|
}
|
||||||
ch <- m
|
ch <- m
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) {
|
func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, name string, kv keyVals) {
|
||||||
for _, dp := range gauge.DataPoints {
|
for _, dp := range gauge.DataPoints {
|
||||||
keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV)
|
keys, values := getAttrs(dp.Attributes)
|
||||||
|
keys = append(keys, kv.keys...)
|
||||||
|
values = append(values, kv.vals...)
|
||||||
|
|
||||||
desc := prometheus.NewDesc(name, m.Description, keys, nil)
|
desc := prometheus.NewDesc(name, m.Description, keys, nil)
|
||||||
m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...)
|
m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...)
|
||||||
@ -297,15 +312,27 @@ func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metric
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// getAttrs parses the attribute.Set to two lists of matching Prometheus-style
|
// getAttrs converts the attribute.Set to two lists of matching Prometheus-style
|
||||||
// keys and values. It sanitizes invalid characters and handles duplicate keys
|
// keys and values.
|
||||||
// (due to sanitization) by sorting and concatenating the values following the spec.
|
func getAttrs(attrs attribute.Set) ([]string, []string) {
|
||||||
func getAttrs(attrs attribute.Set, ks, vs [2]string, resourceKV keyVals) ([]string, []string) {
|
keys := make([]string, 0, attrs.Len())
|
||||||
keysMap := make(map[string][]string)
|
values := make([]string, 0, attrs.Len())
|
||||||
itr := attrs.Iter()
|
itr := attrs.Iter()
|
||||||
|
|
||||||
|
if model.NameValidationScheme == model.UTF8Validation {
|
||||||
|
// Do not perform sanitization if prometheus supports UTF-8.
|
||||||
for itr.Next() {
|
for itr.Next() {
|
||||||
kv := itr.Attribute()
|
kv := itr.Attribute()
|
||||||
key := strings.Map(sanitizeRune, string(kv.Key))
|
keys = append(keys, string(kv.Key))
|
||||||
|
values = append(values, kv.Value.Emit())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// It sanitizes invalid characters and handles duplicate keys
|
||||||
|
// (due to sanitization) by sorting and concatenating the values following the spec.
|
||||||
|
keysMap := make(map[string][]string)
|
||||||
|
for itr.Next() {
|
||||||
|
kv := itr.Attribute()
|
||||||
|
key := model.EscapeName(string(kv.Key), model.NameEscapingScheme)
|
||||||
if _, ok := keysMap[key]; !ok {
|
if _, ok := keysMap[key]; !ok {
|
||||||
keysMap[key] = []string{kv.Value.Emit()}
|
keysMap[key] = []string{kv.Value.Emit()}
|
||||||
} else {
|
} else {
|
||||||
@ -313,45 +340,30 @@ func getAttrs(attrs attribute.Set, ks, vs [2]string, resourceKV keyVals) ([]stri
|
|||||||
keysMap[key] = append(keysMap[key], kv.Value.Emit())
|
keysMap[key] = append(keysMap[key], kv.Value.Emit())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
keys := make([]string, 0, attrs.Len())
|
|
||||||
values := make([]string, 0, attrs.Len())
|
|
||||||
for key, vals := range keysMap {
|
for key, vals := range keysMap {
|
||||||
keys = append(keys, key)
|
keys = append(keys, key)
|
||||||
slices.Sort(vals)
|
slices.Sort(vals)
|
||||||
values = append(values, strings.Join(vals, ";"))
|
values = append(values, strings.Join(vals, ";"))
|
||||||
}
|
}
|
||||||
|
|
||||||
if ks[0] != "" {
|
|
||||||
keys = append(keys, ks[:]...)
|
|
||||||
values = append(values, vs[:]...)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for idx := range resourceKV.keys {
|
|
||||||
keys = append(keys, resourceKV.keys[idx])
|
|
||||||
values = append(values, resourceKV.vals[idx])
|
|
||||||
}
|
|
||||||
|
|
||||||
return keys, values
|
return keys, values
|
||||||
}
|
}
|
||||||
|
|
||||||
func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) {
|
func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) {
|
||||||
keys, values := getAttrs(*res.Set(), [2]string{}, [2]string{}, keyVals{})
|
keys, values := getAttrs(*res.Set())
|
||||||
desc := prometheus.NewDesc(name, description, keys, nil)
|
desc := prometheus.NewDesc(name, description, keys, nil)
|
||||||
return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
|
return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) {
|
func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) {
|
||||||
keys := scopeInfoKeys[:]
|
attrs := make([]attribute.KeyValue, 0, scope.Attributes.Len()+2) // resource attrs + scope name + scope version
|
||||||
desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil)
|
attrs = append(attrs, scope.Attributes.ToSlice()...)
|
||||||
return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), scope.Name, scope.Version)
|
attrs = append(attrs, attribute.String(scopeNameLabel, scope.Name))
|
||||||
}
|
attrs = append(attrs, attribute.String(scopeVersionLabel, scope.Version))
|
||||||
|
|
||||||
func sanitizeRune(r rune) rune {
|
keys, values := getAttrs(attribute.NewSet(attrs...))
|
||||||
if unicode.IsLetter(r) || unicode.IsDigit(r) || r == ':' || r == '_' {
|
desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil)
|
||||||
return r
|
return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
|
||||||
}
|
|
||||||
return '_'
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var unitSuffixes = map[string]string{
|
var unitSuffixes = map[string]string{
|
||||||
@ -392,7 +404,11 @@ var unitSuffixes = map[string]string{
|
|||||||
|
|
||||||
// getName returns the sanitized name, prefixed with the namespace and suffixed with unit.
|
// getName returns the sanitized name, prefixed with the namespace and suffixed with unit.
|
||||||
func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string {
|
func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string {
|
||||||
name := sanitizeName(m.Name)
|
name := m.Name
|
||||||
|
if model.NameValidationScheme != model.UTF8Validation {
|
||||||
|
// Only sanitize if prometheus does not support UTF-8.
|
||||||
|
name = model.EscapeName(name, model.NameEscapingScheme)
|
||||||
|
}
|
||||||
addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER
|
addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER
|
||||||
if addCounterSuffix {
|
if addCounterSuffix {
|
||||||
// Remove the _total suffix here, as we will re-add the total suffix
|
// Remove the _total suffix here, as we will re-add the total suffix
|
||||||
@ -411,59 +427,6 @@ func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string {
|
|||||||
return name
|
return name
|
||||||
}
|
}
|
||||||
|
|
||||||
func sanitizeName(n string) string {
|
|
||||||
// This algorithm is based on strings.Map from Go 1.19.
|
|
||||||
const replacement = '_'
|
|
||||||
|
|
||||||
valid := func(i int, r rune) bool {
|
|
||||||
// Taken from
|
|
||||||
// https://github.com/prometheus/common/blob/dfbc25bd00225c70aca0d94c3c4bb7744f28ace0/model/metric.go#L92-L102
|
|
||||||
if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '_' || r == ':' || (r >= '0' && r <= '9' && i > 0) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// This output buffer b is initialized on demand, the first time a
|
|
||||||
// character needs to be replaced.
|
|
||||||
var b strings.Builder
|
|
||||||
for i, c := range n {
|
|
||||||
if valid(i, c) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if i == 0 && c >= '0' && c <= '9' {
|
|
||||||
// Prefix leading number with replacement character.
|
|
||||||
b.Grow(len(n) + 1)
|
|
||||||
_ = b.WriteByte(byte(replacement))
|
|
||||||
break
|
|
||||||
}
|
|
||||||
b.Grow(len(n))
|
|
||||||
_, _ = b.WriteString(n[:i])
|
|
||||||
_ = b.WriteByte(byte(replacement))
|
|
||||||
width := utf8.RuneLen(c)
|
|
||||||
n = n[i+width:]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fast path for unchanged input.
|
|
||||||
if b.Cap() == 0 { // b.Grow was not called above.
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, c := range n {
|
|
||||||
// Due to inlining, it is more performant to invoke WriteByte rather then
|
|
||||||
// WriteRune.
|
|
||||||
if valid(1, c) { // We are guaranteed to not be at the start.
|
|
||||||
_ = b.WriteByte(byte(c))
|
|
||||||
} else {
|
|
||||||
_ = b.WriteByte(byte(replacement))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType {
|
func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType {
|
||||||
switch v := m.Data.(type) {
|
switch v := m.Data.(type) {
|
||||||
case metricdata.Histogram[int64], metricdata.Histogram[float64]:
|
case metricdata.Histogram[int64], metricdata.Histogram[float64]:
|
||||||
@ -489,7 +452,7 @@ func (c *collector) createResourceAttributes(res *resource.Resource) {
|
|||||||
defer c.mu.Unlock()
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter)
|
resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter)
|
||||||
resourceKeys, resourceValues := getAttrs(resourceAttrs, [2]string{}, [2]string{}, keyVals{})
|
resourceKeys, resourceValues := getAttrs(resourceAttrs)
|
||||||
c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues}
|
c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
14
vendor/go.opentelemetry.io/otel/internal/global/instruments.go
generated
vendored
14
vendor/go.opentelemetry.io/otel/internal/global/instruments.go
generated
vendored
@ -13,7 +13,7 @@ import (
|
|||||||
|
|
||||||
// unwrapper unwraps to return the underlying instrument implementation.
|
// unwrapper unwraps to return the underlying instrument implementation.
|
||||||
type unwrapper interface {
|
type unwrapper interface {
|
||||||
Unwrap() metric.Observable
|
unwrap() metric.Observable
|
||||||
}
|
}
|
||||||
|
|
||||||
type afCounter struct {
|
type afCounter struct {
|
||||||
@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) {
|
|||||||
i.delegate.Store(ctr)
|
i.delegate.Store(ctr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *afCounter) Unwrap() metric.Observable {
|
func (i *afCounter) unwrap() metric.Observable {
|
||||||
if ctr := i.delegate.Load(); ctr != nil {
|
if ctr := i.delegate.Load(); ctr != nil {
|
||||||
return ctr.(metric.Float64ObservableCounter)
|
return ctr.(metric.Float64ObservableCounter)
|
||||||
}
|
}
|
||||||
@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) {
|
|||||||
i.delegate.Store(ctr)
|
i.delegate.Store(ctr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *afUpDownCounter) Unwrap() metric.Observable {
|
func (i *afUpDownCounter) unwrap() metric.Observable {
|
||||||
if ctr := i.delegate.Load(); ctr != nil {
|
if ctr := i.delegate.Load(); ctr != nil {
|
||||||
return ctr.(metric.Float64ObservableUpDownCounter)
|
return ctr.(metric.Float64ObservableUpDownCounter)
|
||||||
}
|
}
|
||||||
@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) {
|
|||||||
i.delegate.Store(ctr)
|
i.delegate.Store(ctr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *afGauge) Unwrap() metric.Observable {
|
func (i *afGauge) unwrap() metric.Observable {
|
||||||
if ctr := i.delegate.Load(); ctr != nil {
|
if ctr := i.delegate.Load(); ctr != nil {
|
||||||
return ctr.(metric.Float64ObservableGauge)
|
return ctr.(metric.Float64ObservableGauge)
|
||||||
}
|
}
|
||||||
@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) {
|
|||||||
i.delegate.Store(ctr)
|
i.delegate.Store(ctr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *aiCounter) Unwrap() metric.Observable {
|
func (i *aiCounter) unwrap() metric.Observable {
|
||||||
if ctr := i.delegate.Load(); ctr != nil {
|
if ctr := i.delegate.Load(); ctr != nil {
|
||||||
return ctr.(metric.Int64ObservableCounter)
|
return ctr.(metric.Int64ObservableCounter)
|
||||||
}
|
}
|
||||||
@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
|
|||||||
i.delegate.Store(ctr)
|
i.delegate.Store(ctr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *aiUpDownCounter) Unwrap() metric.Observable {
|
func (i *aiUpDownCounter) unwrap() metric.Observable {
|
||||||
if ctr := i.delegate.Load(); ctr != nil {
|
if ctr := i.delegate.Load(); ctr != nil {
|
||||||
return ctr.(metric.Int64ObservableUpDownCounter)
|
return ctr.(metric.Int64ObservableUpDownCounter)
|
||||||
}
|
}
|
||||||
@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) {
|
|||||||
i.delegate.Store(ctr)
|
i.delegate.Store(ctr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *aiGauge) Unwrap() metric.Observable {
|
func (i *aiGauge) unwrap() metric.Observable {
|
||||||
if ctr := i.delegate.Load(); ctr != nil {
|
if ctr := i.delegate.Load(); ctr != nil {
|
||||||
return ctr.(metric.Int64ObservableGauge)
|
return ctr.(metric.Int64ObservableGauge)
|
||||||
}
|
}
|
||||||
|
382
vendor/go.opentelemetry.io/otel/internal/global/meter.go
generated
vendored
382
vendor/go.opentelemetry.io/otel/internal/global/meter.go
generated
vendored
@ -5,8 +5,9 @@ package global // import "go.opentelemetry.io/otel/internal/global"
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"container/list"
|
"container/list"
|
||||||
|
"context"
|
||||||
|
"reflect"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"go.opentelemetry.io/otel/metric"
|
"go.opentelemetry.io/otel/metric"
|
||||||
"go.opentelemetry.io/otel/metric/embedded"
|
"go.opentelemetry.io/otel/metric/embedded"
|
||||||
@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
|
|||||||
name: name,
|
name: name,
|
||||||
version: c.InstrumentationVersion(),
|
version: c.InstrumentationVersion(),
|
||||||
schema: c.SchemaURL(),
|
schema: c.SchemaURL(),
|
||||||
|
attrs: c.InstrumentationAttributes(),
|
||||||
}
|
}
|
||||||
|
|
||||||
if p.meters == nil {
|
if p.meters == nil {
|
||||||
@ -76,7 +78,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
|
|||||||
return val
|
return val
|
||||||
}
|
}
|
||||||
|
|
||||||
t := &meter{name: name, opts: opts}
|
t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)}
|
||||||
p.meters[key] = t
|
p.meters[key] = t
|
||||||
return t
|
return t
|
||||||
}
|
}
|
||||||
@ -92,17 +94,29 @@ type meter struct {
|
|||||||
opts []metric.MeterOption
|
opts []metric.MeterOption
|
||||||
|
|
||||||
mtx sync.Mutex
|
mtx sync.Mutex
|
||||||
instruments []delegatedInstrument
|
instruments map[instID]delegatedInstrument
|
||||||
|
|
||||||
registry list.List
|
registry list.List
|
||||||
|
|
||||||
delegate atomic.Value // metric.Meter
|
delegate metric.Meter
|
||||||
}
|
}
|
||||||
|
|
||||||
type delegatedInstrument interface {
|
type delegatedInstrument interface {
|
||||||
setDelegate(metric.Meter)
|
setDelegate(metric.Meter)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// instID are the identifying properties of a instrument.
|
||||||
|
type instID struct {
|
||||||
|
// name is the name of the stream.
|
||||||
|
name string
|
||||||
|
// description is the description of the stream.
|
||||||
|
description string
|
||||||
|
// kind defines the functional group of the instrument.
|
||||||
|
kind reflect.Type
|
||||||
|
// unit is the unit of the stream.
|
||||||
|
unit string
|
||||||
|
}
|
||||||
|
|
||||||
// setDelegate configures m to delegate all Meter functionality to Meters
|
// setDelegate configures m to delegate all Meter functionality to Meters
|
||||||
// created by provider.
|
// created by provider.
|
||||||
//
|
//
|
||||||
@ -110,12 +124,12 @@ type delegatedInstrument interface {
|
|||||||
//
|
//
|
||||||
// It is guaranteed by the caller that this happens only once.
|
// It is guaranteed by the caller that this happens only once.
|
||||||
func (m *meter) setDelegate(provider metric.MeterProvider) {
|
func (m *meter) setDelegate(provider metric.MeterProvider) {
|
||||||
meter := provider.Meter(m.name, m.opts...)
|
|
||||||
m.delegate.Store(meter)
|
|
||||||
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mtx.Unlock()
|
||||||
|
|
||||||
|
meter := provider.Meter(m.name, m.opts...)
|
||||||
|
m.delegate = meter
|
||||||
|
|
||||||
for _, inst := range m.instruments {
|
for _, inst := range m.instruments {
|
||||||
inst.setDelegate(meter)
|
inst.setDelegate(meter)
|
||||||
}
|
}
|
||||||
@ -133,169 +147,336 @@ func (m *meter) setDelegate(provider metric.MeterProvider) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
|
func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
|
||||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
|
||||||
return del.Int64Counter(name, options...)
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mtx.Unlock()
|
||||||
|
|
||||||
|
if m.delegate != nil {
|
||||||
|
return m.delegate.Int64Counter(name, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := metric.NewInt64CounterConfig(options...)
|
||||||
|
id := instID{
|
||||||
|
name: name,
|
||||||
|
kind: reflect.TypeOf((*siCounter)(nil)),
|
||||||
|
description: cfg.Description(),
|
||||||
|
unit: cfg.Unit(),
|
||||||
|
}
|
||||||
|
if f, ok := m.instruments[id]; ok {
|
||||||
|
return f.(metric.Int64Counter), nil
|
||||||
|
}
|
||||||
i := &siCounter{name: name, opts: options}
|
i := &siCounter{name: name, opts: options}
|
||||||
m.instruments = append(m.instruments, i)
|
m.instruments[id] = i
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
|
func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
|
||||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
|
||||||
return del.Int64UpDownCounter(name, options...)
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mtx.Unlock()
|
||||||
|
|
||||||
|
if m.delegate != nil {
|
||||||
|
return m.delegate.Int64UpDownCounter(name, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := metric.NewInt64UpDownCounterConfig(options...)
|
||||||
|
id := instID{
|
||||||
|
name: name,
|
||||||
|
kind: reflect.TypeOf((*siUpDownCounter)(nil)),
|
||||||
|
description: cfg.Description(),
|
||||||
|
unit: cfg.Unit(),
|
||||||
|
}
|
||||||
|
if f, ok := m.instruments[id]; ok {
|
||||||
|
return f.(metric.Int64UpDownCounter), nil
|
||||||
|
}
|
||||||
i := &siUpDownCounter{name: name, opts: options}
|
i := &siUpDownCounter{name: name, opts: options}
|
||||||
m.instruments = append(m.instruments, i)
|
m.instruments[id] = i
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
|
func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
|
||||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
|
||||||
return del.Int64Histogram(name, options...)
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mtx.Unlock()
|
||||||
|
|
||||||
|
if m.delegate != nil {
|
||||||
|
return m.delegate.Int64Histogram(name, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := metric.NewInt64HistogramConfig(options...)
|
||||||
|
id := instID{
|
||||||
|
name: name,
|
||||||
|
kind: reflect.TypeOf((*siHistogram)(nil)),
|
||||||
|
description: cfg.Description(),
|
||||||
|
unit: cfg.Unit(),
|
||||||
|
}
|
||||||
|
if f, ok := m.instruments[id]; ok {
|
||||||
|
return f.(metric.Int64Histogram), nil
|
||||||
|
}
|
||||||
i := &siHistogram{name: name, opts: options}
|
i := &siHistogram{name: name, opts: options}
|
||||||
m.instruments = append(m.instruments, i)
|
m.instruments[id] = i
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
|
func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
|
||||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
|
||||||
return del.Int64Gauge(name, options...)
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mtx.Unlock()
|
||||||
|
|
||||||
|
if m.delegate != nil {
|
||||||
|
return m.delegate.Int64Gauge(name, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := metric.NewInt64GaugeConfig(options...)
|
||||||
|
id := instID{
|
||||||
|
name: name,
|
||||||
|
kind: reflect.TypeOf((*siGauge)(nil)),
|
||||||
|
description: cfg.Description(),
|
||||||
|
unit: cfg.Unit(),
|
||||||
|
}
|
||||||
|
if f, ok := m.instruments[id]; ok {
|
||||||
|
return f.(metric.Int64Gauge), nil
|
||||||
|
}
|
||||||
i := &siGauge{name: name, opts: options}
|
i := &siGauge{name: name, opts: options}
|
||||||
m.instruments = append(m.instruments, i)
|
m.instruments[id] = i
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
|
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
|
||||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
|
||||||
return del.Int64ObservableCounter(name, options...)
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mtx.Unlock()
|
||||||
|
|
||||||
|
if m.delegate != nil {
|
||||||
|
return m.delegate.Int64ObservableCounter(name, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := metric.NewInt64ObservableCounterConfig(options...)
|
||||||
|
id := instID{
|
||||||
|
name: name,
|
||||||
|
kind: reflect.TypeOf((*aiCounter)(nil)),
|
||||||
|
description: cfg.Description(),
|
||||||
|
unit: cfg.Unit(),
|
||||||
|
}
|
||||||
|
if f, ok := m.instruments[id]; ok {
|
||||||
|
return f.(metric.Int64ObservableCounter), nil
|
||||||
|
}
|
||||||
i := &aiCounter{name: name, opts: options}
|
i := &aiCounter{name: name, opts: options}
|
||||||
m.instruments = append(m.instruments, i)
|
m.instruments[id] = i
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
|
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
|
||||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
|
||||||
return del.Int64ObservableUpDownCounter(name, options...)
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mtx.Unlock()
|
||||||
|
|
||||||
|
if m.delegate != nil {
|
||||||
|
return m.delegate.Int64ObservableUpDownCounter(name, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
|
||||||
|
id := instID{
|
||||||
|
name: name,
|
||||||
|
kind: reflect.TypeOf((*aiUpDownCounter)(nil)),
|
||||||
|
description: cfg.Description(),
|
||||||
|
unit: cfg.Unit(),
|
||||||
|
}
|
||||||
|
if f, ok := m.instruments[id]; ok {
|
||||||
|
return f.(metric.Int64ObservableUpDownCounter), nil
|
||||||
|
}
|
||||||
i := &aiUpDownCounter{name: name, opts: options}
|
i := &aiUpDownCounter{name: name, opts: options}
|
||||||
m.instruments = append(m.instruments, i)
|
m.instruments[id] = i
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
|
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
|
||||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
|
||||||
return del.Int64ObservableGauge(name, options...)
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mtx.Unlock()
|
||||||
|
|
||||||
|
if m.delegate != nil {
|
||||||
|
return m.delegate.Int64ObservableGauge(name, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := metric.NewInt64ObservableGaugeConfig(options...)
|
||||||
|
id := instID{
|
||||||
|
name: name,
|
||||||
|
kind: reflect.TypeOf((*aiGauge)(nil)),
|
||||||
|
description: cfg.Description(),
|
||||||
|
unit: cfg.Unit(),
|
||||||
|
}
|
||||||
|
if f, ok := m.instruments[id]; ok {
|
||||||
|
return f.(metric.Int64ObservableGauge), nil
|
||||||
|
}
|
||||||
i := &aiGauge{name: name, opts: options}
|
i := &aiGauge{name: name, opts: options}
|
||||||
m.instruments = append(m.instruments, i)
|
m.instruments[id] = i
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
|
func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
|
||||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
|
||||||
return del.Float64Counter(name, options...)
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mtx.Unlock()
|
||||||
|
|
||||||
|
if m.delegate != nil {
|
||||||
|
return m.delegate.Float64Counter(name, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := metric.NewFloat64CounterConfig(options...)
|
||||||
|
id := instID{
|
||||||
|
name: name,
|
||||||
|
kind: reflect.TypeOf((*sfCounter)(nil)),
|
||||||
|
description: cfg.Description(),
|
||||||
|
unit: cfg.Unit(),
|
||||||
|
}
|
||||||
|
if f, ok := m.instruments[id]; ok {
|
||||||
|
return f.(metric.Float64Counter), nil
|
||||||
|
}
|
||||||
i := &sfCounter{name: name, opts: options}
|
i := &sfCounter{name: name, opts: options}
|
||||||
m.instruments = append(m.instruments, i)
|
m.instruments[id] = i
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
|
func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
|
||||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
|
||||||
return del.Float64UpDownCounter(name, options...)
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mtx.Unlock()
|
||||||
|
|
||||||
|
if m.delegate != nil {
|
||||||
|
return m.delegate.Float64UpDownCounter(name, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := metric.NewFloat64UpDownCounterConfig(options...)
|
||||||
|
id := instID{
|
||||||
|
name: name,
|
||||||
|
kind: reflect.TypeOf((*sfUpDownCounter)(nil)),
|
||||||
|
description: cfg.Description(),
|
||||||
|
unit: cfg.Unit(),
|
||||||
|
}
|
||||||
|
if f, ok := m.instruments[id]; ok {
|
||||||
|
return f.(metric.Float64UpDownCounter), nil
|
||||||
|
}
|
||||||
i := &sfUpDownCounter{name: name, opts: options}
|
i := &sfUpDownCounter{name: name, opts: options}
|
||||||
m.instruments = append(m.instruments, i)
|
m.instruments[id] = i
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
|
func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
|
||||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
|
||||||
return del.Float64Histogram(name, options...)
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mtx.Unlock()
|
||||||
|
|
||||||
|
if m.delegate != nil {
|
||||||
|
return m.delegate.Float64Histogram(name, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := metric.NewFloat64HistogramConfig(options...)
|
||||||
|
id := instID{
|
||||||
|
name: name,
|
||||||
|
kind: reflect.TypeOf((*sfHistogram)(nil)),
|
||||||
|
description: cfg.Description(),
|
||||||
|
unit: cfg.Unit(),
|
||||||
|
}
|
||||||
|
if f, ok := m.instruments[id]; ok {
|
||||||
|
return f.(metric.Float64Histogram), nil
|
||||||
|
}
|
||||||
i := &sfHistogram{name: name, opts: options}
|
i := &sfHistogram{name: name, opts: options}
|
||||||
m.instruments = append(m.instruments, i)
|
m.instruments[id] = i
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
|
func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
|
||||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
|
||||||
return del.Float64Gauge(name, options...)
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
m.mtx.Lock()
|
||||||
     defer m.mtx.Unlock()
 
+    if m.delegate != nil {
+        return m.delegate.Float64Gauge(name, options...)
+    }
+
+    cfg := metric.NewFloat64GaugeConfig(options...)
+    id := instID{
+        name:        name,
+        kind:        reflect.TypeOf((*sfGauge)(nil)),
+        description: cfg.Description(),
+        unit:        cfg.Unit(),
+    }
+    if f, ok := m.instruments[id]; ok {
+        return f.(metric.Float64Gauge), nil
+    }
     i := &sfGauge{name: name, opts: options}
-    m.instruments = append(m.instruments, i)
+    m.instruments[id] = i
     return i, nil
 }
 
 func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
-    if del, ok := m.delegate.Load().(metric.Meter); ok {
-        return del.Float64ObservableCounter(name, options...)
-    }
     m.mtx.Lock()
     defer m.mtx.Unlock()
 
+    if m.delegate != nil {
+        return m.delegate.Float64ObservableCounter(name, options...)
+    }
+
+    cfg := metric.NewFloat64ObservableCounterConfig(options...)
+    id := instID{
+        name:        name,
+        kind:        reflect.TypeOf((*afCounter)(nil)),
+        description: cfg.Description(),
+        unit:        cfg.Unit(),
+    }
+    if f, ok := m.instruments[id]; ok {
+        return f.(metric.Float64ObservableCounter), nil
+    }
     i := &afCounter{name: name, opts: options}
-    m.instruments = append(m.instruments, i)
+    m.instruments[id] = i
     return i, nil
 }
 
 func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
-    if del, ok := m.delegate.Load().(metric.Meter); ok {
-        return del.Float64ObservableUpDownCounter(name, options...)
-    }
     m.mtx.Lock()
     defer m.mtx.Unlock()
 
+    if m.delegate != nil {
+        return m.delegate.Float64ObservableUpDownCounter(name, options...)
+    }
+
+    cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
+    id := instID{
+        name:        name,
+        kind:        reflect.TypeOf((*afUpDownCounter)(nil)),
+        description: cfg.Description(),
+        unit:        cfg.Unit(),
+    }
+    if f, ok := m.instruments[id]; ok {
+        return f.(metric.Float64ObservableUpDownCounter), nil
+    }
     i := &afUpDownCounter{name: name, opts: options}
-    m.instruments = append(m.instruments, i)
+    m.instruments[id] = i
     return i, nil
 }
 
 func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
-    if del, ok := m.delegate.Load().(metric.Meter); ok {
-        return del.Float64ObservableGauge(name, options...)
-    }
     m.mtx.Lock()
     defer m.mtx.Unlock()
 
+    if m.delegate != nil {
+        return m.delegate.Float64ObservableGauge(name, options...)
+    }
+
+    cfg := metric.NewFloat64ObservableGaugeConfig(options...)
+    id := instID{
+        name:        name,
+        kind:        reflect.TypeOf((*afGauge)(nil)),
+        description: cfg.Description(),
+        unit:        cfg.Unit(),
+    }
+    if f, ok := m.instruments[id]; ok {
+        return f.(metric.Float64ObservableGauge), nil
+    }
     i := &afGauge{name: name, opts: options}
-    m.instruments = append(m.instruments, i)
+    m.instruments[id] = i
     return i, nil
 }
 
 // RegisterCallback captures the function that will be called during Collect.
 func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
-    if del, ok := m.delegate.Load().(metric.Meter); ok {
-        insts = unwrapInstruments(insts)
-        return del.RegisterCallback(f, insts...)
-    }
-
     m.mtx.Lock()
     defer m.mtx.Unlock()
 
+    if m.delegate != nil {
+        return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...)
+    }
+
     reg := &registration{instruments: insts, function: f}
     e := m.registry.PushBack(reg)
     reg.unreg = func() error {
@@ -307,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable)
     return reg, nil
 }
 
-type wrapped interface {
-    unwrap() metric.Observable
-}
-
 func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
     out := make([]metric.Observable, 0, len(instruments))
 
     for _, inst := range instruments {
-        if in, ok := inst.(wrapped); ok {
+        if in, ok := inst.(unwrapper); ok {
             out = append(out, in.unwrap())
         } else {
             out = append(out, inst)
@@ -335,9 +512,61 @@ type registration struct {
     unregMu sync.Mutex
 }
 
-func (c *registration) setDelegate(m metric.Meter) {
-    insts := unwrapInstruments(c.instruments)
+type unwrapObs struct {
+    embedded.Observer
+    obs metric.Observer
+}
+
+// unwrapFloat64Observable returns an expected metric.Float64Observable after
+// unwrapping the global object.
+func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable {
+    if unwrapped, ok := inst.(unwrapper); ok {
+        if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok {
+            // Note: if the unwrapped object does not
+            // unwrap as an observable for either of the
+            // predicates here, it means an internal bug in
+            // this package. We avoid logging an error in
+            // this case, because the SDK has to try its
+            // own type conversion on the object. The SDK
+            // will see this and be forced to respond with
+            // its own error.
+            //
+            // This code uses a double-nested if statement
+            // to avoid creating a branch that is
+            // impossible to cover.
+            inst = floatObs
+        }
+    }
+    return inst
+}
+
+// unwrapInt64Observable returns an expected metric.Int64Observable after
+// unwrapping the global object.
+func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable {
+    if unwrapped, ok := inst.(unwrapper); ok {
+        if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok {
+            // See the comment in unwrapFloat64Observable().
+            inst = unint
+        }
+    }
+    return inst
+}
+
+func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) {
+    uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...)
+}
+
+func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) {
+    uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...)
+}
+
+func unwrapCallback(f metric.Callback) metric.Callback {
+    return func(ctx context.Context, obs metric.Observer) error {
+        return f(ctx, &unwrapObs{obs: obs})
+    }
+}
+
+func (c *registration) setDelegate(m metric.Meter) {
     c.unregMu.Lock()
     defer c.unregMu.Unlock()
 
@@ -346,9 +575,10 @@ func (c *registration) setDelegate(m metric.Meter) {
         return
     }
 
-    reg, err := m.RegisterCallback(c.function, insts...)
+    reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
     if err != nil {
         GetErrorHandler().Handle(err)
+        return
     }
 
     c.unreg = reg.Unregister
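
For readers skimming the diff above: the delegating meter no longer appends every request to a slice; it now caches placeholder instruments in a map keyed by name, concrete kind, description, and unit, so repeated requests for the same instrument return the same object. Below is a minimal, self-contained Go sketch of that caching idea; the instrumentID, gauge, and cache names are illustrative stand-ins, not the package's real internals.

    package main

    import (
        "fmt"
        "reflect"
    )

    // instrumentID mirrors the identity key used above: name, concrete kind,
    // description, and unit together decide whether two requests describe
    // the "same" instrument.
    type instrumentID struct {
        name        string
        kind        reflect.Type
        description string
        unit        string
    }

    type gauge struct{ name string }

    type cache struct {
        instruments map[instrumentID]any
    }

    // gaugeFor returns a previously created gauge for the same identity, or
    // creates and stores a new one, much like the delegating meter does
    // before a real SDK is installed.
    func (c *cache) gaugeFor(name, description, unit string) *gauge {
        id := instrumentID{
            name:        name,
            kind:        reflect.TypeOf((*gauge)(nil)),
            description: description,
            unit:        unit,
        }
        if g, ok := c.instruments[id]; ok {
            return g.(*gauge)
        }
        g := &gauge{name: name}
        c.instruments[id] = g
        return g
    }

    func main() {
        c := &cache{instruments: map[instrumentID]any{}}
        a := c.gaugeFor("latency", "request latency", "ms")
        b := c.gaugeFor("latency", "request latency", "ms")
        fmt.Println(a == b) // true: the second request hits the cache
    }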
 8  vendor/go.opentelemetry.io/otel/internal/global/trace.go  generated vendored
@@ -87,6 +87,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
         name:    name,
         version: c.InstrumentationVersion(),
         schema:  c.SchemaURL(),
+        attrs:   c.InstrumentationAttributes(),
     }
 
     if p.tracers == nil {
@@ -102,7 +103,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
     return t
 }
 
-type il struct{ name, version, schema string }
+type il struct {
+    name    string
+    version string
+    schema  string
+    attrs   attribute.Set
+}
 
 // tracer is a placeholder for a trace.Tracer.
 //
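
The Tracer change above also threads instrumentation-scope attributes into the placeholder key, so tracers requested with the same name, version, and schema but different attributes are treated as distinct. As a hedged illustration of how that surfaces through the public API (this assumes the trace.WithInstrumentationAttributes option in current otel releases; the tracer name and attribute are made up):

    package main

    import (
        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/attribute"
        "go.opentelemetry.io/otel/trace"
    )

    func main() {
        // Request a named tracer and attach instrumentation-scope attributes.
        // The global provider keys its placeholder tracers on name, version,
        // schema URL, and now these attributes as well.
        tr := otel.Tracer(
            "example/instrumentation",
            trace.WithInstrumentationVersion("0.1.0"),
            trace.WithInstrumentationAttributes(attribute.String("component", "checkout")),
        )
        _ = tr
    }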
 3  vendor/go.opentelemetry.io/otel/internal/rawhelpers.go  generated vendored
@@ -20,7 +20,8 @@ func RawToBool(r uint64) bool {
 }
 
 func Int64ToRaw(i int64) uint64 {
-    return uint64(i)
+    // Assumes original was a valid int64 (overflow not checked).
+    return uint64(i) // nolint: gosec
 }
 
 func RawToInt64(r uint64) int64 {
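
The new comment on Int64ToRaw is about reinterpretation rather than arithmetic: converting an int64 to uint64 and back is lossless under two's complement, so there is no overflow to check. A tiny standalone check, not part of the library, illustrating the round trip:

    package main

    import "fmt"

    func main() {
        // No arithmetic is performed, only a reinterpretation of the same
        // 64 bits, so every int64 value (including negatives) survives the
        // round trip.
        v := int64(-42)
        raw := uint64(v)       // what Int64ToRaw does
        back := int64(raw)     // what RawToInt64 does
        fmt.Println(back == v) // true
    }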
 25  vendor/go.opentelemetry.io/otel/log/DESIGN.md  generated vendored
@@ -26,14 +26,12 @@ This proposed design aims to:
 
 The API is published as a single `go.opentelemetry.io/otel/log` Go module.
 
-The module name is compliant with
-[Artifact Naming](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/bridge-api.md#artifact-naming)
-and the package structure is the same as for Trace API and Metrics API.
+The package structure is similar to Trace API and Metrics API.
 
 The Go module consists of the following packages:
 
 - `go.opentelemetry.io/otel/log`
 - `go.opentelemetry.io/otel/log/embedded`
+- `go.opentelemetry.io/otel/log/logtest`
 - `go.opentelemetry.io/otel/log/noop`
 
 Rejected alternative:
@@ -253,6 +251,23 @@ Rejected alternatives:
 - [Add XYZ method to Logger](#add-xyz-method-to-logger)
 - [Rename KeyValue to Attr](#rename-keyvalue-to-attr)
 
+### Logger.Enabled
+
+The `Enabled` method implements the [`Enabled` operation](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#enabled).
+
+[`Context` associated with the `LogRecord`](https://opentelemetry.io/docs/specs/otel/context/)
+is accepted as a `context.Context` method argument.
+
+Calls to `Enabled` are supposed to be on the hot path and the list of arguments
+can be extended in future. Therefore, in order to reduce the number of heap
+allocations and make it possible to handle new arguments, `Enabled` accepts
+an `EnabledParameters` struct, defined in [logger.go](logger.go), as the second
+method argument.
+
+The `EnabledParameters` getters return values using the `(value, ok)`
+idiom in order to indicate whether the values were actually set by the caller
+or whether they are unspecified.
+
 ### noop package
 
 The `go.opentelemetry.io/otel/log/noop` package provides
@@ -307,7 +322,7 @@ The API needs to evolve orthogonally to `slog`.
 `slog` is not compliant with the [Logs Bridge API](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/).
 and we cannot expect the Go team to make `slog` compliant with it.
 
-The interoperabilty can be achieved using [a log bridge](https://opentelemetry.io/docs/specs/otel/glossary/#log-appender--bridge).
+The interoperability can be achieved using [a log bridge](https://opentelemetry.io/docs/specs/otel/glossary/#log-appender--bridge).
 
 You can read more about OpenTelemetry Logs design on [opentelemetry.io](https://opentelemetry.io/docs/concepts/signals/logs/).
 
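
To make the `(value, ok)` idiom described above concrete, here is a sketch of how a Logger implementation might consume EnabledParameters. The minSeverityCheck helper is hypothetical and relies only on the API surface added later in this diff; treating an unset severity as "emit" follows the indeterminate-state guidance above.

    package main

    import (
        "context"
        "fmt"

        "go.opentelemetry.io/otel/log"
    )

    // minSeverityCheck illustrates the (value, ok) idiom: when the caller did
    // not set a severity, the parameter is treated as unspecified and the
    // logger defaults to enabled.
    func minSeverityCheck(_ context.Context, param log.EnabledParameters, min log.Severity) bool {
        sev, ok := param.Severity()
        if !ok {
            return true // indeterminate: default to emitting
        }
        return sev >= min
    }

    func main() {
        var p log.EnabledParameters
        fmt.Println(minSeverityCheck(context.Background(), p, log.SeverityInfo)) // true (unset)

        p.SetSeverity(log.SeverityDebug)
        fmt.Println(minSeverityCheck(context.Background(), p, log.SeverityInfo)) // false
    }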
 10  vendor/go.opentelemetry.io/otel/log/doc.go  generated vendored
@@ -2,10 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 
 /*
-Package log provides the OpenTelemetry Logs Bridge API.
+Package log provides the OpenTelemetry Logs API.
 
-This package is intended to be a bridge between existing logging libraries and
-OpenTelemetry. It is not designed to be a logging API itself.
+This package is intended to be used by bridges between existing logging
+libraries and OpenTelemetry. Users should not directly use this package as a
+logging library. Instead, install one of the bridges listed in the
+[registry], and use the associated logging library.
 
 # API Implementations
 
@@ -68,5 +70,7 @@ It is strongly recommended that authors only embed
 go.opentelemetry.io/otel/log/noop if they choose this default behavior. That
 implementation is the only one OpenTelemetry authors can guarantee will fully
 implement all the API interfaces when a user updates their API.
 
+[registry]: https://opentelemetry.io/ecosystem/registry/?language=go&component=log-bridge
 */
 package log // import "go.opentelemetry.io/otel/log"
 3  vendor/go.opentelemetry.io/otel/log/keyvalue.go  generated vendored
@@ -76,7 +76,8 @@ func IntValue(v int) Value { return Int64Value(int64(v)) }
 
 // Int64Value returns a [Value] for an int64.
 func Int64Value(v int64) Value {
-    return Value{num: uint64(v), any: KindInt64}
+    // This can be later converted back to int64 (overflow not checked).
+    return Value{num: uint64(v), any: KindInt64} // nolint:gosec
 }
 
 // Float64Value returns a [Value] for a float64.
 36  vendor/go.opentelemetry.io/otel/log/logger.go  generated vendored
@@ -28,29 +28,35 @@ type Logger interface {
     //
     // Implementations of this method need to be safe for a user to call
     // concurrently.
+    //
+    // Notice: Emit is intended to be used by log bridges.
+    // It should not be used for writing instrumentation.
     Emit(ctx context.Context, record Record)
 
     // Enabled returns whether the Logger emits for the given context and
-    // record.
+    // param.
     //
-    // The passed record is likely to be a partial record with only the
-    // bridge-relevant information being provided (e.g a record with only the
+    // The passed param is likely to be a partial record with only the
+    // bridge-relevant information being provided (e.g a param with only the
     // Severity set). If a Logger needs more information than is provided, it
    // is said to be in an indeterminate state (see below).
     //
     // The returned value will be true when the Logger will emit for the
-    // provided context and record, and will be false if the Logger will not
+    // provided context and param, and will be false if the Logger will not
     // emit. The returned value may be true or false in an indeterminate state.
     // An implementation should default to returning true for an indeterminate
     // state, but may return false if valid reasons in particular circumstances
     // exist (e.g. performance, correctness).
     //
-    // The record should not be held by the implementation. A copy should be
+    // The param should not be held by the implementation. A copy should be
     // made if the record needs to be held after the call returns.
     //
     // Implementations of this method need to be safe for a user to call
     // concurrently.
-    Enabled(ctx context.Context, record Record) bool
+    //
+    // Notice: Enabled is intended to be used by log bridges.
+    // It should not be used for writing instrumentation.
+    Enabled(ctx context.Context, param EnabledParameters) bool
 }
 
 // LoggerOption applies configuration options to a [Logger].
@@ -129,3 +135,21 @@ func WithSchemaURL(schemaURL string) LoggerOption {
         return config
     })
 }
+
+// EnabledParameters represents payload for [Logger]'s Enabled method.
+type EnabledParameters struct {
+    severity    Severity
+    severitySet bool
+}
+
+// Severity returns the [Severity] level value, or [SeverityUndefined] if no value was set.
+// The ok result indicates whether the value was set.
+func (r *EnabledParameters) Severity() (value Severity, ok bool) {
+    return r.severity, r.severitySet
+}
+
+// SetSeverity sets the [Severity] level.
+func (r *EnabledParameters) SetSeverity(level Severity) {
+    r.severity = level
+    r.severitySet = true
+}
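
Seen from the bridge side, Enabled is meant as a cheap pre-check before a full Record is built. A hedged sketch follows, using the noop logger from this diff; the emitIfEnabled helper is illustrative, and it assumes the log and log/noop packages at the versions vendored here.

    package main

    import (
        "context"

        "go.opentelemetry.io/otel/log"
        "go.opentelemetry.io/otel/log/noop"
    )

    // emitIfEnabled shows the intended bridge pattern: probe with a partial
    // EnabledParameters first, and only construct and emit the full record
    // when the logger reports it would be used.
    func emitIfEnabled(ctx context.Context, logger log.Logger, body string) {
        var param log.EnabledParameters
        param.SetSeverity(log.SeverityInfo)
        if !logger.Enabled(ctx, param) {
            return // skip the cost of building the record
        }

        var rec log.Record
        rec.SetSeverity(log.SeverityInfo)
        rec.SetBody(log.StringValue(body))
        logger.Emit(ctx, rec)
    }

    func main() {
        // The noop logger's Enabled always returns false, so nothing is emitted.
        emitIfEnabled(context.Background(), noop.Logger{}, "hello")
    }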
 2  vendor/go.opentelemetry.io/otel/log/noop/noop.go  generated vendored
@@ -47,4 +47,4 @@ type Logger struct{ embedded.Logger }
 func (Logger) Emit(context.Context, log.Record) {}
 
 // Enabled returns false. No log records are ever emitted.
-func (Logger) Enabled(context.Context, log.Record) bool { return false }
+func (Logger) Enabled(context.Context, log.EnabledParameters) bool { return false }
 3  vendor/go.opentelemetry.io/otel/log/record.go  generated vendored
@@ -16,6 +16,9 @@ const attributesInlineCount = 5
 
 // Record represents a log record.
 type Record struct {
+    // Ensure forward compatibility by explicitly making this not comparable.
+    noCmp [0]func() //nolint: unused  // This is indeed used.
+
     timestamp         time.Time
     observedTimestamp time.Time
     severity          Severity
 2  vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go  generated vendored
@@ -213,7 +213,7 @@ type Float64Observer interface {
 }
 
 // Float64Callback is a function registered with a Meter that makes
-// observations for a Float64Observerable instrument it is registered with.
+// observations for a Float64Observable instrument it is registered with.
 // Calls to the Float64Observer record measurement values for the
 // Float64Observable.
 //
 2  vendor/go.opentelemetry.io/otel/metric/asyncint64.go  generated vendored
@@ -212,7 +212,7 @@ type Int64Observer interface {
 }
 
 // Int64Callback is a function registered with a Meter that makes observations
-// for an Int64Observerable instrument it is registered with. Calls to the
+// for an Int64Observable instrument it is registered with. Calls to the
 // Int64Observer record measurement values for the Int64Observable.
 //
 // The function needs to complete in a finite amount of time and the deadline
 2  vendor/go.opentelemetry.io/otel/metric/instrument.go  generated vendored
@@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption {
 //
 //	cp := make([]attribute.KeyValue, len(attributes))
 //	copy(cp, attributes)
-//	WithAttributes(attribute.NewSet(cp...))
+//	WithAttributeSet(attribute.NewSet(cp...))
 //
 // [attribute.NewSet] may modify the passed attributes so this will make a copy
 // of attributes before creating a set in order to ensure this function is
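
The corrected doc comment above names WithAttributeSet; a short usage sketch of the pattern it documents follows (the meter, instrument, and attribute names are invented). Precomputing an attribute.Set once and reusing it on the hot path is the point of this option, since WithAttributes would rebuild the set on every call.

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/attribute"
        "go.opentelemetry.io/otel/metric"
    )

    func main() {
        meter := otel.Meter("example/instrumentation")
        counter, err := meter.Int64Counter("requests")
        if err != nil {
            panic(err)
        }

        // Build the attribute set once and reuse it for every measurement.
        attrs := attribute.NewSet(attribute.String("route", "/healthz"))
        counter.Add(context.Background(), 1, metric.WithAttributeSet(attrs))
    }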
 8  vendor/go.opentelemetry.io/otel/renovate.json  generated vendored
@@ -19,6 +19,14 @@
       "matchManagers": ["gomod"],
       "matchDepTypes": ["indirect"],
       "enabled": false
+    },
+    {
+      "matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
+      "groupName": "googleapis"
+    },
+    {
+      "matchPackageNames": ["golang.org/x/**"],
+      "groupName": "golang.org/x"
     }
   ]
 }
 4  vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go  generated vendored
@@ -3,6 +3,8 @@
 
 package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
 
+import "go.opentelemetry.io/otel/attribute"
+
 // Scope represents the instrumentation scope.
 type Scope struct {
     // Name is the name of the instrumentation scope. This should be the
@@ -12,4 +14,6 @@ type Scope struct {
     Version string
     // SchemaURL of the telemetry emitted by the scope.
     SchemaURL string
+    // Attributes of the telemetry emitted by the scope.
+    Attributes attribute.Set
 }
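
Since Scope now carries an attribute.Set, a scope literal can attach attributes alongside name, version, and schema URL. A small illustrative example (all values are made up):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/attribute"
        "go.opentelemetry.io/otel/sdk/instrumentation"
    )

    func main() {
        // Build an instrumentation scope that now includes scope attributes
        // in addition to name, version, and schema URL.
        scope := instrumentation.Scope{
            Name:      "example/instrumentation",
            Version:   "0.1.0",
            SchemaURL: "https://opentelemetry.io/schemas/1.26.0",
            Attributes: attribute.NewSet(
                attribute.String("deployment.environment", "staging"),
            ),
        }
        fmt.Println(scope.Name, scope.Attributes.Len())
    }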
 6  vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md  generated vendored
@@ -122,12 +122,12 @@ The benchmark results can be found in [the prototype](https://github.com/open-te
 
 ## Rejected alternatives
 
-### Represent both LogRecordProcessor and LogRecordExporter as Expoter
+### Represent both LogRecordProcessor and LogRecordExporter as Exporter
 
 Because the [LogRecordProcessor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordprocessor)
 and the [LogRecordProcessor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordexporter)
 abstractions are so similar, there was a proposal to unify them under
-single `Expoter` interface.[^3]
+single `Exporter` interface.[^3]
 
 However, introducing a `Processor` interface makes it easier
 to create custom processor decorators[^4]
@@ -170,7 +170,7 @@ parameters.
 
 [^1]: [A Guide to the Go Garbage Collector](https://tip.golang.org/doc/gc-guide)
 [^2]: [OpenTelemetry Logging](https://opentelemetry.io/docs/specs/otel/logs)
-[^3]: [Conversation on representing LogRecordProcessor and LogRecordExporter via a single Expoter interface](https://github.com/open-telemetry/opentelemetry-go/pull/4954#discussion_r1515050480)
+[^3]: [Conversation on representing LogRecordProcessor and LogRecordExporter via a single Exporter interface](https://github.com/open-telemetry/opentelemetry-go/pull/4954#discussion_r1515050480)
 [^4]: [Introduce Processor](https://github.com/pellared/opentelemetry-go/pull/9)
 [^5]: [Log record mutations do not have to be visible in next registered processors](https://github.com/open-telemetry/opentelemetry-specification/pull/4067)
 [^6]: [Profile-guided optimization](https://go.dev/doc/pgo)
Some files were not shown because too many files have changed in this diff.